From 8f3f582af0f88a1a6aa6abccc1859bdeec05a71b Mon Sep 17 00:00:00 2001 From: jackofallops <11830746+jackofallops@users.noreply.github.com> Date: Tue, 5 Nov 2024 09:58:35 +0000 Subject: [PATCH] `storage` - add support for `storage_account_id` to `azurerm_storage_container` and `azurerm_storage_share` (#27733) * start of dataplane / rm separation * refactor resrouce functions * review comments and feedback * missed review comment * missed review comment * update comment * missed review comments --- .../storage_share_create_poller.go | 52 ++ .../storage/storage_container_data_source.go | 168 ++-- .../storage_container_data_source_test.go | 38 +- .../storage/storage_container_resource.go | 431 +++++++---- .../storage_container_resource_test.go | 473 ++++++++++-- .../storage/storage_share_data_source.go | 154 ++-- .../storage/storage_share_data_source_test.go | 38 +- .../storage/storage_share_resource.go | 616 ++++++++++----- .../storage/storage_share_resource_test.go | 727 ++++++++++++++++-- .../docs/d/storage_container.html.markdown | 17 +- website/docs/d/storage_share.html.markdown | 22 +- .../docs/r/storage_container.html.markdown | 8 +- website/docs/r/storage_share.html.markdown | 24 +- 13 files changed, 2230 insertions(+), 538 deletions(-) create mode 100644 internal/services/storage/custompollers/storage_share_create_poller.go diff --git a/internal/services/storage/custompollers/storage_share_create_poller.go b/internal/services/storage/custompollers/storage_share_create_poller.go new file mode 100644 index 000000000000..2833afe3ef62 --- /dev/null +++ b/internal/services/storage/custompollers/storage_share_create_poller.go @@ -0,0 +1,52 @@ +package custompollers + +import ( + "context" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/fileshares" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" +) + +var _ pollers.PollerType = storageShareCreatePoller{} + +type 
storageShareCreatePoller struct { + id fileshares.ShareId + client *fileshares.FileSharesClient + payload fileshares.FileShare +} + +func NewStorageShareCreatePoller(client *fileshares.FileSharesClient, id fileshares.ShareId, payload fileshares.FileShare) *storageShareCreatePoller { + return &storageShareCreatePoller{ + id: id, + client: client, + payload: payload, + } +} + +func (p storageShareCreatePoller) Poll(ctx context.Context) (*pollers.PollResult, error) { + // Note - Whilst this is an antipattern for the Provider, the API provides no way currently to poll for deletion + // to ensure it's removed. To support rapid delete then re-creation we check for 409's that indicate the resource + // is still being removed. + resp, err := p.client.Create(ctx, p.id, p.payload, fileshares.DefaultCreateOperationOptions()) + if err != nil { + if response.WasConflict(resp.HttpResponse) { + return &pollers.PollResult{ + PollInterval: 5 * time.Second, + Status: pollers.PollingStatusInProgress, + }, nil + } + + return &pollers.PollResult{ + HttpResponse: nil, + PollInterval: 5 * time.Second, + Status: pollers.PollingStatusFailed, + }, err + } + + return &pollers.PollResult{ + PollInterval: 5 * time.Second, + Status: pollers.PollingStatusSucceeded, + }, nil +} diff --git a/internal/services/storage/storage_container_data_source.go b/internal/services/storage/storage_container_data_source.go index b82840f72d9c..3ed6e9ba9f6a 100644 --- a/internal/services/storage/storage_container_data_source.go +++ b/internal/services/storage/storage_container_data_source.go @@ -7,17 +7,20 @@ import ( "fmt" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/client" 
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) func dataSourceStorageContainer() *pluginsdk.Resource { - return &pluginsdk.Resource{ + r := &pluginsdk.Resource{ Read: dataSourceStorageContainerRead, Timeouts: &pluginsdk.ResourceTimeout{ @@ -30,9 +33,10 @@ func dataSourceStorageContainer() *pluginsdk.Resource { Required: true, }, - "storage_account_name": { - Type: pluginsdk.TypeString, - Required: true, + "storage_account_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: commonids.ValidateStorageAccountID, }, "container_access_type": { @@ -52,7 +56,6 @@ func dataSourceStorageContainer() *pluginsdk.Resource { "metadata": MetaDataComputedSchema(), - // TODO: support for ACL's, Legal Holds and Immutability Policies "has_immutability_policy": { Type: pluginsdk.TypeBool, Computed: true, @@ -62,77 +65,144 @@ func dataSourceStorageContainer() *pluginsdk.Resource { Type: pluginsdk.TypeBool, Computed: true, }, + }, + } - "resource_manager_id": { - Type: pluginsdk.TypeString, - Computed: true, + if !features.FivePointOhBeta() { + r.Schema["resource_manager_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Computed: true, + Deprecated: "this property has been deprecated in favour of `id` and will be removed in version 5.0 of the Provider.", + } + + r.Schema["storage_account_name"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + ExactlyOneOf: []string{ + "storage_account_name", + "storage_account_id", }, - }, + } + + r.Schema["storage_account_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: commonids.ValidateStorageAccountID, + 
ExactlyOneOf: []string{ + "storage_account_name", + "storage_account_id", + }, + } } + + return r } func dataSourceStorageContainerRead(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage + containerClient := meta.(*clients.Client).Storage.ResourceManager.BlobContainers subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() containerName := d.Get("name").(string) - accountName := d.Get("storage_account_name").(string) - - account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) - if err != nil { - return fmt.Errorf("retrieving Storage Account %q for Container %q: %v", accountName, containerName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q for Container %q", accountName, containerName) - } - - containersDataPlaneClient, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) - if err != nil { - return fmt.Errorf("building Containers Client: %v", err) - } - // Determine the blob endpoint, so we can build a data plane ID - endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeBlob) - if err != nil { - return fmt.Errorf("determining Blob endpoint: %v", err) + if !features.FivePointOhBeta() { + storageClient := meta.(*clients.Client).Storage + accountName := d.Get("storage_account_name").(string) + if accountName != "" { + account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) + if err != nil { + return fmt.Errorf("retrieving Storage Account %q for Container %q: %v", accountName, containerName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q for Container %q", accountName, containerName) + } + + containersDataPlaneClient, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err 
!= nil { + return fmt.Errorf("building Containers Client: %v", err) + } + + // Determine the blob endpoint, so we can build a data plane ID + endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeBlob) + if err != nil { + return fmt.Errorf("determining Blob endpoint: %v", err) + } + + // Parse the blob endpoint as a data plane account ID + accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := containers.NewContainerID(*accountId, containerName) + + props, err := containersDataPlaneClient.Get(ctx, containerName) + if err != nil { + return fmt.Errorf("retrieving %s: %v", id, err) + } + if props == nil { + return fmt.Errorf("retrieving %s: result was nil", id) + } + + d.SetId(id.ID()) + + d.Set("name", containerName) + d.Set("storage_account_name", accountName) + d.Set("container_access_type", flattenStorageContainerAccessLevel(props.AccessLevel)) + + d.Set("default_encryption_scope", props.DefaultEncryptionScope) + d.Set("encryption_scope_override_enabled", !props.EncryptionScopeOverrideDisabled) + + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) + } + + d.Set("has_immutability_policy", props.HasImmutabilityPolicy) + d.Set("has_legal_hold", props.HasLegalHold) + + resourceManagerId := commonids.NewStorageContainerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, account.StorageAccountId.StorageAccountName, containerName) + d.Set("resource_manager_id", resourceManagerId.ID()) + + return nil + } } - // Parse the blob endpoint as a data plane account ID - accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) + accountId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { - return fmt.Errorf("parsing Account ID: %v", err) + return err } - id := 
containers.NewContainerID(*accountId, containerName) + id := commonids.NewStorageContainerID(accountId.SubscriptionId, accountId.ResourceGroupName, accountId.StorageAccountName, containerName) - props, err := containersDataPlaneClient.Get(ctx, containerName) + container, err := containerClient.Get(ctx, id) if err != nil { return fmt.Errorf("retrieving %s: %v", id, err) } - if props == nil { - return fmt.Errorf("retrieving %s: result was nil", id) - } - d.SetId(id.ID()) + if model := container.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("name", containerName) + d.Set("container_access_type", containerAccessTypeConversionMap[string(pointer.From(props.PublicAccess))]) - d.Set("name", containerName) - d.Set("storage_account_name", accountName) - d.Set("container_access_type", flattenStorageContainerAccessLevel(props.AccessLevel)) + d.Set("default_encryption_scope", props.DefaultEncryptionScope) + d.Set("encryption_scope_override_enabled", !pointer.From(props.DenyEncryptionScopeOverride)) - d.Set("default_encryption_scope", props.DefaultEncryptionScope) - d.Set("encryption_scope_override_enabled", !props.EncryptionScopeOverrideDisabled) + if err = d.Set("metadata", FlattenMetaData(pointer.From(props.Metadata))); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) + } - if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %v", err) - } + d.Set("has_immutability_policy", props.HasImmutabilityPolicy) + d.Set("has_legal_hold", props.HasLegalHold) - d.Set("has_immutability_policy", props.HasImmutabilityPolicy) - d.Set("has_legal_hold", props.HasLegalHold) + if !features.FivePointOhBeta() { + d.Set("resource_manager_id", id.ID()) + } + } + } - resourceManagerId := commonids.NewStorageContainerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, account.StorageAccountId.StorageAccountName, containerName) - d.Set("resource_manager_id", 
resourceManagerId.ID()) + d.SetId(id.ID()) return nil } diff --git a/internal/services/storage/storage_container_data_source_test.go b/internal/services/storage/storage_container_data_source_test.go index b878446c32c6..8b40f8b0ea35 100644 --- a/internal/services/storage/storage_container_data_source_test.go +++ b/internal/services/storage/storage_container_data_source_test.go @@ -9,11 +9,12 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" ) type StorageContainerDataSource struct{} -func TestAccDataSourceStorageContainer_basic(t *testing.T) { +func TestAccStorageContainerDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_storage_container", "test") data.DataSourceTest(t, []acceptance.TestStep{ @@ -34,6 +35,41 @@ func TestAccDataSourceStorageContainer_basic(t *testing.T) { func (d StorageContainerDataSource) basic(data acceptance.TestData) string { return fmt.Sprintf(` + +%s + +data "azurerm_storage_container" "test" { + name = azurerm_storage_container.test.name + storage_account_id = azurerm_storage_account.test.id +} +`, StorageContainerResource{}.complete(data)) +} + +func TestAccStorageContainerDataSource_basicDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "data.azurerm_storage_container", "test") + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: StorageContainerDataSource{}.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("container_access_type").HasValue("private"), + check.That(data.ResourceName).Key("has_immutability_policy").HasValue("false"), + check.That(data.ResourceName).Key("default_encryption_scope").HasValue(fmt.Sprintf("acctestEScontainer%d", data.RandomInteger)), + 
check.That(data.ResourceName).Key("encryption_scope_override_enabled").HasValue("true"), + check.That(data.ResourceName).Key("metadata.%").HasValue("2"), + check.That(data.ResourceName).Key("metadata.k1").HasValue("v1"), + check.That(data.ResourceName).Key("metadata.k2").HasValue("v2"), + ), + }, + }) +} + +func (d StorageContainerDataSource) basicDeprecated(data acceptance.TestData) string { + return fmt.Sprintf(` provider "azurerm" { features {} } diff --git a/internal/services/storage/storage_container_resource.go b/internal/services/storage/storage_container_resource.go index 4515f646b3ac..eba3cb33c103 100644 --- a/internal/services/storage/storage_container_resource.go +++ b/internal/services/storage/storage_container_resource.go @@ -6,11 +6,16 @@ package storage import ( "fmt" "log" + "strings" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/blobcontainers" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/client" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" @@ -22,15 +27,34 @@ import ( "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) +var containerAccessTypeConversionMap = map[string]string{ + "blob": "Blob", + "container": "Container", + "private": "None", + "Blob": "blob", + "Container": "container", + "None": "private", + "": "private", +} + func resourceStorageContainer() *pluginsdk.Resource { - return &pluginsdk.Resource{ + r := &pluginsdk.Resource{ Create: 
resourceStorageContainerCreate, Read: resourceStorageContainerRead, Delete: resourceStorageContainerDelete, Update: resourceStorageContainerUpdate, Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { - _, err := containers.ParseContainerID(id, storageDomainSuffix) + if !features.FivePointOhBeta() { + if strings.HasPrefix(id, "/subscriptions/") { + _, err := commonids.ParseStorageContainerID(id) + return err + } + _, err := containers.ParseContainerID(id, storageDomainSuffix) + return err + } + + _, err := commonids.ParseStorageContainerID(id) return err }), @@ -54,11 +78,11 @@ func resourceStorageContainer() *pluginsdk.Resource { ValidateFunc: validate.StorageContainerName, }, - "storage_account_name": { + "storage_account_id": { Type: pluginsdk.TypeString, Required: true, ForceNew: true, - ValidateFunc: validate.StorageAccountName, + ValidateFunc: commonids.ValidateStorageAccountID, }, "container_access_type": { @@ -90,7 +114,6 @@ func resourceStorageContainer() *pluginsdk.Resource { "metadata": MetaDataComputedSchema(), - // TODO: support for ACL's, Legal Holds and Immutability Policies "has_immutability_policy": { Type: pluginsdk.TypeBool, Computed: true, @@ -100,80 +123,145 @@ func resourceStorageContainer() *pluginsdk.Resource { Type: pluginsdk.TypeBool, Computed: true, }, - - "resource_manager_id": { - Type: pluginsdk.TypeString, - Computed: true, - }, }, } + + if !features.FivePointOhBeta() { + r.Schema["storage_account_name"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validate.StorageAccountName, + ExactlyOneOf: []string{"storage_account_id", "storage_account_name"}, + Deprecated: "the `storage_account_name` property has been deprecated in favour of `storage_account_id` and will be removed in version 5.0 of the Provider.", + } + + r.Schema["storage_account_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + 
ValidateFunc: commonids.ValidateStorageAccountID, + ExactlyOneOf: []string{"storage_account_id", "storage_account_name"}, + } + + r.Schema["resource_manager_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Computed: true, + Deprecated: "this property has been deprecated in favour of `id` and will be removed in version 5.0 of the Provider.", + } + } + + return r } func resourceStorageContainerCreate(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage subscriptionId := meta.(*clients.Client).Account.SubscriptionId + containerClient := meta.(*clients.Client).Storage.ResourceManager.BlobContainers ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() containerName := d.Get("name").(string) - accountName := d.Get("storage_account_name").(string) accessLevelRaw := d.Get("container_access_type").(string) accessLevel := expandStorageContainerAccessLevel(accessLevelRaw) - metaDataRaw := d.Get("metadata").(map[string]interface{}) metaData := ExpandMetaData(metaDataRaw) - account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %v", accountName, containerName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q", accountName) - } - - containersDataPlaneClient, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) - if err != nil { - return fmt.Errorf("building storage client: %v", err) - } - - // Determine the blob endpoint, so we can build a data plane ID - endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeBlob) - if err != nil { - return fmt.Errorf("determining Blob endpoint: %v", err) + if !features.FivePointOhBeta() { + storageClient := meta.(*clients.Client).Storage + if accountName := d.Get("storage_account_name").(string); accountName != "" { + account, err := 
storageClient.FindAccount(ctx, subscriptionId, accountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Container %q: %v", accountName, containerName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", accountName) + } + + containersDataPlaneClient, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return fmt.Errorf("building storage client: %v", err) + } + + // Determine the blob endpoint, so we can build a data plane ID + endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeBlob) + if err != nil { + return fmt.Errorf("determining Blob endpoint: %v", err) + } + + // Parse the blob endpoint as a data plane account ID + accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := containers.NewContainerID(*accountId, containerName) + + exists, err := containersDataPlaneClient.Exists(ctx, containerName) + if err != nil { + return fmt.Errorf("checking for existing %s: %v", id, err) + } + if exists != nil && *exists { + return tf.ImportAsExistsError("azurerm_storage_container", id.ID()) + } + + log.Printf("[INFO] Creating %s", id) + input := containers.CreateInput{ + AccessLevel: accessLevel, + MetaData: metaData, + } + + if encryptionScope := d.Get("default_encryption_scope"); encryptionScope.(string) != "" { + input.DefaultEncryptionScope = encryptionScope.(string) + input.EncryptionScopeOverrideDisabled = false + + if encryptionScopeOverrideEnabled := d.Get("encryption_scope_override_enabled"); !encryptionScopeOverrideEnabled.(bool) { + input.EncryptionScopeOverrideDisabled = true + } + } + + if err = containersDataPlaneClient.Create(ctx, containerName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) + } + d.SetId(id.ID()) + + return resourceStorageContainerRead(d, meta) + } } - // Parse 
the blob endpoint as a data plane account ID - accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) + accountId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { - return fmt.Errorf("parsing Account ID: %v", err) + return err } - id := containers.NewContainerID(*accountId, containerName) + id := commonids.NewStorageContainerID(subscriptionId, accountId.ResourceGroupName, accountId.StorageAccountName, containerName) - exists, err := containersDataPlaneClient.Exists(ctx, containerName) + existing, err := containerClient.Get(ctx, id) if err != nil { - return fmt.Errorf("checking for existing %s: %v", id, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %q: %v", id, err) + } } - if exists != nil && *exists { + if !response.WasNotFound(existing.HttpResponse) { return tf.ImportAsExistsError("azurerm_storage_container", id.ID()) } - log.Printf("[INFO] Creating %s", id) - input := containers.CreateInput{ - AccessLevel: accessLevel, - MetaData: metaData, + payload := blobcontainers.BlobContainer{ + Properties: &blobcontainers.ContainerProperties{ + PublicAccess: pointer.To(blobcontainers.PublicAccess(containerAccessTypeConversionMap[accessLevelRaw])), + Metadata: pointer.To(metaData), + }, } if encryptionScope := d.Get("default_encryption_scope"); encryptionScope.(string) != "" { - input.DefaultEncryptionScope = encryptionScope.(string) - input.EncryptionScopeOverrideDisabled = false + payload.Properties.DefaultEncryptionScope = pointer.To(encryptionScope.(string)) + payload.Properties.DenyEncryptionScopeOverride = pointer.To(false) if encryptionScopeOverrideEnabled := d.Get("encryption_scope_override_enabled"); !encryptionScopeOverrideEnabled.(bool) { - input.EncryptionScopeOverrideDisabled = true + payload.Properties.DenyEncryptionScopeOverride = pointer.To(true) } } - if err = containersDataPlaneClient.Create(ctx, containerName, input); err != 
nil { + if _, err = containerClient.Create(ctx, id, payload); err != nil { return fmt.Errorf("creating %s: %v", id, err) } @@ -183,147 +271,226 @@ func resourceStorageContainerCreate(d *pluginsdk.ResourceData, meta interface{}) } func resourceStorageContainerUpdate(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage subscriptionId := meta.(*clients.Client).Account.SubscriptionId + containerClient := meta.(*clients.Client).Storage.ResourceManager.BlobContainers ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := containers.ParseContainerID(d.Id(), storageClient.StorageDomainSuffix) - if err != nil { - return err - } + if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") { + storageClient := meta.(*clients.Client).Storage + id, err := containers.ParseContainerID(d.Id(), storageClient.StorageDomainSuffix) + if err != nil { + return err + } - account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountId.AccountName, id.ContainerName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) - } + account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountId.AccountName, id.ContainerName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) + } + if d.HasChange("container_access_type") { + log.Printf("[DEBUG] Updating Access Level for %s...", id) - if d.HasChange("container_access_type") { - log.Printf("[DEBUG] Updating Access Level for %s...", id) + // Updating metadata does not work with AAD authentication, returns a cryptic 404 + client, err := storageClient.ContainersDataPlaneClient(ctx, 
*account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building Containers Client: %v", err) + } - // Updating metadata does not work with AAD authentication, returns a cryptic 404 - client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) - if err != nil { - return fmt.Errorf("building Containers Client: %v", err) + accessLevelRaw := d.Get("container_access_type").(string) + accessLevel := expandStorageContainerAccessLevel(accessLevelRaw) + + if err = client.UpdateAccessLevel(ctx, id.ContainerName, accessLevel); err != nil { + return fmt.Errorf("updating Access Level for %s: %v", id, err) + } + + log.Printf("[DEBUG] Updated Access Level for %s", id) } - accessLevelRaw := d.Get("container_access_type").(string) - accessLevel := expandStorageContainerAccessLevel(accessLevelRaw) + if d.HasChange("metadata") { + log.Printf("[DEBUG] Updating Metadata for %s...", id) + + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return fmt.Errorf("building Containers Client: %v", err) + } + + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) + + if err = client.UpdateMetaData(ctx, id.ContainerName, metaData); err != nil { + return fmt.Errorf("updating Metadata for %s: %v", id, err) + } - if err = client.UpdateAccessLevel(ctx, id.ContainerName, accessLevel); err != nil { - return fmt.Errorf("updating Access Level for %s: %v", id, err) + log.Printf("[DEBUG] Updated Metadata for %s", id) } - log.Printf("[DEBUG] Updated Access Level for %s", id) + return resourceStorageContainerRead(d, meta) } - if d.HasChange("metadata") { - log.Printf("[DEBUG] Updating Metadata for %s...", id) + id, err := commonids.ParseStorageContainerID(d.Id()) + if err != nil { + return err + } - client, err := 
storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) - if err != nil { - return fmt.Errorf("building Containers Client: %v", err) - } + update := blobcontainers.BlobContainer{ + Properties: &blobcontainers.ContainerProperties{}, + } - metaDataRaw := d.Get("metadata").(map[string]interface{}) - metaData := ExpandMetaData(metaDataRaw) + if d.HasChange("container_access_type") { + accessLevelRaw := d.Get("container_access_type").(string) + update.Properties.PublicAccess = pointer.To(blobcontainers.PublicAccess(containerAccessTypeConversionMap[accessLevelRaw])) + } - if err = client.UpdateMetaData(ctx, id.ContainerName, metaData); err != nil { - return fmt.Errorf("updating Metadata for %s: %v", id, err) - } + if d.HasChange("metadata") { + metaDataRaw := d.Get("metadata").(map[string]interface{}) + update.Properties.Metadata = pointer.To(ExpandMetaData(metaDataRaw)) + } - log.Printf("[DEBUG] Updated Metadata for %s", id) + if _, err := containerClient.Update(ctx, *id, update); err != nil { + return fmt.Errorf("updating %s: %v", id, err) } return resourceStorageContainerRead(d, meta) } func resourceStorageContainerRead(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage + containerClient := meta.(*clients.Client).Storage.ResourceManager.BlobContainers subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := containers.ParseContainerID(d.Id(), storageClient.StorageDomainSuffix) - if err != nil { - return err - } + if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") { + storageClient := meta.(*clients.Client).Storage + id, err := containers.ParseContainerID(d.Id(), storageClient.StorageDomainSuffix) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + if 
err != nil { + return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountId.AccountName, id.ContainerName, err) + } + if account == nil { + log.Printf("[DEBUG] Unable to locate Account %q for Storage Container %q - assuming removed & removing from state", id.AccountId.AccountName, id.ContainerName) + d.SetId("") + return nil + } + + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return fmt.Errorf("building Containers Client: %v", err) + } + + props, err := client.Get(ctx, id.ContainerName) + if err != nil { + return fmt.Errorf("retrieving %s: %v", id, err) + } + if props == nil { + log.Printf("[DEBUG] Container %q was not found in %s - assuming removed & removing from state", id.ContainerName, id.AccountId) + d.SetId("") + return nil + } + + d.Set("name", id.ContainerName) + d.Set("storage_account_name", id.AccountId.AccountName) + + d.Set("container_access_type", flattenStorageContainerAccessLevel(props.AccessLevel)) + + d.Set("default_encryption_scope", props.DefaultEncryptionScope) + d.Set("encryption_scope_override_enabled", !props.EncryptionScopeOverrideDisabled) + + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) + } + + d.Set("has_immutability_policy", props.HasImmutabilityPolicy) + d.Set("has_legal_hold", props.HasLegalHold) + + resourceManagerId := commonids.NewStorageContainerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, id.AccountId.AccountName, id.ContainerName) + d.Set("resource_manager_id", resourceManagerId.ID()) - account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountId.AccountName, id.ContainerName, err) - } - if account == nil { - log.Printf("[DEBUG] Unable to locate Account %q for 
Storage Container %q - assuming removed & removing from state", id.AccountId.AccountName, id.ContainerName) - d.SetId("") return nil } - client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + id, err := commonids.ParseStorageContainerID(d.Id()) if err != nil { - return fmt.Errorf("building Containers Client: %v", err) + return err } - props, err := client.Get(ctx, id.ContainerName) + existing, err := containerClient.Get(ctx, *id) if err != nil { - return fmt.Errorf("retrieving %s: %v", id, err) - } - if props == nil { - log.Printf("[DEBUG] Container %q was not found in %s - assuming removed & removing from state", id.ContainerName, id.AccountId) - d.SetId("") - return nil + if response.WasNotFound(existing.HttpResponse) { + log.Printf("[DEBUG] %q was not found, removing from state", *id) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %v", *id, err) } - d.Set("name", id.ContainerName) - d.Set("storage_account_name", id.AccountId.AccountName) - - d.Set("container_access_type", flattenStorageContainerAccessLevel(props.AccessLevel)) - - d.Set("default_encryption_scope", props.DefaultEncryptionScope) - d.Set("encryption_scope_override_enabled", !props.EncryptionScopeOverrideDisabled) - - if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %v", err) + if model := existing.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("name", id.ContainerName) + d.Set("storage_account_id", commonids.NewStorageAccountID(id.SubscriptionId, id.ResourceGroupName, id.StorageAccountName).ID()) + d.Set("container_access_type", containerAccessTypeConversionMap[string(pointer.From(props.PublicAccess))]) + d.Set("default_encryption_scope", props.DefaultEncryptionScope) + d.Set("encryption_scope_override_enabled", !pointer.From(props.DenyEncryptionScopeOverride)) + d.Set("metadata", 
FlattenMetaData(pointer.From(props.Metadata))) + + d.Set("has_immutability_policy", props.HasImmutabilityPolicy) + d.Set("has_legal_hold", props.HasLegalHold) + if !features.FivePointOhBeta() { + d.Set("resource_manager_id", id.ID()) + } + } } - d.Set("has_immutability_policy", props.HasImmutabilityPolicy) - d.Set("has_legal_hold", props.HasLegalHold) - - resourceManagerId := commonids.NewStorageContainerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, id.AccountId.AccountName, id.ContainerName) - d.Set("resource_manager_id", resourceManagerId.ID()) - return nil } func resourceStorageContainerDelete(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage subscriptionId := meta.(*clients.Client).Account.SubscriptionId + containerClient := meta.(*clients.Client).Storage.ResourceManager.BlobContainers ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := containers.ParseContainerID(d.Id(), storageClient.StorageDomainSuffix) - if err != nil { - return err - } + if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") { + storageClient := meta.(*clients.Client).Storage - account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountId.AccountName, id.ContainerName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) + id, err := containers.ParseContainerID(d.Id(), storageClient.StorageDomainSuffix) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountId.AccountName, id.ContainerName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", 
id.AccountId.AccountName) + } + + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return fmt.Errorf("building Containers Client: %v", err) + } + + if err = client.Delete(ctx, id.ContainerName); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) + } + + return nil } - client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + id, err := commonids.ParseStorageContainerID(d.Id()) if err != nil { - return fmt.Errorf("building Containers Client: %v", err) + return err } - if err = client.Delete(ctx, id.ContainerName); err != nil { - return fmt.Errorf("deleting %s: %v", id, err) + if _, err := containerClient.Delete(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %v", d.Id(), err) } return nil diff --git a/internal/services/storage/storage_container_resource_test.go b/internal/services/storage/storage_container_resource_test.go index 072c351c40d4..d45d5ecc51da 100644 --- a/internal/services/storage/storage_container_resource_test.go +++ b/internal/services/storage/storage_container_resource_test.go @@ -9,17 +9,38 @@ import ( "strings" "testing" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/terraform-provider-azurerm/utils" "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) type StorageContainerResource struct{} +func 
TestAccStorageContainer_basicDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageContainer_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} @@ -35,7 +56,52 @@ func TestAccStorageContainer_basic(t *testing.T) { }) } +func TestAccStorageContainer_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccStorageContainer_deleteAndRecreateDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.template(data), + }, + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageContainer_deleteAndRecreate(t *testing.T) { + t.Skip("skipping until https://github.com/Azure/azure-rest-api-specs/issues/30456 is resolved") data := acceptance.BuildTestData(t, 
"azurerm_storage_container", "test") r := StorageContainerResource{} @@ -60,6 +126,25 @@ func TestAccStorageContainer_deleteAndRecreate(t *testing.T) { }) } +func TestAccStorageContainer_basicAzureADAuthDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicAzureADAuthDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageContainer_basicAzureADAuth(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} @@ -75,6 +160,25 @@ func TestAccStorageContainer_basicAzureADAuth(t *testing.T) { }) } +func TestAccStorageContainer_requiresImportDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImportDeprecated), + }) +} + func TestAccStorageContainer_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} @@ -90,6 +194,33 @@ func TestAccStorageContainer_requiresImport(t *testing.T) { }) } +func TestAccStorageContainer_updateDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, 
[]acceptance.TestStep{ + { + Config: r.updateDeprecated(data, "private", "yes"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("container_access_type").HasValue("private"), + ), + }, + { + Config: r.updateDeprecated(data, "container", "no"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("container_access_type").HasValue("container"), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageContainer_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} @@ -113,6 +244,25 @@ func TestAccStorageContainer_update(t *testing.T) { }) } +func TestAccStorageContainer_encryptionScopeDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.encryptionScopeDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageContainer_encryptionScope(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} @@ -128,6 +278,39 @@ func TestAccStorageContainer_encryptionScope(t *testing.T) { }) } +func TestAccStorageContainer_metaDataDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") + r := StorageContainerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.metaDataDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + 
data.ImportStep(), + { + Config: r.metaDataUpdatedDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.metaDataEmptyDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageContainer_metaData(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} @@ -157,28 +340,40 @@ func TestAccStorageContainer_metaData(t *testing.T) { }) } -func TestAccStorageContainer_disappears(t *testing.T) { +func TestAccStorageContainer_rootDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} data.ResourceTest(t, r, []acceptance.TestStep{ - data.DisappearsStep(acceptance.DisappearsStepData{ - Config: r.basic, - TestResource: r, - }), + { + Config: r.rootDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue("$root"), + ), + }, + data.ImportStep(), }) } -func TestAccStorageContainer_root(t *testing.T) { +func TestAccStorageContainer_webDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as test is not valid in 5.0") + } + data := acceptance.BuildTestData(t, "azurerm_storage_container", "test") r := StorageContainerResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.root(data), + Config: r.webDeprecated(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("name").HasValue("$root"), + check.That(data.ResourceName).Key("name").HasValue("$web"), ), }, data.ImportStep(), @@ -202,56 +397,56 @@ func 
TestAccStorageContainer_web(t *testing.T) { } func (r StorageContainerResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := containers.ParseContainerID(state.ID, client.Storage.StorageDomainSuffix) - if err != nil { - return nil, err - } + if !features.FivePointOhBeta() && !strings.HasPrefix(state.ID, "/subscriptions") { + id, err := containers.ParseContainerID(state.ID, client.Storage.StorageDomainSuffix) + if err != nil { + return nil, err + } - account, err := client.Storage.FindAccount(ctx, client.Account.SubscriptionId, id.AccountId.AccountName) - if err != nil { - return nil, fmt.Errorf("retrieving Account %q for Container %q: %+v", id.AccountId.AccountName, id.ContainerName, err) - } - if account == nil { - return nil, fmt.Errorf("unable to locate Storage Account %q", id.AccountId.AccountName) - } + account, err := client.Storage.FindAccount(ctx, client.Account.SubscriptionId, id.AccountId.AccountName) + if err != nil { + return nil, fmt.Errorf("retrieving Account %q for Container %q: %+v", id.AccountId.AccountName, id.ContainerName, err) + } + if account == nil { + return nil, fmt.Errorf("unable to locate Storage Account %q", id.AccountId.AccountName) + } - containersClient, err := client.Storage.ContainersDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) - if err != nil { - return nil, fmt.Errorf("building Containers Client: %+v", err) - } + containersClient, err := client.Storage.ContainersDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return nil, fmt.Errorf("building Containers Client: %+v", err) + } - prop, err := containersClient.Get(ctx, id.ContainerName) - if err != nil { - return nil, fmt.Errorf("retrieving Container %q in %s: %+v", id.ContainerName, id.AccountId, err) - } + prop, err := containersClient.Get(ctx, id.ContainerName) + if err != nil { + return nil, 
fmt.Errorf("retrieving Container %q in %s: %+v", id.ContainerName, id.AccountId, err) + } - return utils.Bool(prop != nil), nil -} + return pointer.To(prop != nil), nil + } -func (r StorageContainerResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := containers.ParseContainerID(state.ID, client.Storage.StorageDomainSuffix) + id, err := commonids.ParseStorageContainerID(state.ID) if err != nil { return nil, err } - account, err := client.Storage.FindAccount(ctx, client.Account.SubscriptionId, id.AccountId.AccountName) + existing, err := client.Storage.ResourceManager.BlobContainers.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Account %q for Container %q: %+v", id.AccountId.AccountName, id.ContainerName, err) - } - if account == nil { - return nil, fmt.Errorf("unable to locate Storage Account %q", id.AccountId.AccountName) + return nil, fmt.Errorf("retrieving %s: %+v", id, err) } - containersClient, err := client.Storage.ContainersDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) - if err != nil { - return nil, fmt.Errorf("building Containers Client: %+v", err) - } + return pointer.To(existing.Model != nil), nil +} - if err = containersClient.Delete(ctx, id.ContainerName); err != nil { - return nil, fmt.Errorf("deleting Container %q in %s: %+v", id.ContainerName, id.AccountId, err) - } +func (r StorageContainerResource) basicDeprecated(data acceptance.TestData) string { + return fmt.Sprintf(` +%s - return utils.Bool(true), nil +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_name = azurerm_storage_account.test.name + container_access_type = "private" +} +`, r.template(data)) } func (r StorageContainerResource) basic(data acceptance.TestData) string { @@ -261,13 +456,39 @@ func (r StorageContainerResource) basic(data acceptance.TestData) string { resource "azurerm_storage_container" "test" { name = 
"vhds" - storage_account_name = azurerm_storage_account.test.name + storage_account_id = azurerm_storage_account.test.id container_access_type = "private" } `, template) } -func (r StorageContainerResource) basicAzureADAuth(data acceptance.TestData) string { +func (r StorageContainerResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%[1]s + +resource "azurerm_storage_encryption_scope" "test" { + name = "acctestEScontainer%[3]d" + storage_account_id = azurerm_storage_account.test.id + source = "Microsoft.Storage" +} + +resource "azurerm_storage_container" "test" { + name = "acctest-container-%[2]s" + storage_account_id = azurerm_storage_account.test.id + container_access_type = "private" + default_encryption_scope = azurerm_storage_encryption_scope.test.name + encryption_scope_override_enabled = true + + metadata = { + k1 = "v1" + k2 = "v2" + } +} +`, template, data.RandomString, data.RandomInteger) +} + +func (r StorageContainerResource) basicAzureADAuthDeprecated(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { storage_use_azuread = true @@ -299,6 +520,50 @@ resource "azurerm_storage_container" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString) } +func (r StorageContainerResource) basicAzureADAuth(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + storage_use_azuread = true + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" + + tags = { + environment = "staging" + } +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_id = azurerm_storage_account.test.id + container_access_type = "private" +} +`, 
data.RandomInteger, data.Locations.Primary, data.RandomString) +} + +func (r StorageContainerResource) requiresImportDeprecated(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_container" "import" { + name = azurerm_storage_container.test.name + storage_account_name = azurerm_storage_container.test.storage_account_name + container_access_type = azurerm_storage_container.test.container_access_type +} +`, r.basicDeprecated(data)) +} + func (r StorageContainerResource) requiresImport(data acceptance.TestData) string { template := r.basic(data) return fmt.Sprintf(` @@ -306,13 +571,13 @@ func (r StorageContainerResource) requiresImport(data acceptance.TestData) strin resource "azurerm_storage_container" "import" { name = azurerm_storage_container.test.name - storage_account_name = azurerm_storage_container.test.storage_account_name + storage_account_id = azurerm_storage_container.test.storage_account_id container_access_type = azurerm_storage_container.test.container_access_type } `, template) } -func (r StorageContainerResource) update(data acceptance.TestData, accessType, metadataVal string) string { +func (r StorageContainerResource) updateDeprecated(data acceptance.TestData, accessType, metadataVal string) string { template := r.template(data) return fmt.Sprintf(` %s @@ -329,7 +594,24 @@ resource "azurerm_storage_container" "test" { `, template, accessType, metadataVal) } -func (r StorageContainerResource) encryptionScope(data acceptance.TestData) string { +func (r StorageContainerResource) update(data acceptance.TestData, accessType, metadataVal string) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_id = azurerm_storage_account.test.id + container_access_type = "%s" + metadata = { + foo = "bar" + test = "%s" + } +} +`, template, accessType, metadataVal) +} + +func (r StorageContainerResource) encryptionScopeDeprecated(data 
acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %[1]s @@ -350,11 +632,66 @@ resource "azurerm_storage_container" "test" { `, template, data.RandomInteger) } +func (r StorageContainerResource) encryptionScope(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%[1]s + +resource "azurerm_storage_encryption_scope" "test" { + name = "acctestEScontainer%[2]d" + storage_account_id = azurerm_storage_account.test.id + source = "Microsoft.Storage" +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_id = azurerm_storage_account.test.id + container_access_type = "private" + + default_encryption_scope = azurerm_storage_encryption_scope.test.name +} +`, template, data.RandomInteger) +} + +func (r StorageContainerResource) metaDataDeprecated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_name = azurerm_storage_account.test.name + container_access_type = "private" + + metadata = { + hello = "world" + } +} +`, template) +} + func (r StorageContainerResource) metaData(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_id = azurerm_storage_account.test.id + container_access_type = "private" + + metadata = { + hello = "world" + } +} +`, template) +} + +func (r StorageContainerResource) metaDataUpdatedDeprecated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + resource "azurerm_storage_container" "test" { name = "vhds" storage_account_name = azurerm_storage_account.test.name @@ -362,6 +699,7 @@ resource "azurerm_storage_container" "test" { metadata = { hello = "world" + panda = "pops" } } `, template) @@ -374,7 +712,7 @@ func (r StorageContainerResource) metaDataUpdated(data acceptance.TestData) 
stri resource "azurerm_storage_container" "test" { name = "vhds" - storage_account_name = azurerm_storage_account.test.name + storage_account_id = azurerm_storage_account.test.id container_access_type = "private" metadata = { @@ -385,7 +723,7 @@ resource "azurerm_storage_container" "test" { `, template) } -func (r StorageContainerResource) metaDataEmpty(data acceptance.TestData) string { +func (r StorageContainerResource) metaDataEmptyDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -400,16 +738,43 @@ resource "azurerm_storage_container" "test" { `, template) } -func (r StorageContainerResource) root(data acceptance.TestData) string { +func (r StorageContainerResource) metaDataEmpty(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_id = azurerm_storage_account.test.id + container_access_type = "private" + + metadata = {} +} +`, template) +} + +func (r StorageContainerResource) rootDeprecated(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + resource "azurerm_storage_container" "test" { name = "$root" storage_account_name = azurerm_storage_account.test.name container_access_type = "private" } +`, r.template(data)) +} + +func (r StorageContainerResource) webDeprecated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_container" "test" { + name = "$web" + storage_account_name = azurerm_storage_account.test.name + container_access_type = "private" +} `, template) } @@ -420,7 +785,7 @@ func (r StorageContainerResource) web(data acceptance.TestData) string { resource "azurerm_storage_container" "test" { name = "$web" - storage_account_name = azurerm_storage_account.test.name + storage_account_id = azurerm_storage_account.test.id container_access_type = "private" } `, template) diff --git 
a/internal/services/storage/storage_share_data_source.go b/internal/services/storage/storage_share_data_source.go index 20ed71b8af6d..c53eb099804f 100644 --- a/internal/services/storage/storage_share_data_source.go +++ b/internal/services/storage/storage_share_data_source.go @@ -7,7 +7,11 @@ import ( "fmt" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/fileshares" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/client" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" @@ -17,7 +21,7 @@ import ( ) func dataSourceStorageShare() *pluginsdk.Resource { - return &pluginsdk.Resource{ + r := &pluginsdk.Resource{ Read: dataSourceStorageShareRead, Timeouts: &pluginsdk.ResourceTimeout{ @@ -30,9 +34,10 @@ func dataSourceStorageShare() *pluginsdk.Resource { Required: true, }, - "storage_account_name": { - Type: pluginsdk.TypeString, - Required: true, + "storage_account_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: commonids.ValidateStorageAccountID, }, "metadata": MetaDataComputedSchema(), @@ -74,74 +79,133 @@ func dataSourceStorageShare() *pluginsdk.Resource { Type: pluginsdk.TypeInt, Computed: true, }, + }, + } - "resource_manager_id": { - Type: pluginsdk.TypeString, - Computed: true, + if !features.FivePointOhBeta() { + r.Schema["storage_account_name"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ExactlyOneOf: []string{ + "storage_account_name", + "storage_account_id", }, - }, + } + + r.Schema["storage_account_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ExactlyOneOf: []string{ 
+ "storage_account_name", + "storage_account_id", + }, + ValidateFunc: commonids.ValidateStorageAccountID, + } + + r.Schema["resource_manager_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Computed: true, + Deprecated: "this property has been deprecated in favour of `id` and will be removed in version 5.0 of the Provider.", + } } + + return r } func dataSourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage + sharesClient := meta.(*clients.Client).Storage.ResourceManager.FileShares subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() shareName := d.Get("name").(string) - accountName := d.Get("storage_account_name").(string) - account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) - if err != nil { - return fmt.Errorf("retrieving Storage Account %q for Share %q: %s", accountName, shareName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q for Share %q", accountName, shareName) + if !features.FivePointOhBeta() { + storageClient := meta.(*clients.Client).Storage + if accountName := d.Get("storage_account_name").(string); accountName != "" { + account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) + if err != nil { + return fmt.Errorf("retrieving Storage Account %q for Share %q: %s", accountName, shareName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q for Share %q", accountName, shareName) + } + + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + sharesDataPlaneClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building FileShares Client: %v", err) + } + + // Determine the file endpoint, so we can build a data plane ID + endpoint, err := 
account.DataPlaneEndpoint(client.EndpointTypeFile) + if err != nil { + return fmt.Errorf("determining File endpoint: %v", err) + } + + // Parse the file endpoint as a data plane account ID + accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := shares.NewShareID(*accountId, shareName).ID() + + props, err := sharesDataPlaneClient.Get(ctx, shareName) + if err != nil { + return fmt.Errorf("retrieving %s: %v", id, err) + } + if props == nil { + return fmt.Errorf("%s was not found", id) + } + d.SetId(id) + + d.Set("name", shareName) + d.Set("storage_account_name", accountName) + d.Set("quota", props.QuotaGB) + if err = d.Set("acl", flattenStorageShareACLsDeprecated(props.ACLs)); err != nil { + return fmt.Errorf("setting `acl`: %v", err) + } + + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) + } + + resourceManagerId := parse.NewStorageShareResourceManagerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, account.StorageAccountId.StorageAccountName, "default", shareName) + d.Set("resource_manager_id", resourceManagerId.ID()) + + return nil + } } - // The files API does not support bearer tokens (@manicminer, 2024-02-15) - sharesDataPlaneClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + accountId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { - return fmt.Errorf("building FileShares Client: %v", err) + return err } - // Determine the file endpoint, so we can build a data plane ID - endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeFile) - if err != nil { - return fmt.Errorf("determining File endpoint: %v", err) - } + id := fileshares.NewShareID(accountId.SubscriptionId, accountId.ResourceGroupName, 
accountId.StorageAccountName, shareName) - // Parse the file endpoint as a data plane account ID - accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) - if err != nil { - return fmt.Errorf("parsing Account ID: %v", err) - } - - id := shares.NewShareID(*accountId, shareName).ID() - - props, err := sharesDataPlaneClient.Get(ctx, shareName) + share, err := sharesClient.Get(ctx, id, fileshares.DefaultGetOperationOptions()) if err != nil { return fmt.Errorf("retrieving %s: %v", id, err) } - if props == nil { - return fmt.Errorf("%s was not found", id) - } - d.SetId(id) d.Set("name", shareName) - d.Set("storage_account_name", accountName) - d.Set("quota", props.QuotaGB) - if err = d.Set("acl", flattenStorageShareACLs(props.ACLs)); err != nil { - return fmt.Errorf("setting `acl`: %v", err) + d.Set("storage_account_id", accountId.ID()) + + if model := share.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("quota", props.ShareQuota) + d.Set("acl", flattenStorageShareACLs(pointer.From(props.SignedIdentifiers))) + d.Set("metadata", FlattenMetaData(pointer.From(props.Metadata))) + } } - if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %v", err) + if !features.FivePointOhBeta() { + d.Set("resource_manager_id", id.ID()) } - resourceManagerId := parse.NewStorageShareResourceManagerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, account.StorageAccountId.StorageAccountName, "default", shareName) - d.Set("resource_manager_id", resourceManagerId.ID()) + d.SetId(id.ID()) return nil } diff --git a/internal/services/storage/storage_share_data_source_test.go b/internal/services/storage/storage_share_data_source_test.go index 105b031756d0..379211116df2 100644 --- a/internal/services/storage/storage_share_data_source_test.go +++ b/internal/services/storage/storage_share_data_source_test.go @@ -9,16 +9,21 @@ import ( 
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" ) type dataSourceStorageShare struct{} -func TestAccDataSourceStorageShare_basic(t *testing.T) { +func TestAccDataSourceStorageShare_basicDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + data := acceptance.BuildTestData(t, "data.azurerm_storage_share", "test") data.DataSourceTest(t, []acceptance.TestStep{ { - Config: dataSourceStorageShare{}.basic(data), + Config: dataSourceStorageShare{}.basicDeprecated(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("quota").HasValue("120"), check.That(data.ResourceName).Key("metadata.%").HasValue("2"), @@ -29,7 +34,23 @@ func TestAccDataSourceStorageShare_basic(t *testing.T) { }) } -func (d dataSourceStorageShare) basic(data acceptance.TestData) string { +func TestAccStorageShareDataSource_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_storage_share", "test") + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: dataSourceStorageShare{}.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("quota").HasValue("5"), + check.That(data.ResourceName).Key("metadata.%").HasValue("2"), + check.That(data.ResourceName).Key("metadata.hello").HasValue("world"), + check.That(data.ResourceName).Key("metadata.foo").HasValue("bar"), + ), + }, + }) +} + +func (d dataSourceStorageShare) basicDeprecated(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -75,3 +96,14 @@ data "azurerm_storage_share" "test" { } `, data.RandomString, data.Locations.Primary, data.RandomString, data.RandomString) } + +func (d dataSourceStorageShare) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_storage_share" "test" { 
+ name = azurerm_storage_share.test.name + storage_account_id = azurerm_storage_account.test.id +} +`, StorageShareResource{}.complete(data)) +} diff --git a/internal/services/storage/storage_share_resource.go b/internal/services/storage/storage_share_resource.go index 76106cf93a3c..eabcfe900324 100644 --- a/internal/services/storage/storage_share_resource.go +++ b/internal/services/storage/storage_share_resource.go @@ -9,10 +9,17 @@ import ( "strings" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/fileshares" "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/client" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/custompollers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" @@ -25,14 +32,23 @@ import ( ) func resourceStorageShare() *pluginsdk.Resource { - return &pluginsdk.Resource{ + r := &pluginsdk.Resource{ Create: resourceStorageShareCreate, Read: resourceStorageShareRead, Update: resourceStorageShareUpdate, Delete: resourceStorageShareDelete, Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { - _, err := shares.ParseShareID(id, storageDomainSuffix) + if !features.FivePointOhBeta() { + if strings.HasPrefix(id, 
"/subscriptions") { + _, err := fileshares.ParseShareID(id) + return err + } + _, err := shares.ParseShareID(id, storageDomainSuffix) + return err + } + + _, err := fileshares.ParseShareID(id) return err }), @@ -57,10 +73,11 @@ func resourceStorageShare() *pluginsdk.Resource { ValidateFunc: validate.StorageShareName, }, - "storage_account_name": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, + "storage_account_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: commonids.ValidateStorageAccountID, }, "quota": { @@ -71,6 +88,17 @@ func resourceStorageShare() *pluginsdk.Resource { "metadata": MetaDataComputedSchema(), + "enabled_protocol": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(shares.SMB), + string(shares.NFS), + }, false), + Default: string(shares.SMB), + }, + "acl": { Type: pluginsdk.TypeSet, Optional: true, @@ -89,12 +117,12 @@ func resourceStorageShare() *pluginsdk.Resource { "start": { Type: pluginsdk.TypeString, Optional: true, - ValidateFunc: validation.StringIsNotEmpty, + ValidateFunc: validation.IsRFC3339Time, }, "expiry": { Type: pluginsdk.TypeString, Optional: true, - ValidateFunc: validation.StringIsNotEmpty, + ValidateFunc: validation.IsRFC3339Time, }, "permissions": { Type: pluginsdk.TypeString, @@ -108,22 +136,6 @@ func resourceStorageShare() *pluginsdk.Resource { }, }, - "enabled_protocol": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - string(shares.SMB), - string(shares.NFS), - }, false), - Default: string(shares.SMB), - }, - - "resource_manager_id": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "url": { Type: pluginsdk.TypeString, Computed: true, @@ -143,153 +155,292 @@ func resourceStorageShare() *pluginsdk.Resource { }, }, } + + if !features.FivePointOhBeta() { + r.Schema["storage_account_name"] = &pluginsdk.Schema{ + 
Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{ + "storage_account_name", + "storage_account_id", + }, + Deprecated: "This property has been deprecated and will be replaced by `storage_account_id` in version 5.0 of the provider.", + } + + r.Schema["storage_account_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{ + "storage_account_name", + "storage_account_id", + }, + } + + r.Schema["resource_manager_id"] = &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Computed: true, + Deprecated: "this property is deprecated and will be removed 5.0 and replaced by the `id` property.", + } + } + + return r } func resourceStorageShareCreate(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage subscriptionId := meta.(*clients.Client).Account.SubscriptionId + sharesClient := meta.(*clients.Client).Storage.ResourceManager.FileShares ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - accountName := d.Get("storage_account_name").(string) - shareName := d.Get("name").(string) - quota := d.Get("quota").(int) + if !features.FivePointOhBeta() { + storageClient := meta.(*clients.Client).Storage + if accountName := d.Get("storage_account_name").(string); accountName != "" { + shareName := d.Get("name").(string) + quota := d.Get("quota").(int) + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) - metaDataRaw := d.Get("metadata").(map[string]interface{}) - metaData := ExpandMetaData(metaDataRaw) + account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Share %q: %v", accountName, shareName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", accountName) + } - aclsRaw := d.Get("acl").(*pluginsdk.Set).List() - acls := 
expandStorageShareACLs(aclsRaw) + // Determine the file endpoint, so we can build a data plane ID + endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeFile) + if err != nil { + return fmt.Errorf("determining File endpoint: %v", err) + } - account, err := storageClient.FindAccount(ctx, subscriptionId, accountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %v", accountName, shareName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q", accountName) - } + // Parse the file endpoint as a data plane account ID + accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } - // Determine the file endpoint, so we can build a data plane ID - endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeFile) - if err != nil { - return fmt.Errorf("determining File endpoint: %v", err) - } + id := shares.NewShareID(*accountId, shareName) - // Parse the file endpoint as a data plane account ID - accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix) - if err != nil { - return fmt.Errorf("parsing Account ID: %v", err) - } + protocol := shares.ShareProtocol(d.Get("enabled_protocol").(string)) + if protocol == shares.NFS { + // Only FileStorage (whose sku tier is Premium only) storage account is able to have NFS file shares. 
+ // See: https://learn.microsoft.com/en-us/azure/storage/files/storage-files-quick-create-use-linux#applies-to + if account.Kind != storageaccounts.KindFileStorage { + return fmt.Errorf("NFS File Share is only supported for Storage Account with kind %q but got `%s`", string(storageaccounts.KindFileStorage), account.Kind) + } + } + + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + fileSharesDataPlaneClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building File Share Client: %v", err) + } + + exists, err := fileSharesDataPlaneClient.Exists(ctx, shareName) + if err != nil { + return fmt.Errorf("checking for existing %s: %v", id, err) + } + if exists != nil && *exists { + return tf.ImportAsExistsError("azurerm_storage_share", id.ID()) + } + + log.Printf("[INFO] Creating Share %q in Storage Account %q", shareName, accountName) + input := shares.CreateInput{ + QuotaInGB: quota, + MetaData: metaData, + EnabledProtocol: protocol, + } + + if accessTier := d.Get("access_tier").(string); accessTier != "" { + tier := shares.AccessTier(accessTier) + input.AccessTier = &tier + } - id := shares.NewShareID(*accountId, shareName) + if err = fileSharesDataPlaneClient.Create(ctx, shareName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) + } + + d.SetId(id.ID()) + + aclsRaw := d.Get("acl").(*pluginsdk.Set).List() + acls := expandStorageShareACLsDeprecated(aclsRaw) + if err = fileSharesDataPlaneClient.UpdateACLs(ctx, shareName, shares.SetAclInput{SignedIdentifiers: acls}); err != nil { + return fmt.Errorf("setting ACLs for %s: %v", id, err) + } - protocol := shares.ShareProtocol(d.Get("enabled_protocol").(string)) - if protocol == shares.NFS { - // Only FileStorage (whose sku tier is Premium only) storage account is able to have NFS file shares. 
- // See: https://learn.microsoft.com/en-us/azure/storage/files/storage-files-quick-create-use-linux#applies-to - if account.Kind != storageaccounts.KindFileStorage { - return fmt.Errorf("NFS File Share is only supported for Storage Account with kind %q but got `%s`", string(storageaccounts.KindFileStorage), account.Kind) + return resourceStorageShareRead(d, meta) } } - // The files API does not support bearer tokens (@manicminer, 2024-02-15) - client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + accountId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { - return fmt.Errorf("building File Share Client: %v", err) + return err } - exists, err := client.Exists(ctx, shareName) + id := fileshares.NewShareID(accountId.SubscriptionId, accountId.ResourceGroupName, accountId.StorageAccountName, d.Get("name").(string)) + + existing, err := sharesClient.Get(ctx, id, fileshares.DefaultGetOperationOptions()) if err != nil { - return fmt.Errorf("checking for existing %s: %v", id, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %q: %v", id, err) + } } - if exists != nil && *exists { + if !response.WasNotFound(existing.HttpResponse) { return tf.ImportAsExistsError("azurerm_storage_share", id.ID()) } - log.Printf("[INFO] Creating Share %q in Storage Account %q", shareName, accountName) - input := shares.CreateInput{ - QuotaInGB: quota, - MetaData: metaData, - EnabledProtocol: protocol, + payload := fileshares.FileShare{ + Properties: &fileshares.FileShareProperties{ + EnabledProtocols: pointer.To(fileshares.EnabledProtocols(d.Get("enabled_protocol").(string))), + Metadata: pointer.To(ExpandMetaData(d.Get("metadata").(map[string]interface{}))), + ShareQuota: pointer.To(int64(d.Get("quota").(int))), + SignedIdentifiers: expandStorageShareACLs(d.Get("acl").(*pluginsdk.Set).List()), + }, } - if accessTier := 
d.Get("access_tier").(string); accessTier != "" { - tier := shares.AccessTier(accessTier) - input.AccessTier = &tier + if sharedAccessTier, ok := d.GetOk("access_tier"); ok && sharedAccessTier.(string) != "" { + payload.Properties.AccessTier = pointer.To(fileshares.ShareAccessTier(sharedAccessTier.(string))) } - if err = client.Create(ctx, shareName, input); err != nil { + pollerType := custompollers.NewStorageShareCreatePoller(sharesClient, id, payload) + poller := pollers.NewPoller(pollerType, 5*time.Second, pollers.DefaultNumberOfDroppedConnectionsToAllow) + + if err = poller.PollUntilDone(ctx); err != nil { return fmt.Errorf("creating %s: %v", id, err) } d.SetId(id.ID()) - if err = client.UpdateACLs(ctx, shareName, shares.SetAclInput{SignedIdentifiers: acls}); err != nil { - return fmt.Errorf("setting ACLs for %s: %v", id, err) - } - return resourceStorageShareRead(d, meta) } func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage + sharesClient := meta.(*clients.Client).Storage.ResourceManager.FileShares subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix) - if err != nil { - return err - } + if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") { + storageClient := meta.(*clients.Client).Storage + id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountId.AccountName, id.ShareName, err) + } + if account == nil { + log.Printf("[WARN] Unable to determine Account %q for Storage Share %q - assuming removed & removing from state", id.AccountId.AccountName, 
id.ShareName) + d.SetId("") + return nil + } + + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + } + + props, err := client.Get(ctx, id.ShareName) + if err != nil { + return err + } + if props == nil { + log.Printf("[DEBUG] File Share %q was not found in %s - assuming removed & removing from state", id.ShareName, account.StorageAccountId) + d.SetId("") + return nil + } + + d.Set("name", id.ShareName) + d.Set("storage_account_name", id.AccountId.AccountName) + d.Set("quota", props.QuotaGB) + d.Set("url", id.ID()) + d.Set("enabled_protocol", string(props.EnabledProtocol)) + + accessTier := "" + if props.AccessTier != nil { + accessTier = string(*props.AccessTier) + } + d.Set("access_tier", accessTier) + + if err := d.Set("acl", flattenStorageShareACLsDeprecated(props.ACLs)); err != nil { + return fmt.Errorf("flattening `acl`: %+v", err) + } + + if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("flattening `metadata`: %+v", err) + } + + resourceManagerId := parse.NewStorageShareResourceManagerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, account.StorageAccountId.StorageAccountName, "default", id.ShareName) + d.Set("resource_manager_id", resourceManagerId.ID()) - account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountId.AccountName, id.ShareName, err) - } - if account == nil { - log.Printf("[WARN] Unable to determine Account %q for Storage Share %q - assuming removed & removing from state", id.AccountId.AccountName, id.ShareName) - d.SetId("") return nil } - // The files API does not support 
bearer tokens (@manicminer, 2024-02-15) - client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + id, err := fileshares.ParseShareID(d.Id()) if err != nil { - return fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + return err } - props, err := client.Get(ctx, id.ShareName) + existing, err := sharesClient.Get(ctx, *id, fileshares.DefaultGetOperationOptions()) if err != nil { - return err - } - if props == nil { - log.Printf("[DEBUG] File Share %q was not found in %s - assuming removed & removing from state", id.ShareName, account.StorageAccountId) - d.SetId("") - return nil + if response.WasNotFound(existing.HttpResponse) { + log.Printf("[DEBUG] %q was not found, removing from state", *id) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %v", *id, err) } + d.Set("storage_account_id", commonids.NewStorageAccountID(id.SubscriptionId, id.ResourceGroupName, id.StorageAccountName).ID()) d.Set("name", id.ShareName) - d.Set("storage_account_name", id.AccountId.AccountName) - d.Set("quota", props.QuotaGB) - d.Set("url", id.ID()) - d.Set("enabled_protocol", string(props.EnabledProtocol)) - accessTier := "" - if props.AccessTier != nil { - accessTier = string(*props.AccessTier) + if model := existing.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("quota", props.ShareQuota) + // Resource Manager treats nil and "SMB" as the same and we may not get a full response here + enabledProtocols := fileshares.EnabledProtocolsSMB + if props.EnabledProtocols != nil { + enabledProtocols = *props.EnabledProtocols + } + d.Set("enabled_protocol", string(enabledProtocols)) + d.Set("access_tier", string(pointer.From(props.AccessTier))) + d.Set("acl", flattenStorageShareACLs(pointer.From(props.SignedIdentifiers))) + d.Set("metadata", FlattenMetaData(pointer.From(props.Metadata))) + } + } + + if !features.FivePointOhBeta() { + 
d.Set("resource_manager_id", id.ID()) + } + + // TODO - The following section for `url` will need to be updated to go-azure-sdk when the Giovanni Deprecation process has been completed + account, err := meta.(*clients.Client).Storage.FindAccount(ctx, subscriptionId, id.StorageAccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.StorageAccountName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", id.StorageAccountName) } - d.Set("access_tier", accessTier) - if err := d.Set("acl", flattenStorageShareACLs(props.ACLs)); err != nil { - return fmt.Errorf("flattening `acl`: %+v", err) + // Determine the file endpoint, so we can build a data plane ID + endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeFile) + if err != nil { + return fmt.Errorf("determining File endpoint: %v", err) } - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("flattening `metadata`: %+v", err) + // Parse the file endpoint as a data plane account ID + accountId, err := accounts.ParseAccountID(*endpoint, meta.(*clients.Client).Storage.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) } - resourceManagerId := parse.NewStorageShareResourceManagerID(account.StorageAccountId.SubscriptionId, account.StorageAccountId.ResourceGroupName, account.StorageAccountId.StorageAccountName, "default", id.ShareName) - d.Set("resource_manager_id", resourceManagerId.ID()) + d.Set("url", shares.NewShareID(*accountId, id.ShareName).ID()) return nil } @@ -297,126 +448,179 @@ func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error func resourceStorageShareUpdate(d *pluginsdk.ResourceData, meta interface{}) error { storageClient := meta.(*clients.Client).Storage subscriptionId := meta.(*clients.Client).Account.SubscriptionId + sharesClient := meta.(*clients.Client).Storage.ResourceManager.FileShares ctx, cancel := 
timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix) - if err != nil { - return err + if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") { + id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountId.AccountName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) + } + + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + } + + if d.HasChange("quota") { + log.Printf("[DEBUG] Updating the Quota for %s", id) + quota := d.Get("quota").(int) + + if err = client.UpdateQuota(ctx, id.ShareName, quota); err != nil { + return fmt.Errorf("updating Quota for %s: %v", id, err) + } + + log.Printf("[DEBUG] Updated the Quota for %s", id) + } + + if d.HasChange("metadata") { + log.Printf("[DEBUG] Updating the MetaData for %s", id) + + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) + + if err = client.UpdateMetaData(ctx, id.ShareName, metaData); err != nil { + return fmt.Errorf("updating MetaData for %s: %v", id, err) + } + + log.Printf("[DEBUG] Updated the MetaData for %s", id) + } + + if d.HasChange("acl") { + log.Printf("[DEBUG] Updating the ACLs for %s", id) + + aclsRaw := d.Get("acl").(*pluginsdk.Set).List() + acls := expandStorageShareACLsDeprecated(aclsRaw) + + if err = client.UpdateACLs(ctx, id.ShareName, 
shares.SetAclInput{SignedIdentifiers: acls}); err != nil { + return fmt.Errorf("updating ACLs for %s: %v", id, err) + } + + log.Printf("[DEBUG] Updated ACLs for %s", id) + } + + if d.HasChange("access_tier") { + tier := shares.AccessTier(d.Get("access_tier").(string)) + err = pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutUpdate), func() *pluginsdk.RetryError { + err = client.UpdateTier(ctx, id.ShareName, tier) + if err != nil { + if strings.Contains(err.Error(), "Cannot change access tier at this moment") { + return pluginsdk.RetryableError(err) + } + return pluginsdk.NonRetryableError(err) + } + time.Sleep(30 * time.Second) + return nil + }) + if err != nil { + return fmt.Errorf("updating access tier %s: %+v", id, err) + } + + log.Printf("[DEBUG] Updated Access Tier for %s", id) + } + + return resourceStorageShareRead(d, meta) } - account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + id, err := fileshares.ParseShareID(d.Id()) if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountId.AccountName, id.ShareName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) + return err } - // The files API does not support bearer tokens (@manicminer, 2024-02-15) - client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) - if err != nil { - return fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + update := fileshares.FileShare{ + Properties: &fileshares.FileShareProperties{}, } if d.HasChange("quota") { - log.Printf("[DEBUG] Updating the Quota for %s", id) quota := d.Get("quota").(int) - - if err = client.UpdateQuota(ctx, id.ShareName, quota); err != nil { - return fmt.Errorf("updating Quota for %s: %v", id, err) - } - - log.Printf("[DEBUG] Updated the Quota for %s", id) + update.Properties.ShareQuota = pointer.To(int64(quota)) } if 
d.HasChange("metadata") { - log.Printf("[DEBUG] Updating the MetaData for %s", id) - metaDataRaw := d.Get("metadata").(map[string]interface{}) metaData := ExpandMetaData(metaDataRaw) - if err = client.UpdateMetaData(ctx, id.ShareName, metaData); err != nil { - return fmt.Errorf("updating MetaData for %s: %v", id, err) - } - - log.Printf("[DEBUG] Updated the MetaData for %s", id) + update.Properties.Metadata = pointer.To(metaData) } if d.HasChange("acl") { - log.Printf("[DEBUG] Updating the ACLs for %s", id) - - aclsRaw := d.Get("acl").(*pluginsdk.Set).List() - acls := expandStorageShareACLs(aclsRaw) - - if err = client.UpdateACLs(ctx, id.ShareName, shares.SetAclInput{SignedIdentifiers: acls}); err != nil { - return fmt.Errorf("updating ACLs for %s: %v", id, err) - } - - log.Printf("[DEBUG] Updated ACLs for %s", id) + update.Properties.SignedIdentifiers = expandStorageShareACLs(d.Get("acl").(*pluginsdk.Set).List()) } if d.HasChange("access_tier") { - log.Printf("[DEBUG] Updating Access Tier for %s", id) - tier := shares.AccessTier(d.Get("access_tier").(string)) - err = pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutUpdate), func() *pluginsdk.RetryError { - err = client.UpdateTier(ctx, id.ShareName, tier) - if err != nil { - if strings.Contains(err.Error(), "Cannot change access tier at this moment") { - return pluginsdk.RetryableError(err) - } - return pluginsdk.NonRetryableError(err) - } - time.Sleep(30 * time.Second) - return nil - }) - if err != nil { - return fmt.Errorf("updating access tier %s: %+v", id, err) - } + update.Properties.AccessTier = pointer.To(fileshares.ShareAccessTier(tier)) + } - log.Printf("[DEBUG] Updated Access Tier for %s", id) + if _, err = sharesClient.Update(ctx, *id, update); err != nil { + return fmt.Errorf("updating %s: %v", id, err) } return resourceStorageShareRead(d, meta) } func resourceStorageShareDelete(d *pluginsdk.ResourceData, meta interface{}) error { - storageClient := meta.(*clients.Client).Storage subscriptionId := 
meta.(*clients.Client).Account.SubscriptionId + fileSharesClient := meta.(*clients.Client).Storage.ResourceManager.FileShares ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix) - if err != nil { - return err - } + if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") { + storageClient := meta.(*clients.Client).Storage + id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix) + if err != nil { + return err + } - account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) - if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountId.AccountName, id.ShareName, err) - } - if account == nil { - return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) + account, err := storageClient.FindAccount(ctx, subscriptionId, id.AccountId.AccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountId.AccountName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) + } + + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + } + + if err = client.Delete(ctx, id.ShareName); err != nil { + if strings.Contains(err.Error(), "The specified share does not exist") { + return nil + } + return fmt.Errorf("deleting %s: %v", id, err) + } + + return nil } - // The files API does not support bearer tokens (@manicminer, 2024-02-15) - client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + id, err := 
fileshares.ParseShareID(d.Id()) if err != nil { - return fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + return err } - if err = client.Delete(ctx, id.ShareName); err != nil { - if strings.Contains(err.Error(), "The specified share does not exist") { - return nil + if resp, err := fileSharesClient.Delete(ctx, *id, fileshares.DefaultDeleteOperationOptions()); err != nil { + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("deleting %s: %v", id, err) } - return fmt.Errorf("deleting %s: %v", id, err) } return nil } -func expandStorageShareACLs(input []interface{}) []shares.SignedIdentifier { +func expandStorageShareACLsDeprecated(input []interface{}) []shares.SignedIdentifier { results := make([]shares.SignedIdentifier, 0) for _, v := range input { @@ -439,7 +643,7 @@ func expandStorageShareACLs(input []interface{}) []shares.SignedIdentifier { return results } -func flattenStorageShareACLs(input []shares.SignedIdentifier) []interface{} { +func flattenStorageShareACLsDeprecated(input []shares.SignedIdentifier) []interface{} { result := make([]interface{}, 0) for _, v := range input { @@ -459,3 +663,47 @@ func flattenStorageShareACLs(input []shares.SignedIdentifier) []interface{} { return result } + +func expandStorageShareACLs(input []interface{}) *[]fileshares.SignedIdentifier { + results := make([]fileshares.SignedIdentifier, 0) + + for _, v := range input { + acl := v.(map[string]interface{}) + + policies := acl["access_policy"].([]interface{}) + policy := policies[0].(map[string]interface{}) + + identifier := fileshares.SignedIdentifier{ + Id: pointer.To(acl["id"].(string)), + AccessPolicy: &fileshares.AccessPolicy{ + StartTime: pointer.To(policy["start"].(string)), + ExpiryTime: pointer.To(policy["expiry"].(string)), + Permission: pointer.To(policy["permissions"].(string)), + }, + } + results = append(results, identifier) + } + + return pointer.To(results) +} + +func flattenStorageShareACLs(input 
[]fileshares.SignedIdentifier) []interface{} { + result := make([]interface{}, 0) + + for _, v := range input { + output := map[string]interface{}{ + "id": v.Id, + "access_policy": []interface{}{ + map[string]interface{}{ + "start": v.AccessPolicy.StartTime, + "expiry": v.AccessPolicy.ExpiryTime, + "permissions": v.AccessPolicy.Permission, + }, + }, + } + + result = append(result, output) + } + + return result +} diff --git a/internal/services/storage/storage_share_resource_test.go b/internal/services/storage/storage_share_resource_test.go index e85955af5041..0bca4e41aeb7 100644 --- a/internal/services/storage/storage_share_resource_test.go +++ b/internal/services/storage/storage_share_resource_test.go @@ -6,18 +6,40 @@ package storage_test import ( "context" "fmt" + "strings" "testing" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/fileshares" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/features" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/terraform-provider-azurerm/utils" "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" ) type StorageShareResource struct{} +func TestAccStorageShare_basicDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("enabled_protocol").HasValue("SMB"), + ), + }, + data.ImportStep(), + }) +} + func 
TestAccStorageShare_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -27,13 +49,46 @@ func TestAccStorageShare_basic(t *testing.T) { Config: r.basic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("enabled_protocol").HasValue("SMB"), ), }, data.ImportStep(), }) } +func TestAccStorageShare_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccStorageShare_requiresImportDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImportDeprecated), + }) +} + func TestAccStorageShare_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -49,25 +104,32 @@ func TestAccStorageShare_requiresImport(t *testing.T) { }) } -func TestAccStorageShare_disappears(t *testing.T) { +func TestAccStorageShare_disappearsDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ - Config: r.basic, + Config: r.basicDeprecated, TestResource: r, }), }) } 
-func TestAccStorageShare_deleteAndRecreate(t *testing.T) { +func TestAccStorageShare_deleteAndRecreateDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.basicDeprecated(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -77,7 +139,33 @@ func TestAccStorageShare_deleteAndRecreate(t *testing.T) { Config: r.template(data), }, { - Config: r.basic(data), + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccStorageShare_metaDataDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.metaDataDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.metaDataUpdatedDeprecated(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -108,11 +196,44 @@ func TestAccStorageShare_metaData(t *testing.T) { }) } +func TestAccStorageShare_aclDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.aclDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.aclUpdatedDeprecated(data), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShare_acl(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), { Config: r.acl(data), Check: acceptance.ComposeTestCheckFunc( @@ -127,6 +248,32 @@ func TestAccStorageShare_acl(t *testing.T) { ), }, data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccStorageShare_aclGhostedRecallDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.aclGhostedRecallDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), }) } @@ -145,6 +292,31 @@ func TestAccStorageShare_aclGhostedRecall(t *testing.T) { }) } +func TestAccStorageShare_updateQuotaDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.updateQuotaDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("quota").HasValue("5"), + ), + }, + }) +} + 
func TestAccStorageShare_updateQuota(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -166,6 +338,32 @@ func TestAccStorageShare_updateQuota(t *testing.T) { }) } +func TestAccStorageShare_largeQuotaDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.largeQuotaDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.largeQuotaUpdateDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShare_largeQuota(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -188,6 +386,32 @@ func TestAccStorageShare_largeQuota(t *testing.T) { }) } +func TestAccStorageShare_accessTierStandardDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.accessTierStandardDeprecated(data, "Cool"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.accessTierStandardDeprecated(data, "Hot"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShare_accessTierStandard(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -210,6 +434,25 @@ func 
TestAccStorageShare_accessTierStandard(t *testing.T) { }) } +func TestAccStorageShare_accessTierPremiumDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.accessTierPremiumDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShare_accessTierPremium(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -225,6 +468,25 @@ func TestAccStorageShare_accessTierPremium(t *testing.T) { }) } +func TestAccStorageShare_nfsProtocolDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.protocolDeprecated(data, "NFS"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShare_nfsProtocol(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -241,6 +503,32 @@ func TestAccStorageShare_nfsProtocol(t *testing.T) { } // TestAccStorageShare_protocolUpdate is to ensure destroy-then-create of the storage share can tolerant the "ShareBeingDeleted" issue. 
+func TestAccStorageShare_protocolUpdateDeprecated(t *testing.T) { + if features.FivePointOhBeta() { + t.Skip("skipping as not valid in 5.0") + } + + data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") + r := StorageShareResource{} + + data.ResourceTestIgnoreRecreate(t, r, []acceptance.TestStep{ + { + Config: r.protocolDeprecated(data, "NFS"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.protocolDeprecated(data, "SMB"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShare_protocolUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share", "test") r := StorageShareResource{} @@ -264,32 +552,47 @@ func TestAccStorageShare_protocolUpdate(t *testing.T) { } func (r StorageShareResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := shares.ParseShareID(state.ID, client.Storage.StorageDomainSuffix) - if err != nil { - return nil, err + if !features.FivePointOhBeta() && !strings.HasPrefix(state.ID, "/subscriptions") { + id, err := shares.ParseShareID(state.ID, client.Storage.StorageDomainSuffix) + if err != nil { + return nil, err + } + + account, err := client.Storage.FindAccount(ctx, client.Account.SubscriptionId, id.AccountId.AccountName) + if err != nil { + return nil, fmt.Errorf("retrieving Account %q for Share %q: %+v", id.AccountId.AccountName, id.ShareName, err) + } + if account == nil { + return nil, fmt.Errorf("unable to determine Account %q for Storage Share %q", id.AccountId.AccountName, id.ShareName) + } + + sharesClient, err := client.Storage.FileSharesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return nil, fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + 
} + + props, err := sharesClient.Get(ctx, id.ShareName) + if err != nil { + return nil, fmt.Errorf("retrieving File Share %q in %s: %+v", id.ShareName, account.StorageAccountId, err) + } + + return pointer.To(props != nil), nil } - account, err := client.Storage.FindAccount(ctx, client.Account.SubscriptionId, id.AccountId.AccountName) + id, err := fileshares.ParseShareID(state.ID) if err != nil { - return nil, fmt.Errorf("retrieving Account %q for Share %q: %+v", id.AccountId.AccountName, id.ShareName, err) - } - if account == nil { - return nil, fmt.Errorf("unable to determine Account %q for Storage Share %q", id.AccountId.AccountName, id.ShareName) + return nil, err } - - sharesClient, err := client.Storage.FileSharesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) + existing, err := client.Storage.ResourceManager.FileShares.Get(ctx, *id, fileshares.DefaultGetOperationOptions()) if err != nil { - return nil, fmt.Errorf("building File Share Client for %s: %+v", account.StorageAccountId, err) + return nil, fmt.Errorf("retrieving %s: %+v", id, err) } - props, err := sharesClient.Get(ctx, id.ShareName) - if err != nil { - return nil, fmt.Errorf("retrieving File Share %q in %s: %+v", id.ShareName, account.StorageAccountId, err) - } + return pointer.To(existing.Model != nil), nil - return utils.Bool(props != nil), nil } +// Destroy is deprecated for this resource. From 5.0 this will no longer use the Data Plane client. 
func (r StorageShareResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := shares.ParseShareID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { @@ -312,10 +615,10 @@ func (r StorageShareResource) Destroy(ctx context.Context, client *clients.Clien return nil, fmt.Errorf("deleting File Share %q in %s: %+v", id.ShareName, account.StorageAccountId, err) } - return utils.Bool(true), nil + return pointer.To(true), nil } -func (r StorageShareResource) basic(data acceptance.TestData) string { +func (r StorageShareResource) basicDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -328,24 +631,84 @@ resource "azurerm_storage_share" "test" { `, template, data.RandomString) } -func (r StorageShareResource) metaData(data acceptance.TestData) string { +func (r StorageShareResource) basic(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s resource "azurerm_storage_share" "test" { - name = "testshare%s" - storage_account_name = azurerm_storage_account.test.name - quota = 5 - - metadata = { - hello = "world" - } + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 } `, template, data.RandomString) } -func (r StorageShareResource) metaDataUpdated(data acceptance.TestData) string { +func (r StorageShareResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 + access_tier = "Cool" + enabled_protocol = "SMB" + + acl { + id = "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" + + access_policy { + permissions = "rwd" + start = "2019-07-02T09:38:21Z" + expiry = "2019-07-02T10:38:21Z" + } + } + + metadata = { + hello = "world" + foo = "bar" + } +} +`, template, data.RandomString) +} + +func (r 
StorageShareResource) metaDataDeprecated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_name = azurerm_storage_account.test.name + quota = 5 + + metadata = { + hello = "world" + } +} +`, template, data.RandomString) +} + +func (r StorageShareResource) metaData(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 + + metadata = { + hello = "world" + } +} +`, template, data.RandomString) +} + +func (r StorageShareResource) metaDataUpdatedDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -363,7 +726,25 @@ resource "azurerm_storage_share" "test" { `, template, data.RandomString) } -func (r StorageShareResource) acl(data acceptance.TestData) string { +func (r StorageShareResource) metaDataUpdated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 + + metadata = { + hello = "world" + happy = "birthday" + } +} +`, template, data.RandomString) +} + +func (r StorageShareResource) aclDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -386,7 +767,30 @@ resource "azurerm_storage_share" "test" { `, template, data.RandomString) } -func (r StorageShareResource) aclGhostedRecall(data acceptance.TestData) string { +func (r StorageShareResource) acl(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 + + acl { + id = 
"MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" + + access_policy { + permissions = "rwd" + start = "2019-07-02T09:38:21Z" + expiry = "2019-07-02T10:38:21Z" + } + } +} +`, template, data.RandomString) +} + +func (r StorageShareResource) aclGhostedRecallDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -406,7 +810,27 @@ resource "azurerm_storage_share" "test" { `, template, data.RandomString) } -func (r StorageShareResource) aclUpdated(data acceptance.TestData) string { +func (r StorageShareResource) aclGhostedRecall(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 + + acl { + id = "GhostedRecall" + access_policy { + permissions = "r" + } + } +} +`, template, data.RandomString) +} + +func (r StorageShareResource) aclUpdatedDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -438,20 +862,64 @@ resource "azurerm_storage_share" "test" { `, template, data.RandomString) } -func (r StorageShareResource) requiresImport(data acceptance.TestData) string { - template := r.basic(data) +func (r StorageShareResource) aclUpdated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 + + acl { + id = "AAAANDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" + + access_policy { + permissions = "rwd" + start = "2019-07-02T09:38:21Z" + expiry = "2019-07-02T10:38:21Z" + } + } + acl { + id = "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" + + access_policy { + permissions = "rwd" + start = "2019-07-02T09:38:21Z" + expiry = "2019-07-02T10:38:21Z" + } + } +} +`, template, data.RandomString) +} + +func (r StorageShareResource) requiresImportDeprecated(data 
acceptance.TestData) string { + template := r.basicDeprecated(data) return fmt.Sprintf(` %s resource "azurerm_storage_share" "import" { name = azurerm_storage_share.test.name storage_account_name = azurerm_storage_share.test.storage_account_name - quota = 5 + quota = azurerm_storage_share.test.quota } `, template) } -func (r StorageShareResource) updateQuota(data acceptance.TestData) string { +func (r StorageShareResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "import" { + name = azurerm_storage_share.test.name + storage_account_id = azurerm_storage_share.test.storage_account_id + quota = azurerm_storage_share.test.quota +} +`, r.basic(data)) +} + +func (r StorageShareResource) updateQuotaDeprecated(data acceptance.TestData) string { template := r.template(data) return fmt.Sprintf(` %s @@ -464,7 +932,20 @@ resource "azurerm_storage_share" "test" { `, template, data.RandomString) } -func (r StorageShareResource) largeQuota(data acceptance.TestData) string { +func (r StorageShareResource) updateQuota(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 5 +} +`, template, data.RandomString) +} + +func (r StorageShareResource) largeQuotaDeprecated(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -496,7 +977,39 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) } -func (r StorageShareResource) largeQuotaUpdate(data acceptance.TestData) string { +func (r StorageShareResource) largeQuota(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-storageshare-%d" + location = "%s" +} + +resource 
"azurerm_storage_account" "test" { + name = "acctestshare%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Premium" + account_replication_type = "LRS" + account_kind = "FileStorage" + + tags = { + environment = "staging" + } +} + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 6000 +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) +} + +func (r StorageShareResource) largeQuotaUpdateDeprecated(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -528,7 +1041,39 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) } -func (r StorageShareResource) accessTierStandard(data acceptance.TestData, tier string) string { +func (r StorageShareResource) largeQuotaUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-storageshare-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestshare%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Premium" + account_replication_type = "LRS" + account_kind = "FileStorage" + + tags = { + environment = "staging" + } +} + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 10000 +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) +} + +func (r StorageShareResource) accessTierStandardDeprecated(data acceptance.TestData, tier string) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -559,7 +1104,38 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, 
data.Locations.Primary, data.RandomString, data.RandomString, tier) } -func (r StorageShareResource) accessTierPremium(data acceptance.TestData) string { +func (r StorageShareResource) accessTierStandard(data acceptance.TestData, tier string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} + storage_use_azuread = true +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" + account_kind = "StorageV2" +} + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + quota = 100 + enabled_protocol = "SMB" + access_tier = "%s" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString, tier) +} + +func (r StorageShareResource) accessTierPremiumDeprecated(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -589,7 +1165,37 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) } -func (r StorageShareResource) protocol(data acceptance.TestData, protocol string) string { +func (r StorageShareResource) accessTierPremium(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Premium" + account_replication_type = "LRS" + account_kind = "FileStorage" +} + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = 
azurerm_storage_account.test.id + quota = 100 + enabled_protocol = "SMB" + access_tier = "Premium" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) +} + +func (r StorageShareResource) protocolDeprecated(data acceptance.TestData, protocol string) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -618,6 +1224,35 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString, protocol) } +func (r StorageShareResource) protocol(data acceptance.TestData, protocol string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "FileStorage" + account_tier = "Premium" + account_replication_type = "LRS" +} + +resource "azurerm_storage_share" "test" { + name = "testshare%s" + storage_account_id = azurerm_storage_account.test.id + enabled_protocol = "%s" + quota = 100 +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString, protocol) +} + func (r StorageShareResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/website/docs/d/storage_container.html.markdown b/website/docs/d/storage_container.html.markdown index 62be44b89610..6b85fbc66488 100644 --- a/website/docs/d/storage_container.html.markdown +++ b/website/docs/d/storage_container.html.markdown @@ -13,9 +13,14 @@ Use this data source to access information about an existing Storage Container. 
 ## Example Usage
 
 ```hcl
+data "azurerm_storage_account" "example" {
+  name                = "exampleaccount"
+  resource_group_name = "examples"
+}
+
 data "azurerm_storage_container" "example" {
-  name                 = "example-container-name"
-  storage_account_name = "example-storage-account-name"
+  name               = "example-container-name"
+  storage_account_id = data.azurerm_storage_account.example.id
 }
 ```
@@ -25,7 +30,11 @@ The following arguments are supported:
 
 * `name` - The name of the Container.
 
-* `storage_account_name` - The name of the Storage Account where the Container exists.
+* `storage_account_name` - (Optional) The name of the Storage Account where the Container exists. This property is deprecated in favour of `storage_account_id`.
+
+* `storage_account_id` - (Optional) The ID of the Storage Account where the Container exists. This property will become Required in version 5.0 of the Provider.
+
+~> **NOTE:** One of `storage_account_name` or `storage_account_id` must be specified. When specifying `storage_account_id` the resource will use the Resource Manager API, rather than the Data Plane API.
 
 ## Attributes Reference
 
@@ -41,7 +50,7 @@ The following arguments are supported:
 
 * `metadata` - A mapping of MetaData for this Container.
 
-* `resource_manager_id` - The Resource Manager ID of this Storage Container.
+* `id` - The Resource Manager ID of this Storage Container.
 
 ## Timeouts
 
diff --git a/website/docs/d/storage_share.html.markdown b/website/docs/d/storage_share.html.markdown
index d4e0bbbdda16..c8892a2d78ff 100644
--- a/website/docs/d/storage_share.html.markdown
+++ b/website/docs/d/storage_share.html.markdown
@@ -15,14 +15,16 @@ Use this data source to access information about an existing File Share.
 
## Example Usage ```hcl -data "azurerm_storage_share" "example" { - name = "existing" - storage_account_name = "existing" +data "azurerm_storage_account" "example" { + name = "exampleaccount" + resource_group_name = "examples" } -output "id" { - value = data.azurerm_storage_share.example.id +data "azurerm_storage_share" "example" { + name = "existing" + storage_account_id = data.azurerm_storage_account.example.id } + ``` ## Arguments Reference @@ -31,7 +33,11 @@ The following arguments are supported: * `name` - (Required) The name of the share. -* `storage_account_name` - (Required) The name of the storage account. +* `storage_account_name` - (Optional) The name of the storage account in which the share exists. This property is deprecated in favour of `storage_account_id`. + +* `storage_account_id` - (Optional) The ID of the storage account in which the share exists. + +~> **NOTE:** One of `storage_account_name` or `storage_account_id` must be specified. When specifying `storage_account_id` the resource will use the Resource Manager API, rather than the Data Plane API. ## Attributes Reference @@ -57,9 +63,9 @@ A `access_policy` block has the following attributes: * `permissions` - The permissions which should be associated with this Shared Identifier. Possible value is combination of `r` (read), `w` (write), `d` (delete), and `l` (list). -* `start` - The time at which this Access Policy should be valid from, in [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) format. +* `start` - The time at which this Access Policy is valid from. -* `expiry` - The time at which this Access Policy should be valid until, in [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) format. +* `expiry` - The time at which this Access Policy is valid until. 
 ## Timeouts
 
diff --git a/website/docs/r/storage_container.html.markdown b/website/docs/r/storage_container.html.markdown
index bad59ba65a3b..18b371b4018b 100644
--- a/website/docs/r/storage_container.html.markdown
+++ b/website/docs/r/storage_container.html.markdown
@@ -32,7 +32,7 @@ resource "azurerm_storage_account" "example" {
 
 resource "azurerm_storage_container" "example" {
   name                  = "vhds"
-  storage_account_name  = azurerm_storage_account.example.name
+  storage_account_id    = azurerm_storage_account.example.id
   container_access_type = "private"
 }
 ```
@@ -43,7 +43,11 @@ The following arguments are supported:
 
 * `name` - (Required) The name of the Container which should be created within the Storage Account. Changing this forces a new resource to be created.
 
-* `storage_account_name` - (Required) The name of the Storage Account where the Container should be created. Changing this forces a new resource to be created.
+* `storage_account_name` - (Optional) The name of the Storage Account where the Container should be created. Changing this forces a new resource to be created. This property is deprecated in favour of `storage_account_id`.
+
+* `storage_account_id` - (Optional) The ID of the Storage Account where the Container should be created. Changing this forces a new resource to be created.
+
+~> **NOTE:** One of `storage_account_name` or `storage_account_id` must be specified. When specifying `storage_account_id` the resource will use the Resource Manager API, rather than the Data Plane API.
 
 * `container_access_type` - (Optional) The Access Level configured for this Container. Possible values are `blob`, `container` or `private`. Defaults to `private`.
 
diff --git a/website/docs/r/storage_share.html.markdown b/website/docs/r/storage_share.html.markdown index 7fd0544df73d..04e59174cffe 100644 --- a/website/docs/r/storage_share.html.markdown +++ b/website/docs/r/storage_share.html.markdown @@ -31,17 +31,17 @@ resource "azurerm_storage_account" "example" { } resource "azurerm_storage_share" "example" { - name = "sharename" - storage_account_name = azurerm_storage_account.example.name - quota = 50 + name = "sharename" + storage_account_id = azurerm_storage_account.example.id + quota = 50 acl { id = "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" access_policy { permissions = "rwdl" - start = "2019-07-02T09:38:21.0000000Z" - expiry = "2019-07-02T10:38:21.0000000Z" + start = "2019-07-02T09:38:21Z" + expiry = "2019-07-02T10:38:21Z" } } } @@ -53,7 +53,11 @@ The following arguments are supported: * `name` - (Required) The name of the share. Must be unique within the storage account where the share is located. Changing this forces a new resource to be created. -* `storage_account_name` - (Required) Specifies the storage account in which to create the share. Changing this forces a new resource to be created. +* `storage_account_name` - (Optional) Specifies the storage account in which to create the share. Changing this forces a new resource to be created. This property is deprecated in favour of `storage_account_id`. + +* `storage_account_id` - (Optional) Specifies the storage account in which to create the share. Changing this forces a new resource to be created. + +~> **NOTE:** One of `storage_account_name` or `storage_account_id` must be specified. When specifying `storage_account_id` the resource will use the Resource Manager API, rather than the Data Plane API. * `access_tier` - (Optional) The access tier of the File Share. Possible values are `Hot`, `Cool` and `TransactionOptimized`, `Premium`. 
@@ -89,9 +93,9 @@ A `access_policy` block supports the following:
 
 ~> **Note:** Permission order is strict at the service side, and permissions need to be listed in the order above.
 
-* `start` - (Optional) The time at which this Access Policy should be valid from, in [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) format.
+* `start` - (Optional) The time at which this Access Policy should be valid from. When using `storage_account_id` this should be in RFC3339 format. If using the deprecated `storage_account_name` property, this uses the [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) format.
 
-* `expiry` - (Optional) The time at which this Access Policy should be valid until, in [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) format.
+* `expiry` - (Optional) The time at which this Access Policy should be valid until. When using `storage_account_id` this should be in RFC3339 format. If using the deprecated `storage_account_name` property, this uses the [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) format.
 
 ## Attributes Reference
 
@@ -114,8 +118,8 @@ The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/l
 
 ## Import
 
-Storage Shares can be imported using the `resource id`, e.g.
+Storage Shares can be imported using the `id`, e.g.
 
 ```shell
-terraform import azurerm_storage_share.exampleShare https://account1.file.core.windows.net/share1
+terraform import azurerm_storage_share.exampleShare /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Storage/storageAccounts/myAccount/fileServices/default/shares/exampleShare
 ```