Merge pull request #9917 from ross-p-smith/adls-root-acl
Allow ADLS File System to have ACLs added to the root
tombuildsstuff authored Jan 27, 2021
2 parents 38d2002 + e6df439 commit 698386c
Showing 8 changed files with 457 additions and 84 deletions.
69 changes: 69 additions & 0 deletions azurerm/internal/services/storage/parse/storage_filesystem_ace.go
@@ -0,0 +1,69 @@
package parse

import (
    "github.com/google/uuid"
    "github.com/tombuildsstuff/giovanni/storage/accesscontrol"
)

func ExpandDataLakeGen2AceList(input []interface{}) (*accesscontrol.ACL, error) {
    if len(input) == 0 {
        return nil, nil
    }
    aceList := make([]accesscontrol.ACE, len(input))

    for i := 0; i < len(input); i++ {
        v := input[i].(map[string]interface{})

        isDefault := false
        if scopeRaw, ok := v["scope"]; ok {
            if scopeRaw.(string) == "default" {
                isDefault = true
            }
        }

        tagType := accesscontrol.TagType(v["type"].(string))

        var id *uuid.UUID
        if raw, ok := v["id"]; ok && raw != "" {
            idTemp, err := uuid.Parse(raw.(string))
            if err != nil {
                return nil, err
            }
            id = &idTemp
        }

        permissions := v["permissions"].(string)

        ace := accesscontrol.ACE{
            IsDefault:    isDefault,
            TagType:      tagType,
            TagQualifier: id,
            Permissions:  permissions,
        }
        aceList[i] = ace
    }

    return &accesscontrol.ACL{Entries: aceList}, nil
}

func FlattenDataLakeGen2AceList(acl accesscontrol.ACL) []interface{} {
    output := make([]interface{}, len(acl.Entries))

    for i, v := range acl.Entries {
        ace := make(map[string]interface{})

        scope := "access"
        if v.IsDefault {
            scope = "default"
        }
        ace["scope"] = scope
        ace["type"] = string(v.TagType)
        if v.TagQualifier != nil {
            ace["id"] = v.TagQualifier.String()
        }
        ace["permissions"] = v.Permissions

        output[i] = ace
    }
    return output
}
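
For orientation, here is a minimal, hypothetical round-trip test that is not part of this commit. It assumes it sits alongside the helpers above in the `parse` package and relies only on calls that already appear in this PR (`acl.String()` and `accesscontrol.ParseACL`); the exact ACL text rendering is an assumption about the giovanni package, so the test only logs it rather than asserting on it.

package parse

import (
    "testing"

    "github.com/tombuildsstuff/giovanni/storage/accesscontrol"
)

// TestExpandFlattenAceRoundTrip is an illustration only, not part of this
// commit: schema data is expanded into an accesscontrol.ACL, rendered to the
// text form handed to SetAccessControl, parsed back, and flattened into the
// list shape Terraform keeps in state.
func TestExpandFlattenAceRoundTrip(t *testing.T) {
    raw := []interface{}{
        map[string]interface{}{
            "scope":       "access",
            "type":        "user",
            "id":          "", // empty id => the owning user entry
            "permissions": "rwx",
        },
        map[string]interface{}{
            "scope":       "default",
            "type":        "other",
            "id":          "",
            "permissions": "---",
        },
    }

    acl, err := ExpandDataLakeGen2AceList(raw)
    if err != nil {
        t.Fatalf("expanding ace list: %v", err)
    }

    // The exact rendering (e.g. something like "user::rwx,default:other::---")
    // is produced by the giovanni accesscontrol package, so it is only logged.
    t.Logf("ACL text sent to the service: %s", acl.String())

    parsed, err := accesscontrol.ParseACL(acl.String())
    if err != nil {
        t.Fatalf("parsing ACL text: %v", err)
    }

    if flattened := FlattenDataLakeGen2AceList(parsed); len(flattened) != len(raw) {
        t.Fatalf("expected %d ace entries after the round trip, got %d", len(raw), len(flattened))
    }
}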
@@ -8,13 +8,16 @@ import (
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf"
    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse"
    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate"
    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts"
    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
    "github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/filesystems"
    "github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/paths"
    "github.com/tombuildsstuff/giovanni/storage/accesscontrol"
)

func resourceStorageDataLakeGen2FileSystem() *schema.Resource {
@@ -73,13 +76,45 @@ func resourceStorageDataLakeGen2FileSystem() *schema.Resource {
            },

            "properties": MetaDataSchema(),

            "ace": {
                Type:     schema.TypeList,
                Optional: true,
                Computed: true,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "scope": {
                            Type:         schema.TypeString,
                            Optional:     true,
                            ValidateFunc: validation.StringInSlice([]string{"default", "access"}, false),
                            Default:      "access",
                        },
                        "type": {
                            Type:         schema.TypeString,
                            Required:     true,
                            ValidateFunc: validation.StringInSlice([]string{"user", "group", "mask", "other"}, false),
                        },
                        "id": {
                            Type:         schema.TypeString,
                            Optional:     true,
                            ValidateFunc: validation.IsUUID,
                        },
                        "permissions": {
                            Type:         schema.TypeString,
                            Required:     true,
                            ValidateFunc: validate.ADLSAccessControlPermissions,
                        },
                    },
                },
            },
        },
    }
}

func resourceStorageDataLakeGen2FileSystemCreate(d *schema.ResourceData, meta interface{}) error {
    accountsClient := meta.(*clients.Client).Storage.AccountsClient
    client := meta.(*clients.Client).Storage.FileSystemsClient
    pathClient := meta.(*clients.Client).Storage.ADLSGen2PathsClient
    ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d)
    defer cancel()

@@ -88,6 +123,12 @@ func resourceStorageDataLakeGen2FileSystemCreate(d *schema.ResourceData, meta in
        return err
    }

    aceRaw := d.Get("ace").([]interface{})
    acl, err := parse.ExpandDataLakeGen2AceList(aceRaw)
    if err != nil {
        return fmt.Errorf("Error parsing ace list: %s", err)
    }

    // confirm the storage account exists, otherwise Data Plane API requests will fail
    storageAccount, err := accountsClient.GetProperties(ctx, storageID.ResourceGroup, storageID.Name, "")
    if err != nil {
@@ -123,13 +164,29 @@ func resourceStorageDataLakeGen2FileSystemCreate(d *schema.ResourceData, meta in
        return fmt.Errorf("Error creating File System %q in Storage Account %q: %s", fileSystemName, storageID.Name, err)
    }

    if acl != nil {
        log.Printf("[INFO] Creating acl %q in File System %q in Storage Account %q.", acl, fileSystemName, storageID.Name)
        var aclString *string
        v := acl.String()
        aclString = &v
        accessControlInput := paths.SetAccessControlInput{
            ACL:   aclString,
            Owner: nil,
            Group: nil,
        }
        if _, err := pathClient.SetAccessControl(ctx, storageID.Name, fileSystemName, "/", accessControlInput); err != nil {
            return fmt.Errorf("Error setting access control for root path in File System %q in Storage Account %q: %s", fileSystemName, storageID.Name, err)
        }
    }

    d.SetId(id)
    return resourceStorageDataLakeGen2FileSystemRead(d, meta)
}
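
For reference, a hedged sketch (not part of this commit) of the access-control input the create and update paths build for the filesystem root. Only the example ACL literal is an assumption: it mirrors the three `ace` blocks in the `withDefaultACL` acceptance test further down, assuming `acl.String()` uses the standard POSIX-style ADLS Gen2 rendering; the `SetAccessControlInput` shape itself is taken from the diff above.

package storage

import (
    "github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/paths"
)

// exampleRootAccessControlInput is a hypothetical helper, for illustration only.
func exampleRootAccessControlInput() paths.SetAccessControlInput {
    // Assumed rendering of acl.String() for the ace blocks user/rwx, group/r-x, other/---.
    rootACL := "user::rwx,group::r-x,other::---"
    return paths.SetAccessControlInput{
        ACL:   &rootACL,
        Owner: nil, // the resource leaves owner and group untouched; only the ACL is set
        Group: nil,
    }
}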

func resourceStorageDataLakeGen2FileSystemUpdate(d *schema.ResourceData, meta interface{}) error {
    accountsClient := meta.(*clients.Client).Storage.AccountsClient
    client := meta.(*clients.Client).Storage.FileSystemsClient
    pathClient := meta.(*clients.Client).Storage.ADLSGen2PathsClient
    ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d)
    defer cancel()

@@ -143,6 +200,12 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *schema.ResourceData, meta in
        return err
    }

    aceRaw := d.Get("ace").([]interface{})
    acl, err := parse.ExpandDataLakeGen2AceList(aceRaw)
    if err != nil {
        return fmt.Errorf("Error parsing ace list: %s", err)
    }

    // confirm the storage account exists, otherwise Data Plane API requests will fail
    storageAccount, err := accountsClient.GetProperties(ctx, storageID.ResourceGroup, storageID.Name, "")
    if err != nil {
@@ -164,12 +227,28 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *schema.ResourceData, meta in
        return fmt.Errorf("Error updating Properties for File System %q in Storage Account %q: %s", id.DirectoryName, id.AccountName, err)
    }

    if acl != nil {
        log.Printf("[INFO] Creating acl %q in File System %q in Storage Account %q.", acl, id.DirectoryName, id.AccountName)
        var aclString *string
        v := acl.String()
        aclString = &v
        accessControlInput := paths.SetAccessControlInput{
            ACL:   aclString,
            Owner: nil,
            Group: nil,
        }
        if _, err := pathClient.SetAccessControl(ctx, id.AccountName, id.DirectoryName, "/", accessControlInput); err != nil {
            return fmt.Errorf("Error setting access control for root path in File System %q in Storage Account %q: %s", id.DirectoryName, id.AccountName, err)
        }
    }

    return resourceStorageDataLakeGen2FileSystemRead(d, meta)
}

func resourceStorageDataLakeGen2FileSystemRead(d *schema.ResourceData, meta interface{}) error {
    accountsClient := meta.(*clients.Client).Storage.AccountsClient
    client := meta.(*clients.Client).Storage.FileSystemsClient
    pathClient := meta.(*clients.Client).Storage.ADLSGen2PathsClient
    ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d)
    defer cancel()

@@ -213,6 +292,25 @@ func resourceStorageDataLakeGen2FileSystemRead(d *schema.ResourceData, meta inte
        return fmt.Errorf("Error setting `properties`: %+v", err)
    }

    // The above `getStatus` API request doesn't return the ACLs
    // Have to make a `getAccessControl` request, but that doesn't return all fields either!
    pathResponse, err := pathClient.GetProperties(ctx, id.AccountName, id.DirectoryName, "/", paths.GetPropertiesActionGetAccessControl)
    if err != nil {
        if utils.ResponseWasNotFound(pathResponse.Response) {
            log.Printf("[INFO] Root path does not exist in File System %q in Storage Account %q - removing from state...", id.DirectoryName, id.AccountName)
            d.SetId("")
            return nil
        }

        return fmt.Errorf("Error retrieving ACLs for Root path in File System %q in Storage Account %q: %+v", id.DirectoryName, id.AccountName, err)
    }

    acl, err := accesscontrol.ParseACL(pathResponse.ACL)
    if err != nil {
        return fmt.Errorf("Error parsing response ACL %q: %s", pathResponse.ACL, err)
    }
    d.Set("ace", parse.FlattenDataLakeGen2AceList(acl))

    return nil
}

@@ -46,6 +46,43 @@ func TestAccStorageDataLakeGen2FileSystem_requiresImport(t *testing.T) {
    })
}

func TestAccStorageDataLakeGen2FileSystem_withDefaultACL(t *testing.T) {
    data := acceptance.BuildTestData(t, "azurerm_storage_data_lake_gen2_filesystem", "test")
    r := StorageDataLakeGen2FileSystemResource{}

    data.ResourceTest(t, r, []resource.TestStep{
        {
            Config: r.withDefaultACL(data),
            Check: resource.ComposeTestCheckFunc(
                check.That(data.ResourceName).ExistsInAzure(r),
            ),
        },
        data.RequiresImportErrorStep(r.requiresImport),
    })
}

func TestAccStorageDataLakeGen2FileSystem_UpdateDefaultACL(t *testing.T) {
    data := acceptance.BuildTestData(t, "azurerm_storage_data_lake_gen2_filesystem", "test")
    r := StorageDataLakeGen2FileSystemResource{}

    data.ResourceTest(t, r, []resource.TestStep{
        {
            Config: r.withDefaultACL(data),
            Check: resource.ComposeTestCheckFunc(
                check.That(data.ResourceName).ExistsInAzure(r),
            ),
        },
        data.ImportStep(),
        {
            Config: r.withExecuteACLForSPN(data),
            Check: resource.ComposeTestCheckFunc(
                check.That(data.ResourceName).ExistsInAzure(r),
            ),
        },
        data.ImportStep(),
    })
}

func TestAccStorageDataLakeGen2FileSystem_properties(t *testing.T) {
    data := acceptance.BuildTestData(t, "azurerm_storage_data_lake_gen2_filesystem", "test")
    r := StorageDataLakeGen2FileSystemResource{}
@@ -168,3 +205,93 @@ resource "azurerm_storage_account" "test" {
}
`, data.RandomInteger, data.Locations.Primary, data.RandomString)
}

func (r StorageDataLakeGen2FileSystemResource) withDefaultACL(data acceptance.TestData) string {
    template := r.template(data)
    return fmt.Sprintf(`
%s
data "azurerm_client_config" "current" {
}
resource "azurerm_role_assignment" "storageAccountRoleAssignment" {
  scope                = azurerm_storage_account.test.id
  role_definition_name = "Storage Blob Data Contributor"
  principal_id         = data.azurerm_client_config.current.object_id
}
resource "azurerm_storage_data_lake_gen2_filesystem" "test" {
  name               = "acctest-%[2]d"
  storage_account_id = azurerm_storage_account.test.id
  ace {
    type        = "user"
    permissions = "rwx"
  }
  ace {
    type        = "group"
    permissions = "r-x"
  }
  ace {
    type        = "other"
    permissions = "---"
  }
  depends_on = [
    azurerm_role_assignment.storageAccountRoleAssignment
  ]
}
`, template, data.RandomInteger)
}

func (r StorageDataLakeGen2FileSystemResource) withExecuteACLForSPN(data acceptance.TestData) string {
    template := r.template(data)
    return fmt.Sprintf(`
%s
data "azurerm_client_config" "current" {
}
resource "azurerm_role_assignment" "storageAccountRoleAssignment" {
  scope                = azurerm_storage_account.test.id
  role_definition_name = "Storage Blob Data Contributor"
  principal_id         = data.azurerm_client_config.current.object_id
}
resource "azuread_application" "test" {
  name = "acctestspa%[2]d"
}
resource "azuread_service_principal" "test" {
  application_id = azuread_application.test.application_id
}
resource "azurerm_storage_data_lake_gen2_filesystem" "test" {
  name               = "acctest-%[2]d"
  storage_account_id = azurerm_storage_account.test.id
  ace {
    type        = "user"
    permissions = "rwx"
  }
  ace {
    type        = "user"
    id          = azuread_service_principal.test.object_id
    permissions = "--x"
  }
  ace {
    type        = "group"
    permissions = "r-x"
  }
  ace {
    type        = "mask"
    permissions = "r-x"
  }
  ace {
    type        = "other"
    permissions = "---"
  }
  depends_on = [
    azurerm_role_assignment.storageAccountRoleAssignment,
    azuread_service_principal.test
  ]
}
`, template, data.RandomInteger)
}