From 519873ae5c735013f4a667e4d331c8719ee7ff44 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Wed, 16 Oct 2019 11:29:38 -0400 Subject: [PATCH 1/7] Support for launching HDInsight cluster with Data Lake Gen2 Filesystem --- azurerm/helpers/azure/hdinsight.go | 88 +++++++++++++++---- .../resource_arm_hdinsight_hadoop_cluster.go | 5 +- .../resource_arm_hdinsight_hbase_cluster.go | 5 +- ...arm_hdinsight_interactive_query_cluster.go | 5 +- .../resource_arm_hdinsight_kafka_cluster.go | 5 +- ...ource_arm_hdinsight_ml_services_cluster.go | 5 +- .../resource_arm_hdinsight_rserver_cluster.go | 5 +- .../resource_arm_hdinsight_spark_cluster.go | 5 +- .../resource_arm_hdinsight_storm_cluster.go | 5 +- .../r/hdinsight_hadoop_cluster.html.markdown | 10 ++- 10 files changed, 104 insertions(+), 34 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 03aebe9e8357..d455415ae1ab 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -157,14 +157,32 @@ func SchemaHDInsightsStorageAccounts() *schema.Schema { Schema: map[string]*schema.Schema{ "storage_account_key": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, Sensitive: true, ValidateFunc: validate.NoEmptyStrings, }, "storage_container_id": { Type: schema.TypeString, - Required: true, + Optional: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "filesystem_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "storage_resource_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "managed_identity_resource_id": { + Type: schema.TypeString, + Optional: true, ForceNew: true, ValidateFunc: validate.NoEmptyStrings, }, @@ -178,32 +196,70 @@ func SchemaHDInsightsStorageAccounts() *schema.Schema { } } -func ExpandHDInsightsStorageAccounts(input []interface{}) 
(*[]hdinsight.StorageAccount, error) { +// ExpandHDInsightsStorageAccounts returns an array of StorageAccount structs, as well as a ClusterIdentity +// populated with any managed identities required for accessing Data Lake Gen2 storage. +func ExpandHDInsightsStorageAccounts(input []interface{}) (*[]hdinsight.StorageAccount, *hdinsight.ClusterIdentity, error) { results := make([]hdinsight.StorageAccount, 0) + var clusterIndentity *hdinsight.ClusterIdentity + for _, vs := range input { v := vs.(map[string]interface{}) storageAccountKey := v["storage_account_key"].(string) - storageContainerId := v["storage_container_id"].(string) + + storageContainerID := v["storage_container_id"].(string) + + fileSystemID := v["filesystem_id"].(string) + storageResourceID := v["storage_resource_id"].(string) + managedIdentityResourceID := v["managed_identity_resource_id"].(string) + isDefault := v["is_default"].(bool) - // https://foo.blob.core.windows.net/example - uri, err := url.Parse(storageContainerId) - if err != nil { - return nil, fmt.Errorf("Error parsing %q: %s", storageContainerId, err) - } + if fileSystemID == "" && storageResourceID == "" && managedIdentityResourceID == "" && storageContainerID != "" && storageAccountKey != "" { + uri, err := url.Parse(storageContainerID) + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %q: %s", storageContainerID, err) + } + + result := hdinsight.StorageAccount{ + Name: utils.String(uri.Host), + Container: utils.String(strings.TrimPrefix(uri.Path, "/")), + Key: utils.String(storageAccountKey), + IsDefault: utils.Bool(isDefault), + } + results = append(results, result) + } else if fileSystemID != "" && storageResourceID != "" && managedIdentityResourceID != "" && storageContainerID == "" && storageAccountKey == "" { + uri, err := url.Parse(fileSystemID) + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %q: %s", storageContainerID, err) + } + + if clusterIndentity == nil { + clusterIndentity = 
&hdinsight.ClusterIdentity{ + Type: hdinsight.UserAssigned, + UserAssignedIdentities: make(map[string]*hdinsight.ClusterIdentityUserAssignedIdentitiesValue), + } + } + + // ... API doesn't seem to require client_id or principal_id, so pass in an empty ClusterIdentityUserAssignedIdentitiesValue + clusterIndentity.UserAssignedIdentities[managedIdentityResourceID] = &hdinsight.ClusterIdentityUserAssignedIdentitiesValue{} - result := hdinsight.StorageAccount{ - Name: utils.String(uri.Host), - Container: utils.String(strings.TrimPrefix(uri.Path, "/")), - Key: utils.String(storageAccountKey), - IsDefault: utils.Bool(isDefault), + result := hdinsight.StorageAccount{ + Name: utils.String(uri.Host), // https://storageaccountname.dfs.core.windows.net/filesystemname -> storageaccountname.dfs.core.windows.net + ResourceID: utils.String(storageResourceID), + FileSystem: utils.String(uri.Path[1:]), // https://storageaccountname.dfs.core.windows.net/filesystemname -> filesystemname + MsiResourceID: utils.String(managedIdentityResourceID), + IsDefault: utils.Bool(isDefault), + } + results = append(results, result) + } else { + return nil, nil, fmt.Errorf(`specify either storage_container_id AND storage_account_key (for WASB blob storage), ` + + `or filesystem_id AND storage_resource_id AND managed_identity_resource_id (for ata Lake Storage Gen 2)`) } - results = append(results, result) } - return &results, nil + return &results, clusterIndentity, nil } type HDInsightNodeDefinition struct { diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 453d1fcad6d0..9dcbd1ed242e 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -138,7 +138,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - 
storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -185,7 +185,8 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster.go b/azurerm/resource_arm_hdinsight_hbase_cluster.go index 2c0a90f2b4d2..040c5c566a11 100644 --- a/azurerm/resource_arm_hdinsight_hbase_cluster.go +++ b/azurerm/resource_arm_hdinsight_hbase_cluster.go @@ -136,7 +136,7 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -183,7 +183,8 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go index 28a975993cb7..fde8533e5713 100644 --- a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go +++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go @@ -136,7 +136,7 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m gateway := 
azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -183,7 +183,8 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_kafka_cluster.go b/azurerm/resource_arm_hdinsight_kafka_cluster.go index d5a7f69baca0..de7cefb9e45d 100644 --- a/azurerm/resource_arm_hdinsight_kafka_cluster.go +++ b/azurerm/resource_arm_hdinsight_kafka_cluster.go @@ -137,7 +137,7 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -184,7 +184,8 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go index 2f9ba98a4044..d89be281b9cd 100644 --- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go +++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go @@ -153,7 +153,7 @@ func 
resourceArmHDInsightMLServicesClusterCreate(d *schema.ResourceData, meta in gateway := expandHDInsightsMLServicesConfigurations(gatewayRaw, rStudio) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -200,7 +200,8 @@ func resourceArmHDInsightMLServicesClusterCreate(d *schema.ResourceData, meta in Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go index 8f67f44c5d7d..04460a624a5a 100644 --- a/azurerm/resource_arm_hdinsight_rserver_cluster.go +++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go @@ -153,7 +153,7 @@ func resourceArmHDInsightRServerClusterCreate(d *schema.ResourceData, meta inter gateway := expandHDInsightsRServerConfigurations(gatewayRaw, rStudio) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -200,7 +200,8 @@ func resourceArmHDInsightRServerClusterCreate(d *schema.ResourceData, meta inter Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_spark_cluster.go b/azurerm/resource_arm_hdinsight_spark_cluster.go index 5d44a2fca2ab..47ff63ad614a 100644 --- a/azurerm/resource_arm_hdinsight_spark_cluster.go +++ 
b/azurerm/resource_arm_hdinsight_spark_cluster.go @@ -136,7 +136,7 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -183,7 +183,8 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/azurerm/resource_arm_hdinsight_storm_cluster.go b/azurerm/resource_arm_hdinsight_storm_cluster.go index 922fe3f9f237..df2e422dbc82 100644 --- a/azurerm/resource_arm_hdinsight_storm_cluster.go +++ b/azurerm/resource_arm_hdinsight_storm_cluster.go @@ -137,7 +137,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } @@ -184,7 +184,8 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa Roles: roles, }, }, - Tags: tags.Expand(t), + Tags: tags.Expand(t), + Identity: identity, } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown index c677ace1e5ab..c3111da4f359 100644 --- 
a/website/docs/r/hdinsight_hadoop_cluster.html.markdown +++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown @@ -161,12 +161,18 @@ A `storage_account` block supports the following: -> **NOTE:** One of the `storage_account` blocks must be marked as the default. -* `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. +* `storage_account_key` - (Required for Blob storage) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. -* `storage_container_id` - (Required) The ID of the Storage Container. Changing this forces a new resource to be created. +* `storage_container_id` - (Required for Blob storage) The ID of the Storage Container. Changing this forces a new resource to be created. -> **NOTE:** This can be obtained from the `id` of the `azurerm_storage_container` resource. +* `storage_resource_id` - (Required for Gen2 storage) The resource ID of the Storage Account. Changing this forces a new resource to be created. + +* `filesystem_id` - (Required for Gen2 storage) The ID of the Gen2 filesystem. See `azurerm_storage_data_lake_gen2_filesystem`. Changing this forces a new resource to be created. + +* `managed_identity_resource_id` - (Required for Gen2 storage) The ID of the managed identity used for access to the Gen2 filesystem. Changing this forces a new resource to be created. 
+ --- A `worker_node` block supports the following: From 50a9975c11d3e83b11c758d10283f6f27626e3b1 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Mon, 4 Nov 2019 14:40:23 -0500 Subject: [PATCH 2/7] Use separate block for gen2 storage + WIP tests --- azurerm/helpers/azure/hdinsight.go | 110 ++++++++------ .../resource_arm_hdinsight_hadoop_cluster.go | 5 +- ...ource_arm_hdinsight_hadoop_cluster_test.go | 142 +++++++++++++++++- .../resource_arm_hdinsight_hbase_cluster.go | 5 +- ...arm_hdinsight_interactive_query_cluster.go | 5 +- .../resource_arm_hdinsight_kafka_cluster.go | 5 +- ...ource_arm_hdinsight_ml_services_cluster.go | 5 +- .../resource_arm_hdinsight_rserver_cluster.go | 5 +- .../resource_arm_hdinsight_spark_cluster.go | 5 +- .../resource_arm_hdinsight_storm_cluster.go | 5 +- 10 files changed, 235 insertions(+), 57 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index d455415ae1ab..2bc7fb9c6e99 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -152,37 +152,53 @@ func FlattenHDInsightsConfigurations(input map[string]*string) []interface{} { func SchemaHDInsightsStorageAccounts() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "storage_account_key": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, Sensitive: true, ValidateFunc: validate.NoEmptyStrings, }, "storage_container_id": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validate.NoEmptyStrings, }, - "filesystem_id": { + "is_default": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, + }, + }, + } +} + +func SchemaHDInsightsGen2StorageAccounts() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"storage_resource_id": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validate.NoEmptyStrings, }, - "storage_resource_id": { + "filesystem_id": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validate.NoEmptyStrings, }, "managed_identity_resource_id": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validate.NoEmptyStrings, }, @@ -198,17 +214,35 @@ func SchemaHDInsightsStorageAccounts() *schema.Schema { // ExpandHDInsightsStorageAccounts returns an array of StorageAccount structs, as well as a ClusterIdentity // populated with any managed identities required for accessing Data Lake Gen2 storage. -func ExpandHDInsightsStorageAccounts(input []interface{}) (*[]hdinsight.StorageAccount, *hdinsight.ClusterIdentity, error) { +func ExpandHDInsightsStorageAccounts(storageAccounts []interface{}, gen2storageAccounts []interface{}) (*[]hdinsight.StorageAccount, *hdinsight.ClusterIdentity, error) { results := make([]hdinsight.StorageAccount, 0) var clusterIndentity *hdinsight.ClusterIdentity - for _, vs := range input { + for _, vs := range storageAccounts { v := vs.(map[string]interface{}) storageAccountKey := v["storage_account_key"].(string) - storageContainerID := v["storage_container_id"].(string) + isDefault := v["is_default"].(bool) + + uri, err := url.Parse(storageContainerID) + + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %q: %s", storageContainerID, err) + } + + result := hdinsight.StorageAccount{ + Name: utils.String(uri.Host), + Container: utils.String(strings.TrimPrefix(uri.Path, "/")), + Key: utils.String(storageAccountKey), + IsDefault: utils.Bool(isDefault), + } + results = append(results, result) + } + + for _, vs := range gen2storageAccounts { + v := vs.(map[string]interface{}) fileSystemID := v["filesystem_id"].(string) storageResourceID := v["storage_resource_id"].(string) @@ -216,47 +250,29 @@ func 
ExpandHDInsightsStorageAccounts(input []interface{}) (*[]hdinsight.StorageA isDefault := v["is_default"].(bool) - if fileSystemID == "" && storageResourceID == "" && managedIdentityResourceID == "" && storageContainerID != "" && storageAccountKey != "" { - uri, err := url.Parse(storageContainerID) - if err != nil { - return nil, nil, fmt.Errorf("Error parsing %q: %s", storageContainerID, err) - } - - result := hdinsight.StorageAccount{ - Name: utils.String(uri.Host), - Container: utils.String(strings.TrimPrefix(uri.Path, "/")), - Key: utils.String(storageAccountKey), - IsDefault: utils.Bool(isDefault), - } - results = append(results, result) - } else if fileSystemID != "" && storageResourceID != "" && managedIdentityResourceID != "" && storageContainerID == "" && storageAccountKey == "" { - uri, err := url.Parse(fileSystemID) - if err != nil { - return nil, nil, fmt.Errorf("Error parsing %q: %s", storageContainerID, err) - } + uri, err := url.Parse(fileSystemID) + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %q: %s", fileSystemID, err) + } - if clusterIndentity == nil { - clusterIndentity = &hdinsight.ClusterIdentity{ - Type: hdinsight.UserAssigned, - UserAssignedIdentities: make(map[string]*hdinsight.ClusterIdentityUserAssignedIdentitiesValue), - } + if clusterIndentity == nil { + clusterIndentity = &hdinsight.ClusterIdentity{ + Type: hdinsight.UserAssigned, + UserAssignedIdentities: make(map[string]*hdinsight.ClusterIdentityUserAssignedIdentitiesValue), } + } - // ... API doesn't seem to require client_id or principal_id, so pass in an empty ClusterIdentityUserAssignedIdentitiesValue - clusterIndentity.UserAssignedIdentities[managedIdentityResourceID] = &hdinsight.ClusterIdentityUserAssignedIdentitiesValue{} + // ... 
API doesn't seem to require client_id or principal_id, so pass in an empty ClusterIdentityUserAssignedIdentitiesValue + clusterIndentity.UserAssignedIdentities[managedIdentityResourceID] = &hdinsight.ClusterIdentityUserAssignedIdentitiesValue{} - result := hdinsight.StorageAccount{ - Name: utils.String(uri.Host), // https://storageaccountname.dfs.core.windows.net/filesystemname -> storageaccountname.dfs.core.windows.net - ResourceID: utils.String(storageResourceID), - FileSystem: utils.String(uri.Path[1:]), // https://storageaccountname.dfs.core.windows.net/filesystemname -> filesystemname - MsiResourceID: utils.String(managedIdentityResourceID), - IsDefault: utils.Bool(isDefault), - } - results = append(results, result) - } else { - return nil, nil, fmt.Errorf(`specify either storage_container_id AND storage_account_key (for WASB blob storage), ` + - `or filesystem_id AND storage_resource_id AND managed_identity_resource_id (for ata Lake Storage Gen 2)`) + result := hdinsight.StorageAccount{ + Name: utils.String(uri.Host), // https://storageaccountname.dfs.core.windows.net/filesystemname -> storageaccountname.dfs.core.windows.net + ResourceID: utils.String(storageResourceID), + FileSystem: utils.String(uri.Path[1:]), // https://storageaccountname.dfs.core.windows.net/filesystemname -> filesystemname + MsiResourceID: utils.String(managedIdentityResourceID), + IsDefault: utils.Bool(isDefault), } + results = append(results, result) } return &results, clusterIndentity, nil diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 9dcbd1ed242e..b6c2cbee0417 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -89,6 +89,8 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: 
schema.TypeList, Required: true, @@ -138,7 +140,8 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 40b7d8796956..8a18fac65404 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -250,6 +250,43 @@ func TestAccAzureRMHDInsightHadoopCluster_complete(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_gen2storage(t *testing.T) { + resourceName := "azurerm_hdinsight_hadoop_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_gen2storage(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + 
"roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` @@ -581,6 +618,59 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, rInt, rInt, rInt) } +func testAccAzureRMHDInsightHadoopCluster_gen2storage(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_hadoop_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + hadoop = "2.7" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } +} +`, template, rInt) +} + func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -598,9 +688,57 @@ resource "azurerm_storage_account" "test" { resource "azurerm_storage_container" "test" { name = "acctest" - resource_group_name = "${azurerm_resource_group.test.name}" storage_account_name = "${azurerm_storage_account.test.name}" container_access_type = "private" } -`, rInt, location, rString) + +resource "azurerm_storage_account" "gen2test" { + name = "accgen2test%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "StorageV2" + account_tier = "Standard" + account_replication_type = "LRS" + is_hns_enabled = true +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" { + name = "acctest" + storage_account_id = azurerm_storage_account.gen2test.id +} + +resource "azurerm_user_assigned_identity" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + name = "test-identity" +} + + + +data "azurerm_subscription" "primary" {} + +resource "azurerm_role_definition" "storage-owner" { + // https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#storage-blob-data-owner + name = "storage-owner-role" + scope = "${data.azurerm_subscription.primary.id}" + permissions { + actions = ["*"] + data_actions = ["*"] + not_actions = [] + } + assignable_scopes = [ + "${data.azurerm_subscription.primary.id}", + ] +} + +resource "azurerm_role_assignment" "test" { + name = "hdinsight-test-storage" + scope = "${data.azurerm_subscription.primary.id}" + // https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#storage-blob-data-owner + role_definition_id = "${azurerm_role_definition.storage-owner.id}" + principal_id = 
"${azurerm_user_assigned_identity.test.principal_id}" +} + +`, rInt, location, rString, rString) } diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster.go b/azurerm/resource_arm_hdinsight_hbase_cluster.go index 040c5c566a11..67fbf0e7dbd5 100644 --- a/azurerm/resource_arm_hdinsight_hbase_cluster.go +++ b/azurerm/resource_arm_hdinsight_hbase_cluster.go @@ -87,6 +87,8 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -136,7 +138,8 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go index fde8533e5713..1177e7780c07 100644 --- a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go +++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go @@ -87,6 +87,8 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -136,7 +138,8 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := 
d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_kafka_cluster.go b/azurerm/resource_arm_hdinsight_kafka_cluster.go index de7cefb9e45d..a620196084aa 100644 --- a/azurerm/resource_arm_hdinsight_kafka_cluster.go +++ b/azurerm/resource_arm_hdinsight_kafka_cluster.go @@ -88,6 +88,8 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -137,7 +139,8 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go index d89be281b9cd..fd74b947d940 100644 --- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go +++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go @@ -89,6 +89,8 @@ func resourceArmHDInsightMLServicesCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": 
azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -153,7 +155,8 @@ func resourceArmHDInsightMLServicesClusterCreate(d *schema.ResourceData, meta in gateway := expandHDInsightsMLServicesConfigurations(gatewayRaw, rStudio) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go index 04460a624a5a..c2050f8661b4 100644 --- a/azurerm/resource_arm_hdinsight_rserver_cluster.go +++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go @@ -89,6 +89,8 @@ func resourceArmHDInsightRServerCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -153,7 +155,8 @@ func resourceArmHDInsightRServerClusterCreate(d *schema.ResourceData, meta inter gateway := expandHDInsightsRServerConfigurations(gatewayRaw, rStudio) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_spark_cluster.go b/azurerm/resource_arm_hdinsight_spark_cluster.go index 47ff63ad614a..8056b4fb2869 100644 --- 
a/azurerm/resource_arm_hdinsight_spark_cluster.go +++ b/azurerm/resource_arm_hdinsight_spark_cluster.go @@ -87,6 +87,8 @@ func resourceArmHDInsightSparkCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -136,7 +138,8 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_storm_cluster.go b/azurerm/resource_arm_hdinsight_storm_cluster.go index df2e422dbc82..f9604558a473 100644 --- a/azurerm/resource_arm_hdinsight_storm_cluster.go +++ b/azurerm/resource_arm_hdinsight_storm_cluster.go @@ -88,6 +88,8 @@ func resourceArmHDInsightStormCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), + "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), + "roles": { Type: schema.TypeList, Required: true, @@ -137,7 +139,8 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw) + storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil 
{ return fmt.Errorf("Error expanding `storage_account`: %s", err) } From fed994056b28ed2d2a3960fd6217bb4244cabbf9 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Fri, 8 Nov 2019 15:21:40 -0500 Subject: [PATCH 3/7] More progress on tests --- ...ource_arm_hdinsight_hadoop_cluster_test.go | 35 ++--- ...source_arm_hdinsight_hbase_cluster_test.go | 129 ++++++++++++++++++ ...dinsight_interactive_query_cluster_test.go | 129 ++++++++++++++++++ 3 files changed, 273 insertions(+), 20 deletions(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 8a18fac65404..216d5302616e 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -619,7 +619,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { } func testAccAzureRMHDInsightHadoopCluster_gen2storage(rInt int, rString string, location string) string { - template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) + template := testAccAzureRMHDInsightHadoopCluster_gen2template(rInt, rString, location) return fmt.Sprintf(` %s @@ -692,6 +692,16 @@ resource "azurerm_storage_container" "test" { container_access_type = "private" } +`, rInt, location, rString) +} + +func testAccAzureRMHDInsightHadoopCluster_gen2template(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + resource "azurerm_storage_account" "gen2test" { name = "accgen2test%s" resource_group_name = azurerm_resource_group.test.name @@ -718,27 +728,12 @@ resource "azurerm_user_assigned_identity" "test" { data "azurerm_subscription" "primary" {} -resource "azurerm_role_definition" "storage-owner" { - // https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#storage-blob-data-owner - name = "storage-owner-role" - scope = 
"${data.azurerm_subscription.primary.id}" - permissions { - actions = ["*"] - data_actions = ["*"] - not_actions = [] - } - assignable_scopes = [ - "${data.azurerm_subscription.primary.id}", - ] -} resource "azurerm_role_assignment" "test" { - name = "hdinsight-test-storage" - scope = "${data.azurerm_subscription.primary.id}" - // https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#storage-blob-data-owner - role_definition_id = "${azurerm_role_definition.storage-owner.id}" - principal_id = "${azurerm_user_assigned_identity.test.principal_id}" + scope = "${data.azurerm_subscription.primary.id}" + role_definition_name = "Storage Blob Data Owner" + principal_id = "${azurerm_user_assigned_identity.test.principal_id}" } -`, rInt, location, rString, rString) +`, rInt, location, rString) } diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster_test.go b/azurerm/resource_arm_hdinsight_hbase_cluster_test.go index 93bc42b0d06c..a48fa6dbfd28 100644 --- a/azurerm/resource_arm_hdinsight_hbase_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hbase_cluster_test.go @@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightHBaseCluster_basic(t *testing.T) { }) } +func TestAccAzureRMHDInsightHBaseCluster_gen2basic(t *testing.T) { + resourceName := "azurerm_hdinsight_hbase_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hbase_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_gen2basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func TestAccAzureRMHDInsightHBaseCluster_requiresImport(t *testing.T) { if !features.ShouldResourcesBeImported() { t.Skip("Skipping since resources aren't required to be imported") @@ -302,6 +339,59 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightHBaseCluster_gen2basic(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightHBaseCluster_gen2template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_hbase_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + hbase = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } +} +`, template, rInt) +} + func testAccAzureRMHDInsightHBaseCluster_requiresImport(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHBaseCluster_basic(rInt, rString, location) return fmt.Sprintf(` @@ -604,3 +694,42 @@ resource "azurerm_storage_container" "test" { } `, rInt, location, rString) } + +func testAccAzureRMHDInsightHBaseCluster_gen2template(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "gen2test" { + name = "accgen2test%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "StorageV2" + account_tier = "Standard" + account_replication_type = "LRS" + is_hns_enabled = true +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" { + name = "acctest" + storage_account_id = azurerm_storage_account.gen2test.id +} + +resource "azurerm_user_assigned_identity" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + name = "test-identity" +} + +data "azurerm_subscription" "primary" {} + +resource "azurerm_role_assignment" "test" { + scope = "${data.azurerm_subscription.primary.id}" + role_definition_name = "Storage Blob Data Owner" + principal_id = "${azurerm_user_assigned_identity.test.principal_id}" +} +`, rInt, location, rString) +} diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go index 2d0d1b3bd80d..6070a1429e6a 100644 --- a/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go @@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightInteractiveQueryCluster_basic(t *testing.T) { }) } +func 
TestAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(t *testing.T) { + resourceName := "azurerm_hdinsight_interactive_query_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_interactive_query_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func TestAccAzureRMHDInsightInteractiveQueryCluster_requiresImport(t *testing.T) { if !features.ShouldResourcesBeImported() { t.Skip("Skipping since resources aren't required to be imported") @@ -302,6 +339,59 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightInteractiveQueryCluster_gen2template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_interactive_query_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = 
"Standard" + + component_version { + interactive_hive = "2.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D14_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_A4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } +} +`, template, rInt) +} + func testAccAzureRMHDInsightInteractiveQueryCluster_requiresImport(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightInteractiveQueryCluster_basic(rInt, rString, location) return fmt.Sprintf(` @@ -604,3 +694,42 @@ resource "azurerm_storage_container" "test" { } `, rInt, location, rString) } + +func testAccAzureRMHDInsightInteractiveQueryCluster_gen2template(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "gen2test" { + name = "accgen2test%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "StorageV2" + account_tier = "Standard" + account_replication_type = "LRS" + is_hns_enabled = true +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" { + name = "acctest" + storage_account_id = azurerm_storage_account.gen2test.id +} + +resource "azurerm_user_assigned_identity" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + location = 
"${azurerm_resource_group.test.location}" + + name = "test-identity" +} + +data "azurerm_subscription" "primary" {} + +resource "azurerm_role_assignment" "test" { + scope = "${data.azurerm_subscription.primary.id}" + role_definition_name = "Storage Blob Data Owner" + principal_id = "${azurerm_user_assigned_identity.test.principal_id}" +} +`, rInt, location, rString) +} From 9b5167c66874ac7d42fe8f99f16e736ad03701ef Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Tue, 12 Nov 2019 12:46:30 -0500 Subject: [PATCH 4/7] Update more tests --- azurerm/helpers/azure/hdinsight.go | 2 + ...ource_arm_hdinsight_hadoop_cluster_test.go | 2 + ...source_arm_hdinsight_hbase_cluster_test.go | 2 + ...dinsight_interactive_query_cluster_test.go | 2 + ...source_arm_hdinsight_kafka_cluster_test.go | 132 ++++++++++++++++++ ...ource_arm_hdinsight_ml_services_cluster.go | 2 - .../resource_arm_hdinsight_rserver_cluster.go | 2 - ...source_arm_hdinsight_spark_cluster_test.go | 131 +++++++++++++++++ 8 files changed, 271 insertions(+), 4 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index d046d42ed7a3..c76f692bf3ff 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -183,6 +183,8 @@ func SchemaHDInsightsGen2StorageAccounts() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, Optional: true, + // HDInsight doesn't seem to allow adding more than one gen2 cluster right now. 
+ MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "storage_resource_id": { diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 1960577df344..b2f026f731a6 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -808,6 +808,8 @@ func testAccAzureRMHDInsightHadoopCluster_gen2storage(rInt int, rString string, return fmt.Sprintf(` %s resource "azurerm_hdinsight_hadoop_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + name = "acctesthdi-%d" resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster_test.go b/azurerm/resource_arm_hdinsight_hbase_cluster_test.go index a48fa6dbfd28..e528c3bdb274 100644 --- a/azurerm/resource_arm_hdinsight_hbase_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hbase_cluster_test.go @@ -703,6 +703,8 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_storage_account" "gen2test" { + depends_on = [azurerm_role_assignment.test] + name = "accgen2test%s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go index 6070a1429e6a..54841b63434c 100644 --- a/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go @@ -345,6 +345,8 @@ func testAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(rInt int, rString %s resource "azurerm_hdinsight_interactive_query_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + name = "acctesthdi-%d" resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" diff --git 
a/azurerm/resource_arm_hdinsight_kafka_cluster_test.go b/azurerm/resource_arm_hdinsight_kafka_cluster_test.go index 866efb9ccc6b..56af36c49311 100644 --- a/azurerm/resource_arm_hdinsight_kafka_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_kafka_cluster_test.go @@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightKafkaCluster_basic(t *testing.T) { }) } +func TestAccAzureRMHDInsightKafkaCluster_gen2storage(t *testing.T) { + resourceName := "azurerm_hdinsight_kafka_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_kafka_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightKafkaCluster_gen2storage(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func TestAccAzureRMHDInsightKafkaCluster_requiresImport(t *testing.T) { if !features.ShouldResourcesBeImported() { t.Skip("Skipping since resources aren't required to be imported") @@ -303,6 +340,62 @@ resource "azurerm_hdinsight_kafka_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightKafkaCluster_gen2storage(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightKafkaCluster_gen2template(rInt, rString, 
location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_kafka_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + kafka = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 3 + number_of_disks_per_node = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } +} +`, template, rInt) +} + func testAccAzureRMHDInsightKafkaCluster_requiresImport(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightKafkaCluster_basic(rInt, rString, location) return fmt.Sprintf(` @@ -609,3 +702,42 @@ resource "azurerm_storage_container" "test" { } `, rInt, location, rString) } + +func testAccAzureRMHDInsightKafkaCluster_gen2template(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "gen2test" { + name = "accgen2test%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "StorageV2" + account_tier = "Standard" + account_replication_type = "LRS" + is_hns_enabled = true +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" { + name = "acctest" + storage_account_id = azurerm_storage_account.gen2test.id +} + +resource "azurerm_user_assigned_identity" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + name = "test-identity" +} + +data "azurerm_subscription" "primary" {} + +resource "azurerm_role_assignment" "test" { + scope = "${data.azurerm_subscription.primary.id}" + role_definition_name = "Storage Blob Data Owner" + principal_id = "${azurerm_user_assigned_identity.test.principal_id}" +} +`, rInt, location, rString) +} diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go index fd74b947d940..215fb7107f88 100644 --- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go +++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go @@ -89,8 +89,6 @@ func resourceArmHDInsightMLServicesCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), - "storage_account_gen2": 
azure.SchemaHDInsightsGen2StorageAccounts(), - "roles": { Type: schema.TypeList, Required: true, diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go index c2050f8661b4..e2353249f452 100644 --- a/azurerm/resource_arm_hdinsight_rserver_cluster.go +++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go @@ -89,8 +89,6 @@ func resourceArmHDInsightRServerCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), - "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), - "roles": { Type: schema.TypeList, Required: true, diff --git a/azurerm/resource_arm_hdinsight_spark_cluster_test.go b/azurerm/resource_arm_hdinsight_spark_cluster_test.go index e8d74dfe6d93..aad2892d06b8 100644 --- a/azurerm/resource_arm_hdinsight_spark_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_spark_cluster_test.go @@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightSparkCluster_basic(t *testing.T) { }) } +func TestAccAzureRMHDInsightSparkCluster_gen2basic(t *testing.T) { + resourceName := "azurerm_hdinsight_spark_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_spark_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightSparkCluster_gen2basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + 
"roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func TestAccAzureRMHDInsightSparkCluster_requiresImport(t *testing.T) { if !features.ShouldResourcesBeImported() { t.Skip("Skipping since resources aren't required to be imported") @@ -302,6 +339,61 @@ resource "azurerm_hdinsight_spark_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightSparkCluster_gen2basic(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightSparkCluster_gen2template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_spark_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + spark = "2.3" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_A4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_A4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 3 + } + + zookeeper_node { + vm_size = "Medium" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } +} +`, template, rInt) +} + func testAccAzureRMHDInsightSparkCluster_requiresImport(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightSparkCluster_basic(rInt, rString, location) return fmt.Sprintf(` @@ -604,3 +696,42 @@ resource "azurerm_storage_container" "test" { } `, rInt, location, rString) } + +func testAccAzureRMHDInsightSparkCluster_gen2template(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "gen2test" { + name = "accgen2test%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "StorageV2" + account_tier = "Standard" + account_replication_type = "LRS" + is_hns_enabled = true +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" { + name = "acctest" + storage_account_id = azurerm_storage_account.gen2test.id +} + +resource "azurerm_user_assigned_identity" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + name = "test-identity" +} + +data "azurerm_subscription" "primary" {} + +resource "azurerm_role_assignment" "test" { + scope = "${data.azurerm_subscription.primary.id}" + role_definition_name = "Storage Blob Data Owner" + principal_id = "${azurerm_user_assigned_identity.test.principal_id}" +} +`, rInt, location, rString) +} From aab1ead21d317f48011ead327f0a43d7b2ec203c Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Thu, 14 Nov 2019 09:02:30 -0500 Subject: [PATCH 5/7] Update documentation --- .../r/hdinsight_hadoop_cluster.html.markdown | 24 ++++++++++++++----- .../r/hdinsight_hbase_cluster.html.markdown | 22 +++++++++++++++-- ...ht_interactive_query_cluster.html.markdown | 22 +++++++++++++++-- .../r/hdinsight_kafka_cluster.html.markdown | 22 +++++++++++++++-- 
.../r/hdinsight_spark_cluster.html.markdown   | 22 +++++++++++++++-- 5 files changed, 98 insertions(+), 14 deletions(-) diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown index 75c49b8e8de7..602b3b436fbe 100644 --- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown +++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown @@ -100,6 +100,8 @@ The following arguments are supported: * `storage_account` - (Required) One or more `storage_account` block as defined below. +* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below. + * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Hadoop Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created. --- @@ -162,19 +164,29 @@ A `storage_account` block supports the following: * `is_default` - (Required) Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. --> **NOTE:** One of the `storage_account` blocks must be marked as the default. +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. -* `storage_account_key` - (Required for Blob storage) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. +* `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. -* `storage_container_id` - (Required for Blob storage) The ID of the Storage Container. Changing this forces a new resource to be created. +* `storage_container_id` - (Required) The ID of the Storage Container. Changing this forces a new resource to be created. -> **NOTE:** This can be obtained from the `id` of the `azurerm_storage_container` resource. 
-* `storage_resource_id` - (Required for Gen2 storage) The resource ID of the Storage Account. Changing this forces a new resource to be created. +--- + +A `storage_account_gen2` block supports the following: + +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. -* `filesystem_id` - (Required for Gen2 storage) The ID of the Gen2 filesystem. See `azurerm_storage_data_lake_gen2_filesystem`. Changing this forces a new resource to be created. +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. -* `managed_identity_resource_id` - (Required for Gen2 storage) The ID managed identity for access to the Gen2 filesystem. Changing this forces a new resource to be created. +* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created. + +* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + +* `managed_identity_resource_id` - (Required) The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + +-> **NOTE:** This can be obtained from the `id` of the `azurerm_user_assigned_identity` resource. --- diff --git a/website/docs/r/hdinsight_hbase_cluster.html.markdown b/website/docs/r/hdinsight_hbase_cluster.html.markdown index 21fc6786136f..1cbf3fbcc96b 100644 --- a/website/docs/r/hdinsight_hbase_cluster.html.markdown +++ b/website/docs/r/hdinsight_hbase_cluster.html.markdown @@ -100,6 +100,8 @@ The following arguments are supported: * `storage_account` - (Required) One or more `storage_account` block as defined below. +* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below. + * `tier` - (Required) Specifies the Tier which should be used for this HDInsight HBase Cluster. Possible values are `Standard` or `Premium`. 
Changing this forces a new resource to be created. --- @@ -158,9 +160,9 @@ A `roles` block supports the following: A `storage_account` block supports the following: -* `is_default` - (Required) Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created. +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created. --> **NOTE:** One of the `storage_account` blocks must be marked as the default. +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. @@ -170,6 +172,22 @@ A `storage_account` block supports the following: --- +A `storage_account_gen2` block supports the following: + +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created. + +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. + +* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created. + +* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + +* `managed_identity_resource_id` - (Required) The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + +-> **NOTE:** The `filesystem_id` can be obtained from the `id` of the `azurerm_storage_data_lake_gen2_filesystem` resource. + +--- + A `worker_node` block supports the following: * `username` - (Required) The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created. 
diff --git a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown index d074f412add7..50ab86813849 100644 --- a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown +++ b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown @@ -100,6 +100,8 @@ The following arguments are supported: * `storage_account` - (Required) One or more `storage_account` block as defined below. +* `storage_account_gen2` - (Required) A `storage_account_gen2` block as defined below. + * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Interactive Query Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created. --- @@ -160,9 +162,9 @@ A `roles` block supports the following: A `storage_account` block supports the following: -* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Interactive Query Cluster? Changing this forces a new resource to be created. +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Interactive Query Cluster? Changing this forces a new resource to be created. --> **NOTE:** One of the `storage_account` blocks must be marked as the default. +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. @@ -172,6 +174,22 @@ A `storage_account` block supports the following: --- +A `storage_account_gen2` block supports the following: + +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Interactive Query Cluster? Changing this forces a new resource to be created. + +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. 
+ +* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created. + +* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + +* `managed_identity_resource_id` - (Required) The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + +-> **NOTE:** The `filesystem_id` can be obtained from the `id` of the `azurerm_storage_data_lake_gen2_filesystem` resource. + +--- + A `worker_node` block supports the following: * `username` - (Required) The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created. diff --git a/website/docs/r/hdinsight_kafka_cluster.html.markdown b/website/docs/r/hdinsight_kafka_cluster.html.markdown index 8a02deef4165..366259b59f00 100644 --- a/website/docs/r/hdinsight_kafka_cluster.html.markdown +++ b/website/docs/r/hdinsight_kafka_cluster.html.markdown @@ -101,6 +101,8 @@ The following arguments are supported: * `storage_account` - (Required) One or more `storage_account` block as defined below. +* `storage_account_gen2` - (Required) A `storage_account_gen2` block as defined below. + * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Kafka Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created. --- @@ -159,9 +161,9 @@ A `roles` block supports the following: A `storage_account` block supports the following: -* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Kafka Cluster? Changing this forces a new resource to be created. +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Kafka Cluster? Changing this forces a new resource to be created. --> **NOTE:** One of the `storage_account` blocks must be marked as the default. 
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. @@ -171,6 +173,22 @@ A `storage_account` block supports the following: --- +A `storage_account_gen2` block supports the following: + +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Kafka Cluster? Changing this forces a new resource to be created. + +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. + +* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created. + +* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + +* `managed_identity_resource_id` - (Required) The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + +-> **NOTE:** The `filesystem_id` can be obtained from the `id` of the `azurerm_storage_data_lake_gen2_filesystem` resource. + +--- + A `worker_node` block supports the following: * `number_of_disks_per_node` - (Required) The number of Data Disks which should be assigned to each Worker Node, which can be between 1 and 8. Changing this forces a new resource to be created. diff --git a/website/docs/r/hdinsight_spark_cluster.html.markdown b/website/docs/r/hdinsight_spark_cluster.html.markdown index d7291e78f919..8e7a2a8a9e85 100644 --- a/website/docs/r/hdinsight_spark_cluster.html.markdown +++ b/website/docs/r/hdinsight_spark_cluster.html.markdown @@ -100,6 +100,8 @@ The following arguments are supported: * `storage_account` - (Required) One or more `storage_account` block as defined below. +* `storage_account_gen2` - (Required) A `storage_account_gen2` block as defined below. 
+ + * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created. --- @@ -158,9 +160,9 @@ A `roles` block supports the following: A `storage_account` block supports the following: -* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created. +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created. --> **NOTE:** One of the `storage_account` blocks must be marked as the default. +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. @@ -170,6 +172,22 @@ A `storage_account` block supports the following: --- +A `storage_account_gen2` block supports the following: + +* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created. + +-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default. + +* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created. + +* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + +* `managed_identity_resource_id` - (Required) The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + +-> **NOTE:** The `filesystem_id` can be obtained from the `id` of the `azurerm_storage_data_lake_gen2_filesystem` resource. + +--- + A `worker_node` block supports the following: * `username` - (Required) The Username of the local administrator for the Worker Nodes. 
Changing this forces a new resource to be created. From 20e432c13b474f38b38220f860bbf9c25622e4dd Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Fri, 15 Nov 2019 13:51:15 -0500 Subject: [PATCH 6/7] Addressing review comments, including validation improvements and a bugfix --- azurerm/helpers/azure/hdinsight.go | 4 ++-- azurerm/resource_arm_hdinsight_ml_services_cluster.go | 3 +-- azurerm/resource_arm_hdinsight_rserver_cluster.go | 3 +-- azurerm/resource_arm_hdinsight_storm_cluster.go | 5 +---- 4 files changed, 5 insertions(+), 10 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index c76f692bf3ff..9d85334c167b 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -191,7 +191,7 @@ func SchemaHDInsightsGen2StorageAccounts() *schema.Schema { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validate.NoEmptyStrings, + ValidateFunc: ValidateResourceID, }, "filesystem_id": { Type: schema.TypeString, @@ -203,7 +203,7 @@ func SchemaHDInsightsGen2StorageAccounts() *schema.Schema { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validate.NoEmptyStrings, + ValidateFunc: ValidateResourceID, }, "is_default": { Type: schema.TypeBool, diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go index 215fb7107f88..ca469e26b7a9 100644 --- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go +++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go @@ -153,8 +153,7 @@ func resourceArmHDInsightMLServicesClusterCreate(d *schema.ResourceData, meta in gateway := expandHDInsightsMLServicesConfigurations(gatewayRaw, rStudio) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) + 
storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go index e2353249f452..4a7db4ffbcd4 100644 --- a/azurerm/resource_arm_hdinsight_rserver_cluster.go +++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go @@ -153,8 +153,7 @@ func resourceArmHDInsightRServerClusterCreate(d *schema.ResourceData, meta inter gateway := expandHDInsightsRServerConfigurations(gatewayRaw, rStudio) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) + storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } diff --git a/azurerm/resource_arm_hdinsight_storm_cluster.go b/azurerm/resource_arm_hdinsight_storm_cluster.go index f9604558a473..bc9123aa188b 100644 --- a/azurerm/resource_arm_hdinsight_storm_cluster.go +++ b/azurerm/resource_arm_hdinsight_storm_cluster.go @@ -88,8 +88,6 @@ func resourceArmHDInsightStormCluster() *schema.Resource { "storage_account": azure.SchemaHDInsightsStorageAccounts(), - "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), - "roles": { Type: schema.TypeList, Required: true, @@ -139,8 +137,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) storageAccountsRaw := d.Get("storage_account").([]interface{}) - storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) - storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) + 
storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil) if err != nil { return fmt.Errorf("Error expanding `storage_account`: %s", err) } From 33322b7c8d2ced802edcccd72a460610b0d5fe9b Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Fri, 15 Nov 2019 15:41:40 -0500 Subject: [PATCH 7/7] Add test for clusters that have blob and gen2 storage accounts attached --- ...ource_arm_hdinsight_hadoop_cluster_test.go | 107 +++++++++++++++++- 1 file changed, 106 insertions(+), 1 deletion(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index b2f026f731a6..add6454f99ff 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -411,6 +411,43 @@ func TestAccAzureRMHDInsightHadoopCluster_gen2storage(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_gen2AndBlobStorage(t *testing.T) { + resourceName := "azurerm_hdinsight_hadoop_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_gen2AndBlobStorage(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + 
"roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` @@ -809,7 +846,7 @@ func testAccAzureRMHDInsightHadoopCluster_gen2storage(rInt int, rString string, %s resource "azurerm_hdinsight_hadoop_cluster" "test" { depends_on = [azurerm_role_assignment.test] - + name = "acctesthdi-%d" resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" @@ -851,6 +888,74 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightHadoopCluster_gen2AndBlobStorage(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightHadoopCluster_gen2template(rInt, rString, location) + + return fmt.Sprintf(` +%s +resource "azurerm_storage_account" "test" { + name = "acctestsa%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_container" "test" { + name = "acctest" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" +} + +resource "azurerm_hdinsight_hadoop_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + component_version { + hadoop = "2.7" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" 
+ } + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + storage_account { + storage_container_id = "${azurerm_storage_container.test.id}" + storage_account_key = "${azurerm_storage_account.test.primary_access_key}" + is_default = false + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } +} +`, template, rString, rInt) +} + func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" {