Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

azurerm_hdinsight_*_cluster - Support for autoscale #8104 #11547

Merged
merged 32 commits into from
May 3, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
dbb754f
Capacity autoscale support
kosinsky Jul 27, 2020
4b51802
Recurrence based autoscale
kosinsky Jul 28, 2020
e43803b
dependencies: updating to v44.2.0 of github.com/Azure/azure-sdk-for-go
kosinsky Jul 28, 2020
2dccb67
Merge branch 'update-sdk-to-v44.2.0' into autoscale
kosinsky Jul 28, 2020
33cd8ab
Support for update for Autoscale
kosinsky Aug 6, 2020
ee6df81
disable_correlation_request_id=true to avoid broken clusters
kosinsky Aug 10, 2020
5b8ce1b
Autoscale for Spark clusters
kosinsky Aug 11, 2020
6f12a0a
Autoscale for HBase clusters
kosinsky Aug 11, 2020
fa8d2ca
Autoscale for Interactive Query cluster
kosinsky Aug 12, 2020
9a3eab9
Documentation changes
kosinsky Aug 12, 2020
2f6c5f3
target_instance_count in schedule
kosinsky Aug 12, 2020
78495ef
Formatting
kosinsky Aug 12, 2020
fa6ae35
Merge remote-tracking branch 'upstream/master' into autoscale
kosinsky Aug 12, 2020
43aa254
Merge remote-tracking branch 'upstream/master' into autoscale
kosinsky Aug 25, 2020
0b8a67a
Post merge fix
kosinsky Aug 25, 2020
64a450f
Tune comment about HDI limitation
kosinsky Aug 25, 2020
5dbedfe
Refactor FlattenHDInsightAutoscaleRecurrenceDefinition
kosinsky Sep 22, 2020
f04ff1c
Move HDI client workaround to the NewClient method
kosinsky Sep 23, 2020
2e98e39
Update website/docs/r/hdinsight_interactive_query_cluster.html.markdown
kosinsky Sep 23, 2020
47c9e7b
Update website/docs/r/hdinsight_spark_cluster.html.markdown
kosinsky Sep 23, 2020
a183c1e
Update website/docs/r/hdinsight_spark_cluster.html.markdown
kosinsky Sep 23, 2020
5b26af7
Sort instance counts in the doc
kosinsky Sep 23, 2020
6172e49
Update website/docs/r/hdinsight_hadoop_cluster.html.markdown
kosinsky Sep 23, 2020
8506785
Update website/docs/r/hdinsight_hadoop_cluster.html.markdown
kosinsky Sep 23, 2020
6fe972d
Update website/docs/r/hdinsight_hadoop_cluster.html.markdown
kosinsky Sep 23, 2020
8a1be26
Update website/docs/r/hdinsight_hbase_cluster.html.markdown
kosinsky Sep 23, 2020
9eba86e
Update website/docs/r/hdinsight_interactive_query_cluster.html.markdown
kosinsky Sep 23, 2020
ea53ccd
recurrence is required for HBase
kosinsky Sep 23, 2020
f369fd8
Merge branch 'autoscale' of github.com:kosinsky/terraform-provider-az…
kosinsky Sep 23, 2020
5a61b42
Sorted attributes alphabetically
kosinsky Sep 23, 2020
a5656b9
Merge with master
mbfrahry Apr 30, 2021
833fc8f
Add nil checks
mbfrahry May 3, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 12 additions & 8 deletions azurerm/internal/services/hdinsight/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,21 @@ type Client struct {
}

func NewClient(o *common.ClientOptions) *Client {
ApplicationsClient := hdinsight.NewApplicationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&ApplicationsClient.Client, o.ResourceManagerAuthorizer)
// due to a bug in the HDInsight API we can't reuse client with the same x-ms-correlation-request-id for multiple updates
opts := *o
opts.DisableCorrelationRequestID = true

ClustersClient := hdinsight.NewClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&ClustersClient.Client, o.ResourceManagerAuthorizer)
ApplicationsClient := hdinsight.NewApplicationsClientWithBaseURI(opts.ResourceManagerEndpoint, opts.SubscriptionId)
opts.ConfigureClient(&ApplicationsClient.Client, opts.ResourceManagerAuthorizer)

ConfigurationsClient := hdinsight.NewConfigurationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&ConfigurationsClient.Client, o.ResourceManagerAuthorizer)
ClustersClient := hdinsight.NewClustersClientWithBaseURI(opts.ResourceManagerEndpoint, opts.SubscriptionId)
opts.ConfigureClient(&ClustersClient.Client, opts.ResourceManagerAuthorizer)

ExtensionsClient := hdinsight.NewExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&ExtensionsClient.Client, o.ResourceManagerAuthorizer)
ConfigurationsClient := hdinsight.NewConfigurationsClientWithBaseURI(opts.ResourceManagerEndpoint, opts.SubscriptionId)
opts.ConfigureClient(&ConfigurationsClient.Client, opts.ResourceManagerAuthorizer)

ExtensionsClient := hdinsight.NewExtensionsClientWithBaseURI(opts.ResourceManagerEndpoint, opts.SubscriptionId)
opts.ConfigureClient(&ExtensionsClient.Client, opts.ResourceManagerAuthorizer)

return &Client{
ApplicationsClient: &ApplicationsClient,
Expand Down
37 changes: 28 additions & 9 deletions azurerm/internal/services/hdinsight/common_hdinsight.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,18 +48,37 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema
roles := rolesRaw[0].(map[string]interface{})
workerNodes := roles["worker_node"].([]interface{})
workerNode := workerNodes[0].(map[string]interface{})
targetInstanceCount := workerNode["target_instance_count"].(int)
params := hdinsight.ClusterResizeParameters{
TargetInstanceCount: utils.Int32(int32(targetInstanceCount)),
}
if d.HasChange("roles.0.worker_node.0.target_instance_count") {
targetInstanceCount := workerNode["target_instance_count"].(int)
params := hdinsight.ClusterResizeParameters{
TargetInstanceCount: utils.Int32(int32(targetInstanceCount)),
}

future, err := client.Resize(ctx, resourceGroup, name, params)
if err != nil {
return fmt.Errorf("Error resizing the HDInsight %q Cluster %q (Resource Group %q): %+v", clusterKind, name, resourceGroup, err)
future, err := client.Resize(ctx, resourceGroup, name, params)
if err != nil {
return fmt.Errorf("Error resizing the HDInsight %q Cluster %q (Resource Group %q): %+v", clusterKind, name, resourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for the HDInsight %q Cluster %q (Resource Group %q) to finish resizing: %+v", clusterKind, name, resourceGroup, err)
}
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for the HDInsight %q Cluster %q (Resource Group %q) to finish resizing: %+v", clusterKind, name, resourceGroup, err)
if d.HasChange("roles.0.worker_node.0.autoscale") {
autoscale := ExpandHDInsightNodeAutoScaleDefinition(workerNode["autoscale"].([]interface{}))
params := hdinsight.AutoscaleConfigurationUpdateParameter{
Autoscale: autoscale,
}

future, err := client.UpdateAutoScaleConfiguration(ctx, resourceGroup, name, params)

if err != nil {
return fmt.Errorf("Error changing autoscale of the HDInsight %q Cluster %q (Resource Group %q): %+v", clusterKind, name, resourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for changing autoscale of the HDInsight %q Cluster %q (Resource Group %q) to finish resizing: %+v", clusterKind, name, resourceGroup, err)
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ var hdInsightHadoopClusterWorkerNodeDefinition = HDInsightNodeDefinition{
CanSpecifyInstanceCount: true,
MinInstanceCount: 1,
CanSpecifyDisks: false,
CanAutoScaleByCapacity: true,
CanAutoScaleOnSchedule: true,
}

var hdInsightHadoopClusterZookeeperNodeDefinition = HDInsightNodeDefinition{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -541,6 +541,58 @@ func TestAccHDInsightHadoopCluster_updateMonitor(t *testing.T) {
})
}

// TestAccAzureRMHDInsightHadoopCluster_autoscale verifies that a Hadoop
// cluster can be created with schedule-based autoscale, updated in place to
// capacity-based autoscale, and finally updated back to a configuration with
// no autoscale block at all, importing cleanly after every step.
//
// NOTE(review): sibling tests in this file use the shorter
// "TestAccHDInsightHadoopCluster_" prefix; this legacy "TestAccAzureRM"
// prefix is inconsistent — consider renaming in a follow-up.
func TestAccAzureRMHDInsightHadoopCluster_autoscale(t *testing.T) {
	data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test")
	r := HDInsightHadoopClusterResource{}

	// Attributes that do not round-trip through the HDInsight API and must be
	// ignored when verifying the import after each step. Declared once rather
	// than repeating the list for every ImportStep call.
	ignoredOnImport := []string{
		"roles.0.head_node.0.password",
		"roles.0.head_node.0.vm_size",
		"roles.0.worker_node.0.password",
		"roles.0.worker_node.0.vm_size",
		"roles.0.zookeeper_node.0.password",
		"roles.0.zookeeper_node.0.vm_size",
		"storage_account",
	}

	data.ResourceTest(t, r, []resource.TestStep{
		{
			Config: r.autoscale_schedule(data),
			Check: resource.ComposeTestCheckFunc(
				check.That(data.ResourceName).ExistsInAzure(r),
				check.That(data.ResourceName).Key("https_endpoint").Exists(),
				check.That(data.ResourceName).Key("ssh_endpoint").Exists(),
			),
		},
		data.ImportStep(ignoredOnImport...),
		{
			Config: r.autoscale_capacity(data),
			Check: resource.ComposeTestCheckFunc(
				check.That(data.ResourceName).ExistsInAzure(r),
				check.That(data.ResourceName).Key("https_endpoint").Exists(),
				check.That(data.ResourceName).Key("ssh_endpoint").Exists(),
			),
		},
		data.ImportStep(ignoredOnImport...),
		{
			Config: r.basic(data),
			Check: resource.ComposeTestCheckFunc(
				check.That(data.ResourceName).ExistsInAzure(r),
				check.That(data.ResourceName).Key("https_endpoint").Exists(),
				check.That(data.ResourceName).Key("ssh_endpoint").Exists(),
			),
		},
		data.ImportStep(ignoredOnImport...),
	})
}

func (t HDInsightHadoopClusterResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) {
id, err := parse.ClusterID(state.ID)
if err != nil {
Expand Down Expand Up @@ -1862,3 +1914,110 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" {
}
`, r.template(data), data.RandomInteger)
}

// autoscale_capacity returns a Hadoop cluster configuration whose worker
// nodes use load-based (capacity) autoscale between 2 and 3 instances.
func (r HDInsightHadoopClusterResource) autoscale_capacity(data acceptance.TestData) string {
	template := r.template(data)
	return fmt.Sprintf(`
%s
resource "azurerm_hdinsight_hadoop_cluster" "test" {
  name                = "acctesthdi-%d"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  cluster_version     = "4.0"
  tier                = "Standard"
  component_version {
    hadoop = "3.1"
  }
  gateway {
    username = "acctestusrgw"
    password = "TerrAform123!"
  }
  storage_account {
    storage_container_id = azurerm_storage_container.test.id
    storage_account_key  = azurerm_storage_account.test.primary_access_key
    is_default           = true
  }
  roles {
    head_node {
      vm_size  = "Standard_D3_v2"
      username = "acctestusrvm"
      password = "AccTestvdSC4daf986!"
    }
    worker_node {
      vm_size               = "Standard_D4_V2"
      username              = "acctestusrvm"
      password              = "AccTestvdSC4daf986!"
      target_instance_count = 2
      autoscale {
        capacity {
          min_instance_count = 2
          max_instance_count = 3
        }
      }
    }
    zookeeper_node {
      vm_size  = "Standard_D3_v2"
      username = "acctestusrvm"
      password = "AccTestvdSC4daf986!"
    }
  }
}
`, template, data.RandomInteger)
}

// autoscale_schedule returns a Hadoop cluster configuration whose worker
// nodes use recurrence (schedule) based autoscale: 5 instances on Monday
// mornings and 3 on weekend mornings, both in Pacific Standard Time.
func (r HDInsightHadoopClusterResource) autoscale_schedule(data acceptance.TestData) string {
	template := r.template(data)
	return fmt.Sprintf(`
%s
resource "azurerm_hdinsight_hadoop_cluster" "test" {
  name                = "acctesthdi-%d"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  cluster_version     = "4.0"
  tier                = "Standard"
  component_version {
    hadoop = "3.1"
  }
  gateway {
    username = "acctestusrgw"
    password = "TerrAform123!"
  }
  storage_account {
    storage_container_id = azurerm_storage_container.test.id
    storage_account_key  = azurerm_storage_account.test.primary_access_key
    is_default           = true
  }
  roles {
    head_node {
      vm_size  = "Standard_D3_v2"
      username = "acctestusrvm"
      password = "AccTestvdSC4daf986!"
    }
    worker_node {
      vm_size               = "Standard_D4_V2"
      username              = "acctestusrvm"
      password              = "AccTestvdSC4daf986!"
      target_instance_count = 2
      autoscale {
        recurrence {
          timezone = "Pacific Standard Time"
          schedule {
            days                  = ["Monday"]
            time                  = "10:00"
            target_instance_count = 5
          }
          schedule {
            days                  = ["Saturday", "Sunday"]
            time                  = "10:00"
            target_instance_count = 3
          }
        }
      }
    }
    zookeeper_node {
      vm_size  = "Standard_D3_v2"
      username = "acctestusrvm"
      password = "AccTestvdSC4daf986!"
    }
  }
}
`, template, data.RandomInteger)
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ var hdInsightHBaseClusterWorkerNodeDefinition = HDInsightNodeDefinition{
CanSpecifyInstanceCount: true,
MinInstanceCount: 1,
CanSpecifyDisks: false,
CanAutoScaleOnSchedule: true,
}

var hdInsightHBaseClusterZookeeperNodeDefinition = HDInsightNodeDefinition{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -382,6 +382,43 @@ func TestAccHDInsightHBaseCluster_updateMonitor(t *testing.T) {
})
}

// TestAccAzureRMHDInsightHBaseCluster_autoscale verifies that an HBase
// cluster can be created with schedule-based autoscale and then updated back
// to a configuration without autoscale, importing cleanly after both steps.
// (HBase supports only recurrence-based autoscale, so no capacity step here.)
//
// NOTE(review): sibling tests in this file use the shorter
// "TestAccHDInsightHBaseCluster_" prefix; this legacy "TestAccAzureRM"
// prefix is inconsistent — consider renaming in a follow-up.
func TestAccAzureRMHDInsightHBaseCluster_autoscale(t *testing.T) {
	data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test")
	r := HDInsightHBaseClusterResource{}

	// Attributes that do not round-trip through the HDInsight API and must be
	// ignored when verifying the import after each step. Declared once rather
	// than repeating the list for every ImportStep call.
	ignoredOnImport := []string{
		"roles.0.head_node.0.password",
		"roles.0.head_node.0.vm_size",
		"roles.0.worker_node.0.password",
		"roles.0.worker_node.0.vm_size",
		"roles.0.zookeeper_node.0.password",
		"roles.0.zookeeper_node.0.vm_size",
		"storage_account",
	}

	data.ResourceTest(t, r, []resource.TestStep{
		{
			Config: r.autoscale_schedule(data),
			Check: resource.ComposeTestCheckFunc(
				check.That(data.ResourceName).ExistsInAzure(r),
				check.That(data.ResourceName).Key("https_endpoint").Exists(),
				check.That(data.ResourceName).Key("ssh_endpoint").Exists(),
			),
		},
		data.ImportStep(ignoredOnImport...),
		{
			Config: r.basic(data),
			Check: resource.ComposeTestCheckFunc(
				check.That(data.ResourceName).ExistsInAzure(r),
				check.That(data.ResourceName).Key("https_endpoint").Exists(),
				check.That(data.ResourceName).Key("ssh_endpoint").Exists(),
			),
		},
		data.ImportStep(ignoredOnImport...),
	})
}

func (t HDInsightHBaseClusterResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) {
id, err := parse.ClusterID(state.ID)
if err != nil {
Expand Down Expand Up @@ -1200,3 +1237,69 @@ resource "azurerm_hdinsight_hbase_cluster" "test" {
}
`, r.template(data), data.RandomString, data.RandomInteger, data.RandomInteger)
}

// autoscale_schedule returns an HBase cluster configuration whose worker
// nodes use recurrence (schedule) based autoscale: 5 instances on Monday
// mornings and 3 on weekend mornings, both in Pacific Standard Time.
func (r HDInsightHBaseClusterResource) autoscale_schedule(data acceptance.TestData) string {
	template := r.template(data)
	return fmt.Sprintf(`
%s

resource "azurerm_hdinsight_hbase_cluster" "test" {
  name                = "acctesthdi-%d"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  cluster_version     = "4.0"
  tier                = "Standard"

  component_version {
    hbase = "2.1"
  }

  gateway {
    enabled  = true
    username = "acctestusrgw"
    password = "TerrAform123!"
  }

  storage_account {
    storage_container_id = azurerm_storage_container.test.id
    storage_account_key  = azurerm_storage_account.test.primary_access_key
    is_default           = true
  }

  roles {
    head_node {
      vm_size  = "Standard_D3_V2"
      username = "acctestusrvm"
      password = "AccTestvdSC4daf986!"
    }

    worker_node {
      vm_size               = "Standard_D3_V2"
      username              = "acctestusrvm"
      password              = "AccTestvdSC4daf986!"
      target_instance_count = 2
      autoscale {
        recurrence {
          timezone = "Pacific Standard Time"
          schedule {
            days                  = ["Monday"]
            time                  = "10:00"
            target_instance_count = 5
          }
          schedule {
            days                  = ["Saturday", "Sunday"]
            time                  = "10:00"
            target_instance_count = 3
          }
        }
      }
    }

    zookeeper_node {
      vm_size  = "Standard_D3_V2"
      username = "acctestusrvm"
      password = "AccTestvdSC4daf986!"
    }
  }
}
`, template, data.RandomInteger)
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ var hdInsightInteractiveQueryClusterWorkerNodeDefinition = HDInsightNodeDefiniti
CanSpecifyInstanceCount: true,
MinInstanceCount: 1,
CanSpecifyDisks: false,
CanAutoScaleByCapacity: true,
CanAutoScaleOnSchedule: true,
}

var hdInsightInteractiveQueryClusterZookeeperNodeDefinition = HDInsightNodeDefinition{
Expand Down
Loading