From 94c31b6306adfaf27c24ce8be43e74fc9991656e Mon Sep 17 00:00:00 2001 From: arcturusZhang Date: Fri, 17 Jan 2020 14:28:08 +0800 Subject: [PATCH 1/3] Add health probe back to update to prevent any potential errors from the service --- .../compute/resource_arm_linux_virtual_machine_scale_set.go | 6 ++++-- .../resource_arm_windows_virtual_machine_scale_set.go | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go index 9804acb7dd84..a1112827d644 100644 --- a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go @@ -611,11 +611,13 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i return fmt.Errorf("Error expanding `network_interface`: %+v", err) } - // TODO: setting the health probe on update once https://github.com/Azure/azure-rest-api-specs/pull/7355 has been fixed - //healthProbeId := d.Get("health_probe_id").(string) + healthProbeId := d.Get("health_probe_id").(string) updateProps.VirtualMachineProfile.NetworkProfile = &compute.VirtualMachineScaleSetUpdateNetworkProfile{ NetworkInterfaceConfigurations: networkInterfaces, + HealthProbe: &compute.APIEntityReference{ + ID: utils.String(healthProbeId), + }, } } diff --git a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go index eafc3415459c..ac2d0c8cf3e2 100644 --- a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go @@ -646,11 +646,13 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta return fmt.Errorf("Error expanding `network_interface`: %+v", err) } - // TODO: setting the health probe on update once https://github.com/Azure/azure-rest-api-specs/pull/7355 has been fixed - //healthProbeId := d.Get("health_probe_id").(string) + healthProbeId := d.Get("health_probe_id").(string) updateProps.VirtualMachineProfile.NetworkProfile = &compute.VirtualMachineScaleSetUpdateNetworkProfile{ NetworkInterfaceConfigurations: networkInterfaces, + HealthProbe: &compute.APIEntityReference{ + ID: utils.String(healthProbeId), + }, } } From 4d9e29413f53bba89c58415f1c4e00ccbbdbbf46 Mon Sep 17 00:00:00 2001 From: arcturusZhang Date: Fri, 17 Jan 2020 15:29:45 +0800 Subject: [PATCH 2/3] Escape health probe when health probe is empty --- .../resource_arm_linux_virtual_machine_scale_set.go | 10 ++++++---- .../resource_arm_windows_virtual_machine_scale_set.go | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go index a1112827d644..2a26cd2bef4d 100644 --- a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go @@ -611,13 +611,15 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i return fmt.Errorf("Error expanding `network_interface`: %+v", err) } - healthProbeId := d.Get("health_probe_id").(string) - 
updateProps.VirtualMachineProfile.NetworkProfile = &compute.VirtualMachineScaleSetUpdateNetworkProfile{ NetworkInterfaceConfigurations: networkInterfaces, - HealthProbe: &compute.APIEntityReference{ + } + + healthProbeId := d.Get("health_probe_id").(string) + if healthProbeId != "" { + updateProps.VirtualMachineProfile.NetworkProfile.HealthProbe = &compute.APIEntityReference{ ID: utils.String(healthProbeId), - }, + } } } diff --git a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go index ac2d0c8cf3e2..28340a296133 100644 --- a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go @@ -646,13 +646,15 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta return fmt.Errorf("Error expanding `network_interface`: %+v", err) } - healthProbeId := d.Get("health_probe_id").(string) - updateProps.VirtualMachineProfile.NetworkProfile = &compute.VirtualMachineScaleSetUpdateNetworkProfile{ NetworkInterfaceConfigurations: networkInterfaces, - HealthProbe: &compute.APIEntityReference{ + } + + healthProbeId := d.Get("health_probe_id").(string) + if healthProbeId != "" { + updateProps.VirtualMachineProfile.NetworkProfile.HealthProbe = &compute.APIEntityReference{ ID: utils.String(healthProbeId), - }, + } } } From a04738d9abcdf09e22da1890e25198fe1a4c9ec4 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 10 Feb 2020 15:40:25 +0100 Subject: [PATCH 3/3] r/(linux|windows)_virtual_machine_scale_set: conditionally updating the instances --- .../services/compute/client/client.go | 85 ++++---- ...rce_arm_linux_virtual_machine_scale_set.go | 98 +++------- ...e_arm_windows_virtual_machine_scale_set.go | 97 ++------- ...x_virtual_machine_scale_set_images_test.go | 6 +- ...s_virtual_machine_scale_set_images_test.go | 6 +- .../virtual_machine_scale_set_update.go | 184 ++++++++++++++++++ ...ux_virtual_machine_scale_set.html.markdown | 2 - ...ws_virtual_machine_scale_set.html.markdown | 2 - 8 files changed, 278 insertions(+), 202 deletions(-) create mode 100644 azurerm/internal/services/compute/virtual_machine_scale_set_update.go diff --git a/azurerm/internal/services/compute/client/client.go b/azurerm/internal/services/compute/client/client.go index 80130b9d6e09..99dfe90f137a 100644 --- a/azurerm/internal/services/compute/client/client.go +++ b/azurerm/internal/services/compute/client/client.go @@ -7,26 +7,27 @@ import ( ) type Client struct { - AvailabilitySetsClient *compute.AvailabilitySetsClient - DedicatedHostsClient *compute.DedicatedHostsClient - DedicatedHostGroupsClient *compute.DedicatedHostGroupsClient - DisksClient *compute.DisksClient - DiskEncryptionSetsClient *compute.DiskEncryptionSetsClient - GalleriesClient *compute.GalleriesClient - GalleryImagesClient *compute.GalleryImagesClient - GalleryImageVersionsClient *compute.GalleryImageVersionsClient - ProximityPlacementGroupsClient *compute.ProximityPlacementGroupsClient - MarketplaceAgreementsClient *marketplaceordering.MarketplaceAgreementsClient - ImagesClient *compute.ImagesClient - SnapshotsClient *compute.SnapshotsClient - UsageClient *compute.UsageClient - VMExtensionImageClient *compute.VirtualMachineExtensionImagesClient - VMExtensionClient *compute.VirtualMachineExtensionsClient - VMScaleSetClient *compute.VirtualMachineScaleSetsClient - VMScaleSetExtensionsClient 
*compute.VirtualMachineScaleSetExtensionsClient - VMScaleSetVMsClient *compute.VirtualMachineScaleSetVMsClient - VMClient *compute.VirtualMachinesClient - VMImageClient *compute.VirtualMachineImagesClient + AvailabilitySetsClient *compute.AvailabilitySetsClient + DedicatedHostsClient *compute.DedicatedHostsClient + DedicatedHostGroupsClient *compute.DedicatedHostGroupsClient + DisksClient *compute.DisksClient + DiskEncryptionSetsClient *compute.DiskEncryptionSetsClient + GalleriesClient *compute.GalleriesClient + GalleryImagesClient *compute.GalleryImagesClient + GalleryImageVersionsClient *compute.GalleryImageVersionsClient + ProximityPlacementGroupsClient *compute.ProximityPlacementGroupsClient + MarketplaceAgreementsClient *marketplaceordering.MarketplaceAgreementsClient + ImagesClient *compute.ImagesClient + SnapshotsClient *compute.SnapshotsClient + UsageClient *compute.UsageClient + VMExtensionImageClient *compute.VirtualMachineExtensionImagesClient + VMExtensionClient *compute.VirtualMachineExtensionsClient + VMScaleSetClient *compute.VirtualMachineScaleSetsClient + VMScaleSetExtensionsClient *compute.VirtualMachineScaleSetExtensionsClient + VMScaleSetRollingUpgradesClient *compute.VirtualMachineScaleSetRollingUpgradesClient + VMScaleSetVMsClient *compute.VirtualMachineScaleSetVMsClient + VMClient *compute.VirtualMachinesClient + VMImageClient *compute.VirtualMachineImagesClient } func NewClient(o *common.ClientOptions) *Client { @@ -84,6 +85,9 @@ func NewClient(o *common.ClientOptions) *Client { vmScaleSetExtensionsClient := compute.NewVirtualMachineScaleSetExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&vmScaleSetExtensionsClient.Client, o.ResourceManagerAuthorizer) + vmScaleSetRollingUpgradesClient := compute.NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&vmScaleSetRollingUpgradesClient.Client, o.ResourceManagerAuthorizer) + vmScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&vmScaleSetVMsClient.Client, o.ResourceManagerAuthorizer) @@ -91,25 +95,26 @@ func NewClient(o *common.ClientOptions) *Client { o.ConfigureClient(&vmClient.Client, o.ResourceManagerAuthorizer) return &Client{ - AvailabilitySetsClient: &availabilitySetsClient, - DedicatedHostsClient: &dedicatedHostsClient, - DedicatedHostGroupsClient: &dedicatedHostGroupsClient, - DisksClient: &disksClient, - DiskEncryptionSetsClient: &diskEncryptionSetsClient, - GalleriesClient: &galleriesClient, - GalleryImagesClient: &galleryImagesClient, - GalleryImageVersionsClient: &galleryImageVersionsClient, - ImagesClient: &imagesClient, - MarketplaceAgreementsClient: &marketplaceAgreementsClient, - ProximityPlacementGroupsClient: &proximityPlacementGroupsClient, - SnapshotsClient: &snapshotsClient, - UsageClient: &usageClient, - VMExtensionImageClient: &vmExtensionImageClient, - VMExtensionClient: &vmExtensionClient, - VMScaleSetClient: &vmScaleSetClient, - VMScaleSetExtensionsClient: &vmScaleSetExtensionsClient, - VMScaleSetVMsClient: &vmScaleSetVMsClient, - VMClient: &vmClient, - VMImageClient: &vmImageClient, + AvailabilitySetsClient: &availabilitySetsClient, + DedicatedHostsClient: &dedicatedHostsClient, + DedicatedHostGroupsClient: &dedicatedHostGroupsClient, + DisksClient: &disksClient, + DiskEncryptionSetsClient: &diskEncryptionSetsClient, + GalleriesClient: &galleriesClient, + GalleryImagesClient: &galleryImagesClient, 
+ GalleryImageVersionsClient: &galleryImageVersionsClient, + ImagesClient: &imagesClient, + MarketplaceAgreementsClient: &marketplaceAgreementsClient, + ProximityPlacementGroupsClient: &proximityPlacementGroupsClient, + SnapshotsClient: &snapshotsClient, + UsageClient: &usageClient, + VMExtensionImageClient: &vmExtensionImageClient, + VMExtensionClient: &vmExtensionClient, + VMScaleSetClient: &vmScaleSetClient, + VMScaleSetExtensionsClient: &vmScaleSetExtensionsClient, + VMScaleSetRollingUpgradesClient: &vmScaleSetRollingUpgradesClient, + VMScaleSetVMsClient: &vmScaleSetVMsClient, + VMClient: &vmClient, + VMImageClient: &vmImageClient, } } diff --git a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go index 2a26cd2bef4d..4d92448063b7 100644 --- a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go @@ -499,6 +499,14 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i } update := compute.VirtualMachineScaleSetUpdate{} + // first try and pull this from existing vm, which covers no changes being made to this block + automaticOSUpgradeIsEnabled := false + if policy := existing.VirtualMachineScaleSetProperties.UpgradePolicy; policy != nil { + if policy.AutomaticOSUpgradePolicy != nil && policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade != nil { + automaticOSUpgradeIsEnabled = *policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade + } + } + if d.HasChange("automatic_os_upgrade_policy") || d.HasChange("rolling_upgrade_policy") { upgradePolicy := compute.UpgradePolicy{ Mode: compute.UpgradeMode(d.Get("upgrade_mode").(string)), @@ -507,6 +515,10 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i if d.HasChange("automatic_os_upgrade_policy") { automaticRaw := d.Get("automatic_os_upgrade_policy").([]interface{}) upgradePolicy.AutomaticOSUpgradePolicy = ExpandVirtualMachineScaleSetAutomaticUpgradePolicy(automaticRaw) + + // however if this block has been changed then we need to pull it + // we can guarantee this always has a value since it'll have been expanded and thus is safe to de-ref + automaticOSUpgradeIsEnabled = *upgradePolicy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade } if d.HasChange("rolling_upgrade_policy") { @@ -669,84 +681,18 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i update.VirtualMachineScaleSetUpdateProperties = &updateProps - log.Printf("[DEBUG] Updating Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - future, err := client.Update(ctx, id.ResourceGroup, id.Name, update) - if err != nil { - return fmt.Errorf("Error updating Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - log.Printf("[DEBUG] Waiting for update of Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for update of Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + metaData := virtualMachineScaleSetUpdateMetaData{ + AutomaticOSUpgradeIsEnabled: automaticOSUpgradeIsEnabled, + CanRollInstancesWhenRequired: meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired, + 
UpdateInstances: updateInstances, + Client: meta.(*clients.Client).Compute, + Existing: existing, + ID: id, + OSType: compute.Linux, } - log.Printf("[DEBUG] Updated Linux Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup) - - // if we update the SKU, we also need to subsequently roll the instances using the `UpdateInstances` API - if updateInstances { - userWantsToRollInstances := meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired - if userWantsToRollInstances { - log.Printf("[DEBUG] Rolling the VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - instancesClient := meta.(*clients.Client).Compute.VMScaleSetVMsClient - instances, err := instancesClient.ListComplete(ctx, id.ResourceGroup, id.Name, "", "", "") - if err != nil { - return fmt.Errorf("Error listing VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - log.Printf("[DEBUG] Determining instances to roll..") - instanceIdsToRoll := make([]string, 0) - for instances.NotDone() { - instance := instances.Value() - props := instance.VirtualMachineScaleSetVMProperties - if props != nil && instance.InstanceID != nil { - latestModel := props.LatestModelApplied - if latestModel != nil || !*latestModel { - instanceIdsToRoll = append(instanceIdsToRoll, *instance.InstanceID) - } - } - - if err := instances.NextWithContext(ctx); err != nil { - return fmt.Errorf("Error enumerating instances: %s", err) - } - } - - // TODO: there's a performance enhancement to do batches here, but this is fine for a first pass - for _, instanceId := range instanceIdsToRoll { - instanceIds := []string{instanceId} - log.Printf("[DEBUG] Updating Instance %q to the Latest Configuration..", instanceId) - ids := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ - InstanceIds: &instanceIds, - } - future, err := client.UpdateInstances(ctx, id.ResourceGroup, id.Name, ids) - if err != nil { - return fmt.Errorf("Error updating Instance %q (Linux VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for update of Instance %q (Linux VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err) - } - log.Printf("[DEBUG] Updated Instance %q to the Latest Configuration.", instanceId) - - // TODO: does this want to be a separate, user-configurable toggle? 
- log.Printf("[DEBUG] Reimaging Instance %q..", instanceId) - reimageInput := &compute.VirtualMachineScaleSetReimageParameters{ - InstanceIds: &instanceIds, - } - reimageFuture, err := client.Reimage(ctx, id.ResourceGroup, id.Name, reimageInput) - if err != nil { - return fmt.Errorf("Error reimaging Instance %q (Linux VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err) - } - - if err = reimageFuture.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for reimage of Instance %q (Linux VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err) - } - log.Printf("[DEBUG] Reimaged Instance %q..", instanceId) - } - - log.Printf("[DEBUG] Rolled the VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup) - } else { - log.Printf("[DEBUG] Terraform wants to roll the VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q) - but user has opted out - skipping..", id.Name, id.ResourceGroup) - } + if err := metaData.performUpdate(ctx, update); err != nil { + return err } return resourceArmLinuxVirtualMachineScaleSetRead(d, meta) diff --git a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go index 28340a296133..681fa49b7b0f 100644 --- a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go @@ -530,6 +530,13 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta update := compute.VirtualMachineScaleSetUpdate{} upgradeMode := compute.UpgradeMode(d.Get("upgrade_mode").(string)) + // first try and pull this from existing vm, which covers no changes being made to this block + automaticOSUpgradeIsEnabled := false + if policy := existing.VirtualMachineScaleSetProperties.UpgradePolicy; policy != nil { + if policy.AutomaticOSUpgradePolicy != nil && policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade != nil { + automaticOSUpgradeIsEnabled = *policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade + } + } if d.HasChange("automatic_os_upgrade_policy") || d.HasChange("rolling_upgrade_policy") { upgradePolicy := compute.UpgradePolicy{ Mode: upgradeMode, @@ -538,6 +545,10 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta if d.HasChange("automatic_os_upgrade_policy") { automaticRaw := d.Get("automatic_os_upgrade_policy").([]interface{}) upgradePolicy.AutomaticOSUpgradePolicy = ExpandVirtualMachineScaleSetAutomaticUpgradePolicy(automaticRaw) + + // however if this block has been changed then we need to pull it + // we can guarantee this always has a value since it'll have been expanded and thus is safe to de-ref + automaticOSUpgradeIsEnabled = *upgradePolicy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade } if d.HasChange("rolling_upgrade_policy") { @@ -704,84 +715,18 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta update.VirtualMachineScaleSetUpdateProperties = &updateProps - log.Printf("[DEBUG] Updating Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - future, err := client.Update(ctx, id.ResourceGroup, id.Name, update) - if err != nil { - return fmt.Errorf("Error updating Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + metaData 
:= virtualMachineScaleSetUpdateMetaData{ + AutomaticOSUpgradeIsEnabled: automaticOSUpgradeIsEnabled, + CanRollInstancesWhenRequired: meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired, + UpdateInstances: updateInstances, + Client: meta.(*clients.Client).Compute, + Existing: existing, + ID: id, + OSType: compute.Windows, } - log.Printf("[DEBUG] Waiting for update of Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for update of Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - log.Printf("[DEBUG] Updated Windows Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup) - - // if we update the SKU, we also need to subsequently roll the instances using the `UpdateInstances` API - if updateInstances { - userWantsToRollInstances := meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired - if userWantsToRollInstances { - log.Printf("[DEBUG] Rolling the VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - instancesClient := meta.(*clients.Client).Compute.VMScaleSetVMsClient - instances, err := instancesClient.ListComplete(ctx, id.ResourceGroup, id.Name, "", "", "") - if err != nil { - return fmt.Errorf("Error listing VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - log.Printf("[DEBUG] Determining instances to roll..") - instanceIdsToRoll := make([]string, 0) - for instances.NotDone() { - instance := instances.Value() - props := instance.VirtualMachineScaleSetVMProperties - if props != nil && instance.InstanceID != nil { - latestModel := props.LatestModelApplied - if latestModel != nil || !*latestModel { - instanceIdsToRoll = append(instanceIdsToRoll, *instance.InstanceID) - } - } - - if err := instances.NextWithContext(ctx); err != nil { - return fmt.Errorf("Error enumerating instances: %s", err) - } - } - - // there's a performance enhancement to do batches here, but this is fine for a first pass - for _, instanceId := range instanceIdsToRoll { - instanceIds := []string{instanceId} - - log.Printf("[DEBUG] Updating Instance %q to the Latest Configuration..", instanceId) - ids := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ - InstanceIds: &instanceIds, - } - future, err := client.UpdateInstances(ctx, id.ResourceGroup, id.Name, ids) - if err != nil { - return fmt.Errorf("Error updating Instance %q (Windows VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for update of Instance %q (Windows VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err) - } - log.Printf("[DEBUG] Updated Instance %q to the Latest Configuration.", instanceId) - - // TODO: does this want to be a separate, user-configurable toggle? 
- log.Printf("[DEBUG] Reimaging Instance %q..", instanceId) - reimageInput := &compute.VirtualMachineScaleSetReimageParameters{ - InstanceIds: &instanceIds, - } - reimageFuture, err := client.Reimage(ctx, id.ResourceGroup, id.Name, reimageInput) - if err != nil { - return fmt.Errorf("Error reimaging Instance %q (Windows VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err) - } - - if err = reimageFuture.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for reimage of Instance %q (Windows VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err) - } - log.Printf("[DEBUG] Reimaged Instance %q..", instanceId) - } - - log.Printf("[DEBUG] Rolled the VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup) - } else { - log.Printf("[DEBUG] Terraform wants to roll the VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q) - but user has opted out - skipping..", id.Name, id.ResourceGroup) - } + if err := metaData.performUpdate(ctx, update); err != nil { + return err } return resourceArmWindowsVirtualMachineScaleSetRead(d, meta) diff --git a/azurerm/internal/services/compute/tests/resource_arm_linux_virtual_machine_scale_set_images_test.go b/azurerm/internal/services/compute/tests/resource_arm_linux_virtual_machine_scale_set_images_test.go index 73785bddd1e3..8cb54727bcef 100644 --- a/azurerm/internal/services/compute/tests/resource_arm_linux_virtual_machine_scale_set_images_test.go +++ b/azurerm/internal/services/compute/tests/resource_arm_linux_virtual_machine_scale_set_images_test.go @@ -298,9 +298,9 @@ resource "azurerm_linux_virtual_machine_scale_set" "test" { } rolling_upgrade_policy { - max_batch_instance_percent = 21 - max_unhealthy_instance_percent = 22 - max_unhealthy_upgraded_instance_percent = 23 + max_batch_instance_percent = 100 + max_unhealthy_instance_percent = 100 + max_unhealthy_upgraded_instance_percent = 100 pause_time_between_batches = "PT30S" } diff --git a/azurerm/internal/services/compute/tests/resource_arm_windows_virtual_machine_scale_set_images_test.go b/azurerm/internal/services/compute/tests/resource_arm_windows_virtual_machine_scale_set_images_test.go index 79e74bfe3683..81572c20a1ac 100644 --- a/azurerm/internal/services/compute/tests/resource_arm_windows_virtual_machine_scale_set_images_test.go +++ b/azurerm/internal/services/compute/tests/resource_arm_windows_virtual_machine_scale_set_images_test.go @@ -298,9 +298,9 @@ resource "azurerm_windows_virtual_machine_scale_set" "test" { } rolling_upgrade_policy { - max_batch_instance_percent = 21 - max_unhealthy_instance_percent = 22 - max_unhealthy_upgraded_instance_percent = 23 + max_batch_instance_percent = 100 + max_unhealthy_instance_percent = 100 + max_unhealthy_upgraded_instance_percent = 100 pause_time_between_batches = "PT30S" } diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_update.go b/azurerm/internal/services/compute/virtual_machine_scale_set_update.go new file mode 100644 index 000000000000..ba053bc95572 --- /dev/null +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_update.go @@ -0,0 +1,184 @@ +package compute + +import ( + "context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/client" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type virtualMachineScaleSetUpdateMetaData struct { + // is "automaticOSUpgrade" enable in the upgradeProfile block + AutomaticOSUpgradeIsEnabled bool + + // can we roll instances if we need too? this is a feature toggle + CanRollInstancesWhenRequired bool + + // do we need to roll the instances in this scale set? + UpdateInstances bool + + Client *client.Client + Existing compute.VirtualMachineScaleSet + ID *VirtualMachineScaleSetResourceID + OSType compute.OperatingSystemTypes +} + +func (metadata virtualMachineScaleSetUpdateMetaData) performUpdate(ctx context.Context, update compute.VirtualMachineScaleSetUpdate) error { + if metadata.AutomaticOSUpgradeIsEnabled { + // Virtual Machine Scale Sets with Automatic OS Upgrade enabled must have all VM instances upgraded to same + // Platform Image. Upgrade all VM instances to latest Virtual Machine Scale Set model while property + // 'upgradePolicy.automaticOSUpgradePolicy.enableAutomaticOSUpgrade' is false and then update property + // 'upgradePolicy.automaticOSUpgradePolicy.enableAutomaticOSUpgrade' to true + + update.VirtualMachineScaleSetUpdateProperties.UpgradePolicy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade = utils.Bool(false) + } + + if err := metadata.updateVmss(ctx, update); err != nil { + return err + } + + // if we update the SKU, we also need to subsequently roll the instances using the `UpdateInstances` API + if metadata.UpdateInstances { + userWantsToRollInstances := metadata.CanRollInstancesWhenRequired + upgradeMode := metadata.Existing.VirtualMachineScaleSetProperties.UpgradePolicy.Mode + + if userWantsToRollInstances { + if upgradeMode == compute.Automatic { + if err := metadata.upgradeInstancesForAutomaticUpgradePolicy(ctx); err != nil { + return err + } + } + } + + if upgradeMode == compute.Manual { + if err := metadata.upgradeInstancesForManualUpgradePolicy(ctx); err != nil { + return err + } + } + } + + if metadata.AutomaticOSUpgradeIsEnabled { + // Virtual Machine Scale Sets with Automatic OS Upgrade enabled must have all VM instances upgraded to same + // Platform Image. 
Upgrade all VM instances to latest Virtual Machine Scale Set model while property
+		// 'upgradePolicy.automaticOSUpgradePolicy.enableAutomaticOSUpgrade' is false and then update property
+		// 'upgradePolicy.automaticOSUpgradePolicy.enableAutomaticOSUpgrade' to true
+
+		// finally set this to true
+		update.VirtualMachineScaleSetUpdateProperties.UpgradePolicy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade = utils.Bool(true)
+
+		// then update the VM
+		if err := metadata.updateVmss(ctx, update); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (metadata virtualMachineScaleSetUpdateMetaData) updateVmss(ctx context.Context, update compute.VirtualMachineScaleSetUpdate) error {
+	client := metadata.Client.VMScaleSetClient
+	id := metadata.ID
+
+	log.Printf("[DEBUG] Updating %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup)
+	future, err := client.Update(ctx, id.ResourceGroup, id.Name, update)
+	if err != nil {
+		return fmt.Errorf("Error updating %s Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err)
+	}
+
+	log.Printf("[DEBUG] Waiting for update of %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup)
+	if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return fmt.Errorf("Error waiting for update of %s Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err)
+	}
+	log.Printf("[DEBUG] Updated %s Virtual Machine Scale Set %q (Resource Group %q).", metadata.OSType, id.Name, id.ResourceGroup)
+
+	return nil
+}
+
+func (metadata virtualMachineScaleSetUpdateMetaData) upgradeInstancesForAutomaticUpgradePolicy(ctx context.Context) error {
+	client := metadata.Client.VMScaleSetClient
+	rollingUpgradesClient := metadata.Client.VMScaleSetRollingUpgradesClient
+	id := metadata.ID
+
+	log.Printf("[DEBUG] Updating instances for %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup)
+	future, err := rollingUpgradesClient.StartOSUpgrade(ctx, id.ResourceGroup, id.Name)
+	if err != nil {
+		return fmt.Errorf("Error updating instances for %s Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err)
+	}
+
+	log.Printf("[DEBUG] Waiting for update of instances for %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup)
+	if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return fmt.Errorf("Error waiting for update of instances for %s Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err)
+	}
+	log.Printf("[DEBUG] Updated instances for %s Virtual Machine Scale Set %q (Resource Group %q).", metadata.OSType, id.Name, id.ResourceGroup)
+
+	return nil
+}
+
+func (metadata virtualMachineScaleSetUpdateMetaData) upgradeInstancesForManualUpgradePolicy(ctx context.Context) error {
+	client := metadata.Client.VMScaleSetClient
+	id := metadata.ID
+
+	log.Printf("[DEBUG] Rolling the VM Instances for %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup)
+	instancesClient := metadata.Client.VMScaleSetVMsClient
+	instances, err := instancesClient.ListComplete(ctx, id.ResourceGroup, id.Name, "", "", "")
+	if err != nil {
+		return fmt.Errorf("Error listing VM Instances for %s Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err)
+	}
+
+	log.Printf("[DEBUG] Determining instances to roll..")
+	instanceIdsToRoll := make([]string, 0)
+	for instances.NotDone() {
+		instance := instances.Value()
+		props := instance.VirtualMachineScaleSetVMProperties
+		if props != nil && instance.InstanceID != nil {
+			latestModel := props.LatestModelApplied
+			if latestModel == nil || !*latestModel {
+				instanceIdsToRoll = append(instanceIdsToRoll, *instance.InstanceID)
+			}
+		}
+
+		if err := instances.NextWithContext(ctx); err != nil {
+			return fmt.Errorf("Error enumerating instances: %s", err)
+		}
+	}
+
+	// TODO: there's a performance enhancement to do batches here, but this is fine for a first pass
+	for _, instanceId := range instanceIdsToRoll {
+		instanceIds := []string{instanceId}
+
+		log.Printf("[DEBUG] Updating Instance %q to the Latest Configuration..", instanceId)
+		ids := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
+			InstanceIds: &instanceIds,
+		}
+		future, err := client.UpdateInstances(ctx, id.ResourceGroup, id.Name, ids)
+		if err != nil {
+			return fmt.Errorf("Error updating Instance %q (%s VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, metadata.OSType, id.Name, id.ResourceGroup, err)
+		}
+
+		if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+			return fmt.Errorf("Error waiting for update of Instance %q (%s VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, metadata.OSType, id.Name, id.ResourceGroup, err)
+		}
+		log.Printf("[DEBUG] Updated Instance %q to the Latest Configuration.", instanceId)
+
+		// TODO: does this want to be a separate, user-configurable toggle?
+		log.Printf("[DEBUG] Reimaging Instance %q..", instanceId)
+		reimageInput := &compute.VirtualMachineScaleSetReimageParameters{
+			InstanceIds: &instanceIds,
+		}
+		reimageFuture, err := client.Reimage(ctx, id.ResourceGroup, id.Name, reimageInput)
+		if err != nil {
+			return fmt.Errorf("Error reimaging Instance %q (%s VM Scale Set %q / Resource Group %q): %+v", instanceId, metadata.OSType, id.Name, id.ResourceGroup, err)
+		}
+
+		if err = reimageFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
+			return fmt.Errorf("Error waiting for reimage of Instance %q (%s VM Scale Set %q / Resource Group %q): %+v", instanceId, metadata.OSType, id.Name, id.ResourceGroup, err)
+		}
+		log.Printf("[DEBUG] Reimaged Instance %q..", instanceId)
+	}
+
+	log.Printf("[DEBUG] Rolled the VM Instances for %s Virtual Machine Scale Set %q (Resource Group %q).", metadata.OSType, id.Name, id.ResourceGroup)
+	return nil
+}
diff --git a/website/docs/r/linux_virtual_machine_scale_set.html.markdown b/website/docs/r/linux_virtual_machine_scale_set.html.markdown
index 921662e14dbe..e83b52d013f7 100644
--- a/website/docs/r/linux_virtual_machine_scale_set.html.markdown
+++ b/website/docs/r/linux_virtual_machine_scale_set.html.markdown
@@ -20,8 +20,6 @@ Manages a Linux Virtual Machine Scale Set.
 
 ~> **Note:** This resource does not support Unmanaged Disks. If you need to use Unmanaged Disks you can continue to use [the `azurerm_virtual_machine_scale_set` resource](virtual_machine_scale_set.html) instead
 
-~> In this Beta release there's a known issue where the `health_probe_id` is not passed to the Azure API during an update for machines using an Automatic or Rolling Upgrade Policy.
-
 ## Example Usage
 
 This example provisions a basic Linux Virtual Machine Scale Set on an internal network. 
Additional examples of how to use the `azurerm_linux_virtual_machine_scale_set` resource can be found [in the ./examples/vm-scale-set/linux` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/vm-scale-set/linux). diff --git a/website/docs/r/windows_virtual_machine_scale_set.html.markdown b/website/docs/r/windows_virtual_machine_scale_set.html.markdown index ef6bf421f2a4..c65ff22ab3a1 100644 --- a/website/docs/r/windows_virtual_machine_scale_set.html.markdown +++ b/website/docs/r/windows_virtual_machine_scale_set.html.markdown @@ -20,8 +20,6 @@ Manages a Windows Virtual Machine Scale Set. ~> **Note:** This resource does not support Unmanaged Disks. If you need to use Unmanaged Disks you can continue to use [the `azurerm_virtual_machine_scale_set` resource](virtual_machine_scale_set.html) instead -~> In this Beta release there's a known issue where the `health_probe_id` is not passed to the Azure API during an update for machines using an Automatic or Rolling Upgrade Policy. - ## Example Usage This example provisions a basic Windows Virtual Machine Scale Set on an internal network. Additional examples of how to use the `azurerm_windows_virtual_machine_scale_set` resource can be found [in the ./examples/vm-scale-set/windows` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/vm-scale-set/windows).
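Reviewer note: the rolling behaviour added in patch 3 is driven by the provider feature toggle backing `Features.VirtualMachineScaleSet.RollInstancesWhenRequired`. As a rough sketch of the user-facing side only — assuming the `features` block exposes this toggle as `virtual_machine_scale_set.roll_instances_when_required`, and that the referenced resource group, subnet, load balancer probe and backend pool (`azurerm_resource_group.example`, `azurerm_subnet.example`, `azurerm_lb_probe.example`, `azurerm_lb_backend_address_pool.example`) are defined elsewhere — a configuration exercising both the re-sent `health_probe_id` and a rolling upgrade could look roughly like this:

```hcl
provider "azurerm" {
  features {
    virtual_machine_scale_set {
      # assumed HCL name for the RollInstancesWhenRequired feature toggle:
      # opt in to Terraform rolling (and reimaging) instances after an update that requires it
      roll_instances_when_required = true
    }
  }
}

resource "azurerm_linux_virtual_machine_scale_set" "example" {
  name                = "example-vmss"
  resource_group_name = azurerm_resource_group.example.name
  location            = azurerm_resource_group.example.location
  sku                 = "Standard_F2"
  instances           = 2
  admin_username      = "adminuser"

  admin_ssh_key {
    username   = "adminuser"
    public_key = file("~/.ssh/id_rsa.pub")
  }

  # with a Rolling (or Automatic) upgrade mode a health probe is required,
  # and after patch 1 it is also sent on every update, not just on create
  upgrade_mode    = "Rolling"
  health_probe_id = azurerm_lb_probe.example.id

  rolling_upgrade_policy {
    max_batch_instance_percent              = 100
    max_unhealthy_instance_percent          = 100
    max_unhealthy_upgraded_instance_percent = 100
    pause_time_between_batches              = "PT30S"
  }

  source_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  os_disk {
    storage_account_type = "Standard_LRS"
    caching              = "ReadWrite"
  }

  network_interface {
    name    = "example"
    primary = true

    ip_configuration {
      name                                   = "internal"
      primary                                = true
      subnet_id                              = azurerm_subnet.example.id
      load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.example.id]
    }
  }
}
```

The `rolling_upgrade_policy` values mirror the ones the acceptance tests are bumped to in patch 3.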