Skip to content

Commit

Permalink
Use disk API to load managed disk info when VM is stopped. (#1100)
Browse files Browse the repository at this point in the history
* Use disk API to load managed disk info when VM is stopped.

* Abstract the storage revise procedures according to the comments.

* Add more status codes to check for deallocation.

* Update managed disk information when flattening.

* Add test case for disk info of a deallocated virtual machine

* Fix review comments identified in the PR; acceptance test output:

```
$ acctests azurerm TestAccAzureRMVirtualMachine_hasDiskInfoWhenStopped
=== RUN   TestAccAzureRMVirtualMachine_hasDiskInfoWhenStopped
--- PASS: TestAccAzureRMVirtualMachine_hasDiskInfoWhenStopped (729.95s)
PASS
ok  	github.com/terraform-providers/terraform-provider-azurerm/azurerm	729.986s
```
  • Loading branch information
Junyi Yi authored and tombuildsstuff committed Apr 23, 2018
1 parent 1b508d4 commit 4cc1381
Show file tree
Hide file tree
Showing 2 changed files with 191 additions and 7 deletions.
66 changes: 59 additions & 7 deletions azurerm/resource_arm_virtual_machine.go
Original file line number Diff line number Diff line change
Expand Up @@ -633,7 +633,6 @@ func resourceArmVirtualMachineRead(d *schema.ResourceData, meta interface{}) err
name := id.Path["virtualMachines"]

resp, err := vmClient.Get(ctx, resGroup, name, "")

if err != nil {
if utils.ResponseWasNotFound(resp.Response) {
d.SetId("")
Expand Down Expand Up @@ -669,12 +668,26 @@ func resourceArmVirtualMachineRead(d *schema.ResourceData, meta interface{}) err
}
}

if err := d.Set("storage_os_disk", flattenAzureRmVirtualMachineOsDisk(resp.VirtualMachineProperties.StorageProfile.OsDisk)); err != nil {
return fmt.Errorf("[DEBUG] Error setting Virtual Machine Storage OS Disk error: %#v", err)
if osDisk := resp.VirtualMachineProperties.StorageProfile.OsDisk; osDisk != nil {
diskInfo, err := resourceArmVirtualMachineGetManagedDiskInfo(osDisk.ManagedDisk, meta)
if err != nil {
return fmt.Errorf("[DEBUG] Error getting managed OS disk detailed information: %#v", err)
}
if err := d.Set("storage_os_disk", flattenAzureRmVirtualMachineOsDisk(osDisk, diskInfo)); err != nil {
return fmt.Errorf("[DEBUG] Error setting Virtual Machine Storage OS Disk error: %#v", err)
}
}

if resp.VirtualMachineProperties.StorageProfile.DataDisks != nil {
if err := d.Set("storage_data_disk", flattenAzureRmVirtualMachineDataDisk(resp.VirtualMachineProperties.StorageProfile.DataDisks)); err != nil {
if dataDisks := resp.VirtualMachineProperties.StorageProfile.DataDisks; dataDisks != nil {
disksInfo := make([]*compute.Disk, len(*dataDisks))
for i, dataDisk := range *dataDisks {
diskInfo, err := resourceArmVirtualMachineGetManagedDiskInfo(dataDisk.ManagedDisk, meta)
if err != nil {
return fmt.Errorf("[DEBUG] Error getting managed data disk detailed information: %#v", err)
}
disksInfo[i] = diskInfo
}
if err := d.Set("storage_data_disk", flattenAzureRmVirtualMachineDataDisk(dataDisks, disksInfo)); err != nil {
return fmt.Errorf("[DEBUG] Error setting Virtual Machine Storage Data Disks error: %#v", err)
}
}
Expand Down Expand Up @@ -958,7 +971,7 @@ func flattenAzureRmVirtualMachineOsProfileSecrets(secrets *[]compute.VaultSecret
return result
}

func flattenAzureRmVirtualMachineDataDisk(disks *[]compute.DataDisk) interface{} {
func flattenAzureRmVirtualMachineDataDisk(disks *[]compute.DataDisk, disksInfo []*compute.Disk) interface{} {
result := make([]interface{}, len(*disks))
for i, disk := range *disks {
l := make(map[string]interface{})
Expand All @@ -977,6 +990,8 @@ func flattenAzureRmVirtualMachineDataDisk(disks *[]compute.DataDisk) interface{}
}
l["lun"] = *disk.Lun

flattenAzureRmVirtualMachineReviseDiskInfo(l, disksInfo[i])

result[i] = l
}
return result
Expand Down Expand Up @@ -1065,7 +1080,7 @@ func flattenAzureRmVirtualMachineOsProfileLinuxConfiguration(config *compute.Lin
return []interface{}{result}
}

func flattenAzureRmVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} {
func flattenAzureRmVirtualMachineOsDisk(disk *compute.OSDisk, diskInfo *compute.Disk) []interface{} {
result := make(map[string]interface{})
result["name"] = *disk.Name
if disk.Vhd != nil {
Expand All @@ -1084,9 +1099,22 @@ func flattenAzureRmVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} {
}
result["os_type"] = string(disk.OsType)

flattenAzureRmVirtualMachineReviseDiskInfo(result, diskInfo)

return []interface{}{result}
}

// flattenAzureRmVirtualMachineReviseDiskInfo overlays authoritative values from
// the Disks API onto an already-flattened disk map. When the VM is deallocated
// the VM API omits these fields, so the separately-fetched disk resource
// (diskInfo) is used to fill in "managed_disk_type" and "disk_size_gb".
// A nil diskInfo (unmanaged disk, or lookup skipped) leaves the map untouched.
func flattenAzureRmVirtualMachineReviseDiskInfo(result map[string]interface{}, diskInfo *compute.Disk) {
	if diskInfo == nil {
		return
	}
	if sku := diskInfo.Sku; sku != nil {
		result["managed_disk_type"] = string(sku.Name)
	}
	if props := diskInfo.DiskProperties; props != nil && props.DiskSizeGB != nil {
		result["disk_size_gb"] = *props.DiskSizeGB
	}
}

func expandAzureRmVirtualMachinePlan(d *schema.ResourceData) (*compute.Plan, error) {
planConfigs := d.Get("plan").([]interface{})

Expand Down Expand Up @@ -1599,3 +1627,27 @@ func resourceArmVirtualMachineStorageImageReferenceHash(v interface{}) int {
}
return hashcode.String(buf.String())
}

// resourceArmVirtualMachineGetManagedDiskInfo fetches the full managed disk
// resource referenced by a VM's disk entry via the Disks API. It returns
// (nil, nil) when the reference is absent or carries no ID (i.e. the disk is
// unmanaged), so callers can treat a nil result as "nothing to overlay".
func resourceArmVirtualMachineGetManagedDiskInfo(disk *compute.ManagedDiskParameters, meta interface{}) (*compute.Disk, error) {
	// Nothing to look up for unmanaged disks or references without an ID.
	if disk == nil || disk.ID == nil {
		return nil, nil
	}

	armClient := meta.(*ArmClient)
	client := armClient.diskClient
	ctx := armClient.StopContext

	managedDiskID := *disk.ID
	parsedID, err := parseAzureResourceID(managedDiskID)
	if err != nil {
		return nil, fmt.Errorf("Error parsing Disk ID %q: %+v", managedDiskID, err)
	}

	diskName := parsedID.Path["disks"]
	resGroup := parsedID.ResourceGroup
	result, err := client.Get(ctx, resGroup, diskName)
	if err != nil {
		return nil, fmt.Errorf("Error retrieving Disk %q (Resource Group %q): %+v", diskName, resGroup, err)
	}

	return &result, nil
}
132 changes: 132 additions & 0 deletions azurerm/resource_arm_virtual_machine_managed_disks_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -380,6 +380,63 @@ func TestAccAzureRMVirtualMachine_winRMCerts(t *testing.T) {
})
}

// TestAccAzureRMVirtualMachine_hasDiskInfoWhenStopped verifies that managed
// disk attributes (type, size) remain readable after the VM is deallocated:
// step 1 creates the VM and checks the attributes; step 2 stops it out-of-band
// and re-checks the same attributes on refresh.
func TestAccAzureRMVirtualMachine_hasDiskInfoWhenStopped(t *testing.T) {
	var machine compute.VirtualMachine
	resourceName := "azurerm_virtual_machine.test"
	ri := acctest.RandInt()
	config := testAccAzureRMVirtualMachine_hasDiskInfoWhenStopped(ri, testLocation())

	// Shared attribute assertions: expected to hold both while running and
	// after deallocation.
	checkDiskInfo := resource.ComposeTestCheckFunc(
		resource.TestCheckResourceAttr(resourceName, "storage_os_disk.0.managed_disk_type", "Standard_LRS"),
		resource.TestCheckResourceAttr(resourceName, "storage_data_disk.0.disk_size_gb", "64"),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckAzureRMVirtualMachineDestroy,
		Steps: []resource.TestStep{
			{
				Config: config,
				Check: resource.ComposeTestCheckFunc(
					testCheckAzureRMVirtualMachineExists(resourceName, &machine),
					checkDiskInfo,
				),
			},
			{
				Config: config,
				Check: resource.ComposeTestCheckFunc(
					testCheckAndStopAzureRMVirtualMachine(&machine),
					checkDiskInfo,
				),
			},
		},
	})
}

// testCheckAndStopAzureRMVirtualMachine returns a TestCheckFunc that
// deallocates (stops) the given virtual machine and blocks until the
// operation completes, so the following refresh runs against a stopped VM.
func testCheckAndStopAzureRMVirtualMachine(vm *compute.VirtualMachine) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		vmID, err := parseAzureResourceID(*vm.ID)
		if err != nil {
			return fmt.Errorf("Unable to parse virtual machine ID %s, %+v", *vm.ID, err)
		}

		name := vmID.Path["virtualMachines"]
		resourceGroup := vmID.ResourceGroup

		client := testAccProvider.Meta().(*ArmClient).vmClient
		ctx := testAccProvider.Meta().(*ArmClient).StopContext

		future, err := client.Deallocate(ctx, resourceGroup, name)
		if err != nil {
			// Bug fix: the original messages printed the resource group where
			// the VM name belonged; report both explicitly.
			return fmt.Errorf("Failed stopping virtual machine %q (Resource Group %q): %+v", name, resourceGroup, err)
		}

		if err = future.WaitForCompletion(ctx, client.Client); err != nil {
			return fmt.Errorf("Failed long polling for the stop of virtual machine %q (Resource Group %q): %+v", name, resourceGroup, err)
		}

		return nil
	}
}

func testAccAzureRMVirtualMachine_winRMCerts(rString string, location string) string {
return fmt.Sprintf(`
variable "prefix" {
Expand Down Expand Up @@ -1976,3 +2033,78 @@ resource "azurerm_virtual_machine" "test" {
}
`, rInt, location, rInt, rInt, rInt, rInt, rInt)
}

// testAccAzureRMVirtualMachine_hasDiskInfoWhenStopped renders the acceptance
// test configuration: a resource group, network plumbing, and a Linux VM with
// a Standard_LRS managed OS disk plus a 64 GB empty data disk. The random
// integer uniquifies resource names; location selects the Azure region.
func testAccAzureRMVirtualMachine_hasDiskInfoWhenStopped(rInt int, location string) string {
	const template = `
resource "azurerm_resource_group" "test" {
  name     = "acctest-rg-%d"
  location = "%s"
}

resource "azurerm_virtual_network" "test" {
  name                = "acctestvn-%d"
  address_space       = ["10.0.0.0/16"]
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"
}

resource "azurerm_subnet" "test" {
  name                 = "internal"
  resource_group_name  = "${azurerm_resource_group.test.name}"
  virtual_network_name = "${azurerm_virtual_network.test.name}"
  address_prefix       = "10.0.2.0/24"
}

resource "azurerm_network_interface" "test" {
  name                = "acctestni-%d"
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"

  ip_configuration {
    name                          = "testconfiguration"
    subnet_id                     = "${azurerm_subnet.test.id}"
    private_ip_address_allocation = "dynamic"
  }
}

resource "azurerm_virtual_machine" "test" {
  name                  = "acctestvm-%d"
  location              = "${azurerm_resource_group.test.location}"
  resource_group_name   = "${azurerm_resource_group.test.name}"
  network_interface_ids = ["${azurerm_network_interface.test.id}"]
  vm_size               = "Standard_DS1_v2"

  storage_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  storage_os_disk {
    name              = "acctest-osdisk-%d"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Standard_LRS"
  }

  storage_data_disk {
    name          = "acctest-datadisk-%d"
    caching       = "ReadWrite"
    create_option = "Empty"
    lun           = 0
    disk_size_gb  = 64
  }

  os_profile {
    computer_name  = "acctest-machine-%d"
    admin_username = "testadmin"
    admin_password = "Password1234!"
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }
}
`
	return fmt.Sprintf(template, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt)
}

0 comments on commit 4cc1381

Please sign in to comment.