diff --git a/azurestack/config.go b/azurestack/config.go index 13c70cf2f..bc516327a 100644 --- a/azurestack/config.go +++ b/azurestack/config.go @@ -42,6 +42,7 @@ type ArmClient struct { // Compute availSetClient compute.AvailabilitySetsClient + diskClient compute.DisksClient vmExtensionClient compute.VirtualMachineExtensionsClient // DNS @@ -197,6 +198,10 @@ func (c *ArmClient) registerComputeClients(endpoint, subscriptionId string, auth c.configureClient(&availabilitySetsClient.Client, auth) c.availSetClient = availabilitySetsClient + diskClient := compute.NewDisksClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&diskClient.Client, auth) + c.diskClient = diskClient + extensionsClient := compute.NewVirtualMachineExtensionsClientWithBaseURI(endpoint, subscriptionId) c.configureClient(&extensionsClient.Client, auth) c.vmExtensionClient = extensionsClient diff --git a/azurestack/resource_arm_virtual_machine.go b/azurestack/resource_arm_virtual_machine.go index f882575c2..e7e1d790c 100644 --- a/azurestack/resource_arm_virtual_machine.go +++ b/azurestack/resource_arm_virtual_machine.go @@ -202,8 +202,8 @@ func resourceArmVirtualMachine() *schema.Resource { Computed: true, ConflictsWith: []string{"storage_os_disk.0.vhd_uri"}, ValidateFunc: validation.StringInSlice([]string{ - "Premium_LRS", - "Standard_LRS", + string(compute.StorageAccountTypesPremiumLRS), + string(compute.StorageAccountTypesStandardLRS), }, true), }, @@ -267,8 +267,8 @@ func resourceArmVirtualMachine() *schema.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - "Premium_LRS", - "Standard_LRS", + string(compute.StorageAccountTypesPremiumLRS), + string(compute.StorageAccountTypesStandardLRS), }, true), }, @@ -674,31 +674,25 @@ func resourceArmVirtualMachineRead(d *schema.ResourceData, meta interface{}) err } if osDisk := resp.VirtualMachineProperties.StorageProfile.OsDisk; osDisk != nil { - - // The attribute ManagedDisk is missing in StorageProfile struct for 2017-03-09 profile - // diskInfo, err := resourceArmVirtualMachineGetManagedDiskInfo(osDisk.ManagedDisk, meta) - // if err != nil { - // return fmt.Errorf("[DEBUG] Error getting managed OS disk detailed information: %#v", err) - // } - - // Diskinfo is not in the 2017-03-09 profile - if err := d.Set("storage_os_disk", flattenAzureStackVirtualMachineOsDisk(osDisk)); err != nil { + diskInfo, err := resourceArmVirtualMachineGetManagedDiskInfo(osDisk.ManagedDisk, meta) + if err != nil { + return fmt.Errorf("[DEBUG] Error getting managed OS disk detailed information: %#v", err) + } + if err := d.Set("storage_os_disk", flattenAzureStackVirtualMachineOsDisk(osDisk, diskInfo)); err != nil { return fmt.Errorf("[DEBUG] Error setting Virtual Machine Storage OS Disk error: %#v", err) } } if dataDisks := resp.VirtualMachineProperties.StorageProfile.DataDisks; dataDisks != nil { - // disksInfo := make([]*compute.OSDisk, len(*dataDisks)) - - // DiskInfo is not present in the 2017-03-09 profile - // for i, dataDisk := range *dataDisks { - // diskInfo, err := resourceArmVirtualMachineGetManagedDiskInfo(dataDisk.ManagedDisk, meta) - // if err != nil { - // return fmt.Errorf("[DEBUG] Error getting managed data disk detailed information: %#v", err) - // } - // disksInfo[i] = diskInfo - // } - if err := d.Set("storage_data_disk", flattenAzureStackVirtualMachineDataDisk(dataDisks)); err != nil { + disksInfo := make([]*compute.Disk, len(*dataDisks)) + for i, dataDisk := range *dataDisks { + diskInfo, err := 
resourceArmVirtualMachineGetManagedDiskInfo(dataDisk.ManagedDisk, meta) + if err != nil { + return fmt.Errorf("[DEBUG] Error getting managed data disk detailed information: %#v", err) + } + disksInfo[i] = diskInfo + } + if err := d.Set("storage_data_disk", flattenAzureStackVirtualMachineDataDisk(dataDisks, disksInfo)); err != nil { return fmt.Errorf("[DEBUG] Error setting Virtual Machine Storage Data Disks error: %#v", err) } } @@ -786,13 +780,17 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e } if osDisk.Vhd != nil { - if err = resourceArmVirtualMachineDeleteVhd(*osDisk.Vhd.URI, meta); err != nil { - return fmt.Errorf("Error deleting OS Disk VHD: %+v", err) + if osDisk.Vhd.URI != nil { + if err = resourceArmVirtualMachineDeleteVhd(*osDisk.Vhd.URI, meta); err != nil { + return fmt.Errorf("Error deleting OS Disk VHD: %+v", err) + } + } + } else if osDisk.ManagedDisk != nil { + if osDisk.ManagedDisk.ID != nil { + if err = resourceArmVirtualMachineDeleteManagedDisk(*osDisk.ManagedDisk.ID, meta); err != nil { + return fmt.Errorf("Error deleting OS Managed Disk: %+v", err) + } } - // } else if osDisk.ManagedDisk != nil { - // if err = resourceArmVirtualMachineDeleteManagedDisk(*osDisk.ManagedDisk.ID, meta); err != nil { - // return fmt.Errorf("Error deleting OS Managed Disk: %+v", err) - // } } else { return fmt.Errorf("Unable to locate OS managed disk properties from %s", name) } @@ -812,10 +810,10 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e if err = resourceArmVirtualMachineDeleteVhd(*disk.Vhd.URI, meta); err != nil { return fmt.Errorf("Error deleting Data Disk VHD: %+v", err) } - // } else if disk.ManagedDisk != nil { - // if err = resourceArmVirtualMachineDeleteManagedDisk(*disk.ManagedDisk.ID, meta); err != nil { - // return fmt.Errorf("Error deleting Data Managed Disk: %+v", err) - // } + } else if disk.ManagedDisk != nil { + if err = resourceArmVirtualMachineDeleteManagedDisk(*disk.ManagedDisk.ID, meta); err != nil { + return fmt.Errorf("Error deleting Data Managed Disk: %+v", err) + } } else { return fmt.Errorf("Unable to locate data managed disk properties from %s", name) } @@ -869,29 +867,28 @@ func resourceArmVirtualMachineDeleteVhd(uri string, meta interface{}) error { return nil } -// func resourceArmVirtualMachineDeleteManagedDisk(managedDiskID string, meta interface{}) error { -// client := meta.(*ArmClient).diskClient -// ctx := meta.(*ArmClient).StopContext -// -// id, err := parseAzureResourceID(managedDiskID) -// if err != nil { -// return err -// } -// resGroup := id.ResourceGroup -// name := id.Path["disks"] -// -// future, err := client.Delete(ctx, resGroup, name) -// if err != nil { -// return fmt.Errorf("Error deleting Managed Disk (%s %s) %+v", name, resGroup, err) -// } -// -// err = future.WaitForCompletionRef(ctx, client.Client) -// if err != nil { -// return fmt.Errorf("Error deleting Managed Disk (%s %s) %+v", name, resGroup, err) -// } -// -// return nil -// } +func resourceArmVirtualMachineDeleteManagedDisk(managedDiskID string, meta interface{}) error { + client := meta.(*ArmClient).diskClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(managedDiskID) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["disks"] + + future, err := client.Delete(ctx, resGroup, name) + if err != nil { + return fmt.Errorf("Error deleting Managed Disk (%s %s) %+v", name, resGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, 
client.Client); err != nil { + return fmt.Errorf("Error deleting Managed Disk (%s %s) %+v", name, resGroup, err) + } + + return nil +} func flattenAzureStackVirtualMachinePlan(plan *compute.Plan) []interface{} { result := make(map[string]interface{}) @@ -986,8 +983,7 @@ func flattenAzureStackVirtualMachineOsProfileSecrets(secrets *[]compute.VaultSec return result } -func flattenAzureStackVirtualMachineDataDisk(disks *[]compute.DataDisk) interface{} { - // ds := *disksInfo +func flattenAzureStackVirtualMachineDataDisk(disks *[]compute.DataDisk, disksInfo []*compute.Disk) interface{} { result := make([]interface{}, len(*disks)) for i, disk := range *disks { l := make(map[string]interface{}) @@ -995,10 +991,12 @@ func flattenAzureStackVirtualMachineDataDisk(disks *[]compute.DataDisk) interfac if disk.Vhd != nil { l["vhd_uri"] = *disk.Vhd.URI } - // if disk.ManagedDisk != nil { - // l["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType) - // l["managed_disk_id"] = *disk.ManagedDisk.ID - // } + if disk.ManagedDisk != nil { + l["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType) + if disk.ManagedDisk.ID != nil { + l["managed_disk_id"] = *disk.ManagedDisk.ID + } + } l["create_option"] = disk.CreateOption l["caching"] = string(disk.Caching) if disk.DiskSizeGB != nil { @@ -1006,7 +1004,11 @@ func flattenAzureStackVirtualMachineDataDisk(disks *[]compute.DataDisk) interfac } l["lun"] = *disk.Lun - // flattenAzureStackVirtualMachineReviseDiskInfo(l, ds[i]) + if v := disk.WriteAcceleratorEnabled; v != nil { + l["write_accelerator_enabled"] = *disk.WriteAcceleratorEnabled + } + + flattenAzureRmVirtualMachineReviseDiskInfo(l, disksInfo[i]) result[i] = l } @@ -1096,18 +1098,21 @@ func flattenAzureStackVirtualMachineOsProfileLinuxConfiguration(config *compute. 
return []interface{}{result} } -func flattenAzureStackVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} { +func flattenAzureStackVirtualMachineOsDisk(disk *compute.OSDisk, diskInfo *compute.Disk) []interface{} { result := make(map[string]interface{}) result["name"] = *disk.Name if disk.Vhd != nil { result["vhd_uri"] = *disk.Vhd.URI } - // if disk.ManagedDisk != nil { - // result["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType) - // if disk.ManagedDisk.ID != nil { - // result["managed_disk_id"] = *disk.ManagedDisk.ID - // } - // } + if disk.Image != nil && disk.Image.URI != nil { + result["image_uri"] = *disk.Image.URI + } + if disk.ManagedDisk != nil { + result["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType) + if disk.ManagedDisk.ID != nil { + result["managed_disk_id"] = *disk.ManagedDisk.ID + } + } result["create_option"] = disk.CreateOption result["caching"] = disk.Caching if disk.DiskSizeGB != nil { @@ -1115,21 +1120,25 @@ func flattenAzureStackVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} { } result["os_type"] = string(disk.OsType) - // flattenAzureStackVirtualMachineReviseDiskInfo(result, disk) + if v := disk.WriteAcceleratorEnabled; v != nil { + result["write_accelerator_enabled"] = *disk.WriteAcceleratorEnabled + } + + flattenAzureRmVirtualMachineReviseDiskInfo(result, diskInfo) return []interface{}{result} } -// func flattenAzureStackVirtualMachineReviseDiskInfo(result map[string]interface{}, diskInfo *compute.OSDisk) { -// if diskInfo != nil { -// if diskInfo.Sku != nil { -// result["managed_disk_type"] = string(diskInfo.Sku.Name) -// } -// if diskInfo.DiskProperties != nil && diskInfo.DiskProperties.DiskSizeGB != nil { -// result["disk_size_gb"] = *diskInfo.DiskProperties.DiskSizeGB -// } -// } -// } +func flattenAzureRmVirtualMachineReviseDiskInfo(result map[string]interface{}, diskInfo *compute.Disk) { + if diskInfo != nil { + if diskInfo.Sku != nil { + result["managed_disk_type"] = string(diskInfo.Sku.Name) + } + if diskInfo.DiskProperties != nil && diskInfo.DiskProperties.DiskSizeGB != nil { + result["disk_size_gb"] = *diskInfo.DiskProperties.DiskSizeGB + } + } +} func expandAzureStackVirtualMachinePlan(d *schema.ResourceData) (*compute.Plan, error) { planConfigs := d.Get("plan").([]interface{}) @@ -1383,17 +1392,17 @@ func expandAzureStackVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.D } } - // managedDisk := &compute.ManagedDiskParameters{} + managedDisk := &compute.ManagedDiskParameters{} - // if managedDiskType != "" { - // managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) - // data_disk.ManagedDisk = managedDisk - // } - // - // if managedDiskID != "" { - // managedDisk.ID = &managedDiskID - // data_disk.ManagedDisk = managedDisk - // } + if managedDiskType != "" { + managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) + data_disk.ManagedDisk = managedDisk + } + + if managedDiskID != "" { + managedDisk.ID = &managedDiskID + data_disk.ManagedDisk = managedDisk + } if vhdURI != "" && managedDiskID != "" { return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_id` (only one or the other can be used)") @@ -1409,9 +1418,12 @@ func expandAzureStackVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.D data_disk.Caching = compute.CachingTypes(v) } - if v := config["disk_size_gb"]; v != nil { - diskSize := int32(config["disk_size_gb"].(int)) - data_disk.DiskSizeGB = &diskSize + if v, ok := config["disk_size_gb"].(int); ok { + 
data_disk.DiskSizeGB = utils.Int32(int32(v)) + } + + if v, ok := config["write_accelerator_enabled"].(bool); ok { + data_disk.WriteAcceleratorEnabled = utils.Bool(v) } data_disks = append(data_disks, data_disk) @@ -1530,17 +1542,17 @@ func expandAzureStackVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDi } } - // managedDisk := &compute.ManagedDiskParameters{} - // - // if managedDiskType != "" { - // managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) - // osDisk.ManagedDisk = managedDisk - // } - // - // if managedDiskID != "" { - // managedDisk.ID = &managedDiskID - // osDisk.ManagedDisk = managedDisk - // } + managedDisk := &compute.ManagedDiskParameters{} + + if managedDiskType != "" { + managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) + osDisk.ManagedDisk = managedDisk + } + + if managedDiskID != "" { + managedDisk.ID = &managedDiskID + osDisk.ManagedDisk = managedDisk + } //BEGIN: code to be removed after GH-13016 is merged if vhdURI != "" && managedDiskID != "" { @@ -1569,8 +1581,11 @@ func expandAzureStackVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDi } if v := config["disk_size_gb"].(int); v != 0 { - diskSize := int32(v) - osDisk.DiskSizeGB = &diskSize + osDisk.DiskSizeGB = utils.Int32(int32(v)) + } + + if v, ok := config["write_accelerator_enabled"].(bool); ok { + osDisk.WriteAcceleratorEnabled = utils.Bool(v) } return osDisk, nil @@ -1655,26 +1670,26 @@ func resourceArmVirtualMachineStorageImageReferenceHash(v interface{}) int { return hashcode.String(buf.String()) } -// func resourceArmVirtualMachineGetManagedDiskInfo(disk *compute.ManagedDiskParameters, meta interface{}) (*compute.Disk, error) { -// client := meta.(*ArmClient).diskClient -// ctx := meta.(*ArmClient).StopContext -// -// if disk == nil || disk.ID == nil { -// return nil, nil -// } -// -// diskId := *disk.ID -// id, err := parseAzureResourceID(diskId) -// if err != nil { -// return nil, fmt.Errorf("Error parsing Disk ID %q: %+v", diskId, err) -// } -// -// resourceGroup := id.ResourceGroup -// name := id.Path["disks"] -// diskResp, err := client.Get(ctx, resourceGroup, name) -// if err != nil { -// return nil, fmt.Errorf("Error retrieving Disk %q (Resource Group %q): %+v", name, resourceGroup, err) -// } -// -// return &diskResp, nil -// } +func resourceArmVirtualMachineGetManagedDiskInfo(disk *compute.ManagedDiskParameters, meta interface{}) (*compute.Disk, error) { + client := meta.(*ArmClient).diskClient + ctx := meta.(*ArmClient).StopContext + + if disk == nil || disk.ID == nil { + return nil, nil + } + + diskId := *disk.ID + id, err := parseAzureResourceID(diskId) + if err != nil { + return nil, fmt.Errorf("Error parsing Disk ID %q: %+v", diskId, err) + } + + resourceGroup := id.ResourceGroup + name := id.Path["disks"] + diskResp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + return nil, fmt.Errorf("Error retrieving Disk %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + return &diskResp, nil +} diff --git a/website/docs/r/virtual_machine.html.markdown b/website/docs/r/virtual_machine.html.markdown index 1c55d26b2..0f614fbd3 100644 --- a/website/docs/r/virtual_machine.html.markdown +++ b/website/docs/r/virtual_machine.html.markdown @@ -10,12 +10,187 @@ description: |- Manages a virtual machine. 
+## Example Usage with Managed Disks + +```hcl +resource "azurestack_resource_group" "test" { + name = "acctestrg" + + # This is Azure Stack Region so it will be different per Azure Stack and should not be in the format of "West US" etc... those are not the same values + location = "region1" +} + +resource "azurestack_virtual_network" "test" { + name = "acctvn" + address_space = ["10.0.0.0/16"] + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" +} + +resource "azurestack_subnet" "test" { + name = "acctsub" + resource_group_name = "${azurestack_resource_group.test.name}" + virtual_network_name = "${azurestack_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurestack_network_interface" "test" { + name = "acctni" + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurestack_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } +} + +resource "azurestack_virtual_machine" "test" { + name = "acctvm" + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" + network_interface_ids = ["${azurestack_network_interface.test.id}"] + vm_size = "Standard_F2" + + # Uncomment this line to delete the OS disk automatically when deleting the VM + # delete_os_disk_on_termination = true + + + # Uncomment this line to delete the data disks automatically when deleting the VM + # delete_data_disks_on_termination = true + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "hostname" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + tags { + environment = "staging" + } +} +``` + +## Example Usage with Managed Disks and Public IP + +```hcl +resource "azurestack_resource_group" "test" { + name = "acctestrg" + + # This is Azure Stack Region so it will be different per Azure Stack and should not be in the format of "West US" etc... 
those are not the same values + location = "region1" +} + +resource "azurestack_public_ip" "test" { + name = "acceptanceTestPublicIp1" + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" + public_ip_address_allocation = "static" + + tags { + environment = "Production" + } +} + +resource "azurestack_virtual_network" "test" { + name = "acctvn" + address_space = ["10.0.0.0/16"] + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" +} + +resource "azurestack_subnet" "test" { + name = "acctsub" + resource_group_name = "${azurestack_resource_group.test.name}" + virtual_network_name = "${azurestack_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurestack_network_interface" "test" { + name = "acctni" + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurestack_subnet.test.id}" + private_ip_address_allocation = "dynamic" + public_ip_address_id = "${azurestack_public_ip.test.id}" + } +} + + +resource "azurestack_virtual_machine" "test" { + name = "acctvm" + location = "${azurestack_resource_group.test.location}" + resource_group_name = "${azurestack_resource_group.test.name}" + network_interface_ids = ["${azurestack_network_interface.test.id}"] + vm_size = "Standard_D2_v2" + + # Uncomment this line to delete the OS disk automatically when deleting the VM + # delete_os_disk_on_termination = true + + + # Uncomment this line to delete the data disks automatically when deleting the VM + # delete_data_disks_on_termination = true + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "hostname" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + tags { + environment = "staging" + } +} +``` + ## Example Usage with Unmanaged Disks ```hcl resource "azurestack_resource_group" "test" { - name = "acctestrg" - location = "West US" + name = "acctestrg" + + # This is Azure Stack Region so it will be different per Azure Stack and should not be in the format of "West US" etc... those are not the same values + location = "region1" } resource "azurestack_virtual_network" "test" { @@ -242,6 +417,7 @@ The following arguments are supported: * `storage_image_reference` - (Optional) A Storage Image Reference block as documented below. * `storage_os_disk` - (Required) A `storage_os_disk` block. * `storage_data_disk` - (Optional) A list of Storage Data disk blocks as referenced below. +* `delete_os_disk_on_termination` - (Optional) Should the OS Disk be deleted when the Virtual Machine is destroyed? Defaults to `false`. * `delete_data_disks_on_termination` - (Optional) Flag to enable deletion of storage data disk VHD blobs when the VM is deleted, defaults to `false`. * `os_profile` - (Optional) An OS Profile block as documented below. Required when `create_option` in the `storage_os_disk` block is set to `FromImage`. * `identity` - (Optional) An identity block as documented below. 
@@ -298,22 +474,40 @@ resource "azurestack_virtual_machine" "test" {
`storage_os_disk` block supports the following:

* `name` - (Required) Specifies the disk name.
-* `vhd_uri` - (Optional) Specifies the vhd uri. Changing this forces a new resource to be created.
-* `create_option` - (Required) Specifies how the virtual machine should be created. Possible value is`FromImage`.
-* `caching` - (Optional) Specifies the caching requirements.
+* `create_option` - (Required) Specifies how the OS Disk should be created. Possible values are `Attach` (managed disks only) and `FromImage`.
+* `caching` - (Optional) Specifies the caching requirements for the OS Disk. Possible values include `None`, `ReadOnly` and `ReadWrite`.
* `image_uri` - (Optional) Specifies the image_uri in the form publisherName:offer:skus:version. `image_uri` can also specify the [VHD uri](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-cli-deploy-templates/#create-a-custom-vm-image) of a custom VM image to clone. When cloning a custom disk image the `os_type` documented below becomes required.
-* `os_type` - (Optional) Specifies the operating system Type, valid values are windows, linux.
+* `os_type` - (Optional) Specifies the Operating System on the OS Disk. Possible values are `Linux` and `Windows`.
* `disk_size_gb` - (Optional) Specifies the size of the os disk in gigabytes.

+The following properties apply when using Managed Disks:
+
+* `managed_disk_id` - (Optional) Specifies the ID of an existing Managed Disk which should be attached as the OS Disk of this Virtual Machine. If this is set, `create_option` must be set to `Attach`.
+
+* `managed_disk_type` - (Optional) Specifies the type of Managed Disk which should be created. Possible values are `Standard_LRS` or `Premium_LRS`.
+
+The following properties apply when using Unmanaged Disks:
+
+* `vhd_uri` - (Optional) Specifies the URI of the VHD file backing this Unmanaged OS Disk. Changing this forces a new resource to be created.
+
`storage_data_disk` supports the following:

* `name` - (Required) Specifies the name of the data disk.
-* `vhd_uri` - (Optional) Specifies the uri of the location in storage where the vhd for the virtual machine should be placed.
* `create_option` - (Required) Specifies how the data disk should be created. Possible values are `Attach`, `FromImage` and `Empty`.
* `disk_size_gb` - (Required) Specifies the size of the data disk in gigabytes.
* `caching` - (Optional) Specifies the caching requirements.
* `lun` - (Required) Specifies the logical unit number of the data disk.

+The following properties apply when using Managed Disks:
+
+* `managed_disk_type` - (Optional) Specifies the type of Managed Disk to create. Possible values are `Standard_LRS` or `Premium_LRS`.
+
+* `managed_disk_id` - (Optional) Specifies the ID of an existing Managed Disk which should be attached to this Virtual Machine. When this field is set, `create_option` must be set to `Attach`.
+
+The following properties apply when using Unmanaged Disks:
+
+* `vhd_uri` - (Optional) Specifies the URI of the VHD file backing this Unmanaged Data Disk. Changing this forces a new resource to be created.
+
`os_profile` supports the following:

* `computer_name` - (Required) Specifies the name of the virtual machine.
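As a rough sketch (not part of this diff) tying together the arguments documented above, the configuration below combines the `delete_os_disk_on_termination` / `delete_data_disks_on_termination` flags with a managed OS disk and an empty managed data disk. It reuses the `azurestack_resource_group.test` and `azurestack_network_interface.test` resources from the earlier examples; the resource name, disk names, size and LUN are arbitrary illustrative values.

```hcl
resource "azurestack_virtual_machine" "managed_example" {
  name                  = "acctvm-managed"
  location              = "${azurestack_resource_group.test.location}"
  resource_group_name   = "${azurestack_resource_group.test.name}"
  network_interface_ids = ["${azurestack_network_interface.test.id}"]
  vm_size               = "Standard_F2"

  # Remove the managed OS disk and managed data disks when the VM is destroyed
  delete_os_disk_on_termination    = true
  delete_data_disks_on_termination = true

  storage_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  storage_os_disk {
    name              = "exampleosdisk"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Standard_LRS"
  }

  # An empty managed data disk created alongside the VM (arbitrary size and LUN)
  storage_data_disk {
    name              = "exampledatadisk"
    managed_disk_type = "Standard_LRS"
    create_option     = "Empty"
    disk_size_gb      = 10
    lun               = 0
  }

  os_profile {
    computer_name  = "hostname"
    admin_username = "testadmin"
    admin_password = "Password1234!"
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }
}
```

To attach a pre-existing managed disk instead, the `storage_data_disk` block would use `create_option = "Attach"` together with `managed_disk_id`, as described in the argument list above.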