Disk settings are not correctly applied
Fixes #1028

1) According to the issue, eagerly_scrub and thin_provisioned are not
applied correctly when set, and the provider throws an error.

It is normal for a VM disk to end up with values different from the ones in
the configuration - this depends entirely on the vSphere backend. In such
cases the provider should not throw an error.

See https://kb.vmware.com/s/article/2145183

- removed the validation of both fields so that ignore_changes fixes the
  deployment; a short illustration follows
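
For context, a minimal sketch of how these flags surface on an existing
disk's backing via govmomi - illustrative only and not part of this change;
the helper name is hypothetical, and only the VirtualDiskFlatVer2BackingInfo
fields are taken from the provider code below:

    import (
        "log"

        "github.com/vmware/govmomi/vim25/types"
    )

    // reportProvisioning logs the provisioning flags vSphere reports for a
    // disk taken from the VM's config.hardware.device list. Both flags are
    // *bool values filled in by the backend, so they can legitimately differ
    // from what was requested in the configuration.
    func reportProvisioning(disk *types.VirtualDisk) {
        backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
        if !ok {
            return
        }
        eager := backing.EagerlyScrub != nil && *backing.EagerlyScrub
        thin := backing.ThinProvisioned != nil && *backing.ThinProvisioned
        log.Printf("eagerly_scrub=%t thin_provisioned=%t (reported by vSphere)", eager, thin)
    }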

2) Another comment on the issue states that when a VM is cloned with values
for eagerly_scrub and thin_provisioned that differ from those of the original
disk, the values are not applied (tested on a VMFS datastore).

- initially the provider assumed that if no datastore is explicitly set for a
  disk, no relocation changes should be sent to the API for that disk. This is
  changed to diffing the disk properties specified in HCL against the same
  properties on the existing disk of the source VM (a simplified sketch
  follows)
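
A simplified sketch of that comparison, assuming both sides have already been
flattened into plain property maps (the real implementation is
shouldAddRelocateSpec together with virtualDiskToSchemaPropsMap and
diskDataToSchemaProps in the diff below); the function name here is
illustrative:

    // needsRelocate reports whether any property declared in configuration
    // differs from the value on the source disk. Keys missing on either side
    // are skipped, so only properties known to both sides are compared.
    func needsRelocate(configProps, diskProps map[string]interface{}) bool {
        for key, configVal := range configProps {
            diskVal, ok := diskProps[key]
            if ok && configVal != diskVal {
                return true
            }
        }
        return false
    }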

Added e2e test to cover both cases.

Fixed unit tests broken by the bump of [github.com/vmware/govmomi](https://github.com/vmware/govmomi) from 0.30.7 to 0.31.0 (37f0a77).

Signed-off-by: Vasil Atanasov <[email protected]>
vasilsatanasov committed Oct 2, 2023
1 parent 37f0a77 commit 88b62f9
Showing 5 changed files with 250 additions and 30 deletions.
@@ -67,8 +67,9 @@ func CreateLibrary(d *schema.ResourceData, restclient *rest.Client, backings []l
log.Printf("[DEBUG] contentlibrary.CreateLibrary: Creating content library %s", name)
clm := library.NewManager(restclient)
ctx := context.TODO()
description := d.Get("description").(string)
lib := library.Library{
Description: d.Get("description").(string),
Description: &description,
Name: name,
Storage: backings,
Type: "LOCAL",
@@ -165,7 +166,7 @@ func CreateLibraryItem(c *rest.Client, l *library.Library, name string, desc str
clm := library.NewManager(c)
ctx := context.TODO()
item := library.Item{
Description: desc,
Description: &desc,
LibraryID: l.ID,
Name: name,
Type: t,
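
The Description hunks above follow the govmomi 0.31.0 change that turned
library.Library.Description and library.Item.Description into *string fields;
a minimal construction sketch, assuming govmomi v0.31.x (the function name and
values are placeholders):

    import "github.com/vmware/govmomi/vapi/library"

    // newLocalLibrary sketches the new construction pattern: Description is
    // now *string, so it must be passed by address rather than by value.
    func newLocalLibrary(name, desc string) library.Library {
        return library.Library{
            Name:        name,
            Description: &desc,
            Type:        "LOCAL",
        }
    }
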
138 changes: 114 additions & 24 deletions vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
@@ -892,49 +892,49 @@ func DiskMigrateRelocateOperation(data *schema.ResourceData, client *govmomi.Cli
// backing data defined in config, taking on these filenames when cloned. After
// the clone is complete, natural re-configuration happens to bring the disk
// configurations fully in sync with what is defined.
func DiskCloneRelocateOperation(d *schema.ResourceData, c *govmomi.Client, l object.VirtualDeviceList) ([]types.VirtualMachineRelocateSpecDiskLocator, error) {
func DiskCloneRelocateOperation(resourceData *schema.ResourceData, client *govmomi.Client, deviceList object.VirtualDeviceList) ([]types.VirtualMachineRelocateSpecDiskLocator, error) {
log.Printf("[DEBUG] DiskCloneRelocateOperation: Generating full disk relocate spec list")
devices := SelectDisks(l, d.Get("scsi_controller_count").(int), d.Get("sata_controller_count").(int), d.Get("ide_controller_count").(int))
devices := SelectDisks(deviceList, resourceData.Get("scsi_controller_count").(int), resourceData.Get("sata_controller_count").(int), resourceData.Get("ide_controller_count").(int))
log.Printf("[DEBUG] DiskCloneRelocateOperation: Disk devices located: %s", DeviceListString(devices))
// Sort the device list, in case it's not sorted already.
devSort := virtualDeviceListSorter{
Sort: devices,
DeviceList: l,
DeviceList: deviceList,
}
sort.Sort(devSort)
devices = devSort.Sort
log.Printf("[DEBUG] DiskCloneRelocateOperation: Disk devices order after sort: %s", DeviceListString(devices))
// Do the same for our listed disks.
curSet := d.Get(subresourceTypeDisk).([]interface{})
curSet := resourceData.Get(subresourceTypeDisk).([]interface{})
log.Printf("[DEBUG] DiskCloneRelocateOperation: Current resource set: %s", subresourceListString(curSet))
sort.Sort(virtualDiskSubresourceSorter(curSet))
log.Printf("[DEBUG] DiskCloneRelocateOperation: Resource set order after sort: %s", subresourceListString(curSet))

log.Printf("[DEBUG] DiskCloneRelocateOperation: Generating relocators for source disks")
var relocators []types.VirtualMachineRelocateSpecDiskLocator
for i, device := range devices {
m := curSet[i].(map[string]interface{})
diskDataMap := curSet[i].(map[string]interface{})
vd := device.GetVirtualDevice()
ctlr := l.FindByKey(vd.ControllerKey)
ctlr := deviceList.FindByKey(vd.ControllerKey)
if ctlr == nil {
return nil, fmt.Errorf("could not find controller with key %d", vd.Key)
}
m["key"] = int(vd.Key)
diskDataMap["key"] = int(vd.Key)
var err error
m["device_address"], err = computeDevAddr(vd, ctlr.(types.BaseVirtualController))
diskDataMap["device_address"], err = computeDevAddr(vd, ctlr.(types.BaseVirtualController))
if err != nil {
return nil, fmt.Errorf("error computing device address: %s", err)
}
r := NewDiskSubresource(c, d, m, nil, i)
// A disk locator is only useful if a target datastore is available. If we
// don't have a datastore specified (ie: when Storage DRS is in use), then
// we just need to skip this disk. The disk will be migrated properly
// through the SDRS API.
if dsID := r.Get("datastore_id"); dsID == "" || dsID == diskDatastoreComputedName {
r := NewDiskSubresource(client, resourceData, diskDataMap, nil, i)

shouldRelocate := shouldAddRelocateSpec(resourceData, device.(*types.VirtualDisk), i)
if !shouldRelocate {
continue
}

r = addDiskDatastore(r, resourceData)
// Otherwise, proceed with generating and appending the locator.
relocator, err := r.Relocate(l, true)
relocator, err := r.Relocate(deviceList, true)
if err != nil {
return nil, fmt.Errorf("%s: %s", r.Addr(), err)
}
@@ -946,6 +946,104 @@ func DiskCloneRelocateOperation(d *schema.ResourceData, c *govmomi.Client, l obj
return relocators, nil
}
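
// For context, each relocator appended above is a
// types.VirtualMachineRelocateSpecDiskLocator pairing a disk key with a
// target datastore and, optionally, a replacement backing. A hand-built
// illustration with placeholder values (not taken from this change):
//
//	locator := types.VirtualMachineRelocateSpecDiskLocator{
//		DiskId:    2000, // device key of the virtual disk being relocated
//		Datastore: types.ManagedObjectReference{Type: "Datastore", Value: "datastore-123"},
//		DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
//			ThinProvisioned: types.NewBool(true),
//			EagerlyScrub:    types.NewBool(false),
//		},
//	}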

// addDiskDatastore copies the VM-level datastore_id onto the disk
// subresource when the disk does not set one explicitly (or it is computed).
func addDiskDatastore(r *DiskSubresource, d *schema.ResourceData) *DiskSubresource {
diskDsId := r.Get("datastore_id")
dataDsId := d.Get("datastore_id")
if (diskDsId == "" || diskDsId == diskDatastoreComputedName) && dataDsId != "" {
r.Set("datastore_id", dataDsId)
}

return r
}

// shouldAddRelocateSpec reports whether a relocate spec should be generated
// for the disk at schemaDiskIndex, by comparing the relocation-relevant
// properties declared in configuration with those on the existing disk.
func shouldAddRelocateSpec(d *schema.ResourceData, disk *types.VirtualDisk, schemaDiskIndex int) bool {
relocateProperties := []string{
"datastore_id",
"disk_mode",
"eagerly_scrub",
"disk_sharing",
"thin_provisioned",
"write_through",
}

diskProps := virtualDiskToSchemaPropsMap(disk)
dataProps := diskDataToSchemaProps(d, schemaDiskIndex)

for _, key := range relocateProperties {
dataProp, dataPropOk := dataProps[key]
diskProp, diskPropOk := diskProps[key]

diff := false
if dataPropOk && diskPropOk {
diff = dataProp != diskProp
}

if diff {
return true
}

}

return false
}

// virtualDiskToSchemaPropsMap extracts the relocation-relevant properties
// from the existing disk's backing, keyed by schema attribute name.
func virtualDiskToSchemaPropsMap(disk *types.VirtualDisk) map[string]interface{} {
m := make(map[string]interface{})
if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["eagerly_scrub"] = backing.EagerlyScrub
m["thin_provisioned"] = backing.ThinProvisioned
m["write_through"] = backing.WriteThrough
} else if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer1BackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["write_through"] = backing.WriteThrough
} else if backing, ok := disk.Backing.(*types.VirtualDiskLocalPMemBackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
} else if backing, ok := disk.Backing.(*types.VirtualDiskSeSparseBackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["write_through"] = backing.WriteThrough
} else if backing, ok := disk.Backing.(*types.VirtualDiskSparseVer1BackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["write_through"] = backing.WriteThrough
}

return m
}

// diskDataToSchemaProps collects the relocation-relevant disk properties that
// are set in configuration for the disk at deviceIndex.
func diskDataToSchemaProps(d *schema.ResourceData, deviceIndex int) map[string]interface{} {
m := make(map[string]interface{})
datastoreKey := fmt.Sprintf("disk.%d.datastore_id", deviceIndex)
if datastoreID, ok := d.GetOk(datastoreKey); ok {
m["datastore_id"] = datastoreID
}

diskModeKey := fmt.Sprintf("disk.%d.disk_mode", deviceIndex)
if diskMode, ok := d.GetOk(diskModeKey); ok {
m["disk_mode"] = diskMode
}

eagerlyScrubKey := fmt.Sprintf("disk.%d.eagerly_scrub", deviceIndex)
if eagerlyScrub, ok := d.GetOk(eagerlyScrubKey); ok {
m["eagerly_scrub"] = eagerlyScrub
}

thinProvisionedKey := fmt.Sprintf("disk.%d.thin_provisioned", deviceIndex)
if thinProvisioned, ok := d.GetOk(thinProvisionedKey); ok {
m["thin_provisioned"] = thinProvisioned
}

return m
}

// DiskPostCloneOperation normalizes the virtual disks on a freshly-cloned
// virtual machine and outputs any necessary device change operations. It also
// sets the state in advance of the post-create read.
@@ -1466,15 +1564,7 @@ func (r *DiskSubresource) DiffExisting() error {
return fmt.Errorf("virtual disk %q: virtual disks cannot be shrunk (old: %d new: %d)", name, osize.(int), nsize.(int))
}

// Ensure that there is no change in either eagerly_scrub or thin_provisioned
// - these values cannot be changed once set.
if _, err = r.GetWithVeto("eagerly_scrub"); err != nil {
return fmt.Errorf("virtual disk %q: %s", name, err)
}
if _, err = r.GetWithVeto("thin_provisioned"); err != nil {
return fmt.Errorf("virtual disk %q: %s", name, err)
}
// Same with attach
// Ensure that there is no change in attach value
if _, err = r.GetWithVeto("attach"); err != nil {
return fmt.Errorf("virtual disk %q: %s", name, err)
}
4 changes: 2 additions & 2 deletions vsphere/resource_vsphere_content_library_item_test.go
@@ -186,8 +186,8 @@ func testAccResourceVSphereContentLibraryItemDescription(expected *regexp.Regexp
if err != nil {
return err
}
if !expected.MatchString(library.Description) {
return fmt.Errorf("Content Library item description does not match. expected: %s, got %s", expected.String(), library.Description)
if !expected.MatchString(*library.Description) {
return fmt.Errorf("Content Library item description does not match. expected: %s, got %v", expected.String(), library.Description)
}
return nil
}
4 changes: 2 additions & 2 deletions vsphere/resource_vsphere_content_library_test.go
@@ -111,8 +111,8 @@ func testAccResourceVSphereContentLibraryDescription(expected *regexp.Regexp) re
if err != nil {
return err
}
if !expected.MatchString(library.Description) {
return fmt.Errorf("Content Library description does not match. expected: %s, got %s", expected.String(), library.Description)
if !expected.MatchString(*library.Description) {
return fmt.Errorf("Content Library description does not match. expected: %s, got %v", expected.String(), library.Description)
}
return nil
}
129 changes: 129 additions & 0 deletions vsphere/resource_vsphere_virtual_machine_test.go
@@ -1828,6 +1828,26 @@ func TestAccResourceVSphereVirtualMachine_cloneWithDifferentHostname(t *testing.
},
})
}

func TestAccResourceVSphereVirtualMachine_cloneWithDiskTypeChange(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
testAccPreCheck(t)
testAccResourceVSphereVirtualMachinePreCheck(t)
},
Providers: testAccProviders,
CheckDestroy: testAccResourceVSphereVirtualMachineCheckExists(false),
Steps: []resource.TestStep{
{
Config: testAccResourceVSphereVirtualMachineConfigCloneChangedDiskType(),
Check: resource.ComposeTestCheckFunc(
testAccResourceVSphereVirtualMachineCheckExists(true),
testAccResourceVSphereVirtualMachineCheckEagerlyScrub(0, true),
),
},
},
})
}

func TestAccResourceVSphereVirtualMachine_cpuHotAdd(t *testing.T) {
resource.Test(t, resource.TestCase{
@@ -2540,6 +2560,39 @@ func testAccResourceVSphereVirtualMachineCheckExists(expected bool) resource.Tes
}
}

func testAccResourceVSphereVirtualMachineCheckEagerlyScrub(diskIndex int, eagerlyScrubbedValue bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
props, err := testGetVirtualMachineProperties(s, "vm")
if err != nil {
return err
}

currentDiskIndex := -1
for _, device := range props.Config.Hardware.Device {
disk, ok := device.(*types.VirtualDisk)
if !ok {
continue
}
currentDiskIndex++
if currentDiskIndex != diskIndex {
continue
}
backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
if !ok {
continue
}

if *backing.EagerlyScrub != eagerlyScrubbedValue {
return fmt.Errorf("expected %t as eagerly_scrub for disk %d but received %t",
eagerlyScrubbedValue, diskIndex, *backing.EagerlyScrub)
}

}

return nil
}
}

// testAccResourceVSphereVirtualMachineCheckPowerState is a check to check for
// a VirtualMachine's power state.

@@ -5639,6 +5692,82 @@ resource "vsphere_virtual_machine" "vm" {
)
}

func testAccResourceVSphereVirtualMachineConfigCloneChangedDiskType() string {
return fmt.Sprintf(`
%s
data "vsphere_network" "network" {
name = "VM Network"
datacenter_id = data.vsphere_datacenter.rootdc1.id
}
resource "vsphere_virtual_machine" "template" {
name = "vm-1-template"
resource_pool_id = data.vsphere_compute_cluster.rootcompute_cluster1.resource_pool_id
datastore_id = data.vsphere_datastore.rootds1.id
num_cpus = 2
memory = 2048
guest_id = "other3xLinuxGuest"
network_interface {
network_id = data.vsphere_network.network.id
}
wait_for_guest_ip_timeout = 0
wait_for_guest_net_timeout = 0
disk {
label = "disk0"
size = 4
eagerly_scrub = false
thin_provisioned = false
}
lifecycle {
ignore_changes = [disk]
}
}
resource "vsphere_virtual_machine" "vm" {
name = "vm-1-template-clone"
resource_pool_id = data.vsphere_compute_cluster.rootcompute_cluster1.resource_pool_id
guest_id = vsphere_virtual_machine.template.guest_id
network_interface {
network_id = data.vsphere_network.network.id
}
datastore_id = data.vsphere_datastore.rootds1.id
num_cpus = 2
memory = 2048
scsi_type = vsphere_virtual_machine.template.scsi_type
wait_for_guest_ip_timeout = 0
wait_for_guest_net_timeout = 0
disk {
label = "disk0"
size = vsphere_virtual_machine.template.disk.0.size
eagerly_scrub = true
thin_provisioned = false
}
clone {
template_uuid = vsphere_virtual_machine.template.id
}
}
`,
testhelper.CombineConfigs(
testhelper.ConfigDataRootDC1(),
testhelper.ConfigDataRootComputeCluster1(),
testhelper.ConfigDataRootDS1(),
),
)
}

func testAccResourceVSphereVirtualMachineConfigWithHotAdd(nc, nm int, cha, chr, mha bool) string {
return fmt.Sprintf(`
