fix: disk provisioning settings are not correctly applied #2028

Merged
156 changes: 81 additions & 75 deletions CHANGELOG.md

Large diffs are not rendered by default.

@@ -67,8 +67,9 @@ func CreateLibrary(d *schema.ResourceData, restclient *rest.Client, backings []l
log.Printf("[DEBUG] contentlibrary.CreateLibrary: Creating content library %s", name)
clm := library.NewManager(restclient)
ctx := context.TODO()
description := d.Get("description").(string)
lib := library.Library{
Description: d.Get("description").(string),
Description: &description,
Name: name,
Storage: backings,
Type: "LOCAL",
@@ -165,7 +166,7 @@ func CreateLibraryItem(c *rest.Client, l *library.Library, name string, desc str
clm := library.NewManager(c)
ctx := context.TODO()
item := library.Item{
Description: desc,
Description: &desc,
LibraryID: l.ID,
Name: name,
Type: t,
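
Both content-library hunks above follow the same pattern: the library and item types now take Description as a *string, and because the result of a type assertion is not addressable in Go, the value is stored in a local variable first so its address can be passed. A minimal, self-contained sketch of that constraint (libraryInfo and getDescription are hypothetical stand-ins for illustration, not the actual govmomi or SDK types):

package main

import "fmt"

// libraryInfo mirrors only the relevant shape of the updated library type.
type libraryInfo struct {
	Description *string
}

// getDescription stands in for d.Get("description"), which returns interface{}.
func getDescription() interface{} { return "an example description" }

func main() {
	// lib.Description = &getDescription().(string) would not compile: the
	// result of a type assertion is not addressable.
	description := getDescription().(string)
	lib := libraryInfo{Description: &description}
	fmt.Println(*lib.Description)
}
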
138 changes: 114 additions & 24 deletions vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
@@ -892,49 +892,49 @@ func DiskMigrateRelocateOperation(data *schema.ResourceData, client *govmomi.Cli
// backing data defined in config, taking on these filenames when cloned. After
// the clone is complete, natural re-configuration happens to bring the disk
// configurations fully in sync with what is defined.
func DiskCloneRelocateOperation(d *schema.ResourceData, c *govmomi.Client, l object.VirtualDeviceList) ([]types.VirtualMachineRelocateSpecDiskLocator, error) {
func DiskCloneRelocateOperation(resourceData *schema.ResourceData, client *govmomi.Client, deviceList object.VirtualDeviceList) ([]types.VirtualMachineRelocateSpecDiskLocator, error) {
log.Printf("[DEBUG] DiskCloneRelocateOperation: Generating full disk relocate spec list")
devices := SelectDisks(l, d.Get("scsi_controller_count").(int), d.Get("sata_controller_count").(int), d.Get("ide_controller_count").(int))
devices := SelectDisks(deviceList, resourceData.Get("scsi_controller_count").(int), resourceData.Get("sata_controller_count").(int), resourceData.Get("ide_controller_count").(int))
log.Printf("[DEBUG] DiskCloneRelocateOperation: Disk devices located: %s", DeviceListString(devices))
// Sort the device list, in case it's not sorted already.
devSort := virtualDeviceListSorter{
Sort: devices,
DeviceList: l,
DeviceList: deviceList,
}
sort.Sort(devSort)
devices = devSort.Sort
log.Printf("[DEBUG] DiskCloneRelocateOperation: Disk devices order after sort: %s", DeviceListString(devices))
// Do the same for our listed disks.
curSet := d.Get(subresourceTypeDisk).([]interface{})
curSet := resourceData.Get(subresourceTypeDisk).([]interface{})
log.Printf("[DEBUG] DiskCloneRelocateOperation: Current resource set: %s", subresourceListString(curSet))
sort.Sort(virtualDiskSubresourceSorter(curSet))
log.Printf("[DEBUG] DiskCloneRelocateOperation: Resource set order after sort: %s", subresourceListString(curSet))

log.Printf("[DEBUG] DiskCloneRelocateOperation: Generating relocators for source disks")
var relocators []types.VirtualMachineRelocateSpecDiskLocator
for i, device := range devices {
m := curSet[i].(map[string]interface{})
diskDataMap := curSet[i].(map[string]interface{})
vd := device.GetVirtualDevice()
ctlr := l.FindByKey(vd.ControllerKey)
ctlr := deviceList.FindByKey(vd.ControllerKey)
if ctlr == nil {
return nil, fmt.Errorf("could not find controller with key %d", vd.Key)
}
m["key"] = int(vd.Key)
diskDataMap["key"] = int(vd.Key)
var err error
m["device_address"], err = computeDevAddr(vd, ctlr.(types.BaseVirtualController))
diskDataMap["device_address"], err = computeDevAddr(vd, ctlr.(types.BaseVirtualController))
if err != nil {
return nil, fmt.Errorf("error computing device address: %s", err)
}
r := NewDiskSubresource(c, d, m, nil, i)
// A disk locator is only useful if a target datastore is available. If we
// don't have a datastore specified (ie: when Storage DRS is in use), then
// we just need to skip this disk. The disk will be migrated properly
// through the SDRS API.
if dsID := r.Get("datastore_id"); dsID == "" || dsID == diskDatastoreComputedName {
r := NewDiskSubresource(client, resourceData, diskDataMap, nil, i)

shouldRelocate := shouldAddRelocateSpec(resourceData, device.(*types.VirtualDisk), i)
if !shouldRelocate {
continue
}

r = addDiskDatastore(r, resourceData)
// Otherwise, proceed with generating and appending the locator.
relocator, err := r.Relocate(l, true)
relocator, err := r.Relocate(deviceList, true)
if err != nil {
return nil, fmt.Errorf("%s: %s", r.Addr(), err)
}
@@ -946,6 +946,104 @@ func DiskCloneRelocateOperation(d *schema.ResourceData, c *govmomi.Client, l obj
return relocators, nil
}

// addDiskDatastore sets the disk's datastore_id from the virtual machine's
// datastore_id when the disk does not define one of its own.
func addDiskDatastore(r *DiskSubresource, d *schema.ResourceData) *DiskSubresource {
diskDsId := r.Get("datastore_id")
dataDsId := d.Get("datastore_id")
if (diskDsId == "" || diskDsId == diskDatastoreComputedName) && dataDsId != "" {
r.Set("datastore_id", dataDsId)
}

return r
}
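
// shouldAddRelocateSpec reports whether a relocate spec should be generated for
// the disk at schemaDiskIndex by comparing the relocate-relevant properties of
// the live virtual disk against the values set in the Terraform configuration.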
func shouldAddRelocateSpec(d *schema.ResourceData, disk *types.VirtualDisk, schemaDiskIndex int) bool {
relocateProperties := []string{
"datastore_id",
"disk_mode",
"eagerly_scrub",
"disk_sharing",
"thin_provisioned",
"write_through",
}

diskProps := virtualDiskToSchemaPropsMap(disk)
dataProps := diskDataToSchemaProps(d, schemaDiskIndex)

for _, key := range relocateProperties {
dataProp, dataPropOk := dataProps[key]
diskProp, diskPropOk := diskProps[key]

diff := false
if dataPropOk && diskPropOk {
diff = dataProp != diskProp
}

if diff {
return true
}

}

return false
}

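// virtualDiskToSchemaPropsMap maps the relocate-relevant properties of a
// virtual disk's backing onto the schema attribute names used by the disk
// sub-resource; backing types that do not carry a given property omit it.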
func virtualDiskToSchemaPropsMap(disk *types.VirtualDisk) map[string]interface{} {
m := make(map[string]interface{})
if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["eagerly_scrub"] = backing.EagerlyScrub
m["thin_provisioned"] = backing.ThinProvisioned
m["write_through"] = backing.WriteThrough
} else if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer1BackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["write_through"] = backing.WriteThrough
} else if backing, ok := disk.Backing.(*types.VirtualDiskLocalPMemBackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
} else if backing, ok := disk.Backing.(*types.VirtualDiskSeSparseBackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["write_through"] = backing.WriteThrough
} else if backing, ok := disk.Backing.(*types.VirtualDiskSparseVer1BackingInfo); ok {
m["datastore_id"] = backing.Datastore.Value
m["disk_mode"] = backing.DiskMode
m["write_through"] = backing.WriteThrough
}

return m
}

// diskDataToSchemaProps reads the relocate-relevant disk properties for the
// disk at deviceIndex from the Terraform configuration, keyed by attribute name.
func diskDataToSchemaProps(d *schema.ResourceData, deviceIndex int) map[string]interface{} {
m := make(map[string]interface{})
datastoreKey := fmt.Sprintf("disk.%d.datastore_id", deviceIndex)
if datastoreId, ok := d.GetOk(datastoreKey); ok {
m["datastore_id"] = datastoreId
}

diskModeKey := fmt.Sprintf("disk.%d.disk_mode", deviceIndex)
if diskMode, ok := d.GetOk(diskModeKey); ok {
m["disk_mode"] = diskMode
}

eagerlyScrubKey := fmt.Sprintf("disk.%d.eagerly_scrub", deviceIndex)
if eagerlyScrub, ok := d.GetOk(eagerlyScrubKey); ok {
m["eagerly_scrub"] = eagerlyScrub
}

thinProvisionedKey := fmt.Sprintf("disk.%d.thin_provisioned", deviceIndex)
if thinProvisioned, ok := d.GetOk(thinProvisionedKey); ok {
m["thin_provisioned"] = thinProvisioned
}

return m
}
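
Taken together, the helpers above decide per disk whether the clone needs a relocate spec: shouldAddRelocateSpec only reports true when a relocate-relevant value present on both the live disk and in the configuration actually differs. A minimal, runnable sketch of that comparison (needsRelocate and the literal maps are hypothetical illustrations, not provider code):

package main

import "fmt"

// needsRelocate mirrors the comparison loop in shouldAddRelocateSpec: a key
// counts as a difference only when it is present on both sides and unequal.
func needsRelocate(diskProps, dataProps map[string]interface{}, keys []string) bool {
	for _, key := range keys {
		dataProp, dataPropOk := dataProps[key]
		diskProp, diskPropOk := diskProps[key]
		if dataPropOk && diskPropOk && dataProp != diskProp {
			return true
		}
	}
	return false
}

func main() {
	keys := []string{"datastore_id", "disk_mode", "eagerly_scrub", "thin_provisioned"}
	// Live template disk: lazy-zeroed thick (not eagerly scrubbed, not thin).
	diskProps := map[string]interface{}{"eagerly_scrub": false, "thin_provisioned": false}
	// Clone configuration: eager-zeroed thick requested.
	dataProps := map[string]interface{}{"eagerly_scrub": true, "thin_provisioned": false}
	fmt.Println(needsRelocate(diskProps, dataProps, keys)) // true: eagerly_scrub differs
}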

// DiskPostCloneOperation normalizes the virtual disks on a freshly-cloned
// virtual machine and outputs any necessary device change operations. It also
// sets the state in advance of the post-create read.
@@ -1466,15 +1564,7 @@ func (r *DiskSubresource) DiffExisting() error {
return fmt.Errorf("virtual disk %q: virtual disks cannot be shrunk (old: %d new: %d)", name, osize.(int), nsize.(int))
}

// Ensure that there is no change in either eagerly_scrub or thin_provisioned
// - these values cannot be changed once set.
if _, err = r.GetWithVeto("eagerly_scrub"); err != nil {
return fmt.Errorf("virtual disk %q: %s", name, err)
}
if _, err = r.GetWithVeto("thin_provisioned"); err != nil {
return fmt.Errorf("virtual disk %q: %s", name, err)
}
// Same with attach
// Ensure that there is no change in attach value
if _, err = r.GetWithVeto("attach"); err != nil {
return fmt.Errorf("virtual disk %q: %s", name, err)
}
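
Note on the DiffExisting hunk above: GetWithVeto returns an error when the named attribute differs between state and configuration, which is how any change to eagerly_scrub or thin_provisioned used to fail the plan. Removing those two vetoes is what allows the provisioning settings to change on a clone; the relocate logic added earlier in this file now applies the change instead of rejecting it. A minimal sketch of the veto pattern being dropped (vetoChange is a hypothetical standalone helper, not the provider's implementation):

package main

import "fmt"

// vetoChange imitates the spirit of GetWithVeto for this sketch only: it
// errors whenever a value would change between old and new.
func vetoChange(name string, oldVal, newVal interface{}) error {
	if oldVal != newVal {
		return fmt.Errorf("cannot change the value of %q (old: %v new: %v)", name, oldVal, newVal)
	}
	return nil
}

func main() {
	fmt.Println(vetoChange("eagerly_scrub", false, true)) // previously a plan-time error
	fmt.Println(vetoChange("eagerly_scrub", true, true))  // unchanged: <nil>
}
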
4 changes: 2 additions & 2 deletions vsphere/resource_vsphere_content_library_item_test.go
@@ -186,8 +186,8 @@ func testAccResourceVSphereContentLibraryItemDescription(expected *regexp.Regexp
if err != nil {
return err
}
if !expected.MatchString(library.Description) {
return fmt.Errorf("Content Library item description does not match. expected: %s, got %s", expected.String(), library.Description)
if !expected.MatchString(*library.Description) {
return fmt.Errorf("Content Library item description does not match. expected: %s, got %v", expected.String(), library.Description)
}
return nil
}
4 changes: 2 additions & 2 deletions vsphere/resource_vsphere_content_library_test.go
@@ -111,8 +111,8 @@ func testAccResourceVSphereContentLibraryDescription(expected *regexp.Regexp) re
if err != nil {
return err
}
if !expected.MatchString(library.Description) {
return fmt.Errorf("Content Library description does not match. expected: %s, got %s", expected.String(), library.Description)
if !expected.MatchString(*library.Description) {
return fmt.Errorf("Content Library description does not match. expected: %s, got %v", expected.String(), library.Description)
}
return nil
}
129 changes: 129 additions & 0 deletions vsphere/resource_vsphere_virtual_machine_test.go
@@ -1828,6 +1828,26 @@ func TestAccResourceVSphereVirtualMachine_cloneWithDifferentHostname(t *testing.
},
})
}
func TestAccResourceVSphereVirtualMachine_cloneWithDiskTypeChange(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
testAccPreCheck(t)
testAccResourceVSphereVirtualMachinePreCheck(t)
},
Providers: testAccProviders,
CheckDestroy: testAccResourceVSphereVirtualMachineCheckExists(false),
Steps: []resource.TestStep{
{
Config: testAccResourceVSphereVirtualMachineConfigCloneChangedDiskType(),
Check: resource.ComposeTestCheckFunc(
testAccResourceVSphereVirtualMachineCheckExists(true),
testAccResourceVSphereVirtualMachineCheckEagerlyScrub(0, true),
),
},
},
})
}

func TestAccResourceVSphereVirtualMachine_cpuHotAdd(t *testing.T) {
resource.Test(t, resource.TestCase{
@@ -2540,6 +2560,39 @@ func testAccResourceVSphereVirtualMachineCheckExists(expected bool) resource.Tes
}
}

func testAccResourceVSphereVirtualMachineCheckEagerlyScrub(diskIndex int, eagerlyScrubbedValue bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
props, err := testGetVirtualMachineProperties(s, "vm")
if err != nil {
return err
}

currentDiskIndex := -1
for _, device := range props.Config.Hardware.Device {
disk, ok := device.(*types.VirtualDisk)
if !ok {
continue
}
currentDiskIndex++
if currentDiskIndex != diskIndex {
continue
}
backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
if !ok {
continue
}

if *backing.EagerlyScrub != eagerlyScrubbedValue {
return fmt.Errorf("expected %t as eagerlyScrubbed for disk %d but received %t",
eagerlyScrubbedValue, diskIndex, *backing.EagerlyScrub)
}

}

return nil
}
}

// testAccResourceVSphereVirtualMachineCheckPowerState is a check for a
// VirtualMachine's power state.

@@ -5639,6 +5692,82 @@ resource "vsphere_virtual_machine" "vm" {
)
}

func testAccResourceVSphereVirtualMachineConfigCloneChangedDiskType() string {
return fmt.Sprintf(`
%s

data "vsphere_network" "network" {
name = "VM Network"
datacenter_id = data.vsphere_datacenter.rootdc1.id
}

resource "vsphere_virtual_machine" "template" {
name = "vm-1-template"
resource_pool_id = data.vsphere_compute_cluster.rootcompute_cluster1.resource_pool_id
datastore_id = data.vsphere_datastore.rootds1.id

num_cpus = 2
memory = 2048
guest_id = "other3xLinuxGuest"

network_interface {
network_id = data.vsphere_network.network.id
}


wait_for_guest_ip_timeout = 0
wait_for_guest_net_timeout = 0

disk {
label = "disk0"
size = 4
eagerly_scrub = false
thin_provisioned = false
}
lifecycle {
ignore_changes = [disk]
}
}



resource "vsphere_virtual_machine" "vm" {
name = "vm-1-template-clone"
resource_pool_id = data.vsphere_compute_cluster.rootcompute_cluster1.resource_pool_id
guest_id = vsphere_virtual_machine.template.guest_id
network_interface {
network_id = data.vsphere_network.network.id
}
datastore_id = data.vsphere_datastore.rootds1.id

num_cpus = 2
memory = 2048

scsi_type = vsphere_virtual_machine.template.scsi_type
wait_for_guest_ip_timeout = 0
wait_for_guest_net_timeout = 0

disk {
label = "disk0"
size = vsphere_virtual_machine.template.disk.0.size
eagerly_scrub = true
thin_provisioned = false
}

clone {
template_uuid = vsphere_virtual_machine.template.id
}
}

`,
testhelper.CombineConfigs(
testhelper.ConfigDataRootDC1(),
testhelper.ConfigDataRootComputeCluster1(),
testhelper.ConfigDataRootDS1(),
),
)
}

func testAccResourceVSphereVirtualMachineConfigWithHotAdd(nc, nm int, cha, chr, mha bool) string {
return fmt.Sprintf(`
