diff --git a/tf-vsphere-devrc.mk.example b/tf-vsphere-devrc.mk.example index 547f43200..6d36e9864 100644 --- a/tf-vsphere-devrc.mk.example +++ b/tf-vsphere-devrc.mk.example @@ -40,5 +40,9 @@ export VSPHERE_HOST_NIC0 ?= vmnic0 # NIC0 for host net tests export VSPHERE_HOST_NIC1 ?= vmnic1 # NIC1 for host net tests export VSPHERE_VMFS_EXPECTED ?= scsi-name # Name of expected SCSI disk export VSPHERE_VMFS_REGEXP ?= expr # Regexp for SCSI disk search +export VSPHERE_DS_VMFS_DISK0 ?= scsi-name0 # 1st disk for vmfs_datastore +export VSPHERE_DS_VMFS_DISK1 ?= scsi-name1 # 2nd disk for vmfs_datastore +export VSPHERE_DS_VMFS_DISK2 ?= scsi-name2 # 3rd disk for vmfs_datastore +export VSPHERE_DS_FOLDER ?= ds-folder # Path to a datastore folder # vi: filetype=make diff --git a/vsphere/datastore_helper.go b/vsphere/datastore_helper.go new file mode 100644 index 000000000..5527bedc7 --- /dev/null +++ b/vsphere/datastore_helper.go @@ -0,0 +1,71 @@ +package vsphere + +import ( + "context" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// datastoreFromID locates a Datastore by its managed object reference ID. +func datastoreFromID(client *govmomi.Client, id string) (*object.Datastore, error) { + finder := find.NewFinder(client.Client, false) + + ref := types.ManagedObjectReference{ + Type: "Datastore", + Value: id, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + ds, err := finder.ObjectReference(ctx, ref) + if err != nil { + return nil, err + } + // Should be safe to return here. If our reference returned here and is not a + // datastore, then we have bigger problems and to be honest we should be + // panicking anyway. + return ds.(*object.Datastore), nil +} + +// datastoreProperties is a convenience method that wraps fetching the +// Datastore MO from its higher-level object. +func datastoreProperties(ds *object.Datastore) (*mo.Datastore, error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + var props mo.Datastore + if err := ds.Properties(ctx, ds.Reference(), nil, &props); err != nil { + return nil, err + } + return &props, nil +} + +// moveDatastoreToFolder is a complex method that moves a datastore to a given +// relative datastore folder path. "Relative" here means relative to a +// datacenter, which is discovered from the current datastore path. +func moveDatastoreToFolder(client *govmomi.Client, ds *object.Datastore, relative string) error { + folder, err := datastoreFolderFromObject(client, ds, relative) + if err != nil { + return err + } + return moveObjectToFolder(ds.Reference(), folder) +} + +// moveDatastoreToFolderRelativeHostSystemID is a complex method that moves a +// datastore to a given datastore path, similar to moveDatastoreToFolder, +// except the path is relative to a HostSystem supplied by ID instead of the +// datastore. 
+func moveDatastoreToFolderRelativeHostSystemID(client *govmomi.Client, ds *object.Datastore, hsID, relative string) error { + hs, err := hostSystemFromID(client, hsID) + if err != nil { + return err + } + folder, err := datastoreFolderFromObject(client, hs, relative) + if err != nil { + return err + } + return moveObjectToFolder(ds.Reference(), folder) +} diff --git a/vsphere/datastore_summary_structure.go b/vsphere/datastore_summary_structure.go new file mode 100644 index 000000000..d543f21b3 --- /dev/null +++ b/vsphere/datastore_summary_structure.go @@ -0,0 +1,72 @@ +package vsphere + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/vmware/govmomi/vim25/types" +) + +// schemaDatastoreSummary returns schema items for resources that +// need to work with a DatastoreSummary. +func schemaDatastoreSummary() map[string]*schema.Schema { + return map[string]*schema.Schema{ + // Note that the following fields are not represented in the schema here: + // * Name (more than likely the ID attribute and will be represented in + // resource schema) + // * Type (redundant attribute as the datastore type will be represented by + // the resource) + "accessible": &schema.Schema{ + Type: schema.TypeBool, + Description: "The connectivity status of the datastore. If this is false, some other computed attributes may be out of date.", + Computed: true, + }, + "capacity": &schema.Schema{ + Type: schema.TypeInt, + Description: "Maximum capacity of the datastore, in MB.", + Computed: true, + }, + "free_space": &schema.Schema{ + Type: schema.TypeInt, + Description: "Available space of this datastore, in MB.", + Computed: true, + }, + "maintenance_mode": &schema.Schema{ + Type: schema.TypeString, + Description: "The current maintenance mode state of the datastore.", + Computed: true, + }, + "multiple_host_access": &schema.Schema{ + Type: schema.TypeBool, + Description: "If true, more than one host in the datacenter has been configured with access to the datastore.", + Computed: true, + }, + "uncommitted_space": &schema.Schema{ + Type: schema.TypeInt, + Description: "Total additional storage space, in MB, potentially used by all virtual machines on this datastore.", + Computed: true, + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Description: "The unique locator for the datastore.", + Computed: true, + }, + } +} + +// flattenDatastoreSummary reads various fields from a DatastoreSummary into +// the passed in ResourceData. 
+func flattenDatastoreSummary(d *schema.ResourceData, obj *types.DatastoreSummary) error { + d.Set("accessible", obj.Accessible) + d.Set("capacity", byteToMB(obj.Capacity)) + d.Set("free_space", byteToMB(obj.FreeSpace)) + d.Set("maintenance_mode", obj.MaintenanceMode) + d.Set("multiple_host_access", obj.MultipleHostAccess) + d.Set("uncommitted_space", byteToMB(obj.Uncommitted)) + d.Set("url", obj.Url) + + // Set the name attribute off of the name here - since we do not track this + // here we check for errors + if err := d.Set("name", obj.Name); err != nil { + return err + } + return nil +} diff --git a/vsphere/folder_helper.go b/vsphere/folder_helper.go new file mode 100644 index 000000000..961c10c28 --- /dev/null +++ b/vsphere/folder_helper.go @@ -0,0 +1,193 @@ +package vsphere + +import ( + "context" + "fmt" + "path" + "reflect" + "strings" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// rootPathParticle is the section of a vSphere inventory path that denotes a +// specific kind of inventory item. +type rootPathParticle string + +// String implements Stringer for rootPathParticle. +func (p rootPathParticle) String() string { + return string(p) +} + +// Delimeter returns the path delimiter for the particle, which is basically +// just a particle with a leading slash. +func (p rootPathParticle) Delimeter() string { + return string("/" + p) +} + +// SplitDatacenter is a convenience method that splits out the datacenter path +// from the supplied path for the particle. +func (p rootPathParticle) SplitDatacenter(inventoryPath string) (string, error) { + s := strings.SplitN(inventoryPath, p.Delimeter(), 2) + if len(s) != 2 { + return inventoryPath, fmt.Errorf("could not split path %q on %q", inventoryPath, p.Delimeter()) + } + return s[0], nil +} + +// SplitRelativeFolder is a convenience method that splits out the relative +// folder from the supplied path for the particle. +func (p rootPathParticle) SplitRelativeFolder(inventoryPath string) (string, error) { + s := strings.SplitN(inventoryPath, p.Delimeter(), 2) + if len(s) != 2 { + return inventoryPath, fmt.Errorf("could not split path %q on %q", inventoryPath, p.Delimeter()) + } + return path.Dir(s[1]), nil +} + +// NewRootFromPath takes the datacenter path for a specific entity, and then +// appends the new particle supplied. +func (p rootPathParticle) NewRootFromPath(inventoryPath string, newParticle rootPathParticle) (string, error) { + dcPath, err := p.SplitDatacenter(inventoryPath) + if err != nil { + return inventoryPath, err + } + return fmt.Sprintf("%s/%s", dcPath, newParticle), nil +} + +// PathFromNewRoot takes the datacenter path for a specific entity, and then +// appends the new particle supplied with the new relative path. +// +// As an example, consider a supplied host path "/dc1/host/cluster1/esxi1", and +// a supplied datastore folder relative path of "/foo/bar". This function will +// split off the datacenter section of the path (/dc1) and combine it with the +// datastore folder with the proper delimiter. The resulting path will be +// "/dc1/datastore/foo/bar". 
+func (p rootPathParticle) PathFromNewRoot(inventoryPath string, newParticle rootPathParticle, relative string) (string, error) { + rootPath, err := p.NewRootFromPath(inventoryPath, newParticle) + if err != nil { + return inventoryPath, err + } + return path.Clean(fmt.Sprintf("%s/%s", rootPath, relative)), nil +} + +const ( + rootPathParticleVM = rootPathParticle("vm") + rootPathParticleNetwork = rootPathParticle("network") + rootPathParticleHost = rootPathParticle("host") + rootPathParticleDatastore = rootPathParticle("datastore") +) + +// datacenterPathFromHostSystemID returns the datacenter section of a +// HostSystem's inventory path. +func datacenterPathFromHostSystemID(client *govmomi.Client, hsID string) (string, error) { + hs, err := hostSystemFromID(client, hsID) + if err != nil { + return "", err + } + return rootPathParticleHost.SplitDatacenter(hs.InventoryPath) +} + +// datastoreRootPathFromHostSystemID returns the root datastore folder path +// for a specific host system ID. +func datastoreRootPathFromHostSystemID(client *govmomi.Client, hsID string) (string, error) { + hs, err := hostSystemFromID(client, hsID) + if err != nil { + return "", err + } + return rootPathParticleHost.NewRootFromPath(hs.InventoryPath, rootPathParticleDatastore) +} + +// folderFromObject returns an *object.Folder from a given object of specific +// types, and relative path of a type defined in folderType. If no such folder +// is found, an appropriate error will be returned. +// +// The list of supported object types will grow as the provider supports more +// resources. +func folderFromObject(client *govmomi.Client, obj interface{}, folderType rootPathParticle, relative string) (*object.Folder, error) { + if err := validateVirtualCenter(client); err != nil { + return nil, err + } + var p string + var err error + switch o := obj.(type) { + case (*object.Datastore): + p, err = rootPathParticleDatastore.PathFromNewRoot(o.InventoryPath, folderType, relative) + case (*object.HostSystem): + p, err = rootPathParticleHost.PathFromNewRoot(o.InventoryPath, folderType, relative) + default: + return nil, fmt.Errorf("unsupported object type %T", o) + } + if err != nil { + return nil, err + } + // Set up a finder. Don't set the datacenter here as we are looking for the + // full path, so it should not be necessary. + finder := find.NewFinder(client.Client, false) + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + folder, err := finder.Folder(ctx, p) + if err != nil { + return nil, err + } + return folder, nil +} + +// datastoreFolderFromObject returns an *object.Folder from a given object, +// and relative datastore folder path. If no such folder is found, or if it is +// not a datastore folder, an appropriate error will be returned. +func datastoreFolderFromObject(client *govmomi.Client, obj interface{}, relative string) (*object.Folder, error) { + folder, err := folderFromObject(client, obj, rootPathParticleDatastore, relative) + if err != nil { + return nil, err + } + + return validateDatastoreFolder(folder) +} + +// validateDatastoreFolder checks to make sure the folder is a datastore +// folder, and returns it if it is, or an error if it isn't.
+func validateDatastoreFolder(folder *object.Folder) (*object.Folder, error) { + var props mo.Folder + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + if err := folder.Properties(ctx, folder.Reference(), nil, &props); err != nil { + return nil, err + } + if !reflect.DeepEqual(props.ChildType, []string{"Folder", "Datastore", "StoragePod"}) { + return nil, fmt.Errorf("%q is not a datastore folder", folder.InventoryPath) + } + return folder, nil +} + +// pathIsEmpty checks a folder path to see if it's "empty" (ie: would resolve +// to the root inventory path for a given type in a datacenter - "" or "/"). +func pathIsEmpty(path string) bool { + return path == "" || path == "/" +} + +// normalizeFolderPath is a SchemaStateFunc that normalizes a folder path. +func normalizeFolderPath(v interface{}) string { + p := v.(string) + if pathIsEmpty(p) { + return "" + } + return strings.TrimPrefix(path.Clean(p), "/") +} + +// moveObjectToFolder moves a object by reference into a folder. +func moveObjectToFolder(ref types.ManagedObjectReference, folder *object.Folder) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + task, err := folder.MoveInto(ctx, []types.ManagedObjectReference{ref}) + if err != nil { + return err + } + tctx, tcancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer tcancel() + return task.Wait(tctx) +} diff --git a/vsphere/helper_test.go b/vsphere/helper_test.go index 5a4e26299..ba58f9123 100644 --- a/vsphere/helper_test.go +++ b/vsphere/helper_test.go @@ -3,6 +3,7 @@ package vsphere import ( "fmt" "os" + "regexp" "testing" "time" @@ -44,13 +45,35 @@ func testClientVariablesForResource(s *terraform.State, addr string) (testCheckV }, nil } +// testAccESXiFlagSet returns true if VSPHERE_TEST_ESXI is set. +func testAccESXiFlagSet() bool { + return os.Getenv("VSPHERE_TEST_ESXI") != "" +} + // testAccSkipIfNotEsxi skips a test if VSPHERE_TEST_ESXI is not set. func testAccSkipIfNotEsxi(t *testing.T) { - if os.Getenv("VSPHERE_TEST_ESXI") == "" { + if !testAccESXiFlagSet() { t.Skip("set VSPHERE_TEST_ESXI to run ESXi-specific acceptance tests") } } +// testAccSkipIfEsxi skips a test if VSPHERE_TEST_ESXI is set. +func testAccSkipIfEsxi(t *testing.T) { + if testAccESXiFlagSet() { + t.Skip("test skipped as VSPHERE_TEST_ESXI is set") + } +} + +// expectErrorIfNotVirtualCenter returns the error message that +// validateVirtualCenter returns if VSPHERE_TEST_ESXI is set, to allow for test +// cases that will still run on ESXi, but will expect validation failure. +func expectErrorIfNotVirtualCenter() *regexp.Regexp { + if testAccESXiFlagSet() { + return regexp.MustCompile(errVirtualCenterOnly) + } + return nil +} + // testGetPortGroup is a convenience method to fetch a static port group // resource for testing. func testGetPortGroup(s *terraform.State, resourceName string) (*types.HostPortGroup, error) { diff --git a/vsphere/host_data_store_system_helper.go b/vsphere/host_data_store_system_helper.go new file mode 100644 index 000000000..4b682f3ce --- /dev/null +++ b/vsphere/host_data_store_system_helper.go @@ -0,0 +1,152 @@ +package vsphere + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +// hostDatastoreSystemFromHostSystemID locates a HostDatastoreSystem from a +// specified HostSystem managed object ID. 
+func hostDatastoreSystemFromHostSystemID(client *govmomi.Client, hsID string) (*object.HostDatastoreSystem, error) { + hs, err := hostSystemFromID(client, hsID) + if err != nil { + return nil, err + } + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + return hs.ConfigManager().DatastoreSystem(ctx) +} + +// availableScsiDisk checks to make sure that a disk is available for use in a +// VMFS datastore, and returns the ScsiDisk. +func availableScsiDisk(dss *object.HostDatastoreSystem, name string) (*types.HostScsiDisk, error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + disks, err := dss.QueryAvailableDisksForVmfs(ctx) + if err != nil { + return nil, fmt.Errorf("cannot query available disks: %s", err) + } + + var disk *types.HostScsiDisk + for _, d := range disks { + if d.CanonicalName == name { + disk = &d + break + } + } + if disk == nil { + return nil, fmt.Errorf("%s does not seem to be a disk available for VMFS", name) + } + return disk, nil +} + +// diskSpecForCreate checks to make sure that a disk is available to be used to +// create a VMFS datastore, specifically in its entirety, and returns a +// respective VmfsDatastoreCreateSpec. +func diskSpecForCreate(dss *object.HostDatastoreSystem, name string) (*types.VmfsDatastoreCreateSpec, error) { + disk, err := availableScsiDisk(dss, name) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + options, err := dss.QueryVmfsDatastoreCreateOptions(ctx, disk.DevicePath) + if err != nil { + return nil, fmt.Errorf("could not get disk creation options for %q: %s", name, err) + } + var option *types.VmfsDatastoreOption + for _, o := range options { + if _, ok := o.Info.(*types.VmfsDatastoreAllExtentOption); ok { + option = &o + break + } + } + if option == nil { + return nil, fmt.Errorf("device %q is not available as a new whole-disk device for datastore", name) + } + return option.Spec.(*types.VmfsDatastoreCreateSpec), nil +} + +// diskSpecForExtend checks to make sure that a disk is available to be +// used to extend a VMFS datastore, specifically in its entirety, and returns a +// respective VmfsDatastoreExtendSpec if it is. An error is returned if it's +// not. +func diskSpecForExtend(dss *object.HostDatastoreSystem, ds *object.Datastore, name string) (*types.VmfsDatastoreExtendSpec, error) { + disk, err := availableScsiDisk(dss, name) + if err != nil { + return nil, err + } + + props, err := datastoreProperties(ds) + if err != nil { + return nil, fmt.Errorf("error getting properties for datastore ID %q: %s", ds.Reference().Value, err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + options, err := queryVmfsDatastoreExtendOptions(ctx, dss, ds, disk.DevicePath, true) + if err != nil { + return nil, fmt.Errorf("could not get disk extension options for %q: %s", name, err) + } + var option *types.VmfsDatastoreOption + for _, o := range options { + if _, ok := o.Info.(*types.VmfsDatastoreAllExtentOption); ok { + option = &o + break + } + } + if option == nil { + return nil, fmt.Errorf("device %q cannot be used as a new whole-disk device for datastore %q", name, props.Summary.Name) + } + return option.Spec.(*types.VmfsDatastoreExtendSpec), nil +} + +// removeDatastore is a convenience method for removing a referenced datastore. 
+func removeDatastore(s *object.HostDatastoreSystem, ds *object.Datastore) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + return s.Remove(ctx, ds) +} + +// queryVmfsDatastoreExtendOptions is a stop-gap method that implements +// QueryVmfsDatastoreExtendOptions. It will be removed once the higher level +// HostDatastoreSystem object supports this method. +func queryVmfsDatastoreExtendOptions(ctx context.Context, s *object.HostDatastoreSystem, ds *object.Datastore, devicePath string, suppressExpandCandidates bool) ([]types.VmfsDatastoreOption, error) { + req := types.QueryVmfsDatastoreExtendOptions{ + This: s.Reference(), + Datastore: ds.Reference(), + DevicePath: devicePath, + SuppressExpandCandidates: &suppressExpandCandidates, + } + + res, err := methods.QueryVmfsDatastoreExtendOptions(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// extendVmfsDatastore is a stop-gap method that implements +// ExtendVmfsDatastore. It will be removed once the higher level +// HostDatastoreSystem object supports this method. +func extendVmfsDatastore(ctx context.Context, s *object.HostDatastoreSystem, ds *object.Datastore, spec types.VmfsDatastoreExtendSpec) (*object.Datastore, error) { + req := types.ExtendVmfsDatastore{ + This: s.Reference(), + Datastore: ds.Reference(), + Spec: spec, + } + + res, err := methods.ExtendVmfsDatastore(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return object.NewDatastore(s.Client(), res.Returnval), nil +} diff --git a/vsphere/provider.go b/vsphere/provider.go index c2787ae4c..1b4ad9424 100644 --- a/vsphere/provider.go +++ b/vsphere/provider.go @@ -77,6 +77,7 @@ func Provider() terraform.ResourceProvider { "vsphere_license": resourceVSphereLicense(), "vsphere_virtual_disk": resourceVSphereVirtualDisk(), "vsphere_virtual_machine": resourceVSphereVirtualMachine(), + "vsphere_vmfs_datastore": resourceVSphereVmfsDatastore(), }, DataSourcesMap: map[string]*schema.Resource{ diff --git a/vsphere/resource_vsphere_vmfs_datastore.go b/vsphere/resource_vsphere_vmfs_datastore.go new file mode 100644 index 000000000..16cc99565 --- /dev/null +++ b/vsphere/resource_vsphere_vmfs_datastore.go @@ -0,0 +1,398 @@ +package vsphere + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/vim25/types" +) + +const ( + retryDeletePending = "retryDeletePending" + retryDeleteCompleted = "retryDeleteCompleted" + retryDeleteError = "retryDeleteError" + + waitForDeletePending = "waitForDeletePending" + waitForDeleteCompleted = "waitForDeleteCompleted" + waitForDeleteError = "waitForDeleteError" +) + +// formatVmfsDatastoreCreateRollbackErrorFolder defines the verbose error for moving a +// datastore to a folder on creation where rollback was not possible. +const formatVmfsDatastoreCreateRollbackErrorFolder = ` +WARNING: Dangling resource! +There was an error moving your datastore to the desired folder %q: +%s +Additionally, there was an error removing the created datastore: +%s +You will need to remove this datastore manually before trying again. +` + +// formatVmfsDatastoreCreateRollbackErrorUpdate defines the verbose error for extending a +// disk on creation where rollback is not possible. +const formatVmfsDatastoreCreateRollbackErrorUpdate = ` +WARNING: Dangling resource! 
+There was an error extending your datastore with disk: %q: +%s +Additionally, there was an error removing the created datastore: +%s +You will need to remove this datastore manually before trying again. +` + +// formatVmfsDatastoreCreateRollbackErrorProperties defines the verbose error for fetching the +// properties of the created datastore where rollback was not possible. +const formatVmfsDatastoreCreateRollbackErrorProperties = ` +WARNING: Dangling resource! +After creating the datastore, there was an error fetching its properties: +%s +Additionally, there was an error removing the created datastore: +%s +You will need to remove this datastore manually before trying again. +` + +func resourceVSphereVmfsDatastore() *schema.Resource { + s := map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Description: "The name of the datastore.", + Required: true, + }, + "host_system_id": &schema.Schema{ + Type: schema.TypeString, + Description: "The managed object ID of the host to set up the datastore on.", + ForceNew: true, + Required: true, + }, + "folder": &schema.Schema{ + Type: schema.TypeString, + Description: "The path to the datastore folder to put the datastore in.", + Optional: true, + StateFunc: normalizeFolderPath, + }, + "disks": &schema.Schema{ + Type: schema.TypeList, + Description: "The disks to add to the datastore.", + Required: true, + MinItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + } + mergeSchema(s, schemaDatastoreSummary()) + return &schema.Resource{ + Create: resourceVSphereVmfsDatastoreCreate, + Read: resourceVSphereVmfsDatastoreRead, + Update: resourceVSphereVmfsDatastoreUpdate, + Delete: resourceVSphereVmfsDatastoreDelete, + Importer: &schema.ResourceImporter{ + State: resourceVSphereVmfsDatastoreImport, + }, + Schema: s, + } +} + +func resourceVSphereVmfsDatastoreCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*govmomi.Client) + hsID := d.Get("host_system_id").(string) + dss, err := hostDatastoreSystemFromHostSystemID(client, hsID) + if err != nil { + return fmt.Errorf("error loading host datastore system: %s", err) + } + + // To ensure the datastore is fully created with all the disks that we want + // to add to it, first we add the initial disk, then we extend the datastore + // with the rest of the extents. + disks := d.Get("disks").([]interface{}) + disk := disks[0].(string) + spec, err := diskSpecForCreate(dss, disk) + if err != nil { + return err + } + spec.Vmfs.VolumeName = d.Get("name").(string) + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + ds, err := dss.CreateVmfsDatastore(ctx, *spec) + if err != nil { + return fmt.Errorf("error creating datastore with disk %s: %s", disk, err) + } + + // Move the datastore to the correct folder first, if specified. + folder := d.Get("folder").(string) + if !pathIsEmpty(folder) { + if err := moveDatastoreToFolderRelativeHostSystemID(client, ds, hsID, folder); err != nil { + if remErr := removeDatastore(dss, ds); remErr != nil { + // We could not destroy the created datastore and there is now a dangling + // resource. We need to instruct the user to remove the datastore + // manually. + return fmt.Errorf(formatVmfsDatastoreCreateRollbackErrorFolder, folder, err, remErr) + } + return fmt.Errorf("could not move datastore to folder %q: %s", folder, err) + } + } + + // Now add any remaining disks.
+ for _, disk := range disks[1:] { + spec, err := diskSpecForExtend(dss, ds, disk.(string)) + if err != nil { + // We have to destroy the created datastore here. + if remErr := removeDatastore(dss, ds); remErr != nil { + // We could not destroy the created datastore and there is now a dangling + // resource. We need to instruct the user to remove the datastore + // manually. + return fmt.Errorf(formatVmfsDatastoreCreateRollbackErrorUpdate, disk, err, remErr) + } + return fmt.Errorf("error fetching datastore extend spec for disk %q: %s", disk, err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + if _, err := extendVmfsDatastore(ctx, dss, ds, *spec); err != nil { + if remErr := removeDatastore(dss, ds); remErr != nil { + // We could not destroy the created datastore and there is now a dangling + // resource. We need to instruct the user to remove the datastore + // manually. + return fmt.Errorf(formatVmfsDatastoreCreateRollbackErrorUpdate, disk, err, remErr) + } + return fmt.Errorf("error extending datastore with disk %q: %s", disk, err) + } + } + + d.SetId(ds.Reference().Value) + + // Done + return resourceVSphereVmfsDatastoreRead(d, meta) +} + +func resourceVSphereVmfsDatastoreRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*govmomi.Client) + id := d.Id() + ds, err := datastoreFromID(client, id) + if err != nil { + return fmt.Errorf("cannot find datastore: %s", err) + } + props, err := datastoreProperties(ds) + if err != nil { + return fmt.Errorf("could not get properties for datastore: %s", err) + } + if err := flattenDatastoreSummary(d, &props.Summary); err != nil { + return err + } + + // Set the folder + folder, err := rootPathParticleDatastore.SplitRelativeFolder(ds.InventoryPath) + if err != nil { + return fmt.Errorf("error parsing datastore path %q: %s", ds.InventoryPath, err) + } + d.Set("folder", normalizeFolderPath(folder)) + + // We also need to update the disk list from the summary. + var disks []string + for _, disk := range props.Info.(*types.VmfsDatastoreInfo).Vmfs.Extent { + disks = append(disks, disk.DiskName) + } + if err := d.Set("disks", disks); err != nil { + return err + } + + return nil +} + +func resourceVSphereVmfsDatastoreUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*govmomi.Client) + hsID := d.Get("host_system_id").(string) + dss, err := hostDatastoreSystemFromHostSystemID(client, hsID) + if err != nil { + return fmt.Errorf("error loading host datastore system: %s", err) + } + + id := d.Id() + ds, err := datastoreFromID(client, id) + if err != nil { + return fmt.Errorf("cannot find datastore: %s", err) + } + + // Rename this datastore if our name has drifted. + if d.HasChange("name") { + if err := renameObject(client, ds.Reference(), d.Get("name").(string)); err != nil { + return err + } + } + + // Update folder if necessary + if d.HasChange("folder") { + folder := d.Get("folder").(string) + if err := moveDatastoreToFolder(client, ds, folder); err != nil { + return fmt.Errorf("Could not move datastore to folder %q: %s", folder, err) + } + } + + // Veto this update if it means a disk was removed. Shrinking + // datastores/removing extents is not supported. 
+ old, new := d.GetChange("disks") + for _, v1 := range old.([]interface{}) { + var found bool + for _, v2 := range new.([]interface{}) { + if v1.(string) == v2.(string) { + found = true + } + } + if !found { + return fmt.Errorf("disk %s found in state but not config (removal of disks is not supported)", v1) + } + } + + // Now we basically reverse what we did above when we were checking for + // removed disks, and add any new disks that have been added. + for _, v1 := range new.([]interface{}) { + var found bool + for _, v2 := range old.([]interface{}) { + if v1.(string) == v2.(string) { + found = true + } + } + if !found { + // Add the disk + spec, err := diskSpecForExtend(dss, ds, v1.(string)) + if err != nil { + return err + } + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + if _, err := extendVmfsDatastore(ctx, dss, ds, *spec); err != nil { + return err + } + } + } + + // Should be done with the update here. + return resourceVSphereVmfsDatastoreRead(d, meta) +} + +func resourceVSphereVmfsDatastoreDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*govmomi.Client) + hsID := d.Get("host_system_id").(string) + dss, err := hostDatastoreSystemFromHostSystemID(client, hsID) + if err != nil { + return fmt.Errorf("error loading host datastore system: %s", err) + } + + id := d.Id() + ds, err := datastoreFromID(client, id) + if err != nil { + return fmt.Errorf("cannot find datastore: %s", err) + } + + // This is a race that more than likely will only come up during tests, but + // we still want to guard against it - when working with datastores that end + // up mounting across multiple hosts, removing the datastore will fail if + // it's removed too quickly (like right away, for example). So we set up a + // very short retry waiter to make sure if the first attempt fails, the + // second one should probably succeed right away. We also insert a small + // minimum delay to make an honest first attempt at trying to delete the + // datastore without spamming the task log with errors. + deleteRetryFunc := func() (interface{}, string, error) { + err := removeDatastore(dss, ds) + if err != nil { + if isResourceInUseError(err) { + // Pending + return struct{}{}, retryDeletePending, nil + } + // Some other error + return struct{}{}, retryDeleteError, err + } + // Done + return struct{}{}, retryDeleteCompleted, nil + } + + deleteRetry := &resource.StateChangeConf{ + Pending: []string{retryDeletePending}, + Target: []string{retryDeleteCompleted}, + Refresh: deleteRetryFunc, + Timeout: 30 * time.Second, + MinTimeout: 2 * time.Second, + Delay: 2 * time.Second, + } + + _, err = deleteRetry.WaitForState() + if err != nil { + return fmt.Errorf("could not delete datastore: %s", err) + } + + // We need to make sure the datastore is completely removed. There appears to + // be a bit of a delay sometimes on vCenter, and it causes issues in tests, + // which means it could cause issues somewhere else too. 
+ waitForDeleteFunc := func() (interface{}, string, error) { + _, err := datastoreFromID(client, id) + if err != nil { + if isManagedObjectNotFoundError(err) { + // Done + return struct{}{}, waitForDeleteCompleted, nil + } + // Some other error + return struct{}{}, waitForDeleteError, err + } + return struct{}{}, waitForDeletePending, nil + } + + waitForDelete := &resource.StateChangeConf{ + Pending: []string{waitForDeletePending}, + Target: []string{waitForDeleteCompleted}, + Refresh: waitForDeleteFunc, + Timeout: defaultAPITimeout, + MinTimeout: 2 * time.Second, + Delay: 1 * time.Second, + NotFoundChecks: 35, + } + + _, err = waitForDelete.WaitForState() + if err != nil { + return fmt.Errorf("error waiting for datastore to delete: %s", err.Error()) + } + + return nil +} + +func resourceVSphereVmfsDatastoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // We support importing a MoRef - so we need to load the datastore and check + // to make sure 1) it exists, and 2) it's a VMFS datastore. If it is, we are + // good to go (rest of the stuff will be handled by read on refresh). + ids := strings.SplitN(d.Id(), ":", 2) + if len(ids) != 2 { + return nil, errors.New("please supply the ID in the following format: DATASTOREID:HOSTID") + } + + id := ids[0] + hsID := ids[1] + client := meta.(*govmomi.Client) + ds, err := datastoreFromID(client, id) + if err != nil { + return nil, fmt.Errorf("cannot find datastore: %s", err) + } + props, err := datastoreProperties(ds) + if err != nil { + return nil, fmt.Errorf("could not get properties for datastore: %s", err) + } + + t := types.HostFileSystemVolumeFileSystemType(props.Summary.Type) + if t != types.HostFileSystemVolumeFileSystemTypeVMFS { + return nil, fmt.Errorf("datastore ID %q is not a VMFS datastore", id) + } + + var found bool + for _, mount := range props.Host { + if mount.Key.Value == hsID { + found = true + } + } + if !found { + return nil, fmt.Errorf("configured host_system_id %q not found as a mounted host on datastore", hsID) + } + d.SetId(id) + d.Set("host_system_id", hsID) + + return []*schema.ResourceData{d}, nil +} diff --git a/vsphere/resource_vsphere_vmfs_datastore_test.go b/vsphere/resource_vsphere_vmfs_datastore_test.go new file mode 100644 index 000000000..57df9b25e --- /dev/null +++ b/vsphere/resource_vsphere_vmfs_datastore_test.go @@ -0,0 +1,463 @@ +package vsphere + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccResourceVSphereVmfsDatastore(t *testing.T) { + var tp *testing.T + testAccResourceVSphereVmfsDatastoreCases := []struct { + name string + testCase resource.TestCase + }{ + { + "basic", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingle(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + }, + }, + }, + { + "multi-disk", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticMulti(), + Check: 
resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + }, + }, + }, + { + "discovery via data source", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigDiscoverDatasource(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + }, + }, + }, + { + "add disks through update", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingle(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticMulti(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + }, + }, + }, + { + "rename datastore", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingle(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingleAltName(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + testAccResourceVSphereVmfsDatastoreHasName("terraform-test-renamed"), + ), + }, + }, + }, + }, + { + "with folder", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + // NOTE: This test can't run on ESXi without giving a "dangling + // resource" error during testing - "move to folder after" hits the + // error on the same path of the call stack that triggers an error in + // both create and update and should provide adequate coverage + // barring manual testing. 
+ testAccSkipIfEsxi(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingleFolder(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + testAccResourceVSphereVmfsDatastoreMatchInventoryPath(os.Getenv("VSPHERE_DS_FOLDER")), + ), + }, + }, + }, + }, + { + "move to folder after", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereVmfsDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingle(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + ), + }, + { + Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingleFolder(), + ExpectError: expectErrorIfNotVirtualCenter(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereVmfsDatastoreExists(true), + testAccResourceVSphereVmfsDatastoreMatchInventoryPath(os.Getenv("VSPHERE_DS_FOLDER")), + ), + }, + }, + }, + }, + // TODO: Re-enable this after ImportStateIdFunc is merged and we can vendor it cleanly. + // { + // "import", + // resource.TestCase{ + // PreCheck: func() { + // testAccPreCheck(tp) + // testAccResourceVSphereVmfsDatastorePreCheck(tp) + // }, + // Providers: testAccProviders, + // CheckDestroy: testAccResourceVSphereVmfsDatastoreExists(false), + // Steps: []resource.TestStep{ + // { + // Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingle(), + // Check: resource.ComposeTestCheckFunc( + // testAccResourceVSphereVmfsDatastoreExists(true), + // ), + // }, + // { + // Config: testAccResourceVSphereVmfsDatastoreConfigStaticSingle(), + // ImportState: true, + // ResourceName: "vsphere_vmfs_datastore.datastore", + // ImportStateVerify: true, + // }, + // }, + // }, + // }, + } + + for _, tc := range testAccResourceVSphereVmfsDatastoreCases { + t.Run(tc.name, func(t *testing.T) { + tp = t + resource.Test(t, tc.testCase) + }) + } +} + +func testAccResourceVSphereVmfsDatastorePreCheck(t *testing.T) { + if os.Getenv("VSPHERE_ESXI_HOST") == "" { + t.Skip("set VSPHERE_ESXI_HOST to run vsphere_vmfs_disks acceptance tests") + } + if os.Getenv("VSPHERE_DS_VMFS_DISK0") == "" { + t.Skip("set VSPHERE_DS_VMFS_DISK0 to run vsphere_vmfs_datastore acceptance tests") + } + if os.Getenv("VSPHERE_DS_VMFS_DISK1") == "" { + t.Skip("set VSPHERE_DS_VMFS_DISK1 to run vsphere_vmfs_datastore acceptance tests") + } + if os.Getenv("VSPHERE_DS_VMFS_DISK2") == "" { + t.Skip("set VSPHERE_DS_VMFS_DISK2 to run vsphere_vmfs_datastore acceptance tests") + } + if os.Getenv("VSPHERE_VMFS_REGEXP") == "" { + t.Skip("set VSPHERE_VMFS_REGEXP to run vsphere_vmfs_datastore acceptance tests") + } + if os.Getenv("VSPHERE_DS_FOLDER") == "" { + t.Skip("set VSPHERE_DS_FOLDER to run vsphere_vmfs_datastore acceptance tests") + } +} + +func testAccResourceVSphereVmfsDatastoreExists(expected bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + vars, err := testClientVariablesForResource(s, "vsphere_vmfs_datastore.datastore") + if err != nil { + return err + } + + _, err = datastoreFromID(vars.client, vars.resourceID) + if err != nil { + if isManagedObjectNotFoundError(err) && expected == false { + // Expected missing + return nil + } + return err + } + if !expected { + return fmt.Errorf("expected datastore %s to be 
missing", vars.resourceID) + } + return nil + } +} + +func testAccResourceVSphereVmfsDatastoreHasName(expected string) resource.TestCheckFunc { + return func(s *terraform.State) error { + vars, err := testClientVariablesForResource(s, "vsphere_vmfs_datastore.datastore") + if err != nil { + return err + } + + ds, err := datastoreFromID(vars.client, vars.resourceID) + if err != nil { + return err + } + + props, err := datastoreProperties(ds) + if err != nil { + return err + } + + actual := props.Summary.Name + if expected != actual { + return fmt.Errorf("expected datastore name to be %s, got %s", expected, actual) + } + return nil + } +} + +func testAccResourceVSphereVmfsDatastoreMatchInventoryPath(expected string) resource.TestCheckFunc { + return func(s *terraform.State) error { + vars, err := testClientVariablesForResource(s, "vsphere_vmfs_datastore.datastore") + if err != nil { + return err + } + + ds, err := datastoreFromID(vars.client, vars.resourceID) + if err != nil { + return err + } + + expected, err := rootPathParticleDatastore.PathFromNewRoot(ds.InventoryPath, rootPathParticleDatastore, expected) + actual := path.Dir(ds.InventoryPath) + if err != nil { + return fmt.Errorf("bad: %s", err) + } + if expected != actual { + return fmt.Errorf("expected path to be %s, got %s", expected, actual) + } + return nil + } +} + +func testAccResourceVSphereVmfsDatastoreConfigStaticSingle() string { + return fmt.Sprintf(` +variable "disk0" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test" + host_system_id = "${data.vsphere_host.esxi_host.id}" + + disks = [ + "${var.disk0}", + ] +} +`, os.Getenv("VSPHERE_DS_VMFS_DISK0"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} + +func testAccResourceVSphereVmfsDatastoreConfigStaticSingleAltName() string { + return fmt.Sprintf(` +variable "disk0" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test-renamed" + host_system_id = "${data.vsphere_host.esxi_host.id}" + + disks = [ + "${var.disk0}", + ] +} +`, os.Getenv("VSPHERE_DS_VMFS_DISK0"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} + +func testAccResourceVSphereVmfsDatastoreConfigStaticMulti() string { + return fmt.Sprintf(` +variable "disk0" { + type = "string" + default = "%s" +} + +variable "disk1" { + type = "string" + default = "%s" +} + +variable "disk2" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test" + host_system_id = "${data.vsphere_host.esxi_host.id}" + + disks = [ + "${var.disk0}", + "${var.disk1}", + "${var.disk2}", + ] +} +`, os.Getenv("VSPHERE_DS_VMFS_DISK0"), os.Getenv("VSPHERE_DS_VMFS_DISK1"), os.Getenv("VSPHERE_DS_VMFS_DISK2"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} + +func testAccResourceVSphereVmfsDatastoreConfigDiscoverDatasource() string { + return fmt.Sprintf(` +variable "regexp" { + type = "string" + 
default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +data "vsphere_vmfs_disks" "available" { + host_system_id = "${data.vsphere_host.esxi_host.id}" + rescan = true + filter = "${var.regexp}" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test" + host_system_id = "${data.vsphere_host.esxi_host.id}" + + disks = ["${data.vsphere_vmfs_disks.available.disks}"] +} +`, os.Getenv("VSPHERE_VMFS_REGEXP"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} + +func testAccResourceVSphereVmfsDatastoreConfigStaticSingleFolder() string { + return fmt.Sprintf(` +variable "disk0" { + type = "string" + default = "%s" +} + +variable "folder" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test" + host_system_id = "${data.vsphere_host.esxi_host.id}" + folder = "${var.folder}" + + disks = [ + "${var.disk0}", + ] +} +`, os.Getenv("VSPHERE_DS_VMFS_DISK0"), os.Getenv("VSPHERE_DS_FOLDER"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} diff --git a/vsphere/structure_helper.go b/vsphere/structure_helper.go index 344821910..f198800d6 100644 --- a/vsphere/structure_helper.go +++ b/vsphere/structure_helper.go @@ -44,3 +44,17 @@ func mergeSchema(dst, src map[string]*schema.Schema) { func boolPtr(v bool) *bool { return &v } + +// byteToMB returns n/1000000. The input must be an integer that can be divisible +// by 1000000. +func byteToMB(n interface{}) interface{} { + switch v := n.(type) { + case int: + return v / 1000000 + case int32: + return v / 1000000 + case int64: + return v / 1000000 + } + panic(fmt.Errorf("non-integer type %T for value", n)) +} diff --git a/vsphere/vim_helper.go b/vsphere/vim_helper.go new file mode 100644 index 000000000..55d2ebf1c --- /dev/null +++ b/vsphere/vim_helper.go @@ -0,0 +1,84 @@ +package vsphere + +import ( + "context" + "errors" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// errVirtualCenterOnly is the error message that validateVirtualCenter returns. +const errVirtualCenterOnly = "this operation is only supported on vCenter" + +// soapFault extracts the SOAP fault from an error fault, if it exists. Check +// the returned boolean value to see if you have a SoapFault. +func soapFault(err error) (*soap.Fault, bool) { + if soap.IsSoapFault(err) { + return soap.ToSoapFault(err), true + } + return nil, false +} + +// vimSoapFault extracts the VIM fault Check the returned boolean value to see +// if you have a fault, which will need to be further asserted into the error +// that you are looking for. +func vimSoapFault(err error) (types.AnyType, bool) { + if sf, ok := soapFault(err); ok { + return sf.VimFault(), true + } + return nil, false +} + +// isManagedObjectNotFoundError checks an error to see if it's of the +// ManagedObjectNotFound type. 
+func isManagedObjectNotFoundError(err error) bool { + if f, ok := vimSoapFault(err); ok { + if _, ok := f.(types.ManagedObjectNotFound); ok { + return true + } + } + return false +} + +// isResourceInUseError checks an error to see if it's of the +// ResourceInUse type. +func isResourceInUseError(err error) bool { + if f, ok := vimSoapFault(err); ok { + if _, ok := f.(types.ResourceInUse); ok { + return true + } + } + return false +} + +// renameObject renames an MO and tracks the task to make sure it completes. +func renameObject(client *govmomi.Client, ref types.ManagedObjectReference, new string) error { + req := types.Rename_Task{ + This: ref, + NewName: new, + } + + rctx, rcancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer rcancel() + res, err := methods.Rename_Task(rctx, client.Client, &req) + if err != nil { + return err + } + + t := object.NewTask(client.Client, res.Returnval) + tctx, tcancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer tcancel() + return t.Wait(tctx) +} + +// validateVirtualCenter ensures that the client is connected to vCenter. +func validateVirtualCenter(c *govmomi.Client) error { + if c.ServiceContent.About.ApiType != "VirtualCenter" { + return errors.New(errVirtualCenterOnly) + } + return nil +} diff --git a/website/docs/r/vmfs_datastore.html.markdown b/website/docs/r/vmfs_datastore.html.markdown new file mode 100644 index 000000000..bb12cd691 --- /dev/null +++ b/website/docs/r/vmfs_datastore.html.markdown @@ -0,0 +1,185 @@ +--- +layout: "vsphere" +page_title: "VMware vSphere: vsphere_vmfs_datastore" +sidebar_current: "docs-vsphere-resource-vmfs-datastore" +description: |- + Provides a vSphere VMFS datastore resource. This can be used to configure a VMFS datastore on a host or set of hosts. +--- + +# vsphere\_vmfs\_datastore + +The `vsphere_vmfs_datastore` resource can be used to create and manage VMFS +datastores on an ESXi host or a set of hosts. The resource supports using any +SCSI device that can generally be used in a datastore, such as local disks, or +disks presented to a host or multiple hosts over Fibre Channel or iSCSI. +Devices can be specified manually, or discovered using the +[`vsphere_vmfs_disks`][data-source-vmfs-disks] data source. + +[data-source-vmfs-disks]: /docs/providers/vsphere/d/vmfs_disks.html + +## Auto-Mounting of Datastores Within vCenter + +Note that this resource will currently auto-mount any created +datastores to any other host within vCenter that has access to the same disk. + +Example: You want to create a datastore with an iSCSI LUN that is visible on 3 +hosts in a single vSphere cluster (`esxi1`, `esxi2` and `esxi3`). When you +create the datastore on `esxi1`, the datastore will be automatically mounted on +`esxi2` and `esxi3`, without the need to configure the resource on either of +those two hosts. + +Future versions of this resource may allow you to control the hosts that a +datastore is mounted to, but currently, this automatic behaviour cannot be +changed, so keep this in mind when writing your configurations and deploying +your disks. + +## Increasing Datastore Size + +To increase the size of a datastore, you must add additional disks to the +`disks` attribute. Expanding the size of a datastore by increasing the size of +an already provisioned disk is currently not supported (but may be in future +versions of this resource). + +~> **NOTE:** You cannot decrease the size of a datastore.
If the resource +detects disks removed from the configuration, Terraform will give an error. To +reduce the size of the datastore, the resource needs to be re-created - run +[`terraform taint`][cmd-taint] to taint the resource so it can be re-created. + +[cmd-taint]: /docs/commands/taint.html + +## Example Usage + +**Addition of local disks on a single host** + +The following example uses the default datacenter and default host to add a +datastore with local disks to a single ESXi server. + +~> **NOTE:** There are some situations where datastore creation will not work +when working through vCenter (usually when trying to create a datastore on a +single host with local disks). If you experience trouble creating the datastore +you need through vCenter, break the datastore off into a different configuration +and deploy it using the ESXi server as the provider endpoint, using a similar +configuration to what is below. + +```hcl +data "vsphere_datacenter" "datacenter" {} + +data "vsphere_host" "esxi_host" { + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test" + host_system_id = "${data.vsphere_host.esxi_host.id}" + + disks = [ + "mpx.vmhba1:C0:T1:L0", + "mpx.vmhba1:C0:T2:L0", + "mpx.vmhba1:C0:T3:L0", + ] +} +``` + +**Auto-detection of disks via `vsphere_vmfs_disks`** + +The following example makes use of the +[`vsphere_vmfs_disks`][data-source-vmfs-disks] data source to auto-detect +exported iSCSI LUNs matching a certain NAA vendor ID (in this case, LUNs +exported from a [NetApp][ext-netapp]). These discovered disks are then loaded +into `vsphere_vmfs_datastore`. The datastore is also placed in the +`datastore-folder` folder afterwards. + +[ext-netapp]: https://kb.netapp.com/support/s/article/ka31A0000000rLRQAY/how-to-match-a-lun-s-naa-number-to-its-serial-number?language=en_US + +```hcl +data "vsphere_datacenter" "datacenter" { + name = "dc1" +} + +data "vsphere_host" "esxi_host" { + name = "esxi1" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +data "vsphere_vmfs_disks" "available" { + host_system_id = "${data.vsphere_host.esxi_host.id}" + rescan = true + filter = "naa.60a98000" +} + +resource "vsphere_vmfs_datastore" "datastore" { + name = "terraform-test" + host_system_id = "${data.vsphere_host.esxi_host.id}" + folder = "datastore-folder" + + disks = ["${data.vsphere_vmfs_disks.available.disks}"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (String, required) The name of the datastore. +* `host_system_id` - (String, required, forces new resource) The managed object + ID of the host to set the datastore up on. Note that this is not necessarily + the only host that the datastore will be set up on - see + [here](#auto-mounting-of-datastores-within-vcenter) for more info. +* `folder` - (String, optional) The relative path to a folder to put this + datastore in. This is a path relative to the datacenter you are deploying the + datastore to. Example: for the `dc1` datacenter, and a provided `folder` of + `foo/bar`, Terraform will place a datastore named `terraform-test` in a + datastore folder located at `/dc1/datastore/foo/bar`, with the final + inventory path being `/dc1/datastore/foo/bar/terraform-test`. +* `disks` - (List of strings, required) The disks to use with the datastore. + +## Attribute Reference + +The following attributes are exported: + +* `id` - The managed object reference ID of the datastore.
+* `accessible` - The connectivity status of the datastore. If this is `false`, + some other computed attributes may be out of date. +* `capacity` - Maximum capacity of the datastore, in megabytes. +* `free_space` - Available space of this datastore, in megabytes. +* `maintenance_mode` - The current maintenance mode state of the datastore. +* `multiple_host_access` - If `true`, more than one host in the datacenter has + been configured with access to the datastore. +* `uncommitted_space` - Total additional storage space, in megabytes, + potentially used by all virtual machines on this datastore. +* `url` - The unique locator for the datastore. + +## Importing + +An existing VMFS datastore can be [imported][docs-import] into this resource +via its managed object ID, via the command below. You also need the host system +ID. + +[docs-import]: https://www.terraform.io/docs/import/index.html + +``` +terraform import vsphere_vmfs_datastore.datastore datastore-123:host-10 +``` + +You need a tool like [`govc`][ext-govc] that can display managed object IDs. + +[ext-govc]: https://github.com/vmware/govmomi/tree/master/govc + +In the case of govc, you can locate a managed object ID from an inventory path +by doing the following: + +``` +$ govc ls -i /dc/datastore/terraform-test +Datastore:datastore-123 +``` + +To locate host IDs, it might be a good idea to supply the `-l` flag as well so +that you can line up the names with the IDs: + +``` +$ govc ls -l -i /dc/host/cluster1 +ResourcePool:resgroup-10 /dc/host/cluster1/Resources +HostSystem:host-10 /dc/host/cluster1/esxi1 +HostSystem:host-11 /dc/host/cluster1/esxi2 +HostSystem:host-12 /dc/host/cluster1/esxi3 +``` diff --git a/website/vsphere.erb b/website/vsphere.erb index 40332b69c..38306a936 100644 --- a/website/vsphere.erb +++ b/website/vsphere.erb @@ -52,6 +52,9 @@