diff --git a/tf-vsphere-devrc.mk.example b/tf-vsphere-devrc.mk.example
index 6d36e9864..f3f8ff180 100644
--- a/tf-vsphere-devrc.mk.example
+++ b/tf-vsphere-devrc.mk.example
@@ -36,6 +36,8 @@ export VSPHERE_ADAPTER_TYPE ?= lsiLogic # Virtual disk adapter type
 export VSPHERE_LICENSE ?= key # License resource test key
 export VSPHERE_DC_FOLDER ?= dc-folder # DC resource test folder
 export VSPHERE_ESXI_HOST ?= esxi1 # ESXi host to work with
+export VSPHERE_ESXI_HOST2 ?= esxi2 # 2nd ESXi host to work with
+export VSPHERE_ESXI_HOST3 ?= esxi3 # 3rd ESXi host to work with
 export VSPHERE_HOST_NIC0 ?= vmnic0 # NIC0 for host net tests
 export VSPHERE_HOST_NIC1 ?= vmnic1 # NIC1 for host net tests
 export VSPHERE_VMFS_EXPECTED ?= scsi-name # Name of expected SCSI disk
@@ -44,5 +46,7 @@ export VSPHERE_DS_VMFS_DISK0 ?= scsi-name0 # 1st disk for vmfs_datastore
 export VSPHERE_DS_VMFS_DISK1 ?= scsi-name1 # 2nd disk for vmfs_datastore
 export VSPHERE_DS_VMFS_DISK2 ?= scsi-name2 # 3rd disk for vmfs_datastore
 export VSPHERE_DS_FOLDER ?= ds-folder # Path to a datastore folder
+export VSPHERE_NAS_HOST ?= nas-host # Hostname for nas_datastore
+export VSPHERE_NFS_PATH ?= nfs-path # NFS path for nas_datastore

 # vi: filetype=make
diff --git a/vsphere/host_nas_volume_structure.go b/vsphere/host_nas_volume_structure.go
new file mode 100644
index 000000000..6a7c2f787
--- /dev/null
+++ b/vsphere/host_nas_volume_structure.go
@@ -0,0 +1,131 @@
+package vsphere
+
+import (
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/helper/validation"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+const (
+	hostNasVolumeAccessModeReadOnly  = "readOnly"
+	hostNasVolumeAccessModeReadWrite = "readWrite"
+
+	hostNasVolumeSecurityTypeAuthSys  = "AUTH_SYS"
+	hostNasVolumeSecurityTypeSecKrb5  = "SEC_KRB5"
+	hostNasVolumeSecurityTypeSecKrb5i = "SEC_KRB5I"
+)
+
+// schemaHostNasVolumeSpec returns schema items for resources that need to work
+// with a HostNasVolumeSpec.
+func schemaHostNasVolumeSpec() map[string]*schema.Schema {
+	return map[string]*schema.Schema{
+		// HostNasVolumeSpec
+		// Skipped attributes: localPath (this is the name attribute)
+		// All CIFS attributes (we currently do not support CIFS as it's not
+		// available in the vSphere client and there is not much data about how to
+		// get it working)
+		"access_mode": &schema.Schema{
+			Type:        schema.TypeString,
+			Default:     hostNasVolumeAccessModeReadWrite,
+			Description: "Access mode for the mount point. Can be one of readOnly or readWrite.",
+			ForceNew:    true,
+			Optional:    true,
+			ValidateFunc: validation.StringInSlice(
+				[]string{
+					hostNasVolumeAccessModeReadOnly,
+					hostNasVolumeAccessModeReadWrite,
+				},
+				false,
+			),
+		},
+		"remote_hosts": &schema.Schema{
+			Type:        schema.TypeList,
+			Description: "The hostnames or IP addresses of the remote server or servers. Only one element should be present for NFS v3 but multiple can be present for NFS v4.1.",
+			Elem:        &schema.Schema{Type: schema.TypeString},
+			ForceNew:    true,
+			MinItems:    1,
+			Required:    true,
+		},
+		"remote_path": &schema.Schema{
+			Type:        schema.TypeString,
+			Description: "The remote path of the mount point.",
+			ForceNew:    true,
+			Required:    true,
+		},
+		"security_type": &schema.Schema{
+			Type:        schema.TypeString,
+			Description: "The security type to use.",
+			ForceNew:    true,
+			Optional:    true,
+			ValidateFunc: validation.StringInSlice(
+				[]string{
+					hostNasVolumeSecurityTypeAuthSys,
+					hostNasVolumeSecurityTypeSecKrb5,
+					hostNasVolumeSecurityTypeSecKrb5i,
+				},
+				false,
+			),
+		},
+		"type": &schema.Schema{
+			Type:        schema.TypeString,
+			Default:     "NFS",
+			Description: "The type of NAS volume. Can be one of NFS (to denote v3) or NFS41 (to denote NFS v4.1).",
+			ForceNew:    true,
+			Optional:    true,
+			ValidateFunc: validation.StringInSlice(
+				[]string{
+					string(types.HostFileSystemVolumeFileSystemTypeNFS),
+					string(types.HostFileSystemVolumeFileSystemTypeNFS41),
+				},
+				false,
+			),
+		},
+		"protocol_endpoint": &schema.Schema{
+			Type:        schema.TypeString,
+			Description: "Indicates that this NAS volume is a protocol endpoint. This field is only populated if the host supports virtual datastores.",
+			Computed:    true,
+		},
+	}
+}
+
+// expandHostNasVolumeSpec reads certain ResourceData keys and returns a
+// HostNasVolumeSpec.
+func expandHostNasVolumeSpec(d *schema.ResourceData) *types.HostNasVolumeSpec {
+	obj := &types.HostNasVolumeSpec{
+		AccessMode:      d.Get("access_mode").(string),
+		LocalPath:       d.Get("name").(string),
+		RemoteHost:      sliceInterfacesToStrings(d.Get("remote_hosts").([]interface{}))[0],
+		RemoteHostNames: sliceInterfacesToStrings(d.Get("remote_hosts").([]interface{})),
+		RemotePath:      d.Get("remote_path").(string),
+		SecurityType:    d.Get("security_type").(string),
+		Type:            d.Get("type").(string),
+	}
+
+	return obj
+}
+
+// flattenHostNasVolume reads various fields from a HostNasVolume into the
+// passed in ResourceData.
+//
+// Note the name attribute is not set here, but rather set in
+// flattenDatastoreSummary and sourced from there.
+func flattenHostNasVolume(d *schema.ResourceData, obj *types.HostNasVolume) error {
+	d.Set("remote_path", obj.RemotePath)
+	d.Set("security_type", obj.SecurityType)
+	d.Set("protocol_endpoint", obj.ProtocolEndpoint)
+
+	if err := d.Set("remote_hosts", obj.RemoteHostNames); err != nil {
+		return err
+	}
+	return nil
+}
+
+// isNasVolume returns true if the HostFileSystemVolumeFileSystemType matches
+// one of the possible filesystem types that a NAS datastore supports.
+func isNasVolume(t types.HostFileSystemVolumeFileSystemType) bool {
+	switch t {
+	case types.HostFileSystemVolumeFileSystemTypeNFS, types.HostFileSystemVolumeFileSystemTypeNFS41:
+		return true
+	}
+	return false
+}
diff --git a/vsphere/host_system_helper.go b/vsphere/host_system_helper.go
index 0dbced922..1400a6d6c 100644
--- a/vsphere/host_system_helper.go
+++ b/vsphere/host_system_helper.go
@@ -49,3 +49,24 @@ func hostSystemFromID(client *govmomi.Client, id string) (*object.HostSystem, er
 	}
 	return ds.(*object.HostSystem), nil
 }
+
+// hostSystemNameFromID returns the name of a host via its managed object
+// reference ID.
+func hostSystemNameFromID(client *govmomi.Client, id string) (string, error) {
+	hs, err := hostSystemFromID(client, id)
+	if err != nil {
+		return "", err
+	}
+	return hs.Name(), nil
+}
+
+// hostSystemNameOrID is a convenience method mainly for helping display friendly
+// errors where space is important - it returns either the host name, or the ID
+// if there was an error fetching the name.
+func hostSystemNameOrID(client *govmomi.Client, id string) string {
+	name, err := hostSystemNameFromID(client, id)
+	if err != nil {
+		return id
+	}
+	return name
+}
diff --git a/vsphere/nas_datastore_helper.go b/vsphere/nas_datastore_helper.go
new file mode 100644
index 000000000..dd4c7e8ae
--- /dev/null
+++ b/vsphere/nas_datastore_helper.go
@@ -0,0 +1,132 @@
+package vsphere
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+// nasDatastoreMountProcessor is an object that wraps the "complex" mounting
+// and unmounting workflows in the NAS datastore resource. We are using an
+// object as the process itself is a bit too complex for a pure functional
+// approach.
+type nasDatastoreMountProcessor struct {
+	// The client connection.
+	client *govmomi.Client
+
+	// A list of old (current) hosts mounted to the datastore.
+	oldHSIDs []string
+
+	// The list of hosts that should be mounted to the datastore.
+	newHSIDs []string
+
+	// The NAS datastore volume spec, used for mounting new hosts to a datastore.
+	volSpec *types.HostNasVolumeSpec
+
+	// The datastore. If this is not populated by the time the first host is
+	// mounted, it's assumed that the datastore is new and we populate this field
+	// with that newly created datastore. If this is missing, unmount operations
+	// will also be skipped.
+	ds *object.Datastore
+}
+
+// diffOldNew returns any elements of old that were missing in new.
+func (p *nasDatastoreMountProcessor) diffOldNew() []string {
+	return p.diff(p.oldHSIDs, p.newHSIDs)
+}
+
+// diffNewOld returns any elements of new that were missing in old.
+func (p *nasDatastoreMountProcessor) diffNewOld() []string {
+	return p.diff(p.newHSIDs, p.oldHSIDs)
+}
+
+// diff is what diffOldNew and diffNewOld hand off to; found is tracked per element of a.
+func (p *nasDatastoreMountProcessor) diff(a, b []string) []string {
+	c := make([]string, 0)
+	for _, v1 := range a {
+		var found bool
+		for _, v2 := range b {
+			if v1 == v2 {
+				found = true
+			}
+		}
+		if !found {
+			c = append(c, v1)
+		}
+	}
+	return c
+}
+
+// processMountOperations processes all pending mount operations by diffing old
+// and new and adding any hosts that were not found in old. The datastore is
+// returned, along with any error.
+func (p *nasDatastoreMountProcessor) processMountOperations() (*object.Datastore, error) {
+	hosts := p.diffNewOld()
+	if len(hosts) < 1 {
+		// Nothing to do
+		return p.ds, nil
+	}
+	// Validate that we are on vCenter if we are working with multiple hosts
+	if len(hosts) > 1 {
+		if err := validateVirtualCenter(p.client); err != nil {
+			return p.ds, fmt.Errorf("cannot mount on multiple hosts: %s", err)
+		}
+	}
+	for _, hsID := range hosts {
+		dss, err := hostDatastoreSystemFromHostSystemID(p.client, hsID)
+		if err != nil {
+			return p.ds, fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
+		}
+		ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
+		defer cancel()
+		ds, err := dss.CreateNasDatastore(ctx, *p.volSpec)
+		if err != nil {
+			return p.ds, fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
+		}
+		if err := p.validateDatastore(ds); err != nil {
+			return p.ds, fmt.Errorf("datastore validation error on host %q: %s", hostSystemNameOrID(p.client, hsID), err)
+		}
+	}
+	return p.ds, nil
+}
+
+// processUnmountOperations processes all pending unmount operations by diffing old
+// and new and removing any hosts that were not found in new. This operation
+// only proceeds if the datastore field in the processor is populated.
+func (p *nasDatastoreMountProcessor) processUnmountOperations() error {
+	hosts := p.diffOldNew()
+	if len(hosts) < 1 || p.ds == nil {
+		// Nothing to do
+		return nil
+	}
+	for _, hsID := range hosts {
+		dss, err := hostDatastoreSystemFromHostSystemID(p.client, hsID)
+		if err != nil {
+			return fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
+		}
+		if err := removeDatastore(dss, p.ds); err != nil {
+			return fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
+		}
+	}
+	return nil
+}
+
+// validateDatastore does one of two things: either stores the current
+// datastore in the processor, if it's missing, or validates the supplied
+// datastore with the one currently in the processor by checking if their IDs
+// match.
+func (p *nasDatastoreMountProcessor) validateDatastore(ds *object.Datastore) error {
+	if p.ds == nil {
+		p.ds = ds
+		return nil
+	}
+	expected := p.ds.Reference().Value
+	actual := ds.Reference().Value
+	if expected != actual {
+		return fmt.Errorf("expected datastore ID to be %q, got %q", expected, actual)
+	}
+	return nil
+}
diff --git a/vsphere/provider.go b/vsphere/provider.go
index 1b4ad9424..d65174394 100644
--- a/vsphere/provider.go
+++ b/vsphere/provider.go
@@ -77,6 +77,7 @@ func Provider() terraform.ResourceProvider {
 			"vsphere_license":         resourceVSphereLicense(),
 			"vsphere_virtual_disk":    resourceVSphereVirtualDisk(),
 			"vsphere_virtual_machine": resourceVSphereVirtualMachine(),
+			"vsphere_nas_datastore":   resourceVSphereNasDatastore(),
 			"vsphere_vmfs_datastore":  resourceVSphereVmfsDatastore(),
 		},

diff --git a/vsphere/resource_vsphere_nas_datastore.go b/vsphere/resource_vsphere_nas_datastore.go
new file mode 100644
index 000000000..f7e62e3a7
--- /dev/null
+++ b/vsphere/resource_vsphere_nas_datastore.go
@@ -0,0 +1,211 @@
+package vsphere
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+// formatNasDatastoreIDMismatch is an error message format string that is given
+// when two NAS datastore IDs mismatch.
+const formatNasDatastoreIDMismatch = "datastore ID on host %q (%s) does not match original datastore ID (%s)"
+
+func resourceVSphereNasDatastore() *schema.Resource {
+	s := map[string]*schema.Schema{
+		"name": {
+			Type:        schema.TypeString,
+			Description: "The name of the datastore.",
+			Required:    true,
+		},
+		"host_system_ids": &schema.Schema{
+			Type:        schema.TypeSet,
+			Description: "The managed object IDs of the hosts to mount the datastore on.",
+			Elem:        &schema.Schema{Type: schema.TypeString},
+			MinItems:    1,
+			Required:    true,
+		},
+		"folder": &schema.Schema{
+			Type:        schema.TypeString,
+			Description: "The path to the datastore folder to put the datastore in.",
+			Optional:    true,
+			StateFunc:   normalizeFolderPath,
+		},
+	}
+	mergeSchema(s, schemaHostNasVolumeSpec())
+	mergeSchema(s, schemaDatastoreSummary())
+
+	return &schema.Resource{
+		Create: resourceVSphereNasDatastoreCreate,
+		Read:   resourceVSphereNasDatastoreRead,
+		Update: resourceVSphereNasDatastoreUpdate,
+		Delete: resourceVSphereNasDatastoreDelete,
+		Importer: &schema.ResourceImporter{
+			State: resourceVSphereNasDatastoreImport,
+		},
+		Schema: s,
+	}
+}
+
+func resourceVSphereNasDatastoreCreate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*govmomi.Client)
+	hosts := sliceInterfacesToStrings(d.Get("host_system_ids").(*schema.Set).List())
+	p := &nasDatastoreMountProcessor{
+		client:   client,
+		oldHSIDs: nil,
+		newHSIDs: hosts,
+		volSpec:  expandHostNasVolumeSpec(d),
+	}
+	ds, err := p.processMountOperations()
+	if ds != nil {
+		d.SetId(ds.Reference().Value)
+	}
+	if err != nil {
+		return fmt.Errorf("error mounting datastore: %s", err)
+	}
+
+	// Move the datastore to the correct folder, if one was specified.
+	folder := d.Get("folder").(string)
+	if !pathIsEmpty(folder) {
+		if err := moveDatastoreToFolderRelativeHostSystemID(client, ds, hosts[0], folder); err != nil {
+			return fmt.Errorf("error moving datastore to folder: %s", err)
+		}
+	}
+
+	// Done
+	return resourceVSphereNasDatastoreRead(d, meta)
+}
+
+func resourceVSphereNasDatastoreRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*govmomi.Client)
+	id := d.Id()
+	ds, err := datastoreFromID(client, id)
+	if err != nil {
+		return fmt.Errorf("cannot find datastore: %s", err)
+	}
+	props, err := datastoreProperties(ds)
+	if err != nil {
+		return fmt.Errorf("could not get properties for datastore: %s", err)
+	}
+	if err := flattenDatastoreSummary(d, &props.Summary); err != nil {
+		return err
+	}
+
+	// Set the folder
+	folder, err := rootPathParticleDatastore.SplitRelativeFolder(ds.InventoryPath)
+	if err != nil {
+		return fmt.Errorf("error parsing datastore path %q: %s", ds.InventoryPath, err)
+	}
+	d.Set("folder", normalizeFolderPath(folder))
+
+	// Update NAS spec
+	if err := flattenHostNasVolume(d, props.Info.(*types.NasDatastoreInfo).Nas); err != nil {
+		return err
+	}
+
+	// Update mounted hosts
+	var mountedHosts []string
+	for _, mount := range props.Host {
+		mountedHosts = append(mountedHosts, mount.Key.Value)
+	}
+	if err := d.Set("host_system_ids", mountedHosts); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceVSphereNasDatastoreUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*govmomi.Client)
+	id := d.Id()
+	ds, err := datastoreFromID(client, id)
+	if err != nil {
+		return fmt.Errorf("cannot find datastore: %s", err)
+	}
+
+	// Rename this datastore if our name has drifted.
+	if d.HasChange("name") {
+		if err := renameObject(client, ds.Reference(), d.Get("name").(string)); err != nil {
+			return err
+		}
+	}
+
+	// Update folder if necessary
+	if d.HasChange("folder") {
+		folder := d.Get("folder").(string)
+		if err := moveDatastoreToFolder(client, ds, folder); err != nil {
+			return fmt.Errorf("could not move datastore to folder %q: %s", folder, err)
+		}
+	}
+
+	// Process mount/unmount operations.
+	o, n := d.GetChange("host_system_ids")
+
+	p := &nasDatastoreMountProcessor{
+		client:   client,
+		oldHSIDs: sliceInterfacesToStrings(o.(*schema.Set).List()),
+		newHSIDs: sliceInterfacesToStrings(n.(*schema.Set).List()),
+		volSpec:  expandHostNasVolumeSpec(d),
+		ds:       ds,
+	}
+	// Unmount first
+	if err := p.processUnmountOperations(); err != nil {
+		return fmt.Errorf("error unmounting hosts: %s", err)
+	}
+	// Now mount
+	if _, err := p.processMountOperations(); err != nil {
+		return fmt.Errorf("error mounting hosts: %s", err)
+	}
+
+	// Should be done with the update here.
+	return resourceVSphereNasDatastoreRead(d, meta)
+}
+
+func resourceVSphereNasDatastoreDelete(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*govmomi.Client)
+	dsID := d.Id()
+	ds, err := datastoreFromID(client, dsID)
+	if err != nil {
+		return fmt.Errorf("cannot find datastore: %s", err)
+	}
+
+	// Unmount the datastore from every host. Once the last host is unmounted we
+	// are done and the datastore will delete itself.
+	hosts := sliceInterfacesToStrings(d.Get("host_system_ids").(*schema.Set).List())
+	p := &nasDatastoreMountProcessor{
+		client:   client,
+		oldHSIDs: hosts,
+		newHSIDs: nil,
+		volSpec:  expandHostNasVolumeSpec(d),
+		ds:       ds,
+	}
+	if err := p.processUnmountOperations(); err != nil {
+		return fmt.Errorf("error unmounting hosts: %s", err)
+	}
+
+	return nil
+}
+
+func resourceVSphereNasDatastoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	// We support importing a MoRef - so we need to load the datastore and check
+	// to make sure 1) it exists, and 2) it's a NAS datastore. If it is, we are
+	// good to go (the rest will be handled by read on refresh).
+ client := meta.(*govmomi.Client) + id := d.Id() + ds, err := datastoreFromID(client, id) + if err != nil { + return nil, fmt.Errorf("cannot find datastore: %s", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout) + defer cancel() + t, err := ds.Type(ctx) + if err != nil { + return nil, fmt.Errorf("error fetching datastore type: %s", err) + } + if !isNasVolume(t) { + return nil, fmt.Errorf("datastore ID %q is not a NAS datastore", id) + } + return []*schema.ResourceData{d}, nil +} diff --git a/vsphere/resource_vsphere_nas_datastore_test.go b/vsphere/resource_vsphere_nas_datastore_test.go new file mode 100644 index 000000000..7110ff628 --- /dev/null +++ b/vsphere/resource_vsphere_nas_datastore_test.go @@ -0,0 +1,455 @@ +package vsphere + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccResourceVSphereNasDatastore(t *testing.T) { + var tp *testing.T + testAccResourceVSphereNasDatastoreCases := []struct { + name string + testCase resource.TestCase + }{ + { + "basic", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereNasDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereNasDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereNasDatastoreConfigBasic(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereNasDatastoreExists(true), + ), + }, + }, + }, + }, + { + "multi-host", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereNasDatastorePreCheck(tp) + testAccSkipIfEsxi(tp) + }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereNasDatastoreConfigMultiHost(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereNasDatastoreExists(true), + ), + }, + }, + }, + }, + { + "basic, then multi-host", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereNasDatastorePreCheck(tp) + }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereNasDatastoreConfigBasic(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereNasDatastoreExists(true), + ), + }, + { + Config: testAccResourceVSphereNasDatastoreConfigMultiHost(), + ExpectError: expectErrorIfNotVirtualCenter(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereNasDatastoreExists(true), + ), + }, + }, + }, + }, + { + "multi-host, then basic", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereNasDatastorePreCheck(tp) + testAccSkipIfEsxi(tp) + }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereNasDatastoreConfigMultiHost(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereNasDatastoreExists(true), + ), + }, + { + Config: testAccResourceVSphereNasDatastoreConfigBasic(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereNasDatastoreExists(true), + ), + }, + }, + }, + }, + { + "rename datastore", + resource.TestCase{ + PreCheck: func() { + testAccPreCheck(tp) + testAccResourceVSphereNasDatastorePreCheck(tp) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereNasDatastoreExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereNasDatastoreConfigBasic(), + Check: resource.ComposeTestCheckFunc( + 
testAccResourceVSphereNasDatastoreExists(true),
+					),
+				},
+				{
+					Config: testAccResourceVSphereNasDatastoreConfigBasicAltName(),
+					Check: resource.ComposeTestCheckFunc(
+						testAccResourceVSphereNasDatastoreExists(true),
+						testAccResourceVSphereNasDatastoreHasName("terraform-test-nas-renamed"),
+					),
+				},
+			},
+		},
+	},
+	{
+		"with folder",
+		resource.TestCase{
+			PreCheck: func() {
+				testAccPreCheck(tp)
+				testAccResourceVSphereNasDatastorePreCheck(tp)
+				// NOTE: This test can't run on ESXi without giving a "dangling
+				// resource" error during testing - "move to folder after" hits the
+				// error on the same path of the call stack that triggers an error in
+				// both create and update and should provide adequate coverage
+				// barring manual testing.
+				testAccSkipIfEsxi(tp)
+			},
+			Providers:    testAccProviders,
+			CheckDestroy: testAccResourceVSphereNasDatastoreExists(false),
+			Steps: []resource.TestStep{
+				{
+					Config: testAccResourceVSphereNasDatastoreConfigBasicFolder(),
+					Check: resource.ComposeTestCheckFunc(
+						testAccResourceVSphereNasDatastoreExists(true),
+						testAccResourceVSphereNasDatastoreMatchInventoryPath(os.Getenv("VSPHERE_DS_FOLDER")),
+					),
+				},
+			},
+		},
+	},
+	{
+		"move to folder after",
+		resource.TestCase{
+			PreCheck: func() {
+				testAccPreCheck(tp)
+				testAccResourceVSphereNasDatastorePreCheck(tp)
+			},
+			Providers:    testAccProviders,
+			CheckDestroy: testAccResourceVSphereNasDatastoreExists(false),
+			Steps: []resource.TestStep{
+				{
+					Config: testAccResourceVSphereNasDatastoreConfigBasic(),
+					Check: resource.ComposeTestCheckFunc(
+						testAccResourceVSphereNasDatastoreExists(true),
+					),
+				},
+				{
+					Config:      testAccResourceVSphereNasDatastoreConfigBasicFolder(),
+					ExpectError: expectErrorIfNotVirtualCenter(),
+					Check: resource.ComposeTestCheckFunc(
+						testAccResourceVSphereNasDatastoreExists(true),
+						testAccResourceVSphereNasDatastoreMatchInventoryPath(os.Getenv("VSPHERE_DS_FOLDER")),
+					),
+				},
+			},
+		},
+	},
+	{
+		"import",
+		resource.TestCase{
+			PreCheck: func() {
+				testAccPreCheck(tp)
+				testAccResourceVSphereNasDatastorePreCheck(tp)
+			},
+			Providers:    testAccProviders,
+			CheckDestroy: testAccResourceVSphereNasDatastoreExists(false),
+			Steps: []resource.TestStep{
+				{
+					Config: testAccResourceVSphereNasDatastoreConfigBasic(),
+					Check: resource.ComposeTestCheckFunc(
+						testAccResourceVSphereNasDatastoreExists(true),
+					),
+				},
+				{
+					Config:            testAccResourceVSphereNasDatastoreConfigBasic(),
+					ImportState:       true,
+					ResourceName:      "vsphere_nas_datastore.datastore",
+					ImportStateVerify: true,
+					ImportStateVerifyIgnore: []string{"host_system_id", "access_mode", "type"},
+				},
+			},
+		},
+	},
+}
+
+	for _, tc := range testAccResourceVSphereNasDatastoreCases {
+		t.Run(tc.name, func(t *testing.T) {
+			tp = t
+			resource.Test(t, tc.testCase)
+		})
+	}
+}
+
+func testAccResourceVSphereNasDatastorePreCheck(t *testing.T) {
+	if os.Getenv("VSPHERE_ESXI_HOST") == "" {
+		t.Skip("set VSPHERE_ESXI_HOST to run vsphere_nas_datastore acceptance tests")
+	}
+	if os.Getenv("VSPHERE_ESXI_HOST2") == "" {
+		t.Skip("set VSPHERE_ESXI_HOST2 to run vsphere_nas_datastore acceptance tests")
+	}
+	if os.Getenv("VSPHERE_ESXI_HOST3") == "" {
+		t.Skip("set VSPHERE_ESXI_HOST3 to run vsphere_nas_datastore acceptance tests")
+	}
+	if os.Getenv("VSPHERE_NAS_HOST") == "" {
+		t.Skip("set VSPHERE_NAS_HOST to run vsphere_nas_datastore acceptance tests")
+	}
+	if os.Getenv("VSPHERE_NFS_PATH") == "" {
+		t.Skip("set VSPHERE_NFS_PATH to run vsphere_nas_datastore acceptance tests")
+	}
+	if os.Getenv("VSPHERE_DS_FOLDER") == "" {
+		t.Skip("set VSPHERE_DS_FOLDER 
to run vsphere_nas_datastore acceptance tests") + } +} + +func testAccResourceVSphereNasDatastoreExists(expected bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + vars, err := testClientVariablesForResource(s, "vsphere_nas_datastore.datastore") + if err != nil { + return err + } + + _, err = datastoreFromID(vars.client, vars.resourceID) + if err != nil { + if isManagedObjectNotFoundError(err) && expected == false { + // Expected missing + return nil + } + return err + } + if !expected { + return fmt.Errorf("expected datastore %s to be missing", vars.resourceID) + } + return nil + } +} + +func testAccResourceVSphereNasDatastoreHasName(expected string) resource.TestCheckFunc { + return func(s *terraform.State) error { + vars, err := testClientVariablesForResource(s, "vsphere_nas_datastore.datastore") + if err != nil { + return err + } + + ds, err := datastoreFromID(vars.client, vars.resourceID) + if err != nil { + return err + } + + props, err := datastoreProperties(ds) + if err != nil { + return err + } + + actual := props.Summary.Name + if expected != actual { + return fmt.Errorf("expected datastore name to be %s, got %s", expected, actual) + } + return nil + } +} + +func testAccResourceVSphereNasDatastoreMatchInventoryPath(expected string) resource.TestCheckFunc { + return func(s *terraform.State) error { + vars, err := testClientVariablesForResource(s, "vsphere_nas_datastore.datastore") + if err != nil { + return err + } + + ds, err := datastoreFromID(vars.client, vars.resourceID) + if err != nil { + return err + } + + expected, err := rootPathParticleDatastore.PathFromNewRoot(ds.InventoryPath, rootPathParticleDatastore, expected) + actual := path.Dir(ds.InventoryPath) + if err != nil { + return fmt.Errorf("bad: %s", err) + } + if expected != actual { + return fmt.Errorf("expected path to be %s, got %s", expected, actual) + } + return nil + } +} + +func testAccResourceVSphereNasDatastoreConfigBasic() string { + return fmt.Sprintf(` +variable "nfs_host" { + type = "string" + default = "%s" +} + +variable "nfs_path" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_nas_datastore" "datastore" { + name = "terraform-test-nas" + host_system_ids = ["${data.vsphere_host.esxi_host.id}"] + + type = "NFS" + remote_hosts = ["${var.nfs_host}"] + remote_path = "${var.nfs_path}" +} +`, os.Getenv("VSPHERE_NAS_HOST"), os.Getenv("VSPHERE_NFS_PATH"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} + +func testAccResourceVSphereNasDatastoreConfigMultiHost() string { + return fmt.Sprintf(` +variable "nfs_host" { + type = "string" + default = "%s" +} + +variable "nfs_path" { + type = "string" + default = "%s" +} + +variable "esxi_hosts" { + default = [ + "%s", + "%s", + "%s", + ] +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + count = "${length(var.esxi_hosts)}" + name = "${var.esxi_hosts[count.index]}" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_nas_datastore" "datastore" { + name = "terraform-test-nas" + host_system_ids = ["${data.vsphere_host.esxi_host.*.id}"] + + type = "NFS" + remote_hosts = ["${var.nfs_host}"] + remote_path = "${var.nfs_path}" +} +`, os.Getenv("VSPHERE_NAS_HOST"), os.Getenv("VSPHERE_NFS_PATH"), os.Getenv("VSPHERE_ESXI_HOST"), os.Getenv("VSPHERE_ESXI_HOST2"), 
os.Getenv("VSPHERE_ESXI_HOST3"), os.Getenv("VSPHERE_DATACENTER")) +} + +func testAccResourceVSphereNasDatastoreConfigBasicAltName() string { + return fmt.Sprintf(` +variable "nfs_host" { + type = "string" + default = "%s" +} + +variable "nfs_path" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_nas_datastore" "datastore" { + name = "terraform-test-nas-renamed" + host_system_ids = ["${data.vsphere_host.esxi_host.id}"] + + type = "NFS" + remote_hosts = ["${var.nfs_host}"] + remote_path = "${var.nfs_path}" +} +`, os.Getenv("VSPHERE_NAS_HOST"), os.Getenv("VSPHERE_NFS_PATH"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} + +func testAccResourceVSphereNasDatastoreConfigBasicFolder() string { + return fmt.Sprintf(` +variable "nfs_host" { + type = "string" + default = "%s" +} + +variable "nfs_path" { + type = "string" + default = "%s" +} + +variable "folder" { + type = "string" + default = "%s" +} + +data "vsphere_datacenter" "datacenter" { + name = "%s" +} + +data "vsphere_host" "esxi_host" { + name = "%s" + datacenter_id = "${data.vsphere_datacenter.datacenter.id}" +} + +resource "vsphere_nas_datastore" "datastore" { + name = "terraform-test-nas" + host_system_ids = ["${data.vsphere_host.esxi_host.id}"] + folder = "${var.folder}" + + type = "NFS" + remote_hosts = ["${var.nfs_host}"] + remote_path = "${var.nfs_path}" +} +`, os.Getenv("VSPHERE_NAS_HOST"), os.Getenv("VSPHERE_NFS_PATH"), os.Getenv("VSPHERE_DS_FOLDER"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST")) +} diff --git a/website/docs/r/nas_datastore.html.markdown b/website/docs/r/nas_datastore.html.markdown new file mode 100644 index 000000000..de4c2ca8b --- /dev/null +++ b/website/docs/r/nas_datastore.html.markdown @@ -0,0 +1,123 @@ +--- +layout: "vsphere" +page_title: "VMware vSphere: vsphere_nas_datastore" +sidebar_current: "docs-vsphere-resource-nas-datastore" +description: |- + Provides a vSphere NAS datastore resource. This can be used to mount a NFS share as a datastore on a host. +--- + +# vsphere\_nas\_datastore + +The `vsphere_nas_datastore` resource can be used to create and manage NAS +datastores on an ESXi host or a set of hosts. The resource supports mounting +NFS v3 and v4.1 shares to be used as datastores. + +~> **NOTE:** Unlike [`vsphere_vmfs_datastore`][resource-vmfs-datastore], a NAS +datastore is only mounted on the hosts you choose to mount it on. To mount on +multiple hosts, you must specify each host that you want to add in the +`host_system_ids` argument. + +[resource-vmfs-datastore]: /docs/providers/vsphere/r/vmfs_datastore.html + +## Example Usage + +The following example would set up a NFS v3 share on 3 hosts connected through +vCenter in the same datacenter - `esxi1`, `esxi2`, and `esxi3`. The remote host +is named `nfs` and has `/export/terraform-test` exported. 
+
+```hcl
+variable "hosts" {
+  default = [
+    "esxi1",
+    "esxi2",
+    "esxi3",
+  ]
+}
+
+data "vsphere_datacenter" "datacenter" {}
+
+data "vsphere_host" "esxi_hosts" {
+  count         = "${length(var.hosts)}"
+  name          = "${var.hosts[count.index]}"
+  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
+}
+
+resource "vsphere_nas_datastore" "datastore" {
+  name            = "terraform-test"
+  host_system_ids = "${data.vsphere_host.esxi_hosts.*.id}"
+
+  type         = "NFS"
+  remote_hosts = ["nfs"]
+  remote_path  = "/export/terraform-test"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (String, required) The name of the datastore.
+* `host_system_ids` - (List of strings, required) The managed object
+  IDs of the hosts to mount the datastore on.
+* `folder` - (String, optional) The relative path to a folder to put this
+  datastore in. This is a path relative to the datacenter you are deploying the
+  datastore to. Example: for the `dc1` datacenter, and a provided `folder` of
+  `foo/bar`, Terraform will place a datastore named `terraform-test` in a
+  datastore folder located at `/dc1/datastore/foo/bar`, with the final
+  inventory path being `/dc1/datastore/foo/bar/terraform-test`.
+* `type` - (String, optional, forces new resource) The type of NAS volume. Can
+  be one of `NFS` (to denote v3) or `NFS41` (to denote NFS v4.1). Default:
+  `NFS`.
+* `remote_hosts` - (List of strings, required, forces new resource) The
+  hostnames or IP addresses of the remote server or servers. Only one element
+  should be present for NFS v3 but multiple can be present for NFS v4.1.
+* `remote_path` - (String, required, forces new resource) The remote path of
+  the mount point.
+* `access_mode` - (String, optional, forces new resource) Access mode for the
+  mount point. Can be one of `readOnly` or `readWrite`. Note that `readWrite`
+  does not necessarily mean the datastore will be writable; this also depends
+  on the permissions of the actual share. Default: `readWrite`.
+* `security_type` - (String, optional, forces new resource) The security type
+  to use when using NFS v4.1. Can be one of `AUTH_SYS`, `SEC_KRB5`, or
+  `SEC_KRB5I`.
+
+## Attribute Reference
+
+The following attributes are exported:
+
+* `id` - The managed object reference ID of the datastore.
+* `accessible` - The connectivity status of the datastore. If this is `false`,
+  some other computed attributes may be out of date.
+* `capacity` - Maximum capacity of the datastore, in megabytes.
+* `free_space` - Available space of this datastore, in megabytes.
+* `maintenance_mode` - The current maintenance mode state of the datastore.
+* `multiple_host_access` - If `true`, more than one host in the datacenter has
+  been configured with access to the datastore.
+* `uncommitted_space` - Total additional storage space, in megabytes,
+  potentially used by all virtual machines on this datastore.
+* `url` - The unique locator for the datastore.
+* `protocol_endpoint` - Indicates that this NAS volume is a protocol endpoint.
+  This field is only populated if the host supports virtual datastores.
+
+## Importing
+
+An existing NAS datastore can be [imported][docs-import] into this resource via
+its managed object ID, using the following command:
+
+[docs-import]: https://www.terraform.io/docs/import/index.html
+
+```
+terraform import vsphere_nas_datastore.datastore datastore-123
+```
+
+You need a tool like [`govc`][ext-govc] that can display managed object IDs.
+ +[ext-govc]: https://github.com/vmware/govmomi/tree/master/govc + +In the case of govc, you can locate a managed object ID from an inventory path +by doing the following: + +``` +$ govc ls -i /dc/datastore/terraform-test +Datastore:datastore-123 +``` diff --git a/website/vsphere.erb b/website/vsphere.erb index 38306a936..dc3f98e98 100644 --- a/website/vsphere.erb +++ b/website/vsphere.erb @@ -52,6 +52,9 @@
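As a supplementary illustration of the NFS v4.1 options described in the documentation above, here is a minimal, untested configuration sketch. All names in it (the `dc1` datacenter, the `esxi1` host, the `nfs1`/`nfs2` servers, the export path, and the Kerberos choice) are placeholders for this example only and are not taken from the change itself:

```hcl
# Hypothetical names throughout - adjust to your environment.
data "vsphere_datacenter" "datacenter" {
  name = "dc1"
}

data "vsphere_host" "esxi_host" {
  name          = "esxi1"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

resource "vsphere_nas_datastore" "datastore" {
  name            = "terraform-test-nfs41"
  host_system_ids = ["${data.vsphere_host.esxi_host.id}"]

  # NFS41 selects NFS v4.1; per the documentation above, more than one
  # remote host is only expected for v4.1.
  type         = "NFS41"
  remote_hosts = ["nfs1", "nfs2"]
  remote_path  = "/export/terraform-test"

  # Optional attributes defined in schemaHostNasVolumeSpec.
  access_mode   = "readOnly"
  security_type = "SEC_KRB5"
}
```

Per the schema in `host_nas_volume_structure.go`, `type`, `remote_hosts`, `remote_path`, `access_mode`, and `security_type` are all marked `ForceNew`, so changing any of them after creation replaces the datastore rather than updating it in place.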