New resource: vsphere_nas_datastore #149

Merged · 5 commits · Sep 7, 2017
4 changes: 4 additions & 0 deletions tf-vsphere-devrc.mk.example
@@ -36,6 +36,8 @@ export VSPHERE_ADAPTER_TYPE ?= lsiLogic # Virtual disk adapter type
export VSPHERE_LICENSE ?= key # License resource test key
export VSPHERE_DC_FOLDER ?= dc-folder # DC resource test folder
export VSPHERE_ESXI_HOST ?= esxi1 # ESXi host to work with
export VSPHERE_ESXI_HOST2 ?= esxi2 # 2nd ESXi host to work with
export VSPHERE_ESXI_HOST3 ?= esxi3 # 3rd ESXi host to work with
export VSPHERE_HOST_NIC0 ?= vmnic0 # NIC0 for host net tests
export VSPHERE_HOST_NIC1 ?= vmnic1 # NIC1 for host net tests
export VSPHERE_VMFS_EXPECTED ?= scsi-name # Name of expected SCSI disk
@@ -44,5 +46,7 @@ export VSPHERE_DS_VMFS_DISK0 ?= scsi-name0 # 1st disk for vmfs_datastore
export VSPHERE_DS_VMFS_DISK1 ?= scsi-name1 # 2nd disk for vmfs_datastore
export VSPHERE_DS_VMFS_DISK2 ?= scsi-name2 # 3rd disk for vmfs_datastore
export VSPHERE_DS_FOLDER ?= ds-folder # Path to a datastore folder
export VSPHERE_NAS_HOST ?= nas-host # Hostname for nas_datastore
export VSPHERE_NFS_PATH ?= nfs-path # NFS path for nas_datastore

# vi: filetype=make
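
A minimal sketch of how an acceptance-test precheck might gate on the two new variables; the function name and skip messages are illustrative assumptions, not code from this PR:

package vsphere

import (
	"os"
	"testing"
)

// testAccNasDatastorePreCheck (hypothetical name) skips the NAS datastore
// acceptance tests unless the new environment variables are set.
func testAccNasDatastorePreCheck(t *testing.T) {
	for _, v := range []string{"VSPHERE_NAS_HOST", "VSPHERE_NFS_PATH"} {
		if os.Getenv(v) == "" {
			t.Skipf("%s must be set to run vsphere_nas_datastore acceptance tests", v)
		}
	}
}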
131 changes: 131 additions & 0 deletions vsphere/host_nas_volume_structure.go
@@ -0,0 +1,131 @@
package vsphere

import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"github.com/vmware/govmomi/vim25/types"
)

const (
hostNasVolumeAccessModeReadOnly = "readOnly"
hostNasVolumeAccessModeReadWrite = "readWrite"

hostNasVolumeSecurityTypeAuthSys = "AUTH_SYS"
hostNasVolumeSecurityTypeSecKrb5 = "SEC_KRB5"
hostNasVolumeSecurityTypeSecKrb5i = "SEC_KRB5I"
)

// schemaHostNasVolumeSpec returns schema items for resources that need to work
// with a HostNasVolumeSpec.
func schemaHostNasVolumeSpec() map[string]*schema.Schema {
return map[string]*schema.Schema{
// HostNasVolumeSpec
// Skipped attributes: localPath (this is the name attribute)
// All CIFS attributes (we currently do not support CIFS as it's not
// available in the vSphere client and there is not much data about how to
// get it working)
"access_mode": &schema.Schema{
Type: schema.TypeString,
Default: hostNasVolumeAccessModeReadWrite,
Description: "Access mode for the mount point. Can be one of readOnly or readWrite.",
ForceNew: true,
Optional: true,
ValidateFunc: validation.StringInSlice(
[]string{
hostNasVolumeAccessModeReadOnly,
hostNasVolumeAccessModeReadWrite,
},
false,
),
},
"remote_hosts": &schema.Schema{
Type: schema.TypeList,
Description: "The hostnames or IP addresses of the remote server or servers. Only one element should be present for NFS v3 but multiple can be present for NFS v4.1.",
Elem: &schema.Schema{Type: schema.TypeString},
ForceNew: true,
MinItems: 1,
Required: true,
},
"remote_path": &schema.Schema{
Type: schema.TypeString,
Description: "The remote path of the mount point.",
ForceNew: true,
Required: true,
},
"security_type": &schema.Schema{
Type: schema.TypeString,
Description: "The security type to use.",
ForceNew: true,
Optional: true,
ValidateFunc: validation.StringInSlice(
[]string{
hostNasVolumeSecurityTypeAuthSys,
hostNasVolumeSecurityTypeSecKrb5,
hostNasVolumeSecurityTypeSecKrb5i,
},
false,
),
},
"type": &schema.Schema{
Type: schema.TypeString,
Default: "NFS",
Description: "The type of NAS volume. Can be one of NFS (to denote v3) or NFS41 (to denote NFS v4.1).",
ForceNew: true,
Optional: true,
ValidateFunc: validation.StringInSlice(
[]string{
string(types.HostFileSystemVolumeFileSystemTypeNFS),
Member: Loving all the constants

Contributor (author): 😀

string(types.HostFileSystemVolumeFileSystemTypeNFS41),
},
false,
),
},
"protocol_endpoint": &schema.Schema{
Type: schema.TypeString,
Description: "Indicates that this NAS volume is a protocol endpoint. This field is only populated if the host supports virtual datastores.",
Computed: true,
},
}
}
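
As an aside, a sketch (assumed composition, not part of this diff) of how a resource could fold these shared items into its own schema map, next to the name attribute that schemaHostNasVolumeSpec deliberately skips:

// resourceVSphereNasDatastoreSchema is a hypothetical helper showing the
// intended composition: resource-level attributes plus the shared spec items.
func resourceVSphereNasDatastoreSchema() map[string]*schema.Schema {
	s := map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Description: "The name of the datastore, used as the NAS volume's local path.",
			Required:    true,
			ForceNew:    true,
		},
	}
	for k, v := range schemaHostNasVolumeSpec() {
		s[k] = v
	}
	return s
}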

// expandHostNasVolumeSpec reads certain ResourceData keys and returns a
// HostNasVolumeSpec.
func expandHostNasVolumeSpec(d *schema.ResourceData) *types.HostNasVolumeSpec {
obj := &types.HostNasVolumeSpec{
AccessMode: d.Get("access_mode").(string),
LocalPath: d.Get("name").(string),
RemoteHost: sliceInterfacesToStrings(d.Get("remote_hosts").([]interface{}))[0],
RemoteHostNames: sliceInterfacesToStrings(d.Get("remote_hosts").([]interface{})),
RemotePath: d.Get("remote_path").(string),
SecurityType: d.Get("security_type").(string),
Type: d.Get("type").(string),
}

return obj
}

// flattenHostNasVolume reads various fields from a HostNasVolume into the
// passed in ResourceData.
//
// Note the name attribute is not set here, but rather set in
// flattenDatastoreSummary and sourced from there.
func flattenHostNasVolume(d *schema.ResourceData, obj *types.HostNasVolume) error {
d.Set("remote_path", obj.RemotePath)
d.Set("security_type", obj.SecurityType)
d.Set("protocol_endpoint", obj.ProtocolEndpoint)

if err := d.Set("remote_hosts", obj.RemoteHostNames); err != nil {
return err
}
return nil
}

// isNasVolume returns true if the HostFileSystemVolumeFileSystemType matches
// one of the possible filesystem types that a NAS datastore supports.
func isNasVolume(t types.HostFileSystemVolumeFileSystemType) bool {
switch t {
case types.HostFileSystemVolumeFileSystemTypeNFS, types.HostFileSystemVolumeFileSystemTypeNFS41:
return true
}
return false
}
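
To show where isNasVolume fits, a guard sketch (the surrounding read-path wiring and the fmt import are assumed, not shown in this diff) that rejects non-NFS datastores before treating them as NAS datastores:

// validateNasDatastoreType (hypothetical helper) rejects datastores whose
// summary reports a filesystem type other than NFS or NFS41.
func validateNasDatastoreType(summary types.DatastoreSummary) error {
	if !isNasVolume(types.HostFileSystemVolumeFileSystemType(summary.Type)) {
		return fmt.Errorf("datastore %q is not a NAS datastore (type: %s)", summary.Name, summary.Type)
	}
	return nil
}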
21 changes: 21 additions & 0 deletions vsphere/host_system_helper.go
@@ -49,3 +49,24 @@ func hostSystemFromID(client *govmomi.Client, id string) (*object.HostSystem, er
}
return ds.(*object.HostSystem), nil
}

// hostSystemNameFromID returns the name of a host via its managed object
// reference ID.
func hostSystemNameFromID(client *govmomi.Client, id string) (string, error) {
hs, err := hostSystemFromID(client, id)
if err != nil {
return "", err
}
return hs.Name(), nil
}

// hostSystemNameOrID is a convenience method, used mainly to render friendly
// errors where space is important - it returns the host name, falling back to
// the ID if the name could not be fetched.
func hostSystemNameOrID(client *govmomi.Client, id string) string {
name, err := hostSystemNameFromID(client, id)
if err != nil {
return id
}
return name
}
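
A small illustration (hypothetical wrapper, mirroring how nas_datastore_helper.go formats host errors below) of why the fallback matters: the error path itself can never fail, so callers get either a friendly name or, at worst, the raw ID:

// hostError (hypothetical) wraps an operation error with host context.
// hostSystemNameOrID cannot fail - if the name lookup errors, the message
// simply falls back to the managed object ID.
func hostError(client *govmomi.Client, hsID string, err error) error {
	return fmt.Errorf("host %q: %s", hostSystemNameOrID(client, hsID), err)
}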
132 changes: 132 additions & 0 deletions vsphere/nas_datastore_helper.go
@@ -0,0 +1,132 @@
package vsphere

import (
"context"
"fmt"

"github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
)

// nasDatastoreMountProcessor is an object that wraps the "complex" mounting
// and unmounting workflows in the NAS datastore resource. We are using an
// object as the process itself is a bit too complex for a pure functional
// approach.
type nasDatastoreMountProcessor struct {
// The client connection.
client *govmomi.Client

// A list of old (current) hosts mounted to the datastore.
oldHSIDs []string

// The list of hosts that should be mounted to the datastore.
newHSIDs []string

// The NAS datastore volume spec, used for mounting new hosts to a datastore.
volSpec *types.HostNasVolumeSpec

// The datastore. If this is not populated by the time the first host is
// mounted, it's assumed that the datastore is new and we populate this field
// with that newly created datastore. If this is missing, unmount operations
// will also be skipped.
ds *object.Datastore
}

// diffOldNew returns any elements of old that were missing in new.
func (p *nasDatastoreMountProcessor) diffOldNew() []string {
return p.diff(p.oldHSIDs, p.newHSIDs)
}

// diffNewOld returns any elements of new that were missing in old.
func (p *nasDatastoreMountProcessor) diffNewOld() []string {
return p.diff(p.newHSIDs, p.oldHSIDs)
}

// diff is what diffOldNew and diffNewOld hand off to.
func (p *nasDatastoreMountProcessor) diff(a, b []string) []string {
	c := make([]string, 0)
	for _, v1 := range a {
		// found must be reset for every element of a - declaring it once
		// outside the loop would mark every element after the first match
		// as found.
		found := false
		for _, v2 := range b {
			if v1 == v2 {
				found = true
			}
		}
		if !found {
			c = append(c, v1)
		}
	}
	return c
}
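
A worked example of the diff semantics with illustrative host IDs - elements of the first slice that are absent from the second are returned, so diffOldNew yields hosts to unmount and diffNewOld yields hosts to mount:

p := &nasDatastoreMountProcessor{
	oldHSIDs: []string{"host-10", "host-11"},
	newHSIDs: []string{"host-11", "host-12"},
}
fmt.Println(p.diffOldNew()) // [host-10]: was mounted, no longer in config - unmount
fmt.Println(p.diffNewOld()) // [host-12]: newly in config - mount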

// processMountOperations processes all pending mount operations by diffing old
// and new and adding any hosts that were not found in old. The datastore is
// returned, along with any error.
func (p *nasDatastoreMountProcessor) processMountOperations() (*object.Datastore, error) {
hosts := p.diffNewOld()
if len(hosts) < 1 {
// Nothing to do
return p.ds, nil
}
// Validate we are vCenter if we are working with multiple hosts
if len(hosts) > 1 {
if err := validateVirtualCenter(p.client); err != nil {
return p.ds, fmt.Errorf("cannot mount on multiple hosts: %s", err)
}
}
for _, hsID := range hosts {
dss, err := hostDatastoreSystemFromHostSystemID(p.client, hsID)
if err != nil {
return p.ds, fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
}
ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
defer cancel()
ds, err := dss.CreateNasDatastore(ctx, *p.volSpec)
if err != nil {
return p.ds, fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
}
if err := p.validateDatastore(ds); err != nil {
return p.ds, fmt.Errorf("datastore validation error on host %q: %s", hostSystemNameOrID(p.client, hsID), err)
}
}
return p.ds, nil
}

// processUnmountOperations processes all pending unmount operations by diffing old
// and new and removing any hosts that were not found in new. This operation
// only proceeds if the datastore field in the processor is populated.
func (p *nasDatastoreMountProcessor) processUnmountOperations() error {
hosts := p.diffOldNew()
if len(hosts) < 1 || p.ds == nil {
// Nothing to do
return nil
}
for _, hsID := range hosts {
dss, err := hostDatastoreSystemFromHostSystemID(p.client, hsID)
if err != nil {
return fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
}
if err := removeDatastore(dss, p.ds); err != nil {
return fmt.Errorf("host %q: %s", hostSystemNameOrID(p.client, hsID), err)
}
}
return nil
}

// validateDatastore does one of two things: either stores the current
// datastore in the processor, if it's missing, or validates the supplied
// datastore with the one currently in the processor by checking if their IDs
// match.
func (p *nasDatastoreMountProcessor) validateDatastore(ds *object.Datastore) error {
if p.ds == nil {
p.ds = ds
return nil
}
expected := p.ds.Reference().Value
actual := ds.Reference().Value
if expected != actual {
return fmt.Errorf("expected datastore ID to be %q, got %q", expected, actual)
}
return nil
}
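
Finally, a condensed sketch (assumed wiring; the helper name and how oldIDs and newIDs are sourced from state are illustrative) of how an update could drive the processor, unmounting removed hosts before mounting added ones:

// applyNasDatastoreHostChanges (hypothetical) reconciles the mounted-host set.
func applyNasDatastoreHostChanges(client *govmomi.Client, ds *object.Datastore, oldIDs, newIDs []string, spec *types.HostNasVolumeSpec) (*object.Datastore, error) {
	p := &nasDatastoreMountProcessor{
		client:   client,
		oldHSIDs: oldIDs,
		newHSIDs: newIDs,
		volSpec:  spec,
		ds:       ds,
	}
	if err := p.processUnmountOperations(); err != nil {
		return ds, err
	}
	return p.processMountOperations()
}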
1 change: 1 addition & 0 deletions vsphere/provider.go
@@ -77,6 +77,7 @@ func Provider() terraform.ResourceProvider {
"vsphere_license": resourceVSphereLicense(),
"vsphere_virtual_disk": resourceVSphereVirtualDisk(),
"vsphere_virtual_machine": resourceVSphereVirtualMachine(),
"vsphere_nas_datastore": resourceVSphereNasDatastore(),
"vsphere_vmfs_datastore": resourceVSphereVmfsDatastore(),
},
