diff --git a/nfs-client/README.md b/nfs-client/README.md index 64d214ed849..0e9addfb84c 100644 --- a/nfs-client/README.md +++ b/nfs-client/README.md @@ -22,7 +22,7 @@ $ helm install stable/nfs-client-provisioner --set nfs.server=x.x.x.x --set nfs. **Step 1: Get connection information for your NFS server**. Make sure your NFS server is accessible from your Kubernetes cluster and get the information you need to connect to it. At a minimum you will need its hostname. -**Step 2: Get the NFS-Client Provisioner files**. To setup the provisioner you will download a set of YAML files, edit them to add your NFS server's connection information and then apply each with the ``kubectl`` / ``oc`` command. +**Step 2: Get the NFS-Client Provisioner files**. To set up the provisioner you will download a set of YAML files, edit them to add your NFS server's connection information and then apply each with the ``kubectl`` / ``oc`` command. Get all of the files in the [deploy](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client/deploy) directory of this repository. These instructions assume that you have cloned the [external-storage](https://github.com/kubernetes-incubator/external-storage) repository and have a bash-shell open in the ``nfs-client`` directory. @@ -40,7 +40,7 @@ $ kubectl create -f deploy/rbac.yaml OpenShift: -On some installations of OpenShift the default admin user does not have cluster-admin permissions. If these commands fail refer to the OpenShift documentation for **User and Role Management** or contact your OpenShift provider to help you grant the right permissions to your admin user. +On some installations of OpenShift the default admin user does not have cluster-admin permissions. If these commands fail refer to the OpenShift documentation for **User and Role Management** or contact your OpenShift provider to help you grant the right permissions to your admin user.
```sh # Set the subject of the RBAC objects to the current namespace where the provisioner is being deployed @@ -94,6 +94,8 @@ spec: path: /var/nfs ``` +**Alias mode:** use the provisioner in this mode to share the same existing NFS claim with multiple namespaces, without manually propagating the server/path in each namespace's claim. For example, first create a `data-original` claim as normal, through any provisioner such as `example.com/efs-aws` or the `fuseim.pri/ifs` example below. In the same namespace of your choice, run a new NFS client provisioner that uses the claim. Set NFS_SERVER to the magic value of `--alias`. Give the new deployment a clearer name, `nfs-alias-provisioner`, and set PROVISIONER_NAME to `foo.com/nfs-alias-provisioner`. Then create a StorageClass `nfs-alias` with its provisioner set to `foo.com/nfs-alias-provisioner`. Now, every new `nfs-alias` claim you create in any namespace will have the same `server:path` as the `data-original` volume. + You may also want to change the PROVISIONER_NAME above from ``fuseim.pri/ifs`` to something more descriptive like ``nfs-storage``, but if you do remember to also change the PROVISIONER_NAME in the storage class definition below: This is `deploy/class.yaml` which defines the NFS-Client's Kubernetes Storage Class: diff --git a/nfs-client/cmd/nfs-client-provisioner/provisioner.go b/nfs-client/cmd/nfs-client-provisioner/provisioner.go index b6f7c8161c2..089a435e4a5 100644 --- a/nfs-client/cmd/nfs-client-provisioner/provisioner.go +++ b/nfs-client/cmd/nfs-client-provisioner/provisioner.go @@ -26,10 +26,11 @@ import ( "strings" "k8s.io/kubernetes/pkg/apis/core/v1/helper" + mnt "k8s.io/kubernetes/pkg/util/mount" "github.com/golang/glog" "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -39,12 +40,14 @@ import ( const ( 
provisionerNameKey = "PROVISIONER_NAME" + magicAliasHostname = "--alias" ) type nfsProvisioner struct { client kubernetes.Interface server string path string + static bool } const ( @@ -54,24 +57,17 @@ const ( var _ controller.Provisioner = &nfsProvisioner{} func (p *nfsProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) { + var path string + var err error + if options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim Selector is not supported") } glog.V(4).Infof("nfs provisioner: VolumeOptions %v", options) - pvcNamespace := options.PVC.Namespace - pvcName := options.PVC.Name - - pvName := strings.Join([]string{pvcNamespace, pvcName, options.PVName}, "-") - - fullPath := filepath.Join(mountPath, pvName) - glog.V(4).Infof("creating path %s", fullPath) - if err := os.MkdirAll(fullPath, 0777); err != nil { - return nil, errors.New("unable to create directory to provision new pv: " + err.Error()) + if path, err = p.getOrMakeDir(options); err != nil { + return nil, err } - os.Chmod(fullPath, 0777) - - path := filepath.Join(p.path, pvName) pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -88,7 +84,7 @@ func (p *nfsProvisioner) Provision(options controller.VolumeOptions) (*v1.Persis NFS: &v1.NFSVolumeSource{ Server: p.server, Path: path, - ReadOnly: false, + ReadOnly: false, // Pass ReadOnly through if in alias mode? }, }, }, @@ -96,6 +92,27 @@ func (p *nfsProvisioner) Provision(options controller.VolumeOptions) (*v1.Persis return pv, nil } +// If in alias mode, forward the server:path details from the PVC we mounted. +// If not, create a new directory. 
+func (p *nfsProvisioner) getOrMakeDir(options controller.VolumeOptions) (_ string, err error) { + if p.static { + return p.path, nil + } + + pvcNamespace := options.PVC.Namespace + pvcName := options.PVC.Name + pvName := strings.Join([]string{pvcNamespace, pvcName, options.PVName}, "-") + + fullPath := filepath.Join(mountPath, pvName) + glog.V(4).Infof("creating path %s", fullPath) + if err = os.MkdirAll(fullPath, 0777); err == nil { + os.Chmod(fullPath, 0777) + return filepath.Join(p.path, pvName), nil + } + err = errors.New("unable to create directory to provision new pv: " + err.Error()) + return +} + func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error { path := volume.Spec.PersistentVolumeSource.NFS.Path pvName := filepath.Base(path) @@ -145,6 +162,17 @@ func (p *nfsProvisioner) getClassForVolume(pv *v1.PersistentVolume) (*storage.St return class, nil } +// Return the server and path parts for the given NFS mount +func getDetailsForMountPoint(m string) (server, path string, err error) { + if path, _, err = mnt.GetDeviceNameFromMount(mnt.New(""), m); err == nil { + if parts := strings.Split(path, ":"); len(parts) == 2 { + return parts[0], parts[1], err + } + err = errors.New("Can't parse server:path from device string: " + path) + } + return +} + func main() { flag.Parse() flag.Set("logtostderr", "true") @@ -180,10 +208,23 @@ func main() { glog.Fatalf("Error getting server version: %v", err) } + // If NFS_SERVER=="--alias", we just pass through the server/path we have + // mounted and never make a new directory for each volume we provision. 
+ var static bool + if server == magicAliasHostname { + // Figure just once and store the server/path pair + if server, path, err = getDetailsForMountPoint(mountPath); err != nil { + glog.Fatalf("Error getting server details for %v: %v", mountPath, err) + } + glog.Infof("Aliasing all new volumes to %v::%v", server, path) + static = true + } + clientNFSProvisioner := &nfsProvisioner{ client: clientset, server: server, path: path, + static: static, } // Start the provision controller which will dynamically provision efs NFS // PVs