Initial commit

alfredkrohmer committed Jul 7, 2020 · commit b1bdbfc
Showing 14 changed files with 848 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
/efs-csi-pv-provisioner
13 changes: 13 additions & 0 deletions Dockerfile
@@ -0,0 +1,13 @@
FROM golang:1.14 AS build

WORKDIR /app

ADD ./ /app/
RUN go build -ldflags '-extldflags "-fno-PIC -static"' -buildmode pie -tags 'osusergo netgo static_build' . && strip efs-csi-pv-provisioner
RUN go test .

FROM scratch

ENTRYPOINT ["/efs-csi-pv-provisioner"]

COPY --from=build /app/efs-csi-pv-provisioner /
76 changes: 76 additions & 0 deletions README.md
@@ -0,0 +1,76 @@
# AWS EFS CSI PV provisioner

A Kubernetes CSI PV provisioner that dynamically provisions Persistent Volumes (PVs) in response to user-requested Persistent Volume Claims (PVCs). Each PV / PVC is a subdirectory on a single, cluster-wide EFS file system. Works in conjunction with the [AWS EFS CSI driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver).

## Installation

1. Create an EFS filesystem and mount targets for your cluster.
2. Install the [AWS EFS CSI driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver) with a corresponding `StorageClass` called `efs-sc` (see the sketch after this list).
3. Build a Docker image with the Dockerfile in this repository and push it into a repository. Put the image URL into `deploy/deployment.yaml`.
4. Put your EFS file system IDs into `deploy/deployment.yaml` and `deploy/pv.yaml`.
5. (Optional) Modify the desired mount options in `deploy/pv.yaml` and `deploy/sc.yaml` (e.g. to disable TLS and IAM if not needed).
6. Apply the manifests: `kubectl -n kube-system apply -f deploy/`
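
For orientation, the two `StorageClass` objects involved might look roughly like the following. This is a minimal sketch, not the shipped manifests: the actual definitions come from the EFS CSI driver installation and `deploy/sc.yaml`, so all fields here are assumptions except the provisioner names, which match `efsCsiDriverName` and `provisionerName` in the Go source below.

```yaml
# Sketch only: efs-sc is consumed by the AWS EFS CSI driver (step 2),
# efs is the class this provisioner reacts to (assumed shape of deploy/sc.yaml).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: efs-sc
provisioner: efs.csi.aws.com
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: efs
provisioner: aws.k8s.logmein.com/efs-csi-pv-provisioner
mountOptions: # assumed; tls/iam are copied into the PV, as in the example below
  - tls
  - iam
reclaimPolicy: Delete
```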

### Creating a PVC

Apply the following manifest:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-efs-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi # doesn't matter but is a required field
  storageClassName: efs
  volumeMode: Filesystem
```
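
Save the manifest and apply it, e.g. `kubectl apply -f pvc.yaml` (the file name is arbitrary).
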
A corresponding PV called `pvc-<UID of PVC>` will be created and bound to the PVC. This PV uses the AWS EFS CSI driver. A subdirectory called `<namespace of PVC>-<name of PVC>-<name of PV>` will be created on the configured EFS file system:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-cdd36709-bd3b-11ea-9990-12db9e7ffa3d
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 1Mi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: my-efs-pvc
    namespace: default
    resourceVersion: "836626609"
    uid: cdd36709-bd3b-11ea-9990-12db9e7ffa3d
  csi:
    driver: efs.csi.aws.com
    volumeHandle: fs-12345678:/persistentvolumes/default-my-efs-pvc-pvc-cdd36709-bd3b-11ea-9990-12db9e7ffa3d
  mountOptions:
    - tls
    - iam
  persistentVolumeReclaimPolicy: Delete
  storageClassName: efs
  volumeMode: Filesystem
status:
  phase: Bound
```

When a pod requests this PVC to be mounted, the AWS EFS CSI driver daemonset takes care of executing the actual mount. The AWS EFS CSI PV provisioner in this repository is only responsible for creating the PVs in response to PVCs.
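
For illustration, a minimal pod mounting this PVC could look like the following sketch (pod name, container name, and image are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: efs-example # hypothetical name
  namespace: default
spec:
  containers:
    - name: app
      image: busybox # placeholder image
      volumeMounts:
        - name: data
          mountPath: /data # the EFS subdirectory is visible here
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: my-efs-pvc # the PVC created above
```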

## "How it works" overview diagram

![](docs/overview.svg)

## TODOs

* create CI to build and push Docker image
* provide Helm chart
* potentially integrate into AWS EFS CSI driver
226 changes: 226 additions & 0 deletions aws-efs-csi-pv-provisioner.go
@@ -0,0 +1,226 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/klog"
	"sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
	"sigs.k8s.io/sig-storage-lib-external-provisioner/gidallocator"
)

const provisionerName = "aws.k8s.logmein.com/efs-csi-pv-provisioner"
const efsCsiDriverName = "efs.csi.aws.com"

type efsProvisioner struct {
	fileSystemID string
	mountPoint   string
	subPath      string
	allocator    gidallocator.Allocator
}

var _ controller.Provisioner = &efsProvisioner{}

// Provision creates a storage asset and returns a PV object representing it.
func (p *efsProvisioner) Provision(options controller.ProvisionOptions) (*v1.PersistentVolume, error) {
	if options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("claim.Spec.Selector is not supported")
	}

	gidAllocate := true
	for k, v := range options.StorageClass.Parameters {
		switch strings.ToLower(k) {
		case "gidmin":
			// Let allocator handle
		case "gidmax":
			// Let allocator handle
		case "gidallocate":
			b, err := strconv.ParseBool(v)
			if err != nil {
				return nil, fmt.Errorf("invalid value %s for parameter %s: %v", v, k, err)
			}
			gidAllocate = b
		}
	}

	var gid *int
	if gidAllocate {
		allocate, err := p.allocator.AllocateNext(options)
		if err != nil {
			return nil, err
		}
		gid = &allocate
	}

	err := p.createVolume(p.getLocalPath(options), gid)
	if err != nil {
		return nil, err
	}

	mountOptions := []string{}
	if options.StorageClass.MountOptions != nil {
		mountOptions = options.StorageClass.MountOptions
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: options.PVName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy,
			AccessModes:                   options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
			},
			StorageClassName: "efs-sc",
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CSI: &v1.CSIPersistentVolumeSource{
					Driver:       efsCsiDriverName,
					VolumeHandle: fmt.Sprintf("%s:%s", p.fileSystemID, p.getRemotePath(options)),
				},
			},
			MountOptions: mountOptions,
		},
	}

	if gidAllocate {
		pv.ObjectMeta.Annotations = map[string]string{
			gidallocator.VolumeGidAnnotationKey: strconv.FormatInt(int64(*gid), 10),
		}
	}

	return pv, nil
}

func (p *efsProvisioner) createVolume(path string, gid *int) error {
	perm := os.FileMode(0777)
	if gid != nil {
		perm = os.FileMode(0771 | os.ModeSetgid)
	}

	if err := os.MkdirAll(path, perm); err != nil {
		return err
	}

	// Due to umask, need to chmod
	if err := os.Chmod(path, perm); err != nil {
		os.RemoveAll(path)
		return err
	}

	if gid != nil {
		if err := os.Chown(path, os.Getuid(), *gid); err != nil {
			os.RemoveAll(path)
			return err
		}
	}

	return nil
}

func (p *efsProvisioner) getLocalPath(options controller.ProvisionOptions) string {
	return path.Join(p.mountPoint, "/", p.subPath, "/", p.getDirectoryName(options))
}

func (p *efsProvisioner) getRemotePath(options controller.ProvisionOptions) string {
	return path.Join("/", p.subPath, "/", p.getDirectoryName(options))
}

func (p *efsProvisioner) getDirectoryName(options controller.ProvisionOptions) string {
	return options.PVC.Namespace + "-" + options.PVC.Name + "-" + options.PVName
}

// Delete removes the storage asset that was created by Provision represented
// by the given PV.
func (p *efsProvisioner) Delete(volume *v1.PersistentVolume) error {
	// TODO: decide whether a failed GID release can be ignored
	err := p.allocator.Release(volume)
	if err != nil {
		return err
	}

	path, err := p.getLocalPathToDelete(volume.Spec.CSI)
	if err != nil {
		return err
	}

	if err := os.RemoveAll(path); err != nil {
		return err
	}

	return nil
}

func (p *efsProvisioner) getLocalPathToDelete(csi *v1.CSIPersistentVolumeSource) (string, error) {
	if csi.Driver != efsCsiDriverName {
		return "", fmt.Errorf("volume's driver %s is not %s", csi.Driver, efsCsiDriverName)
	}

	parts := strings.Split(csi.VolumeHandle, ":")
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid volumeHandle: %s", csi.VolumeHandle)
	}

	if parts[0] != p.fileSystemID {
		return "", fmt.Errorf("file system ID %s in volumeHandle doesn't match configured file system ID %s", parts[0], p.fileSystemID)
	}

	subPath := filepath.Clean(parts[1])
	prefix := path.Join("/", p.subPath) + "/"
	if !strings.HasPrefix(subPath, prefix) || subPath == prefix {
		return "", fmt.Errorf("invalid subpath %s in volume", parts[1])
	}

	return path.Join(p.mountPoint, "/", parts[1]), nil
}

func main() {
	var fileSystemID, mountPoint, subPath string
	flag.StringVar(&fileSystemID, "file-system-id", "", "the ID of the EFS file system (fs-abcdefg)")
	flag.StringVar(&mountPoint, "mountpoint", "/efs", "the path in this pod where the EFS file system is mounted")
	flag.StringVar(&subPath, "subpath", "/persistentvolumes", "the subpath in the EFS file system that will be used for persistent volumes")
	flag.Parse()
	flag.Set("logtostderr", "true")

	config, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatalf("Failed to create config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		klog.Fatalf("Failed to create client: %v", err)
	}

	// The controller needs to know what the server version is because out-of-tree
	// provisioners aren't officially supported until 1.5
	serverVersion, err := clientset.Discovery().ServerVersion()
	if err != nil {
		klog.Fatalf("Error getting server version: %v", err)
	}

	efsProvisioner := &efsProvisioner{
		fileSystemID: fileSystemID,
		mountPoint:   mountPoint,
		subPath:      subPath,
		allocator:    gidallocator.New(clientset),
	}

	pc := controller.NewProvisionController(
		clientset,
		provisionerName,
		efsProvisioner,
		serverVersion.GitVersion,
	)

	pc.Run(wait.NeverStop)
}