From 5229c6fd74d2e80002d3f8170cb26b0ba0448fbb Mon Sep 17 00:00:00 2001 From: Pawan Date: Thu, 12 Sep 2019 12:32:17 +0530 Subject: [PATCH] feat(zfs-localpv): initial commit Provisions and deprovisions volumes on the node where a ZFS pool has already been set up. The pool name and the volume parameters have to be specified in the storage class that will be used to provision the volume. Signed-off-by: Pawan --- .gitignore | 6 + .travis.yml | 41 + BUILDMETA | 1 + Gopkg.lock | 788 ++++++++++++++++++ Gopkg.toml | 43 + Makefile | 153 ++++ VERSION | 1 + buildscripts/build.sh | 135 +++ buildscripts/custom-boilerplate.go.txt | 15 + buildscripts/push | 102 +++ buildscripts/test-cov.sh | 13 + buildscripts/travis-build.sh | 63 ++ buildscripts/zfs-driver/Dockerfile | 25 + buildscripts/zfs-driver/entrypoint.sh | 5 + cmd/controller/controller.go | 247 ++++++ cmd/controller/controller_base.go | 136 +++ cmd/controller/start.go | 110 +++ cmd/main.go | 87 ++ deploy/sample/fio.yaml | 54 ++ deploy/sample/zfspvcr.yaml | 19 + deploy/zfs-operator.yaml | 457 ++++++++++ pkg/apis/openebs.io/core/v1alpha1/doc.go | 21 + pkg/apis/openebs.io/core/v1alpha1/register.go | 77 ++ .../openebs.io/core/v1alpha1/zfsvolume.go | 100 +++ .../core/v1alpha1/zz_generated.deepcopy.go | 127 +++ pkg/builder/build.go | 208 +++++ pkg/builder/buildlist.go | 72 ++ pkg/builder/kubernetes.go | 427 ++++++++++ pkg/builder/volume.go | 115 +++ pkg/common/env/env.go | 106 +++ pkg/common/errors/errors.go | 99 +++ pkg/common/errors/types.go | 188 +++++ pkg/common/kubernetes/client/client.go | 243 ++++++ pkg/common/kubernetes/client/client_test.go | 310 +++++++ pkg/config/config.go | 51 ++ pkg/driver/agent.go | 297 +++++++ pkg/driver/controller.go | 375 +++++++++ pkg/driver/driver.go | 104 +++ pkg/driver/grpc.go | 170 ++++ pkg/driver/identity.go | 112 +++ pkg/response/create.go | 64 ++ pkg/response/delete.go | 41 + pkg/version/version.go | 115 +++ pkg/zfs/mount.go | 145 ++++ pkg/zfs/volume.go | 138 +++ pkg/zfs/zfs_util.go | 133 +++ 46 files changed, 6339 insertions(+) create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 BUILDMETA create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 Makefile create mode 100644 VERSION create mode 100755 buildscripts/build.sh create mode 100644 buildscripts/custom-boilerplate.go.txt create mode 100755 buildscripts/push create mode 100755 buildscripts/test-cov.sh create mode 100755 buildscripts/travis-build.sh create mode 100644 buildscripts/zfs-driver/Dockerfile create mode 100644 buildscripts/zfs-driver/entrypoint.sh create mode 100644 cmd/controller/controller.go create mode 100644 cmd/controller/controller_base.go create mode 100644 cmd/controller/start.go create mode 100644 cmd/main.go create mode 100644 deploy/sample/fio.yaml create mode 100644 deploy/sample/zfspvcr.yaml create mode 100644 deploy/zfs-operator.yaml create mode 100644 pkg/apis/openebs.io/core/v1alpha1/doc.go create mode 100644 pkg/apis/openebs.io/core/v1alpha1/register.go create mode 100644 pkg/apis/openebs.io/core/v1alpha1/zfsvolume.go create mode 100644 pkg/apis/openebs.io/core/v1alpha1/zz_generated.deepcopy.go create mode 100644 pkg/builder/build.go create mode 100644 pkg/builder/buildlist.go create mode 100644 pkg/builder/kubernetes.go create mode 100644 pkg/builder/volume.go create mode 100644 pkg/common/env/env.go create mode 100644 pkg/common/errors/errors.go create mode 100644 pkg/common/errors/types.go create mode 100644 pkg/common/kubernetes/client/client.go create mode 100644 
pkg/common/kubernetes/client/client_test.go create mode 100644 pkg/config/config.go create mode 100644 pkg/driver/agent.go create mode 100644 pkg/driver/controller.go create mode 100644 pkg/driver/driver.go create mode 100644 pkg/driver/grpc.go create mode 100644 pkg/driver/identity.go create mode 100644 pkg/response/create.go create mode 100644 pkg/response/delete.go create mode 100644 pkg/version/version.go create mode 100644 pkg/zfs/mount.go create mode 100644 pkg/zfs/volume.go create mode 100644 pkg/zfs/zfs_util.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..a4581f789 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +/bin +buildscripts/zfs-driver/zfs-driver +cscope* +tags +*.swp +*.swo diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..8cef3b8c5 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,41 @@ +sudo: required +dist: xenial +#group: edge +env: +services: + - docker +language: go +go: + - 1.12.5 + +addons: + apt: + update: true + +install: + - make bootstrap + - make format +script: + - ./buildscripts/travis-build.sh +after_success: + - make deploy-images + - bash <(curl -s https://codecov.io/bash) +notifications: + email: + recipients: + - pawan@openebs.io +deploy: + provider: releases + api_key: + secure: na/NPsgDHGGRaWxRSCo5gH3TPrEutNvhEK3q2k99bbW2INe0FZ+FIPeuL9rqV8eCQi8SWJGHNFjFyMRR798RRSn8bdiK0pxJXzYvphUEH2Azzoqr65TaJHpHNTkv1WTK9OtgahT71MbmIx777U6Vd6ylyJyWja+LPhY/z66XOYQmuXR2ds7FRBlAcWg8C0KIFTLYlms5C9RKwLS2jP5C8tlJBQXMDEk7ejR1mKn3R6KQyyHICGKPGhNE+d7iMs0qhhuGIhcDwXl1olChAAITOGyWEmjc2GeUbFALo8OXdQx9qBO7saw75IzyYV/itBjE0RpuM90jKuFzKGiotSyw7Fs0KgrjHC7keuaNMqBWgKl6qoAj2a5VVEBtx8k941hRLs/VpjQ+K8wJJpjlSR8vh906b8e+HL8BKJEifF09fKBTLd0AWy9I3x6TolmRqiamvIHEkup1fZqblhhe2ZLvwuuyfl3t1FTkkon5BASgSqFdBAhR3eAD/LOtrghjaRX7wCZCzKDEaS9QLeu9UbC+bmnaOo60Gaeyp/DN5FLc4cV/vZozroesu+UEtQIrC6VDlFNYfY0V1ETKpfEQ4I8yByDHx/KjMWDyUGd8e5tm0qsD1lW1yVekh5CjQRHpzShkmKvFieeVfqVy/aGB4GrTeWSxcKiN8W0ekcgCRYut6y0= + file_glob: true + file: + - bin/zfs-driver/zfs-driver-linux_*.zip + skip_cleanup: true + overwrite: true + on: + repo: openebs/zfs-localpv + tags: true +branches: + except: + - /^*-v[0-9]/ diff --git a/BUILDMETA b/BUILDMETA new file mode 100644 index 000000000..54ae09e0c --- /dev/null +++ b/BUILDMETA @@ -0,0 +1 @@ +unreleased diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..528cc9a85 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,788 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:04457f9f6f3ffc5fea48e71d62f2ca256637dee0a04d710288e27e05c8b41976" + name = "github.com/Sirupsen/logrus" + packages = ["."] + pruneopts = "UT" + revision = "839c75faf7f98a33d445d181f3018b5c3409a45e" + version = "v1.4.2" + +[[projects]] + digest = "1:7f21fa1f8ab9a529dba26a7e9cf20de217c307fa1d96cb599d3afd9e5c83e9d6" + name = "github.com/container-storage-interface/spec" + packages = ["lib/go/csi"] + pruneopts = "UT" + revision = "f750e6765f5f6b4ac0e13e95214d58901290fb4b" + version = "v1.1.0" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:ac425d784b13d49b37a5bbed3ce022677f8f3073b216f05d6adcb9303e27fa0f" + name = "github.com/evanphx/json-patch" + packages = ["."] + pruneopts = "UT" + revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f" + version = "v4.5.0" + +[[projects]] + digest = "1:8a85f428bc6ebfa87f53216b6e43b52b30eccbcffcbd6b057a69ee16718a2248" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "UT" + revision = "0ca988a254f991240804bf9821f3450d87ccbb1b" + version = "v1.3.0" + +[[projects]] + branch = "master" + digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "UT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "869f871628b6baa9cfbc11732cdf6546b17c1298" + +[[projects]] + digest = "1:43299f4347f25208606272268dc33dbbc45d5f9b3a68c51dca090960e9b61522" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:a6181aca1fd5e27103f9a920876f29ac72854df7345a39f3b01e61c8c94cc8af" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "UT" + revision = "f140a6486e521aad38f5917de355cbf147cc0496" + version = "v1.0.0" + +[[projects]] + digest = "1:ca4524b4855ded427c7003ec903a5c854f37e7b1e8e2a93277243462c5b753a8" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "UT" + revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab" + version = "v0.3.1" + +[[projects]] + digest = "1:c77361e611524ec8f2ad37c408c3c916111a70b6acf806a1200855696bf8fa4d" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "UT" + revision = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d" + version = "v0.5.3" + +[[projects]] + digest = "1:a0cefd27d12712af4b5018dc7046f245e1e3b5760e2e848c30b171b570708f9b" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "UT" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "UT" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = 
"1:709cd2a2c29cc9b89732f6c24846bbb9d6270f28ef5ef2128cc73bd0d6d7bff9" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "UT" + revision = "27518f6661eba504be5a7a9a9f6d9460d892ade3" + version = "v1.1.7" + +[[projects]] + digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" + +[[projects]] + digest = "1:5f039a8e43dc5b00adee7b38e39baf6c36f607372940c11975f00ec9c5f298ae" + name = "github.com/kubernetes-csi/csi-lib-utils" + packages = ["protosanitizer"] + pruneopts = "UT" + revision = "b8b7a89535d80e12f2c0f4c53cfb981add8aaca2" + version = "v0.6.1" + +[[projects]] + digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "UT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "UT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:e096613fb7cf34743d49af87d197663cfccd61876e2219853005a57baedfa562" + name = "github.com/spf13/cobra" + packages = ["."] + pruneopts = "UT" + revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5" + version = "v0.0.5" + +[[projects]] + digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "UT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + branch = "master" + digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "UT" + revision = "9756ffdc24725223350eb3266ffb92590d28f278" + +[[projects]] + branch = "master" + digest = "1:e93fe09ca93cf16f8b2dc48053f56c2f91ed4f3fd16bfaf9596b6548c7b48a7f" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "ba9fcec4b297b415637633c5a6e8fa592e4a16c3" + +[[projects]] + branch = "master" + digest = "1:8d1c112fb1679fa097e9a9255a786ee47383fa2549a3da71bcb1334a693ebcfe" + name = "golang.org/x/oauth2" + packages = [ + ".", + "internal", + ] + pruneopts = "UT" + revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" + +[[projects]] + branch = "master" + digest = "1:d94059c196c160bd1c4030d49ffaa39a456be516501e5916bea663f5d79a75ec" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "UT" + revision = "9109b7679e13aa34a54834cfb4949cac4b96e576" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + 
"unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "UT" + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" + +[[projects]] + branch = "master" + digest = "1:b36898a8dd62a54ce3b77ace9fa0e180fa35f1c8b3d56db2cd435f6c0cc815ff" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/packages", + "go/types/typeutil", + "imports", + "internal/fastwalk", + "internal/gopathwalk", + "internal/imports", + "internal/module", + "internal/semver", + ] + pruneopts = "UT" + revision = "afe7f8212f0d48598f7ba258eba2127cbfb7c3e9" + +[[projects]] + digest = "1:6eb6e3b6d9fffb62958cf7f7d88dbbe1dd6839436b0802e194c590667a40412a" + name = "google.golang.org/appengine" + packages = [ + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "UT" + revision = "5f2a59506353b8d5ba8cbbcd9f3c1f41f1eaf079" + version = "v1.6.2" + +[[projects]] + branch = "master" + digest = "1:583a0c80f5e3a9343d33aea4aead1e1afcc0043db66fdf961ddd1fe8cd3a4faf" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "24fa4b261c55da65468f2abfdae2b024eef27dfb" + +[[projects]] + digest = "1:3b97661db2e5d4c87f7345e875ea28f911e54c715ba0a74be08e1649d67e05cd" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb" + version = "v1.23.0" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "UT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = "1:86ad5797d1189de342ed6988fbb76b92dc0429a4d677ad69888d6137efa5712e" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "auditregistration/v1alpha1", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "networking/v1beta1", + "node/v1alpha1", + 
"node/v1beta1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "UT" + revision = "40a48860b5abbba9aa891b02b32da429b08d96a0" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:05e778704c75489c318be7673685d848eb22b7e8ec47204c82a399b19b38bdcd" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/strategicpatch", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "UT" + revision = "d7deff9243b165ee192f5551710ea4285dcfd615" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:551a9be727bf4521e11bd51ad6e972d7f937a5962e1fdcfe322f5686298aa3a9" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/fake", + "dynamic", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/autoscaling/v2beta2", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + "informers/coordination", + "informers/coordination/v1", + "informers/coordination/v1beta1", + "informers/core", + "informers/core/v1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/networking/v1beta1", + "informers/node", + "informers/node/v1alpha1", + "informers/node/v1beta1", + "informers/policy", + "informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + "informers/scheduling", + "informers/scheduling/v1", + "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + "informers/storage/v1alpha1", + "informers/storage/v1beta1", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/auditregistration/v1alpha1", + 
"kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/coordination/v1", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1beta1", + "kubernetes/typed/node/v1alpha1", + "kubernetes/typed/node/v1beta1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", + "listers/autoscaling/v1", + "listers/autoscaling/v2beta1", + "listers/autoscaling/v2beta2", + "listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", + "listers/certificates/v1beta1", + "listers/coordination/v1", + "listers/coordination/v1beta1", + "listers/core/v1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/networking/v1beta1", + "listers/node/v1alpha1", + "listers/node/v1beta1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "rest", + "rest/watch", + "testing", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/record/util", + "tools/reference", + "transport", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/keyutil", + "util/retry", + "util/workqueue", + ] + pruneopts = "UT" + revision = "6ee68ca5fd8355d024d02f9db0b3b667e8357a0f" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:ecb748d587016adfa22178f69cbecdf36b4dc871c3fbad3e08354863e14bcd7b" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "pkg/namer", + "pkg/util", + ] + pruneopts = "UT" + revision = 
"50b561225d70b3eb79a1faafd3dfe7b1a62cbe73" + version = "kubernetes-1.14.0" + +[[projects]] + branch = "master" + digest = "1:a00af03291badd21310c2c66efe801d0afb691f7552ce6228244e0070406c5a6" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "UT" + revision = "a874a240740c2ae854082ec73d46c5efcedd2149" + +[[projects]] + digest = "1:ccb9be4c583b6ec848eb98aa395a4e8c8f8ad9ebb823642c0dd1c1c45939a5bb" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "UT" + revision = "3ca30a56d8a775276f9cdae009ba326fdc05af7f" + version = "v0.4.0" + +[[projects]] + branch = "master" + digest = "1:22abb5d4204ab1a0dcc9cda64906a31c43965ff5159e8b9f766c9d2a162dbed5" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + pruneopts = "UT" + revision = "743ec37842bffe49dd4221d9026f30fb1d5adbc4" + +[[projects]] + digest = "1:943dfbf554e6c66394ca3320f7e495237e16afd0ded7048da57d89d91a16c1e8" + name = "k8s.io/kubernetes" + packages = [ + "pkg/util/mount", + "pkg/util/slice", + ] + pruneopts = "UT" + revision = "2d3c76f9091b6bec110a5e63777c332469e0cba2" + version = "v1.15.3" + +[[projects]] + branch = "master" + digest = "1:e2b6ca8db8676351de30e24f0a6a5f447f2c1672abd47ada2da2fa62d9042cd6" + name = "k8s.io/utils" + packages = [ + "buffer", + "exec", + "integer", + "io", + "keymutex", + "path", + "trace", + ] + pruneopts = "UT" + revision = "3a4a5477acf81b48e20870a3b9dc743f63c66730" + +[[projects]] + digest = "1:89915cfa579f04f253da4b56246d337907e99745c7afcbff800f8e0dba0d5191" + name = "sigs.k8s.io/controller-runtime" + packages = [ + "pkg/manager/signals", + "pkg/runtime/signals", + ] + pruneopts = "UT" + revision = "e1159d6655b260c4812fd0792cd1344ecc96a57e" + version = "v0.2.0" + +[[projects]] + digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849" + name = "sigs.k8s.io/yaml" + packages = ["."] + pruneopts = "UT" + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/Sirupsen/logrus", + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/glog", + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer", + "github.com/pkg/errors", + "github.com/spf13/cobra", + "golang.org/x/net/context", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/status", + "k8s.io/api/core/v1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/informers", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/workqueue", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + 
"k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/util/slice", + "sigs.k8s.io/controller-runtime/pkg/runtime/signals", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..5d6c1bcc2 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,43 @@ +# Adding the generators here allows us to vendor the code-generator package +# This is required since code-generator is not directly or transitively +# dependent +required = [ + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/defaulter-gen" + ] + +[[constraint]] + name = "k8s.io/api" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.14.0" + +[[override]] + name = "k8s.io/apiserver" + version = "kubernetes-1.14.0" + +[[override]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/client-go" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/code-generator" + version = "kubernetes-1.14.0" + +# Apply workaround from https://github.com/golang/dep/issues/1799 +[[override]] + source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" + name = "gopkg.in/fsnotify.v1" + +[prune] + unused-packages = true + go-tests = true diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..93abe652a --- /dev/null +++ b/Makefile @@ -0,0 +1,153 @@ +# list only csi source code directories +PACKAGES = $(shell go list ./... | grep -v 'vendor\|pkg/generated') + +# Lint our code. Reference: https://golang.org/cmd/vet/ +VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \ + -nilfunc -printf -rangeloops -shift -structtags -unsafeptr + +# Tools required for different make +# targets or for development purposes +EXTERNAL_TOOLS=\ + github.com/golang/dep/cmd/dep \ + golang.org/x/tools/cmd/cover \ + github.com/axw/gocov/gocov \ + gopkg.in/matm/v1/gocov-html \ + github.com/ugorji/go/codec/codecgen \ + github.com/onsi/ginkgo/ginkgo \ + github.com/onsi/gomega/... 
+ +ifeq (${IMAGE_TAG}, ) + IMAGE_TAG = ci + export IMAGE_TAG +endif + +ifeq (${TRAVIS_TAG}, ) + BASE_TAG = ci + export BASE_TAG +else + BASE_TAG = ${TRAVIS_TAG} + export BASE_TAG +endif + +# Specify the name for the binary +CSI_DRIVER=zfs-driver + +# Specify the date o build +BUILD_DATE = $(shell date +'%Y%m%d%H%M%S') + +.PHONY: all +all: test zfs-driver-image + +.PHONY: clean +clean: + go clean -testcache + rm -rf bin + rm -rf ${GOPATH}/bin/${CSI_DRIVER} + rm -rf ${GOPATH}/pkg/* + +.PHONY: format +format: + @echo "--> Running go fmt" + @go fmt $(PACKAGES) + +.PHONY: test +test: format + @echo "--> Running go test" ; + @go test $(PACKAGES) + +# Bootstrap downloads tools required +# during build +.PHONY: bootstrap +bootstrap: + @for tool in $(EXTERNAL_TOOLS) ; do \ + echo "+ Installing $$tool" ; \ + go get -u $$tool; \ + done + +# SRC_PKG is the path of code files +SRC_PKG := github.com/openebs/zfs-localpv/pkg + +# code generation for custom resources +.PHONY: kubegen +kubegen: kubegendelete deepcopy-install clientset-install lister-install informer-install + @GEN_SRC=openebs.io/core/v1alpha1 make deepcopy clientset lister informer + +# deletes generated code by codegen +.PHONY: kubegendelete +kubegendelete: + @rm -rf pkg/generated/clientset + @rm -rf pkg/generated/lister + @rm -rf pkg/generated/informer + +.PHONY: deepcopy-install +deepcopy-install: + @go install ./vendor/k8s.io/code-generator/cmd/deepcopy-gen + +.PHONY: deepcopy +deepcopy: + @echo "+ Generating deepcopy funcs for $(GEN_SRC)" + @deepcopy-gen \ + --input-dirs $(SRC_PKG)/apis/$(GEN_SRC) \ + --output-file-base zz_generated.deepcopy \ + --go-header-file ./buildscripts/custom-boilerplate.go.txt + +.PHONY: clientset-install +clientset-install: + @go install ./vendor/k8s.io/code-generator/cmd/client-gen + +.PHONY: clientset +clientset: + @echo "+ Generating clientsets for $(GEN_SRC)" + @client-gen \ + --fake-clientset=true \ + --input $(GEN_SRC) \ + --input-base $(SRC_PKG)/apis \ + --clientset-path $(SRC_PKG)/generated/clientset \ + --go-header-file ./buildscripts/custom-boilerplate.go.txt + +.PHONY: lister-install +lister-install: + @go install ./vendor/k8s.io/code-generator/cmd/lister-gen + +.PHONY: lister +lister: + @echo "+ Generating lister for $(GEN_SRC)" + @lister-gen \ + --input-dirs $(SRC_PKG)/apis/$(GEN_SRC) \ + --output-package $(SRC_PKG)/generated/lister \ + --go-header-file ./buildscripts/custom-boilerplate.go.txt + +.PHONY: informer-install +informer-install: + @go install ./vendor/k8s.io/code-generator/cmd/informer-gen + +.PHONY: informer +informer: + @echo "+ Generating informer for $(GEN_SRC)" + @informer-gen \ + --input-dirs $(SRC_PKG)/apis/$(GEN_SRC) \ + --versioned-clientset-package $(SRC_PKG)/generated/clientset/internalclientset \ + --listers-package $(SRC_PKG)/generated/lister \ + --output-package $(SRC_PKG)/generated/informer \ + --go-header-file ./buildscripts/custom-boilerplate.go.txt + +.PHONY: zfs-driver +zfs-driver: + @echo "--------------------------------" + @echo "+ Building ${CSI_DRIVER} " + @echo "--------------------------------" + @PNAME=${CSI_DRIVER} CTLNAME=${CSI_DRIVER} sh -c "'$(PWD)/buildscripts/build.sh'" + +.PHONY: zfs-driver-image +zfs-driver-image: zfs-driver + @echo "--------------------------------" + @echo "+ Generating ${CSI_DRIVER} image" + @echo "--------------------------------" + @cp bin/${CSI_DRIVER}/${CSI_DRIVER} buildscripts/${CSI_DRIVER}/ + cd buildscripts/${CSI_DRIVER} && sudo docker build -t openebs/${CSI_DRIVER}:${IMAGE_TAG} --build-arg BUILD_DATE=${BUILD_DATE} . 
&& sudo docker tag openebs/${CSI_DRIVER}:${IMAGE_TAG} quay.io/openebs/${CSI_DRIVER}:${IMAGE_TAG} + @rm buildscripts/${CSI_DRIVER}/${CSI_DRIVER} + +# Push images +deploy-images: + @DIMAGE="openebs/zfs-driver" ./buildscripts/push + diff --git a/VERSION b/VERSION new file mode 100644 index 000000000..6e8bf73aa --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/buildscripts/build.sh b/buildscripts/build.sh new file mode 100755 index 000000000..7a7b2a34f --- /dev/null +++ b/buildscripts/build.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +# +# This script builds the application from source for multiple platforms. +set -e + +# Get the parent directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )/../" && pwd )" + +# Change into that directory +cd "$DIR" + +# Get the git commit +if [ -f $GOPATH/src/github.com/openebs/zfs-localpv/GITCOMMIT ]; +then + GIT_COMMIT="$(cat $GOPATH/src/github.com/openebs/zfs-localpv/GITCOMMIT)" +else + GIT_COMMIT="$(git rev-parse HEAD)" +fi + +# Set BUILDMETA based on travis tag +if [[ -n "$TRAVIS_TAG" ]] && [[ $TRAVIS_TAG != *"RC"* ]]; then + echo "released" > BUILDMETA +fi + +# Get the version details +VERSION="$(cat $GOPATH/src/github.com/openebs/zfs-localpv/VERSION)" +VERSION_META="$(cat $GOPATH/src/github.com/openebs/zfs-localpv/BUILDMETA)" + +# Determine the arch/os combos we're building for +UNAME=$(uname) +ARCH=$(uname -m) +if [ "$UNAME" != "Linux" -a "$UNAME" != "Darwin" ] ; then + echo "Sorry, this OS is not supported yet." + exit 1 +fi + +if [ "$UNAME" = "Darwin" ] ; then + XC_OS="darwin" +elif [ "$UNAME" = "Linux" ] ; then + XC_OS="linux" +fi + +if [ "${ARCH}" = "i686" ] ; then + XC_ARCH='386' +elif [ "${ARCH}" = "x86_64" ] ; then + XC_ARCH='amd64' +else + echo "Unusable architecture: ${ARCH}" + exit 1 +fi + + +if [ -z "${PNAME}" ]; +then + echo "Project name not defined" + exit 1 +fi + +if [ -z "${CTLNAME}" ]; +then + echo "CTLNAME not defined" + exit 1 +fi + +# Delete the old dir +echo "==> Removing old directory..." +rm -rf bin/${PNAME}/* +mkdir -p bin/${PNAME}/ + +# If its dev mode, only build for ourself +if [[ "${DEV}" ]]; then + XC_OS=$(go env GOOS) + XC_ARCH=$(go env GOARCH) +fi + +# Build! +echo "==> Building ${CTLNAME} using $(go version)... " + +GOOS="${XC_OS}" +GOARCH="${XC_ARCH}" +output_name="bin/${PNAME}/"$GOOS"_"$GOARCH"/"$CTLNAME + +if [ $GOOS = "windows" ]; then + output_name+='.exe' +fi +env GOOS=$GOOS GOARCH=$GOARCH go build -ldflags \ + "-X github.com/openebs/zfs-localpv/pkg/version.GitCommit=${GIT_COMMIT} \ + -X main.CtlName='${CTLNAME}' \ + -X github.com/openebs/zfs-localpv/pkg/version.Version=${VERSION} \ + -X github.com/openebs/zfs-localpv/pkg/version.VersionMeta=${VERSION_META}"\ + -o $output_name\ + ./cmd + +echo "" + +# Move all the compiled things to the $GOPATH/bin +GOPATH=${GOPATH:-$(go env GOPATH)} +case $(uname) in + CYGWIN*) + GOPATH="$(cygpath $GOPATH)" + ;; +esac +OLDIFS=$IFS +IFS=: MAIN_GOPATH=($GOPATH) +IFS=$OLDIFS + +# Create the gopath bin if not already available +mkdir -p ${MAIN_GOPATH}/bin/ + +# Copy our OS/Arch to the bin/ directory +DEV_PLATFORM="./bin/${PNAME}/$(go env GOOS)_$(go env GOARCH)" +for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do + cp ${F} bin/${PNAME}/ + cp ${F} ${MAIN_GOPATH}/bin/ +done + +if [[ "x${DEV}" == "x" ]]; then + # Zip and copy to the dist dir + echo "==> Packaging..." 
+ for PLATFORM in $(find ./bin/${PNAME} -mindepth 1 -maxdepth 1 -type d); do + OSARCH=$(basename ${PLATFORM}) + echo "--> ${OSARCH}" + + pushd "$PLATFORM" >/dev/null 2>&1 + zip ../${PNAME}-${OSARCH}.zip ./* + popd >/dev/null 2>&1 + done +fi + +# Done! +echo +echo "==> Results:" +ls -hl bin/${PNAME}/ diff --git a/buildscripts/custom-boilerplate.go.txt b/buildscripts/custom-boilerplate.go.txt new file mode 100644 index 000000000..554649b83 --- /dev/null +++ b/buildscripts/custom-boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/buildscripts/push b/buildscripts/push new file mode 100755 index 000000000..e6f442002 --- /dev/null +++ b/buildscripts/push @@ -0,0 +1,102 @@ +#!/bin/bash +set -e + +if [ -z ${DIMAGE} ]; +then + echo "Error: DIMAGE is not specified"; + exit 1 +fi + +IMAGEID=$( sudo docker images -q ${DIMAGE}:ci ) +echo "${DIMAGE}:ci -> $IMAGEID" +if [ -z ${IMAGEID} ]; +then + echo "Error: unable to get IMAGEID for ${DIMAGE}:ci"; + exit 1 +fi + +# Generate a unique tag based on the commit and tag +BUILD_ID=$(git describe --tags --always) + +# Determine the current branch +CURRENT_BRANCH="" +if [ -z ${TRAVIS_BRANCH} ]; +then + CURRENT_BRANCH=$(git branch | grep \* | cut -d ' ' -f2) +else + CURRENT_BRANCH=${TRAVIS_BRANCH} +fi + +#Depending on the branch where builds are generated, +# set the tag CI (fixed) and build tags. +BUILD_TAG="${CURRENT_BRANCH}-${BUILD_ID}" +CI_TAG="${CURRENT_BRANCH}-ci" +if [ ${CURRENT_BRANCH} = "master" ]; then + CI_TAG="ci" +fi + +echo "Set the fixed ci image tag as: ${CI_TAG}" +echo "Set the build/unique image tag as: ${BUILD_TAG}" + +function TagAndPushImage() { + REPO="$1" + TAG="$2" + + IMAGE_URI="${REPO}:${TAG}"; + sudo docker tag ${IMAGEID} ${IMAGE_URI}; + echo " push ${IMAGE_URI}"; + sudo docker push ${IMAGE_URI}; +} + + +if [ ! -z "${DNAME}" ] && [ ! -z "${DPASS}" ]; +then + sudo docker login -u "${DNAME}" -p "${DPASS}"; + + # Push CI tagged image - :ci or :branch-ci + TagAndPushImage "${DIMAGE}" "${CI_TAG}" + + # Push unique tagged image - :master- or :branch- + # This unique/build image will be pushed to corresponding ci repo. + TagAndPushImage "${DIMAGE}-ci" "${BUILD_TAG}" + + if [ ! -z "${TRAVIS_TAG}" ] ; + then + # Push with different tags if tagged as a release + # When github is tagged with a release, then Travis will + # set the release tag in env TRAVIS_TAG + TagAndPushImage "${DIMAGE}" "${TRAVIS_TAG}" + TagAndPushImage "${DIMAGE}" "latest" + fi; +else + echo "No docker credentials provided. Skip uploading ${DIMAGE} to docker hub"; +fi; + +# Push ci image to quay.io for security scanning +if [ ! -z "${QNAME}" ] && [ ! -z "${QPASS}" ]; +then + sudo docker login -u "${QNAME}" -p "${QPASS}" quay.io; + + # Push CI tagged image - :ci or :branch-ci + TagAndPushImage "quay.io/${DIMAGE}" "${CI_TAG}" + + if [ ! 
-z "${TRAVIS_TAG}" ] ; + then + # Push with different tags if tagged as a release + # When github is tagged with a release, then Travis will + # set the release tag in env TRAVIS_TAG + TagAndPushImage "quay.io/${DIMAGE}" "${TRAVIS_TAG}" + TagAndPushImage "quay.io/${DIMAGE}" "latest" + fi; +else + echo "No docker credentials provided. Skip uploading ${DIMAGE} to quay"; +fi; + +#Push image to run openebs-e2e based on git commit +if [ ! -z "${COMMIT}" ]; +then + sudo docker login -u "${GITLAB_DNAME}" -p "${GITLAB_DPASS}"; + + # Push COMMIT tagged image - :COMMIT + TagAndPushImage "${DIMAGE}" "${COMMIT}" +fi; diff --git a/buildscripts/test-cov.sh b/buildscripts/test-cov.sh new file mode 100755 index 000000000..a68f52fd5 --- /dev/null +++ b/buildscripts/test-cov.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v 'vendor\|pkg/apis\|pkg/generated\|tests'); do + #TODO - Include -race while creating the coverage profile. + go test -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/buildscripts/travis-build.sh b/buildscripts/travis-build.sh new file mode 100755 index 000000000..9c9b397e3 --- /dev/null +++ b/buildscripts/travis-build.sh @@ -0,0 +1,63 @@ +#!/bin/bash +set -e +# Copyright 2019 The OpenEBS Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SRC_REPO="$TRAVIS_BUILD_DIR" +DST_REPO="$GOPATH/src/github.com/openebs/zfs-localpv" + +function checkGitDiff() { + if [[ `git diff --shortstat | wc -l` != 0 ]]; then echo "Some files got changed after $1";printf "\n";git diff --stat;printf "\n"; exit 1; fi +} + +if [ "$SRC_REPO" != "$DST_REPO" ]; +then + echo "Copying from $SRC_REPO to $DST_REPO" + # Get the git commit + echo "But first, get the git revision from $SRC_REPO" + GIT_COMMIT="$(git rev-parse HEAD)" + echo $GIT_COMMIT >> $SRC_REPO/GITCOMMIT + + mkdir -p $DST_REPO + cp -R $SRC_REPO/* $DST_REPO/ + cd $DST_REPO +fi + +#make golint-travis +#rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi + +echo "Running : make format" +make format +rc=$?; if [[ $rc != 0 ]]; then echo "make format failed"; exit $rc; fi +checkGitDiff "make format" +printf "\n" + +echo "Running : make kubegen" +make kubegen +rc=$?; if [[ $rc != 0 ]]; then echo "make kubegen failed"; exit $rc; fi +checkGitDiff "make kubegen" +printf "\n" + +./buildscripts/test-cov.sh +rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi + +make all +rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi + +if [ $SRC_REPO != $DST_REPO ]; +then + echo "Copying coverage.txt to $SRC_REPO" + cp coverage.txt $SRC_REPO/ + cd $SRC_REPO +fi diff --git a/buildscripts/zfs-driver/Dockerfile b/buildscripts/zfs-driver/Dockerfile new file mode 100644 index 000000000..382a06faa --- /dev/null +++ b/buildscripts/zfs-driver/Dockerfile @@ -0,0 +1,25 @@ +# +# This Dockerfile builds a recent volume-mgmt using the latest binary from +# volume-mgmt releases. 
+# + +FROM ubuntu:16.04 +RUN apt-get update; exit 0 +RUN apt-get -y install rsyslog +#RUN apt-get clean && rm -rf /var/lib/apt/lists/* + +COPY zfs-driver /usr/local/bin/ +COPY entrypoint.sh /usr/local/bin/ + +RUN chmod +x /usr/local/bin/entrypoint.sh + +ARG BUILD_DATE +LABEL org.label-schema.name="zfs-driver" +LABEL org.label-schema.description="OpenEBS" +LABEL org.label-schema.url="http://www.openebs.io/" +LABEL org.label-schema.vcs-url="https://github.com/openebs/zfs-localpv" +LABEL org.label-schema.schema-version="1.0" +LABEL org.label-schema.build-date=$BUILD_DATE + +ENTRYPOINT ["/usr/local/bin/zfs-driver"] +EXPOSE 7676 7777 diff --git a/buildscripts/zfs-driver/entrypoint.sh b/buildscripts/zfs-driver/entrypoint.sh new file mode 100644 index 000000000..a24b20d85 --- /dev/null +++ b/buildscripts/zfs-driver/entrypoint.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +set -ex + +/usr/local/bin/zfs-driver diff --git a/cmd/controller/controller.go b/cmd/controller/controller.go new file mode 100644 index 000000000..ca839b838 --- /dev/null +++ b/cmd/controller/controller.go @@ -0,0 +1,247 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "time" + + "github.com/Sirupsen/logrus" + + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" + zvol "github.com/openebs/zfs-localpv/pkg/zfs" + k8serror "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" +) + +// isDeletionCandidate checks if a zfs volume is a deletion candidate. +func (c *ZVController) isDeletionCandidate(zv *apis.ZFSVolume) bool { + return zv.ObjectMeta.DeletionTimestamp != nil +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the spcPoolUpdated resource +// with the current status of the resource. +func (c *ZVController) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the zv resource with this namespace/name + zv, err := c.zvLister.ZFSVolumes(namespace).Get(name) + if k8serror.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("zfsvolume '%s' has been deleted", key)) + return nil + } + if err != nil { + return err + } + zvCopy := zv.DeepCopy() + err = c.syncZV(zvCopy) + return err +} + +// enqueueZV takes a ZFSVolume resource and converts it into a namespace/name +// string which is then put onto the work queue. This method should *not* be +// passed resources of any type other than ZFSVolume. 
+func (c *ZVController) enqueueZV(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + runtime.HandleError(err) + return + } + c.workqueue.Add(key) + +} + +// synZV is the function which tries to converge to a desired state for the +// ZFSVolume +func (c *ZVController) syncZV(zv *apis.ZFSVolume) error { + var err error + // ZFS Volume should be deleted. Check if deletion timestamp is set + if c.isDeletionCandidate(zv) { + err = zvol.DestroyZvol(zv) + if err == nil { + zvol.RemoveZvolFinalizer(zv) + } + } else { + err = zvol.SetZvolProp(zv) + } + return err +} + +// addZV is the add event handler for CstorVolumeClaim +func (c *ZVController) addZV(obj interface{}) { + zv, ok := obj.(*apis.ZFSVolume) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get zv object %#v", obj)) + return + } + + if zvol.NodeID != zv.Spec.OwnerNodeID { + return + } + // TODO(pawan) scheduler will schedule the volume + // on a node and populate the OwnerNodeID accordingly. + // We need to create the zfs volume in that case. + logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name) + //c.enqueueZV(zv) +} + +// updateZV is the update event handler for CstorVolumeClaim +func (c *ZVController) updateZV(oldObj, newObj interface{}) { + + newZV, ok := newObj.(*apis.ZFSVolume) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get zv object %#v", newZV)) + return + } + + if zvol.NodeID != newZV.Spec.OwnerNodeID { + return + } + + oldZV, ok := oldObj.(*apis.ZFSVolume) + if zvol.PropertyChanged(oldZV, newZV) || + c.isDeletionCandidate(newZV) { + logrus.Infof("Got update event for ZV %s/%s", newZV.Spec.PoolName, newZV.Name) + c.enqueueZV(newZV) + } +} + +// deleteZV is the delete event handler for CstorVolumeClaim +func (c *ZVController) deleteZV(obj interface{}) { + zv, ok := obj.(*apis.ZFSVolume) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + zv, ok = tombstone.Obj.(*apis.ZFSVolume) + if !ok { + runtime.HandleError(fmt.Errorf("Tombstone contained object that is not a cstorvolumeclaim %#v", obj)) + return + } + } + + if zvol.NodeID != zv.Spec.OwnerNodeID { + return + } + + logrus.Infof("Got delete event for ZV %s/%s", zv.Spec.PoolName, zv.Name) + c.enqueueZV(zv) +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. 
+func (c *ZVController) Run(threadiness int, stopCh <-chan struct{}) error { + defer runtime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + logrus.Info("Starting ZV controller") + + // Wait for the k8s caches to be synced before starting workers + logrus.Info("Waiting for informer caches to sync") + if ok := cache.WaitForCacheSync(stopCh, c.zvSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + logrus.Info("Starting ZV workers") + // Launch worker to process ZV resources + // Threadiness will decide the number of workers you want to launch to process work items from queue + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + logrus.Info("Started ZV workers") + <-stopCh + logrus.Info("Shutting down ZV workers") + + return nil +} + +// runWorker is a long-running function that will continually call the +// processNextWorkItem function in order to read and process a message on the +// workqueue. +func (c *ZVController) runWorker() { + for c.processNextWorkItem() { + } +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the syncHandler. +func (c *ZVController) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the syncHandler, passing it the namespace/name string of the + // ZV resource to be synced. + if err := c.syncHandler(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + logrus.Infof("Successfully synced '%s'", key) + return nil + }(obj) + + if err != nil { + runtime.HandleError(err) + return true + } + + return true +} diff --git a/cmd/controller/controller_base.go b/cmd/controller/controller_base.go new file mode 100644 index 000000000..b7619960b --- /dev/null +++ b/cmd/controller/controller_base.go @@ -0,0 +1,136 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "github.com/Sirupsen/logrus" + + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme" + informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions" + listers "github.com/openebs/zfs-localpv/pkg/generated/lister/core/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +const controllerAgentName = "zfsvolume-controller" + +// ZVController is the controller implementation for ZV resources +type ZVController struct { + // kubeclientset is a standard kubernetes clientset + kubeclientset kubernetes.Interface + + // clientset is a openebs custom resource package generated for custom API group. + clientset clientset.Interface + + zvLister listers.ZFSVolumeLister + + // zvSynced is used for caches sync to get populated + zvSynced cache.InformerSynced + + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +// ZVControllerBuilder is the builder object for controller. +type ZVControllerBuilder struct { + ZVController *ZVController +} + +// NewZVControllerBuilder returns an empty instance of controller builder. +func NewZVControllerBuilder() *ZVControllerBuilder { + return &ZVControllerBuilder{ + ZVController: &ZVController{}, + } +} + +// withKubeClient fills kube client to controller object. +func (cb *ZVControllerBuilder) withKubeClient(ks kubernetes.Interface) *ZVControllerBuilder { + cb.ZVController.kubeclientset = ks + return cb +} + +// withOpenEBSClient fills openebs client to controller object. +func (cb *ZVControllerBuilder) withOpenEBSClient(cs clientset.Interface) *ZVControllerBuilder { + cb.ZVController.clientset = cs + return cb +} + +// withZVLister fills zv lister to controller object. +func (cb *ZVControllerBuilder) withZVLister(sl informers.SharedInformerFactory) *ZVControllerBuilder { + zvInformer := sl.Openebs().V1alpha1().ZFSVolumes() + cb.ZVController.zvLister = zvInformer.Lister() + return cb +} + +// withZVSynced adds object sync information in cache to controller object. +func (cb *ZVControllerBuilder) withZVSynced(sl informers.SharedInformerFactory) *ZVControllerBuilder { + zvInformer := sl.Openebs().V1alpha1().ZFSVolumes() + cb.ZVController.zvSynced = zvInformer.Informer().HasSynced + return cb +} + +// withWorkqueue adds workqueue to controller object. 
+func (cb *ZVControllerBuilder) withWorkqueueRateLimiting() *ZVControllerBuilder { + cb.ZVController.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ZV") + return cb +} + +// withRecorder adds recorder to controller object. +func (cb *ZVControllerBuilder) withRecorder(ks kubernetes.Interface) *ZVControllerBuilder { + logrus.Infof("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(logrus.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + cb.ZVController.recorder = recorder + return cb +} + +// withEventHandler adds event handlers controller object. +func (cb *ZVControllerBuilder) withEventHandler(cvcInformerFactory informers.SharedInformerFactory) *ZVControllerBuilder { + cvcInformer := cvcInformerFactory.Openebs().V1alpha1().ZFSVolumes() + // Set up an event handler for when ZV resources change + cvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: cb.ZVController.addZV, + UpdateFunc: cb.ZVController.updateZV, + DeleteFunc: cb.ZVController.deleteZV, + }) + return cb +} + +// Build returns a controller instance. +func (cb *ZVControllerBuilder) Build() (*ZVController, error) { + err := openebsScheme.AddToScheme(scheme.Scheme) + if err != nil { + return nil, err + } + return cb.ZVController, nil +} diff --git a/cmd/controller/start.go b/cmd/controller/start.go new file mode 100644 index 000000000..5615452af --- /dev/null +++ b/cmd/controller/start.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "sync" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" + + "time" + + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/runtime/signals" +) + +var ( + masterURL string + kubeconfig string +) + +// Start starts the zfsvolume controller. 
+func Start(controllerMtx *sync.RWMutex) error { + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + // Get the in-cluster config + cfg, err := getClusterConfig(kubeconfig) + if err != nil { + return errors.Wrap(err, "error building kubeconfig") + } + + // Building Kubernetes Clientset + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return errors.Wrap(err, "error building kubernetes clientset") + } + + // Building OpenEBS Clientset + openebsClient, err := clientset.NewForConfig(cfg) + if err != nil { + return errors.Wrap(err, "error building openebs clientset") + } + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) + zvInformerFactory := informers.NewSharedInformerFactory(openebsClient, time.Second*30) + // The Build() fn of all controllers calls AddToScheme to add all types of this + // clientset into the given scheme. + // If multiple controllers happen to call AddToScheme at the same time, + // it causes a panic with a concurrent map access error. + // This lock is used to serialize the AddToScheme calls of all controllers. + controllerMtx.Lock() + + controller, err := NewZVControllerBuilder(). + withKubeClient(kubeClient). + withOpenEBSClient(openebsClient). + withZVSynced(zvInformerFactory). + withZVLister(zvInformerFactory). + withRecorder(kubeClient). + withEventHandler(zvInformerFactory). + withWorkqueueRateLimiting().Build() + + // Run() below is a blocking call, so we can't use defer to release the lock + controllerMtx.Unlock() + + if err != nil { + return errors.Wrapf(err, "error building controller instance") + } + + go kubeInformerFactory.Start(stopCh) + go zvInformerFactory.Start(stopCh) + + // Threadiness defines the number of workers to be launched in the Run function + return controller.Run(2, stopCh) +} + +// getClusterConfig returns the config for k8s. +func getClusterConfig(kubeconfig string) (*rest.Config, error) { + cfg, err := rest.InClusterConfig() + if err != nil { + logrus.Errorf("Failed to get k8s Incluster config. %+v", err) + if kubeconfig == "" { + return nil, errors.Wrap(err, "kubeconfig is empty") + } + cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + if err != nil { + return nil, errors.Wrap(err, "error building kubeconfig") + } + } + return cfg, err +} diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 000000000..8e6d3b642 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,87 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + + "github.com/Sirupsen/logrus" + config "github.com/openebs/zfs-localpv/pkg/config" + "github.com/openebs/zfs-localpv/pkg/driver" + "github.com/openebs/zfs-localpv/pkg/version" + zvol "github.com/openebs/zfs-localpv/pkg/zfs" + "github.com/spf13/cobra" +) + +/* + * main routine to start the zfs-driver. The same + * binary is used for both the controller and the agent + * deployments; they are differentiated via the plugin + * command line argument. To start the controller, we have + * to pass --plugin=controller and to start it as agent, we have + * to pass --plugin=agent.
+ */ +func main() { + _ = flag.CommandLine.Parse([]string{}) + var config = config.Default() + + cmd := &cobra.Command{ + Use: "zfs-driver", + Short: "driver for provisioning zfs volume", + Long: `provisions and deprovisions the volume + on the node which has zfs pool configured.`, + Run: func(cmd *cobra.Command, args []string) { + run(config) + }, + } + + cmd.Flags().AddGoFlagSet(flag.CommandLine) + + cmd.PersistentFlags().StringVar( + &config.NodeID, "nodeid", zvol.NodeID, "NodeID to identify the node running this driver", + ) + + cmd.PersistentFlags().StringVar( + &config.Version, "version", "", "Displays driver version", + ) + + cmd.PersistentFlags().StringVar( + &config.Endpoint, "endpoint", "unix://csi/csi.sock", "CSI endpoint", + ) + + cmd.PersistentFlags().StringVar( + &config.DriverName, "name", "zfs-localpv", "Name of this driver", + ) + + cmd.PersistentFlags().StringVar( + &config.PluginType, "plugin", "csi-plugin", "Type of this driver i.e. controller or node", + ) + + err := cmd.Execute() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "%s", err.Error()) + os.Exit(1) + } +} + +func run(config *config.Config) { + if config.Version == "" { + config.Version = version.Current() + } + + logrus.Infof("%s - %s", version.Current(), version.GetGitCommit()) + logrus.Infof( + "DriverName: %s Plugin: %s EndPoint: %s NodeID: %s", + config.DriverName, + config.PluginType, + config.Endpoint, + config.NodeID, + ) + + err := driver.New(config).Run() + if err != nil { + log.Fatalln(err) + } + os.Exit(0) +} diff --git a/deploy/sample/fio.yaml b/deploy/sample/fio.yaml new file mode 100644 index 000000000..041273be1 --- /dev/null +++ b/deploy/sample/fio.yaml @@ -0,0 +1,54 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-zfspv +allowVolumeExpansion: true +parameters: + blocksize: "4k" + compression: "on" + dedup: "on" + thinprovision: "yes" + poolname: "zfspv-pool" +provisioner: openebs.io/zfs +volumeBindingMode: WaitForFirstConsumer +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: csi-zfspv +spec: + storageClassName: openebs-zfspv + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: fio +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - gke-pawan-zfspv-default-pool-1813a371-6nhl + restartPolicy: Never + containers: + - name: perfrunner + image: openebs/tests-fio + command: ["/bin/bash"] + args: ["-c", "while true ;do sleep 50; done"] + volumeMounts: + - mountPath: /datadir + name: fio-vol + tty: true + volumes: + - name: fio-vol + persistentVolumeClaim: + claimName: csi-zfspv diff --git a/deploy/sample/zfspvcr.yaml b/deploy/sample/zfspvcr.yaml new file mode 100644 index 000000000..6e2f79415 --- /dev/null +++ b/deploy/sample/zfspvcr.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +items: +- apiVersion: openebs.io/v1alpha1 + kind: ZFSVolume + metadata: + name: pvc-a6855135-c70e-11e9-8fa2-42010a80012d + namespace: openebs + spec: + blocksize: 4k + capacity: "4294967296" + compression: "on" + dedup: "on" + ownerNodeID: gke-pawan-zfspv-default-pool-354050c7-wl8v + poolName: zfspv-pool + thinprovison: "yes" +kind: List +metadata: + resourceVersion: "" + selfLink: "" diff --git a/deploy/zfs-operator.yaml b/deploy/zfs-operator.yaml new file mode 100644 index 000000000..ac3f56eea --- /dev/null +++ b/deploy/zfs-operator.yaml @@ -0,0 +1,457 @@ +# This manifest 
deploys the OpenEBS ZFS control plane components, +# with associated CRs & RBAC rules. + +# Create the OpenEBS namespace +apiVersion: v1 +kind: Namespace +metadata: + name: openebs +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: zfsvolumes.openebs.io +spec: + group: openebs.io + version: v1alpha1 + scope: Namespaced + names: + plural: zfsvolumes + singular: zfsvolume + kind: ZFSVolume + shortNames: + - zvol + - zv + additionalPrinterColumns: + - JSONPath: .spec.ownerNodeID + name: Node + description: Node where the volume is created + type: string + - JSONPath: .spec.capacity + name: Size + description: Size of the volume + type: string + +--- +############################################## +########### ############ +########### Controller plugin ############ +########### ############ +############################################## + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: openebs-zfs-controller-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-provisioner-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["*"] + resources: ["zfsvolumes"] + verbs: ["*"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-provisioner-binding +subjects: + - kind: ServiceAccount + name: openebs-zfs-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: openebs-zfs-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: openebs-zfs-controller + namespace: kube-system +spec: + selector: + matchLabels: + app: openebs-zfs-controller + role: openebs-zfs + serviceName: "openebs-zfs" + replicas: 1 + template: + metadata: + labels: + app: openebs-zfs-controller + role: openebs-zfs + spec: + priorityClassName: system-cluster-critical + serviceAccount: openebs-zfs-controller-sa + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.0.1 + imagePullPolicy: IfNotPresent + args: + - "--provisioner=openebs.io/zfs" + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--feature-gates=Topology=true" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v1.0.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-cluster-driver-registrar + image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1 + args: + - "--v=5" + - "--driver-requires-attachment=false" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: openebs-zfs-plugin + image: 
quay.io/openebs/zfs-driver:ci + imagePullPolicy: Always + env: + - name: OPENEBS_CONTROLLER_DRIVER + value: controller + - name: OPENEBS_CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: OPENEBS_NAMESPACE + value: openebs + args : + - "--endpoint=$(OPENEBS_CSI_ENDPOINT)" + - "--plugin=$(OPENEBS_CONTROLLER_DRIVER)" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + volumes: + - name: socket-dir + emptyDir: {} +--- + +############################## CSI- Attacher ####################### +# Attacher must be able to work with PVs, nodes and VolumeAttachments + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments", "csinodes"] + verbs: ["get", "list", "watch", "update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-attacher-binding +subjects: + - kind: ServiceAccount + name: openebs-zfs-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: openebs-zfs-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-snapshotter-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-snapshotter-binding +subjects: + - kind: ServiceAccount + name: openebs-zfs-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: openebs-zfs-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-cluster-driver-registrar-role +rules: + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csidrivers"] + verbs: ["create", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-cluster-driver-registrar-binding +subjects: + - kind: ServiceAccount + name: openebs-zfs-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: openebs-zfs-cluster-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +######################################## +########### ############ 
+########### Node plugin ############ +########### ############ +######################################## + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: openebs-zfs-node-sa + namespace: kube-system + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-driver-registrar-role +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumes", "nodes", "services"] + verbs: ["get", "list"] + - apiGroups: ["*"] + resources: ["zfsvolumes"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-zfs-driver-registrar-binding +subjects: + - kind: ServiceAccount + name: openebs-zfs-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: openebs-zfs-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: DaemonSet +apiVersion: apps/v1beta2 +metadata: + name: openebs-zfs-node + namespace: kube-system +spec: + selector: + matchLabels: + app: openebs-zfs-node + template: + metadata: + labels: + app: openebs-zfs-node + role: openebs-zfs + spec: + priorityClassName: system-node-critical + serviceAccount: openebs-zfs-node-sa + hostNetwork: true + containers: + - name: csi-node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/zfs-localpv /registration/zfs-localpv-reg.sock"] + env: + - name: ADDRESS + value: /plugin/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/zfs-localpv/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_DRIVER + value: openebs-zfs + volumeMounts: + - name: plugin-dir + mountPath: /plugin + - name: registration-dir + mountPath: /registration + - name: openebs-zfs-plugin + securityContext: + privileged: true + capabilities: + add: ["CAP_MKNOD", "CAP_SYS_ADMIN", "SYS_ADMIN"] + allowPrivilegeEscalation: true + image: quay.io/openebs/zfs-driver:ci + imagePullPolicy: Always + args: + - "--nodeid=$(OPENEBS_NODE_ID)" + - "--endpoint=$(OPENEBS_CSI_ENDPOINT)" + - "--plugin=$(OPENEBS_NODE_DRIVER)" + env: + - name: OPENEBS_NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OPENEBS_CSI_ENDPOINT + value: unix:///plugin/csi.sock + - name: OPENEBS_NODE_DRIVER + value: agent + - name: OPENEBS_NAMESPACE + value: openebs + volumeMounts: + - name: plugin-dir + mountPath: /plugin + - name: device-dir + mountPath: /dev + - name: zfs-bin + mountPath: /sbin/zfs + - name: lib1 + mountPath: /lib/libzpool.so.2 + - name: lib2 + mountPath: /lib/libzfs_core.so.1 + - name: lib3 + mountPath: /lib/libzfs.so.2 + - name: lib4 + mountPath: /lib/libuutil.so.1 + - name: lib5 + mountPath: /lib/libnvpair.so.1 + - name: pods-mount-dir + mountPath: /var/lib/kubelet/pods + # needed so that any mounts setup inside this container are + # propagated back to the host machine. 
+ mountPropagation: "Bidirectional" + volumes: + - name: device-dir + hostPath: + path: /dev + type: Directory + - name: zfs-bin + hostPath: + path: /sbin/zfs + type: File + - name: lib1 + hostPath: + path: /lib/libzpool.so.2.0.0 + type: File + - name: lib2 + hostPath: + path: /lib/libzfs_core.so.1.0.0 + type: File + - name: lib3 + hostPath: + path: /lib/libzfs.so.2.0.0 + type: File + - name: lib4 + hostPath: + path: /lib/libuutil.so.1.0.1 + type: File + - name: lib5 + hostPath: + path: /lib/libnvpair.so.1.0.1 + type: File + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/zfs-localpv/ + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory +--- diff --git a/pkg/apis/openebs.io/core/v1alpha1/doc.go b/pkg/apis/openebs.io/core/v1alpha1/doc.go new file mode 100644 index 000000000..c182efa32 --- /dev/null +++ b/pkg/apis/openebs.io/core/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register + +// Package v1alpha1 is the API version +// +groupName=openebs.io +package v1alpha1 diff --git a/pkg/apis/openebs.io/core/v1alpha1/register.go b/pkg/apis/openebs.io/core/v1alpha1/register.go new file mode 100644 index 000000000..23410ff62 --- /dev/null +++ b/pkg/apis/openebs.io/core/v1alpha1/register.go @@ -0,0 +1,77 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used +// to register custom resources +// +// NOTE: +// This variable name should not be changed +var SchemeGroupVersion = schema.GroupVersion{ + Group: "openebs.io", + Version: "v1alpha1", +} + +// Resource takes an unqualified resource and +// returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion. + WithResource(resource). 
+ GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder + // with scheme init functions to run + // for this API package + SchemeBuilder runtime.SchemeBuilder + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that + // registers this API group & version to + // a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions + // here. This registration of generated functions + // takes place in the generated files. + // + // NOTE: + // This separation makes the code compile even + // when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes( + SchemeGroupVersion, + &ZFSVolume{}, + &ZFSVolumeList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/apis/openebs.io/core/v1alpha1/zfsvolume.go b/pkg/apis/openebs.io/core/v1alpha1/zfsvolume.go new file mode 100644 index 000000000..337872f42 --- /dev/null +++ b/pkg/apis/openebs.io/core/v1alpha1/zfsvolume.go @@ -0,0 +1,100 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=zfsvolume + +// ZFSVolume represents a ZFS based volume +type ZFSVolume struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VolumeInfo `json:"spec"` +} + +// MountInfo contains the volume related info +// for all types of volumes in ZFSVolume +type MountInfo struct { + // FSType of a volume will specify the + // format type - ext4(default), xfs of PV + FSType string `json:"fsType"` + + // AccessMode of a volume will hold the + // access mode of the volume + AccessModes []string `json:"accessModes"` + + // MountPath of the volume will hold the + // path on which the volume is mounted + // on that node + MountPath string `json:"mountPath"` + + // ReadOnly specifies if the volume needs + // to be mounted in ReadOnly mode + ReadOnly bool `json:"readOnly"` + + // MountOptions specifies the options with + // which mount needs to be attempted + MountOptions []string `json:"mountOptions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=csivolumes + +// ZFSVolumeList is a list of ZFSVolume resources +type ZFSVolumeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ZFSVolume `json:"items"` +} + +// VolumeInfo contains the volume related info +// for all types of volumes in ZFSVolume +type VolumeInfo struct { + // OwnerNodeID is the Node ID which + // is the owner of this Volume + OwnerNodeID string `json:"ownerNodeID"` + + // poolName specifies the name of the + // pool where this volume should be created + PoolName string `json:"poolName"` + + 
// Capacity of the volume + Capacity string `json:"capacity"` + + // BlockSize specifies the block size + // which we should use to create the zvol + BlockSize string `json:"blocksize"` + + // Compression specifies whether compression + // should be enabled on the zvol + Compression string `json:"compression"` + + // Dedup specifies whether deduplication + // should be enabled on the zvol + Dedup string `json:"dedup"` + + // ThinProvision specifies whether the volume + // should be thin provisioned or not + ThinProvision string `json:"thinProvison"` +} diff --git a/pkg/apis/openebs.io/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/openebs.io/core/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..2399e1053 --- /dev/null +++ b/pkg/apis/openebs.io/core/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,127 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MountInfo) DeepCopyInto(out *MountInfo) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountInfo. +func (in *MountInfo) DeepCopy() *MountInfo { + if in == nil { + return nil + } + out := new(MountInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeInfo) DeepCopyInto(out *VolumeInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeInfo. +func (in *VolumeInfo) DeepCopy() *VolumeInfo { + if in == nil { + return nil + } + out := new(VolumeInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSVolume) DeepCopyInto(out *ZFSVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSVolume. +func (in *ZFSVolume) DeepCopy() *ZFSVolume { + if in == nil { + return nil + } + out := new(ZFSVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
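To connect the VolumeInfo fields above with the generated deep-copy helpers that follow, here is a hedged sketch that builds a ZFSVolume in Go (loosely mirroring the zfspvcr.yaml sample) and shows that mutating a DeepCopy() leaves the original untouched. Names such as pvc-example and node-1 are placeholders.

package main

import (
	"fmt"

	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	vol := &apis.ZFSVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pvc-example", Namespace: "openebs"},
		Spec: apis.VolumeInfo{
			OwnerNodeID:   "node-1",
			PoolName:      "zfspv-pool",
			Capacity:      "4294967296",
			BlockSize:     "4k",
			Compression:   "on",
			Dedup:         "off",
			ThinProvision: "yes",
		},
	}

	// DeepCopy returns an independent object; editing the copy does not touch vol.
	clone := vol.DeepCopy()
	clone.Spec.Compression = "off"
	fmt.Println(vol.Spec.Compression, clone.Spec.Compression) // on off
}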
+func (in *ZFSVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSVolumeList) DeepCopyInto(out *ZFSVolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ZFSVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSVolumeList. +func (in *ZFSVolumeList) DeepCopy() *ZFSVolumeList { + if in == nil { + return nil + } + out := new(ZFSVolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZFSVolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/pkg/builder/build.go b/pkg/builder/build.go new file mode 100644 index 000000000..506a2f806 --- /dev/null +++ b/pkg/builder/build.go @@ -0,0 +1,208 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" + "github.com/openebs/zfs-localpv/pkg/common/errors" +) + +// Builder is the builder object for ZFSVolume +type Builder struct { + volume *ZFSVolume + errs []error +} + +// NewBuilder returns new instance of Builder +func NewBuilder() *Builder { + return &Builder{ + volume: &ZFSVolume{ + Object: &apis.ZFSVolume{}, + }, + } +} + +// BuildFrom returns new instance of Builder +// from the provided api instance +func BuildFrom(volume *apis.ZFSVolume) *Builder { + if volume == nil { + b := NewBuilder() + b.errs = append( + b.errs, + errors.New("failed to build volume object: nil volume"), + ) + return b + } + return &Builder{ + volume: &ZFSVolume{ + Object: volume, + }, + } +} + +// WithNamespace sets the namespace of csi volume +func (b *Builder) WithNamespace(namespace string) *Builder { + if namespace == "" { + b.errs = append( + b.errs, + errors.New( + "failed to build csi volume object: missing namespace", + ), + ) + return b + } + b.volume.Object.Namespace = namespace + return b +} + +// WithName sets the name of csi volume +func (b *Builder) WithName(name string) *Builder { + if name == "" { + b.errs = append( + b.errs, + errors.New( + "failed to build csi volume object: missing name", + ), + ) + return b + } + b.volume.Object.Name = name + return b +} + +// WithCapacity sets the Capacity of csi volume by converting string +// capacity into Quantity +func (b *Builder) WithCapacity(capacity string) *Builder { + if capacity == "" { + b.errs = append( + b.errs, + errors.New( + "failed to build csi volume object: missing capacity", + ), + ) + return b + } + b.volume.Object.Spec.Capacity = capacity + return b +} + +// WithCompression sets compression of CStorVolumeClaim +func (b *Builder) WithCompression(compression string) *Builder { + + comp := "off" + if compression == "on" { + comp = "on" + } + b.volume.Object.Spec.Compression = comp + return b +} + +// WithDedup sets compression of CStorVolumeClaim +func (b *Builder) WithDedup(dedup string) *Builder { + + dp := "off" + if dedup == "on" { + dp = "on" + } + b.volume.Object.Spec.Dedup = dp + return b +} + +// WithThinProv sets compression of CStorVolumeClaim +func (b *Builder) WithThinProv(thinprov string) *Builder { + + tp := "no" + if thinprov == "yes" { + tp = "yes" + } + b.volume.Object.Spec.ThinProvision = tp + return b +} + +// WithBlockSize sets blocksize of CStorVolumeClaim +func (b *Builder) WithBlockSize(blockSize string) *Builder { + + bs := "4k" + if len(blockSize) > 0 { + bs = blockSize + } + b.volume.Object.Spec.BlockSize = bs + return b +} + +func (b *Builder) WithPoolName(pool string) *Builder { + if pool == "" { + b.errs = append( + b.errs, + errors.New( + "failed to build csi volume object: missing pool name", + ), + ) + return b + } + b.volume.Object.Spec.PoolName = pool + return b +} + +func (b *Builder) WithNodename(name string) *Builder { + if name == "" { + b.errs = append( + b.errs, + errors.New( + "failed to build csi volume object: missing node name", + ), + ) + return b + } + b.volume.Object.Spec.OwnerNodeID = name + return b +} + +// WithLabels merges existing labels if any +// with the ones that are provided here +func (b *Builder) WithLabels(labels map[string]string) *Builder { + if len(labels) == 0 { + b.errs = append( + b.errs, + errors.New("failed to build cstorvolume object: missing labels"), + ) + return b + } + + if b.volume.Object.Labels == nil { + b.volume.Object.Labels = map[string]string{} + } 
+ + for key, value := range labels { + b.volume.Object.Labels[key] = value + } + return b +} + +func (b *Builder) WithFinalizer(finalizer []string) *Builder { + b.volume.Object.Finalizers = append(b.volume.Object.Finalizers, finalizer...) + return b +} + +// Build returns csi volume API object +func (b *Builder) Build() (*apis.ZFSVolume, error) { + if len(b.errs) > 0 { + return nil, errors.Errorf("%+v", b.errs) + } + + return b.volume.Object, nil +} diff --git a/pkg/builder/buildlist.go b/pkg/builder/buildlist.go new file mode 100644 index 000000000..ad240d73c --- /dev/null +++ b/pkg/builder/buildlist.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" +) + +// ListBuilder enables building an instance of +// ZFSVolumeList +type ListBuilder struct { + list *apis.ZFSVolumeList + filters predicateList +} + +// NewListBuilder returns a new instance of ListBuilder +func NewListBuilder() *ListBuilder { + return &ListBuilder{ + list: &apis.ZFSVolumeList{}, + } +} + +// ListBuilderFrom returns a new instance of +// ListBuilder from API list instance +func ListBuilderFrom(vols apis.ZFSVolumeList) *ListBuilder { + b := &ListBuilder{list: &apis.ZFSVolumeList{}} + if len(vols.Items) == 0 { + return b + } + + b.list.Items = append(b.list.Items, vols.Items...) + return b +} + +// List returns the list of pod +// instances that was built by this +// builder +func (b *ListBuilder) List() *apis.ZFSVolumeList { + if b.filters == nil || len(b.filters) == 0 { + return b.list + } + + filtered := &apis.ZFSVolumeList{} + for _, vol := range b.list.Items { + vol := vol // pin it + if b.filters.all(From(&vol)) { + filtered.Items = append(filtered.Items, vol) + } + } + return filtered +} + +// WithFilter add filters on which the pod +// has to be filtered +func (b *ListBuilder) WithFilter(pred ...Predicate) *ListBuilder { + b.filters = append(b.filters, pred...) + return b +} diff --git a/pkg/builder/kubernetes.go b/pkg/builder/kubernetes.go new file mode 100644 index 000000000..5ff1d89f2 --- /dev/null +++ b/pkg/builder/kubernetes.go @@ -0,0 +1,427 @@ +// Copyright © 2019 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
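For reference, a hedged usage sketch of the Builder defined above: the With* setters validate their inputs and record errors, which Build() then surfaces as a single error. The concrete values are placeholders borrowed loosely from the sample manifests.

package main

import (
	"fmt"

	"github.com/openebs/zfs-localpv/pkg/builder"
)

func main() {
	// chain the setters; validation failures are collected and returned by Build()
	vol, err := builder.NewBuilder().
		WithName("pvc-example").
		WithNamespace("openebs").
		WithCapacity("4294967296").
		WithPoolName("zfspv-pool").
		WithNodename("node-1").
		WithBlockSize("4k").
		WithCompression("on").
		WithDedup("off").
		WithThinProv("yes").
		Build()
	if err != nil {
		fmt.Println("failed to build ZFSVolume:", err)
		return
	}
	fmt.Println("built ZFSVolume", vol.Name, "on pool", vol.Spec.PoolName)
}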
+ +package builder + +import ( + "encoding/json" + + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" + client "github.com/openebs/zfs-localpv/pkg/common/kubernetes/client" + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// getClientsetFn is a typed function that +// abstracts fetching of internal clientset +type getClientsetFn func() (clientset *clientset.Clientset, err error) + +// getClientsetFromPathFn is a typed function that +// abstracts fetching of clientset from kubeConfigPath +type getClientsetForPathFn func(kubeConfigPath string) ( + clientset *clientset.Clientset, + err error, +) + +// createFn is a typed function that abstracts +// creating csi volume instance +type createFn func( + cs *clientset.Clientset, + upgradeResultObj *apis.ZFSVolume, + namespace string, +) (*apis.ZFSVolume, error) + +// getFn is a typed function that abstracts +// fetching a csi volume instance +type getFn func( + cli *clientset.Clientset, + name, + namespace string, + opts metav1.GetOptions, +) (*apis.ZFSVolume, error) + +// listFn is a typed function that abstracts +// listing of csi volume instances +type listFn func( + cli *clientset.Clientset, + namespace string, + opts metav1.ListOptions, +) (*apis.ZFSVolumeList, error) + +// delFn is a typed function that abstracts +// deleting a csi volume instance +type delFn func( + cli *clientset.Clientset, + name, + namespace string, + opts *metav1.DeleteOptions, +) error + +// updateFn is a typed function that abstracts +// updating csi volume instance +type updateFn func( + cs *clientset.Clientset, + vol *apis.ZFSVolume, + namespace string, +) (*apis.ZFSVolume, error) + +// Kubeclient enables kubernetes API operations +// on csi volume instance +type Kubeclient struct { + // clientset refers to csi volume's + // clientset that will be responsible to + // make kubernetes API calls + clientset *clientset.Clientset + + kubeConfigPath string + + // namespace holds the namespace on which + // kubeclient has to operate + namespace string + + // functions useful during mocking + getClientset getClientsetFn + getClientsetForPath getClientsetForPathFn + get getFn + list listFn + del delFn + create createFn + update updateFn +} + +// KubeclientBuildOption defines the abstraction +// to build a kubeclient instance +type KubeclientBuildOption func(*Kubeclient) + +// defaultGetClientset is the default implementation to +// get kubernetes clientset instance +func defaultGetClientset() (clients *clientset.Clientset, err error) { + + config, err := client.GetConfig(client.New()) + if err != nil { + return nil, err + } + + return clientset.NewForConfig(config) + +} + +// defaultGetClientsetForPath is the default implementation to +// get kubernetes clientset instance based on the given +// kubeconfig path +func defaultGetClientsetForPath( + kubeConfigPath string, +) (clients *clientset.Clientset, err error) { + config, err := client.GetConfig( + client.New(client.WithKubeConfigPath(kubeConfigPath))) + if err != nil { + return nil, err + } + + return clientset.NewForConfig(config) +} + +// defaultGet is the default implementation to get +// a csi volume instance in kubernetes cluster +func defaultGet( + cli *clientset.Clientset, + name, namespace string, + opts metav1.GetOptions, +) (*apis.ZFSVolume, error) { + return cli.OpenebsV1alpha1(). + ZFSVolumes(namespace). 
+ Get(name, opts) +} + +// defaultList is the default implementation to list +// csi volume instances in kubernetes cluster +func defaultList( + cli *clientset.Clientset, + namespace string, + opts metav1.ListOptions, +) (*apis.ZFSVolumeList, error) { + return cli.OpenebsV1alpha1(). + ZFSVolumes(namespace). + List(opts) +} + +// defaultCreate is the default implementation to delete +// a csi volume instance in kubernetes cluster +func defaultDel( + cli *clientset.Clientset, + name, namespace string, + opts *metav1.DeleteOptions, +) error { + deletePropagation := metav1.DeletePropagationForeground + opts.PropagationPolicy = &deletePropagation + err := cli.OpenebsV1alpha1(). + ZFSVolumes(namespace). + Delete(name, opts) + return err +} + +// defaultCreate is the default implementation to create +// a csi volume instance in kubernetes cluster +func defaultCreate( + cli *clientset.Clientset, + vol *apis.ZFSVolume, + namespace string, +) (*apis.ZFSVolume, error) { + return cli.OpenebsV1alpha1(). + ZFSVolumes(namespace). + Create(vol) +} + +// defaultUpdate is the default implementation to update +// a csi volume instance in kubernetes cluster +func defaultUpdate( + cli *clientset.Clientset, + vol *apis.ZFSVolume, + namespace string, +) (*apis.ZFSVolume, error) { + return cli.OpenebsV1alpha1(). + ZFSVolumes(namespace). + Update(vol) +} + +// withDefaults sets the default options +// of kubeclient instance +func (k *Kubeclient) withDefaults() { + if k.getClientset == nil { + k.getClientset = defaultGetClientset + } + if k.getClientsetForPath == nil { + k.getClientsetForPath = defaultGetClientsetForPath + } + if k.get == nil { + k.get = defaultGet + } + if k.list == nil { + k.list = defaultList + } + if k.del == nil { + k.del = defaultDel + } + if k.create == nil { + k.create = defaultCreate + } + if k.update == nil { + k.update = defaultUpdate + } +} + +// WithClientSet sets the kubernetes client against +// the kubeclient instance +func WithClientSet(c *clientset.Clientset) KubeclientBuildOption { + return func(k *Kubeclient) { + k.clientset = c + } +} + +// WithNamespace sets the kubernetes client against +// the provided namespace +func WithNamespace(namespace string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.namespace = namespace + } +} + +// WithNamespace sets the provided namespace +// against this Kubeclient instance +func (k *Kubeclient) WithNamespace(namespace string) *Kubeclient { + k.namespace = namespace + return k +} + +// WithKubeConfigPath sets the kubernetes client +// against the provided path +func WithKubeConfigPath(path string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.kubeConfigPath = path + } +} + +// NewKubeclient returns a new instance of +// kubeclient meant for csi volume operations +func NewKubeclient(opts ...KubeclientBuildOption) *Kubeclient { + k := &Kubeclient{} + for _, o := range opts { + o(k) + } + + k.withDefaults() + return k +} + +func (k *Kubeclient) getClientsetForPathOrDirect() ( + *clientset.Clientset, + error, +) { + if k.kubeConfigPath != "" { + return k.getClientsetForPath(k.kubeConfigPath) + } + + return k.getClientset() +} + +// getClientOrCached returns either a new instance +// of kubernetes client or its cached copy +func (k *Kubeclient) getClientOrCached() (*clientset.Clientset, error) { + if k.clientset != nil { + return k.clientset, nil + } + + c, err := k.getClientsetForPathOrDirect() + if err != nil { + return nil, + errors.Wrapf( + err, + "failed to get clientset", + ) + } + + k.clientset = c + return 
k.clientset, nil +} + +// Create creates a csi volume instance +// in kubernetes cluster +func (k *Kubeclient) Create(vol *apis.ZFSVolume) (*apis.ZFSVolume, error) { + if vol == nil { + return nil, + errors.New( + "failed to create csivolume: nil vol object", + ) + } + cs, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to create csi volume {%s} in namespace {%s}", + vol.Name, + k.namespace, + ) + } + + return k.create(cs, vol, k.namespace) +} + +// Get returns csi volume object for given name +func (k *Kubeclient) Get( + name string, + opts metav1.GetOptions, +) (*apis.ZFSVolume, error) { + if name == "" { + return nil, + errors.New( + "failed to get csi volume: missing csi volume name", + ) + } + + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to get csi volume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return k.get(cli, name, k.namespace, opts) +} + +// GetRaw returns csi volume instance +// in bytes +func (k *Kubeclient) GetRaw( + name string, + opts metav1.GetOptions, +) ([]byte, error) { + if name == "" { + return nil, errors.New( + "failed to get raw csi volume: missing vol name", + ) + } + csiv, err := k.Get(name, opts) + if err != nil { + return nil, errors.Wrapf( + err, + "failed to get csi volume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return json.Marshal(csiv) +} + +// List returns a list of csi volume +// instances present in kubernetes cluster +func (k *Kubeclient) List(opts metav1.ListOptions) (*apis.ZFSVolumeList, error) { + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to list csi volumes in namespace {%s}", + k.namespace, + ) + } + + return k.list(cli, k.namespace, opts) +} + +// Delete deletes the csi volume from +// kubernetes +func (k *Kubeclient) Delete(name string) error { + if name == "" { + return errors.New( + "failed to delete csivolume: missing vol name", + ) + } + cli, err := k.getClientOrCached() + if err != nil { + return errors.Wrapf( + err, + "failed to delete csivolume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return k.del(cli, name, k.namespace, &metav1.DeleteOptions{}) +} + +// Update updates this csi volume instance +// against kubernetes cluster +func (k *Kubeclient) Update(vol *apis.ZFSVolume) (*apis.ZFSVolume, error) { + if vol == nil { + return nil, + errors.New( + "failed to update csivolume: nil vol object", + ) + } + + cs, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to update csivolume {%s} in namespace {%s}", + vol.Name, + vol.Namespace, + ) + } + + return k.update(cs, vol, k.namespace) +} diff --git a/pkg/builder/volume.go b/pkg/builder/volume.go new file mode 100644 index 000000000..b053c517a --- /dev/null +++ b/pkg/builder/volume.go @@ -0,0 +1,115 @@ +// Copyright © 2019 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
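A short hedged sketch of how this Kubeclient might be used by the control plane: construct it with the namespace option (config resolution falls back to the defaults wired in by withDefaults) and then operate on ZFSVolume objects. The volume name is a placeholder.

package main

import (
	"fmt"

	"github.com/openebs/zfs-localpv/pkg/builder"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// the default clientset getters resolve kubernetes config via the common client package
	kc := builder.NewKubeclient(builder.WithNamespace("openebs"))

	vol, err := kc.Get("pvc-example", metav1.GetOptions{})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("volume", vol.Name, "is owned by node", vol.Spec.OwnerNodeID)
}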
+ +package builder + +import ( + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" +) + +// ZFSVolume is a wrapper over +// ZFSVolume API instance +type ZFSVolume struct { + Object *apis.ZFSVolume +} + +// From returns a new instance of +// csi volume +func From(vol *apis.ZFSVolume) *ZFSVolume { + return &ZFSVolume{ + Object: vol, + } +} + +// Predicate defines an abstraction +// to determine conditional checks +// against the provided pod instance +type Predicate func(*ZFSVolume) bool + +// PredicateList holds a list of predicate +type predicateList []Predicate + +// ZFSVolumeList holds the list +// of csi volume instances +type ZFSVolumeList struct { + List apis.ZFSVolumeList +} + +// Len returns the number of items present +// in the ZFSVolumeList +func (p *ZFSVolumeList) Len() int { + return len(p.List.Items) +} + +// all returns true if all the predicates +// succeed against the provided ZFSVolume +// instance +func (l predicateList) all(p *ZFSVolume) bool { + for _, pred := range l { + if !pred(p) { + return false + } + } + return true +} + +// HasLabels returns true if provided labels +// are present in the provided ZFSVolume instance +func HasLabels(keyValuePair map[string]string) Predicate { + return func(p *ZFSVolume) bool { + for key, value := range keyValuePair { + if !p.HasLabel(key, value) { + return false + } + } + return true + } +} + +// HasLabel returns true if provided label +// is present in the provided ZFSVolume instance +func (p *ZFSVolume) HasLabel(key, value string) bool { + val, ok := p.Object.GetLabels()[key] + if ok { + return val == value + } + return false +} + +// HasLabel returns true if provided label +// is present in the provided ZFSVolume instance +func HasLabel(key, value string) Predicate { + return func(p *ZFSVolume) bool { + return p.HasLabel(key, value) + } +} + +// IsNil returns true if the csi volume instance +// is nil +func (p *ZFSVolume) IsNil() bool { + return p.Object == nil +} + +// IsNil is predicate to filter out nil csi volume +// instances +func IsNil() Predicate { + return func(p *ZFSVolume) bool { + return p.IsNil() + } +} + +// GetAPIObject returns csi volume's API instance +func (p *ZFSVolume) GetAPIObject() *apis.ZFSVolume { + return p.Object +} diff --git a/pkg/common/env/env.go b/pkg/common/env/env.go new file mode 100644 index 000000000..1fb83e7cf --- /dev/null +++ b/pkg/common/env/env.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package env + +import ( + "os" + "strconv" + "strings" +) + +// EnvironmentSetter abstracts setting of environment variable +type EnvironmentSetter func(envKey string, value string) (err error) + +// EnvironmentGetter abstracts fetching value from an environment variable +type EnvironmentGetter func(envKey string) (value string) + +// EnvironmentLookup abstracts looking up an environment variable +type EnvironmentLookup func(envKey string) (value string, present bool) + +// Set sets the provided environment variable +// +// NOTE: +// This is an implementation of EnvironmentSetter +func Set(envKey string, value string) (err error) { + return os.Setenv(string(envKey), value) +} + +// Get fetches value from the provided environment variable +// +// NOTE: +// This is an implementation of EnvironmentGetter +func Get(envKey string) (value string) { + return getEnv(string(envKey)) +} + +// GetOrDefault fetches value from the provided environment variable +// which on empty returns the defaultValue +// NOTE: os.Getenv is used here instead of os.LookupEnv because it is +// not required to know if the environment variable is defined on the system +func GetOrDefault(e string, defaultValue string) (value string) { + envValue := Get(e) + if len(envValue) == 0 { + // ENV not defined or set to "" + return defaultValue + } else { + return envValue + } +} + +// Lookup looks up an environment variable +// +// NOTE: +// This is an implementation of EnvironmentLookup +func Lookup(envKey string) (value string, present bool) { + return lookupEnv(string(envKey)) +} + +// Truthy returns boolean based on the environment variable's value +// +// The lookup value can be truthy (i.e. 1, t, TRUE, true) or falsy (0, false, +// etc) based on strconv.ParseBool logic +func Truthy(envKey string) (truth bool) { + v, found := Lookup(envKey) + if !found { + return + } + truth, _ = strconv.ParseBool(v) + return +} + +// LookupOrFalse looks up an environment variable and returns a string "false" +// if environment variable is not present. It returns appropriate values for +// other cases. +func LookupOrFalse(envKey string) string { + val, present := Lookup(envKey) + if !present { + return "false" + } + return strings.TrimSpace(val) +} + +// getEnv fetches the provided environment variable's value +func getEnv(envKey string) (value string) { + return strings.TrimSpace(os.Getenv(envKey)) +} + +// lookupEnv looks up the provided environment variable +func lookupEnv(envKey string) (value string, present bool) { + value, present = os.LookupEnv(envKey) + value = strings.TrimSpace(value) + return +} diff --git a/pkg/common/errors/errors.go b/pkg/common/errors/errors.go new file mode 100644 index 000000000..f60adef31 --- /dev/null +++ b/pkg/common/errors/errors.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "fmt" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. 
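A brief hedged example of the env helpers above. OPENEBS_NAMESPACE is an environment variable used by the operator manifests in this patch, while OPENEBS_DEBUG is a made-up key used only for illustration.

package main

import (
	"fmt"

	env "github.com/openebs/zfs-localpv/pkg/common/env"
)

func main() {
	// falls back to "openebs" when OPENEBS_NAMESPACE is unset or empty
	ns := env.GetOrDefault("OPENEBS_NAMESPACE", "openebs")

	// Truthy accepts values such as 1, t, true (per strconv.ParseBool); OPENEBS_DEBUG is illustrative
	debug := env.Truthy("OPENEBS_DEBUG")

	fmt.Println("namespace:", ns, "debug:", debug)
}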
+func New(message string) error { + return &err{ + prefix: stackTraceMessagePrefix, + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &err{ + prefix: stackTraceMessagePrefix, + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// Wrap annotates err with a new message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + return &wrapper{wrapErrorMessagePrefix, message, err} +} + +// Wrapf annotates err with the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &wrapper{wrapErrorMessagePrefix, fmt.Sprintf(format, args...), err} +} + +// WithStack annotates err with a stack trace at the +// point WithStack was called. If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + stackTraceMessagePrefix, + err, + callers(), + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/pkg/common/errors/types.go b/pkg/common/errors/types.go new file mode 100644 index 000000000..bfa4863c3 --- /dev/null +++ b/pkg/common/errors/types.go @@ -0,0 +1,188 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "fmt" + "runtime" + + "github.com/pkg/errors" +) + +const ( + wrapErrorMessagePrefix string = " -- " + listErrorMessagePrefix string = " - " + stackTraceMessagePrefix string = " " +) + +// stack represents a stack of program counters. 
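To show how Errorf, Wrapf and Cause from this package compose, here is a hedged sketch; the openPool helper and all message strings are illustrative.

package main

import (
	"fmt"

	"github.com/openebs/zfs-localpv/pkg/common/errors"
)

// openPool is a stand-in that always fails, just to have something to wrap.
func openPool(name string) error {
	return errors.Errorf("pool {%s} not found", name)
}

func main() {
	err := errors.Wrapf(openPool("zfspv-pool"), "failed to provision volume {%s}", "pvc-example")

	fmt.Printf("%+v\n", err)       // prints the wrap chain along with the innermost stack frame
	fmt.Println(errors.Cause(err)) // unwraps to the innermost error
}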
+type stack []uintptr + +// callers returns stack of caller function +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// err implements error interface that has a message and stack +type err struct { + prefix string + msg string + *stack +} + +// Error is implementation of error interface +func (e *err) Error() string { return e.msg } + +// Format is implementation of Formater interface +func (e *err) Format(s fmt.State, verb rune) { + message := wrapErrorMessagePrefix + e.msg + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprint(s, message) + for i, pc := range *e.stack { + if i > 0 { + return + } + f := errors.Frame(pc) + fmt.Fprintf(s, "\n%s%+v", e.prefix, f) + } + return + } + fallthrough + case 's', 'q': + fmt.Fprint(s, message) + } +} + +// wrapper implements error interface that has a message and error +type wrapper struct { + prefix string + msg string + error +} + +// Error is implementation of error interface +func (w *wrapper) Error() string { return w.msg } + +// Cause is implementation of causer interface +func (w *wrapper) Cause() error { return w.error } + +// Format is implementation of Formater interface +func (w *wrapper) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.error) + fmt.Fprint(s, w.prefix+w.msg) + return + } + fallthrough + case 's', 'q': + fmt.Fprintf(s, "%s\n", w.error) + fmt.Fprint(s, w.prefix+w.msg) + } +} + +// withStack implements error interface that has a stack and error +type withStack struct { + prefix string + error + *stack +} + +// Format is implementation of Formater interface +func (ws *withStack) Format(s fmt.State, verb rune) { + message := wrapErrorMessagePrefix + fmt.Sprintf("%s", ws.error) + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprint(s, message) + for i, pc := range *ws.stack { + if i > 0 { + return + } + f := errors.Frame(pc) + fmt.Fprintf(s, "\n%s%+v", ws.prefix, f) + } + return + } + fallthrough + case 's', 'q': + fmt.Fprint(s, message) + } +} + +// Cause is implementation of causer interface +func (ws *withStack) Cause() error { return ws.error } + +// ErrorList is a wrapper over list of errors +// It implements error interface +type ErrorList struct { + Errors []error + msg string +} + +// Error is implementation of error interface +func (el *ErrorList) Error() string { + message := "" + for _, err := range el.Errors { + message += err.Error() + ":" + } + el.msg = message + return message +} + +// Format is implementation of Formater interface +func (el *ErrorList) Format(s fmt.State, verb rune) { + message := "" + for _, err := range el.Errors { + message += "\n" + listErrorMessagePrefix + err.Error() + } + fmt.Fprint(s, message) + +} + +// WithStack annotates ErrorList with a new message and +// stack trace of caller. +func (el *ErrorList) WithStack(message string) error { + if el == nil { + return nil + } + return &withStack{ + stackTraceMessagePrefix, + Wrap(el, message), + callers(), + } +} + +// WithStackf annotates ErrorList with the format specifier +// and stack trace of caller. 
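And a matching hedged sketch for ErrorList, which callers can use to aggregate several validation failures into one error and then annotate it with a caller stack via WithStackf (defined just below); all messages are illustrative.

package main

import (
	"fmt"

	"github.com/openebs/zfs-localpv/pkg/common/errors"
)

func main() {
	el := &errors.ErrorList{
		Errors: []error{
			errors.New("missing pool name"),
			errors.New("missing capacity"),
		},
	}

	// WithStackf wraps the aggregated errors with a formatted message
	// and records the caller's stack trace.
	err := el.WithStackf("failed to validate volume {%s}", "pvc-example")
	fmt.Printf("%+v\n", err)
}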
+func (el *ErrorList) WithStackf(format string, args ...interface{}) error { + if el == nil { + return nil + } + return &withStack{ + stackTraceMessagePrefix, + Wrapf(el, format, args...), + callers(), + } +} diff --git a/pkg/common/kubernetes/client/client.go b/pkg/common/kubernetes/client/client.go new file mode 100644 index 000000000..9366e8b94 --- /dev/null +++ b/pkg/common/kubernetes/client/client.go @@ -0,0 +1,243 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "strings" + + env "github.com/openebs/zfs-localpv/pkg/common/env" + "github.com/pkg/errors" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + // K8sMasterIPEnvironmentKey is the environment variable key used to + // determine the kubernetes master IP address + K8sMasterIPEnvironmentKey string = "OPENEBS_IO_K8S_MASTER" + + // KubeConfigEnvironmentKey is the environment variable key used to + // determine the kubernetes config + KubeConfigEnvironmentKey string = "OPENEBS_IO_KUBE_CONFIG" +) + +// getInClusterConfigFunc abstracts the logic to get +// kubernetes incluster config +// +// NOTE: +// typed function makes it simple to mock +type getInClusterConfigFunc func() (*rest.Config, error) + +// buildConfigFromFlagsFunc provides the abstraction to get +// kubernetes config from provided flags +// +// NOTE: +// typed function makes it simple to mock +type buildConfigFromFlagsFunc func(string, string) (*rest.Config, error) + +// GetConfigFunc provides the abstraction to get +// kubernetes config from provided client instance +// +// NOTE: +// typed function makes it simple to mock +type GetConfigFunc func(*Client) (*rest.Config, error) + +// GetConfig returns kubernetes config instance +// +// NOTE: +// This is an implementation of GetConfigFunc +func GetConfig(c *Client) (*rest.Config, error) { + if c == nil { + return nil, errors.New("failed to get kubernetes config: nil client was provided") + } + return c.GetConfigForPathOrDirect() +} + +// getKubeMasterIPFunc provides the abstraction to get +// kubernetes master IP address +// +// NOTE: +// typed function makes it simple to mock +type getKubeMasterIPFunc func(string) string + +// getKubeConfigPathFunc provides the abstraction to get +// kubernetes config path +// +// NOTE: +// typed function makes it simple to mock +type getKubeConfigPathFunc func(string) string + +// getKubernetesDynamicClientFunc provides the abstraction to get +// dynamic kubernetes clientset +// +// NOTE: +// typed function makes it simple to mock +type getKubernetesDynamicClientFunc func(*rest.Config) (dynamic.Interface, error) + +// getKubernetesClientsetFunc provides the abstraction to get +// kubernetes clientset +// +// NOTE: +// typed function makes it simple to mock +type getKubernetesClientsetFunc func(*rest.Config) (*kubernetes.Clientset, error) + +// Client provides common kuberenetes client operations +type Client struct { + IsInCluster bool // flag 
to let client point to its own cluster + KubeConfigPath string // kubeconfig path to get kubernetes clientset + + // Below functions are useful during mock + + // handle to get in cluster config + getInClusterConfig getInClusterConfigFunc + + // handle to get desired kubernetes config + buildConfigFromFlags buildConfigFromFlagsFunc + + // handle to get kubernetes clienset + getKubernetesClientset getKubernetesClientsetFunc + + // handle to get dynamic kubernetes clientset + getKubernetesDynamicClient getKubernetesDynamicClientFunc + + // handle to get kubernetes master IP + getKubeMasterIP getKubeMasterIPFunc + + // handle to get kubernetes config path + getKubeConfigPath getKubeConfigPathFunc +} + +// OptionFunc is a typed function that abstracts any kind of operation +// against the provided client instance +// +// This is the basic building block to create functional operations +// against the client instance +type OptionFunc func(*Client) + +// New returns a new instance of client +func New(opts ...OptionFunc) *Client { + c := &Client{} + for _, o := range opts { + o(c) + } + withDefaults(c) + return c +} + +func withDefaults(c *Client) { + if c.getInClusterConfig == nil { + c.getInClusterConfig = rest.InClusterConfig + } + if c.buildConfigFromFlags == nil { + c.buildConfigFromFlags = clientcmd.BuildConfigFromFlags + } + if c.getKubernetesClientset == nil { + c.getKubernetesClientset = kubernetes.NewForConfig + } + if c.getKubernetesDynamicClient == nil { + c.getKubernetesDynamicClient = dynamic.NewForConfig + } + if c.getKubeMasterIP == nil { + c.getKubeMasterIP = env.Get + } + if c.getKubeConfigPath == nil { + c.getKubeConfigPath = env.Get + } +} + +// InCluster enables IsInCluster flag +func InCluster() OptionFunc { + return func(c *Client) { + c.IsInCluster = true + } +} + +// WithKubeConfigPath sets kubeconfig path +// against this client instance +func WithKubeConfigPath(kubeConfigPath string) OptionFunc { + return func(c *Client) { + c.KubeConfigPath = kubeConfigPath + } +} + +// Clientset returns a new instance of kubernetes clientset +func (c *Client) Clientset() (*kubernetes.Clientset, error) { + config, err := c.GetConfigForPathOrDirect() + if err != nil { + return nil, errors.Wrapf(err, + "failed to get kubernetes clientset: failed to get kubernetes config: IsInCluster {%t}: KubeConfigPath {%s}", + c.IsInCluster, + c.KubeConfigPath, + ) + } + return c.getKubernetesClientset(config) +} + +// Config returns the kubernetes config instance based on available criteria +func (c *Client) Config() (config *rest.Config, err error) { + // IsInCluster flag holds the top most priority + if c.IsInCluster { + return c.getInClusterConfig() + } + + // ENV holds second priority + if strings.TrimSpace(c.getKubeMasterIP(K8sMasterIPEnvironmentKey)) != "" || + strings.TrimSpace(c.getKubeConfigPath(KubeConfigEnvironmentKey)) != "" { + return c.getConfigFromENV() + } + + // Defaults to InClusterConfig + return c.getInClusterConfig() +} + +// ConfigForPath returns the kuberentes config instance based on KubeConfig path +func (c *Client) ConfigForPath(kubeConfigPath string) (config *rest.Config, err error) { + return c.buildConfigFromFlags("", kubeConfigPath) +} + +func (c *Client) GetConfigForPathOrDirect() (config *rest.Config, err error) { + if c.KubeConfigPath != "" { + return c.ConfigForPath(c.KubeConfigPath) + } + return c.Config() +} + +func (c *Client) getConfigFromENV() (config *rest.Config, err error) { + k8sMaster := c.getKubeMasterIP(K8sMasterIPEnvironmentKey) + kubeConfig := 
c.getKubeConfigPath(KubeConfigEnvironmentKey) + if strings.TrimSpace(k8sMaster) == "" && + strings.TrimSpace(kubeConfig) == "" { + return nil, errors.Errorf( + "failed to get kubernetes config: missing ENV: atleast one should be set: {%s} or {%s}", + K8sMasterIPEnvironmentKey, + KubeConfigEnvironmentKey, + ) + } + return c.buildConfigFromFlags(k8sMaster, kubeConfig) +} + +// Dynamic returns a kubernetes dynamic client capable of invoking operations +// against kubernetes resources +func (c *Client) Dynamic() (dynamic.Interface, error) { + config, err := c.GetConfigForPathOrDirect() + if err != nil { + return nil, errors.Wrap(err, "failed to get dynamic client") + } + return c.getKubernetesDynamicClient(config) +} diff --git a/pkg/common/kubernetes/client/client_test.go b/pkg/common/kubernetes/client/client_test.go new file mode 100644 index 000000000..bfcf35c82 --- /dev/null +++ b/pkg/common/kubernetes/client/client_test.go @@ -0,0 +1,310 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/pkg/errors" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +func fakeGetClientsetOk(c *rest.Config) (*kubernetes.Clientset, error) { + return &kubernetes.Clientset{}, nil +} + +func fakeGetClientsetErr(c *rest.Config) (*kubernetes.Clientset, error) { + return nil, errors.New("fake error") +} + +func fakeInClusterConfigOk() (*rest.Config, error) { + return &rest.Config{}, nil +} + +func fakeInClusterConfigErr() (*rest.Config, error) { + return nil, errors.New("fake error") +} + +func fakeBuildConfigFromFlagsOk(kubemaster string, kubeconfig string) (*rest.Config, error) { + return &rest.Config{}, nil +} + +func fakeBuildConfigFromFlagsErr(kubemaster string, kubeconfig string) (*rest.Config, error) { + return nil, errors.New("fake error") +} + +func fakeGetKubeConfigPathOk(e string) string { + return "fake" +} + +func fakeGetKubeConfigPathNil(e string) string { + return "" +} + +func fakeGetKubeMasterIPOk(e string) string { + return "fake" +} + +func fakeGetKubeMasterIPNil(e string) string { + return "" +} + +func fakeGetDynamicClientSetOk(c *rest.Config) (dynamic.Interface, error) { + return dynamic.NewForConfig(c) +} + +func fakeGetDynamicClientSetNil(c *rest.Config) (dynamic.Interface, error) { + return nil, nil +} + +func fakeGetDynamicClientSetErr(c *rest.Config) (dynamic.Interface, error) { + return nil, errors.New("fake error") +} + +func TestNewInCluster(t *testing.T) { + c := New(InCluster()) + if !c.IsInCluster { + t.Fatalf("test failed: expected IsInCluster as 'true' actual '%t'", c.IsInCluster) + } +} + +func TestConfig(t *testing.T) { + tests := map[string]struct { + isInCluster bool + kubeConfigPath string + getInClusterConfig getInClusterConfigFunc + getKubeMasterIP getKubeMasterIPFunc + getKubeConfigPath getKubeConfigPathFunc + getConfigFromENV buildConfigFromFlagsFunc + isErr bool + }{ + "t1": {true, "", fakeInClusterConfigOk, nil, nil, nil, false}, + "t2": {true, "", 
fakeInClusterConfigErr, nil, nil, nil, true}, + "t3": {false, "", fakeInClusterConfigErr, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathNil, nil, true}, + "t4": {false, "", fakeInClusterConfigOk, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathNil, nil, false}, + "t5": {false, "fakeKubeConfigPath", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathNil, fakeBuildConfigFromFlagsOk, false}, + "t6": {false, "", nil, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, false}, + "t7": {false, "", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, false}, + "t8": {false, "fakeKubeConfigPath", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsErr, true}, + "t9": {false, "fakeKubeConfigpath", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, false}, + } + for name, mock := range tests { + name, mock := name, mock // pin It + t.Run(name, func(t *testing.T) { + c := &Client{ + IsInCluster: mock.isInCluster, + KubeConfigPath: mock.kubeConfigPath, + getInClusterConfig: mock.getInClusterConfig, + getKubeMasterIP: mock.getKubeMasterIP, + getKubeConfigPath: mock.getKubeConfigPath, + buildConfigFromFlags: mock.getConfigFromENV, + } + _, err := c.Config() + if mock.isErr && err == nil { + t.Fatalf("test '%s' failed: expected no error actual '%s'", name, err) + } + }) + } +} + +func TestGetConfigFromENV(t *testing.T) { + tests := map[string]struct { + getKubeMasterIP getKubeMasterIPFunc + getKubeConfigPath getKubeConfigPathFunc + getConfigFromENV buildConfigFromFlagsFunc + isErr bool + }{ + "t1": {fakeGetKubeMasterIPNil, fakeGetKubeConfigPathNil, nil, true}, + "t2": {fakeGetKubeMasterIPNil, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, false}, + "t3": {fakeGetKubeMasterIPOk, fakeGetKubeConfigPathNil, fakeBuildConfigFromFlagsOk, false}, + "t4": {fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, false}, + "t5": {fakeGetKubeMasterIPNil, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsErr, true}, + "t6": {fakeGetKubeMasterIPOk, fakeGetKubeConfigPathNil, fakeBuildConfigFromFlagsErr, true}, + "t7": {fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsErr, true}, + } + for name, mock := range tests { + name, mock := name, mock // pin It + t.Run(name, func(t *testing.T) { + c := &Client{ + getKubeMasterIP: mock.getKubeMasterIP, + getKubeConfigPath: mock.getKubeConfigPath, + buildConfigFromFlags: mock.getConfigFromENV, + } + _, err := c.getConfigFromENV() + if mock.isErr && err == nil { + t.Fatalf("test '%s' failed: expected error actual no error", name) + } + if !mock.isErr && err != nil { + t.Fatalf("test '%s' failed: expected no error actual '%s'", name, err) + } + }) + } +} + +func TestGetConfigFromPathOrDirect(t *testing.T) { + tests := map[string]struct { + kubeConfigPath string + getConfigFromFlags buildConfigFromFlagsFunc + getInClusterConfig getInClusterConfigFunc + isErr bool + }{ + "T1": {"", fakeBuildConfigFromFlagsErr, fakeInClusterConfigOk, false}, + "T2": {"fake-path", fakeBuildConfigFromFlagsOk, fakeInClusterConfigErr, false}, + "T3": {"fake-path", fakeBuildConfigFromFlagsErr, fakeInClusterConfigOk, true}, + "T4": {"", fakeBuildConfigFromFlagsOk, fakeInClusterConfigErr, true}, + "T5": {"fake-path", fakeBuildConfigFromFlagsErr, fakeInClusterConfigErr, true}, + } + for name, mock := range tests { + name, mock := name, mock // pin It + t.Run(name, func(t *testing.T) { + c := &Client{ + KubeConfigPath: mock.kubeConfigPath, + buildConfigFromFlags: 
mock.getConfigFromFlags, + getInClusterConfig: mock.getInClusterConfig, + getKubeMasterIP: fakeGetKubeMasterIPNil, + getKubeConfigPath: fakeGetKubeConfigPathNil, + } + _, err := c.GetConfigForPathOrDirect() + if mock.isErr && err == nil { + t.Fatalf("test '%s' failed: expected error actual no error", name) + } + if !mock.isErr && err != nil { + t.Fatalf("test '%s' failed: expected no error actual '%s'", name, err) + } + }) + } +} + +func TestClientset(t *testing.T) { + tests := map[string]struct { + isInCluster bool + kubeConfigPath string + getInClusterConfig getInClusterConfigFunc + getKubeMasterIP getKubeMasterIPFunc + getKubeConfigPath getKubeConfigPathFunc + getConfigFromENV buildConfigFromFlagsFunc + getKubernetesClientset getKubernetesClientsetFunc + isErr bool + }{ + "t10": {true, "", fakeInClusterConfigOk, nil, nil, nil, fakeGetClientsetOk, false}, + "t11": {true, "", fakeInClusterConfigOk, nil, nil, nil, fakeGetClientsetErr, true}, + "t12": {true, "", fakeInClusterConfigErr, nil, nil, nil, fakeGetClientsetOk, true}, + + "t21": {false, "", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathNil, fakeBuildConfigFromFlagsOk, fakeGetClientsetOk, false}, + "t22": {false, "", nil, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, fakeGetClientsetOk, false}, + "t23": {false, "", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, fakeGetClientsetOk, false}, + "t24": {false, "fake-path", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsErr, fakeGetClientsetOk, true}, + "t25": {false, "", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, fakeGetClientsetErr, true}, + "t26": {false, "fakePath", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsErr, fakeGetClientsetOk, true}, + + "t30": {false, "", fakeInClusterConfigOk, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathNil, nil, fakeGetClientsetOk, false}, + "t31": {false, "", fakeInClusterConfigOk, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathNil, nil, fakeGetClientsetErr, true}, + "t32": {false, "", fakeInClusterConfigErr, fakeGetKubeMasterIPNil, fakeGetKubeConfigPathNil, nil, nil, true}, + "t33": {false, "fakePath", nil, fakeGetKubeMasterIPOk, fakeGetKubeConfigPathOk, fakeBuildConfigFromFlagsOk, fakeGetClientsetOk, false}, + } + for name, mock := range tests { + name, mock := name, mock // pin It + t.Run(name, func(t *testing.T) { + c := &Client{ + IsInCluster: mock.isInCluster, + KubeConfigPath: mock.kubeConfigPath, + getInClusterConfig: mock.getInClusterConfig, + getKubeMasterIP: mock.getKubeMasterIP, + getKubeConfigPath: mock.getKubeConfigPath, + buildConfigFromFlags: mock.getConfigFromENV, + getKubernetesClientset: mock.getKubernetesClientset, + } + _, err := c.Clientset() + if mock.isErr && err == nil { + t.Fatalf("test '%s' failed: expected error actual no error", name) + } + if !mock.isErr && err != nil { + t.Fatalf("test '%s' failed: expected no error actual '%s'", name, err) + } + }) + } +} + +func TestDynamic(t *testing.T) { + tests := map[string]struct { + getKubeMasterIP getKubeMasterIPFunc + getInClusterConfig getInClusterConfigFunc + getKubernetesDynamicClientSet getKubernetesDynamicClientFunc + kubeConfigPath string + getConfigFromENV buildConfigFromFlagsFunc + getKubeConfigPath getKubeConfigPathFunc + isErr bool + }{ + "t1": {fakeGetKubeMasterIPNil, fakeInClusterConfigErr, fakeGetDynamicClientSetOk, "fake-path", fakeBuildConfigFromFlagsOk, fakeGetKubeConfigPathNil, false}, + "t2": 
{fakeGetKubeMasterIPNil, fakeInClusterConfigErr, fakeGetDynamicClientSetErr, "fake-path", fakeBuildConfigFromFlagsOk, fakeGetKubeConfigPathOk, true}, + "t3": {fakeGetKubeMasterIPNil, fakeInClusterConfigErr, fakeGetDynamicClientSetOk, "fake-path", fakeBuildConfigFromFlagsErr, fakeGetKubeConfigPathOk, true}, + "t4": {fakeGetKubeMasterIPOk, fakeInClusterConfigOk, fakeGetDynamicClientSetOk, "", fakeBuildConfigFromFlagsOk, fakeGetKubeConfigPathOk, false}, + "t5": {fakeGetKubeMasterIPOk, fakeInClusterConfigErr, fakeGetDynamicClientSetErr, "", fakeBuildConfigFromFlagsOk, fakeGetKubeConfigPathOk, true}, + "t6": {fakeGetKubeMasterIPNil, fakeInClusterConfigOk, fakeGetDynamicClientSetErr, "", fakeBuildConfigFromFlagsErr, fakeGetKubeConfigPathNil, true}, + "t7": {fakeGetKubeMasterIPNil, fakeInClusterConfigErr, fakeGetDynamicClientSetOk, "", fakeBuildConfigFromFlagsErr, fakeGetKubeConfigPathNil, true}, + "t8": {fakeGetKubeMasterIPNil, fakeInClusterConfigErr, fakeGetDynamicClientSetErr, "", fakeBuildConfigFromFlagsErr, fakeGetKubeConfigPathNil, true}, + } + for name, mock := range tests { + name, mock := name, mock // pin It + t.Run(name, func(t *testing.T) { + c := &Client{ + getKubeMasterIP: mock.getKubeMasterIP, + KubeConfigPath: mock.kubeConfigPath, + getInClusterConfig: mock.getInClusterConfig, + buildConfigFromFlags: mock.getConfigFromENV, + getKubeConfigPath: mock.getKubeConfigPath, + getKubernetesDynamicClient: mock.getKubernetesDynamicClientSet, + } + _, err := c.Dynamic() + if mock.isErr && err == nil { + t.Fatalf("test '%s' failed: expected error actual no error", name) + } + if !mock.isErr && err != nil { + t.Fatalf("test '%s' failed: expected no error but got '%v'", name, err) + } + }) + } +} + +func TestConfigForPath(t *testing.T) { + tests := map[string]struct { + kubeConfigPath string + getConfigFromPath buildConfigFromFlagsFunc + isErr bool + }{ + "T1": {"", fakeBuildConfigFromFlagsErr, true}, + "T2": {"fake-path", fakeBuildConfigFromFlagsOk, false}, + } + for name, mock := range tests { + name, mock := name, mock // pin It + t.Run(name, func(t *testing.T) { + c := &Client{ + KubeConfigPath: mock.kubeConfigPath, + buildConfigFromFlags: mock.getConfigFromPath, + } + _, err := c.ConfigForPath(mock.kubeConfigPath) + if mock.isErr && err == nil { + t.Fatalf("test '%s' failed: expected error actual no error", name) + } + if !mock.isErr && err != nil { + t.Fatalf("test '%s' failed: expected no error but got '%v'", name, err) + } + }) + } +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 000000000..8b9cacf6e --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,51 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
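Outside of these tests the client is meant to be assembled through its functional options; a minimal sketch of a hypothetical caller follows. The import is aliased because the package is declared as v1alpha1, and the kubeconfig path is a placeholder.

package main

import (
	"log"

	client "github.com/openebs/zfs-localpv/pkg/common/kubernetes/client"
)

func main() {
	// When a kubeconfig path is set it is used directly; otherwise the client
	// falls back to the in-cluster or ENV driven config described in Config().
	c := client.New(
		client.WithKubeConfigPath("/var/run/kubeconfig"),
	)

	clientset, err := c.Clientset()
	if err != nil {
		log.Fatalf("failed to build clientset: %v", err)
	}
	_ = clientset // use the typed clientset, e.g. clientset.CoreV1().Pods("")
}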
+*/
+
+package config
+
+// Config struct fills the parameters of request or user input
+type Config struct {
+	// DriverName to be registered at CSI
+	DriverName string
+
+	// PluginType indicates whether the driver is
+	// running as a node plugin or as a
+	// controller plugin
+	PluginType string
+
+	// Version of the CSI controller/node driver
+	Version string
+
+	// Endpoint on which requests are made by kubelet
+	// or external provisioner
+	//
+	// NOTE:
+	// - Controller/node plugin will listen on this
+	// - This will be a unix based socket
+	Endpoint string
+
+	// NodeID helps in differentiating the nodes on
+	// which node drivers are running. This is useful
+	// in case of topologies and publishing or
+	// unpublishing volumes on nodes
+	NodeID string
+}
+
+// Default returns a new instance of config
+// required to initialize a driver instance
+func Default() *Config {
+	return &Config{}
+}
diff --git a/pkg/driver/agent.go b/pkg/driver/agent.go
new file mode 100644
index 000000000..98f2eca1d
--- /dev/null
+++ b/pkg/driver/agent.go
@@ -0,0 +1,297 @@
+/*
+Copyright © 2019 The OpenEBS Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	ctrl "github.com/openebs/zfs-localpv/cmd/controller"
+	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1"
+	"github.com/openebs/zfs-localpv/pkg/builder"
+	zvol "github.com/openebs/zfs-localpv/pkg/zfs"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sync"
+)
+
+// node is the server implementation
+// for CSI NodeServer
+type node struct {
+	driver *CSIDriver
+}
+
+// NewNode returns a new instance
+// of CSI NodeServer
+func NewNode(d *CSIDriver) csi.NodeServer {
+	var ControllerMutex = sync.RWMutex{}
+	// start the zfsvolume watcher
+	go func() {
+		err := ctrl.Start(&ControllerMutex)
+		if err != nil {
+			logrus.Errorf("Failed to start ZFSVolume controller: %s", err.Error())
+		}
+	}()
+
+	return &node{
+		driver: d,
+	}
+}
+
+// GetVolAndMountInfo fetches the ZFSVolume CR for the
+// requested volume and builds the mount information
+// from the publish request
+func GetVolAndMountInfo(
+	req *csi.NodePublishVolumeRequest,
+) (*apis.ZFSVolume, *apis.MountInfo, error) {
+	var mountinfo apis.MountInfo
+
+	mountinfo.FSType = req.GetVolumeCapability().GetMount().GetFsType()
+	mountinfo.MountPath = req.GetTargetPath()
+	mountinfo.ReadOnly = req.GetReadonly()
+	mountinfo.MountOptions = append(mountinfo.MountOptions, req.GetVolumeCapability().GetMount().GetMountFlags()...)
+
+	getOptions := metav1.GetOptions{}
+	vol, err := builder.NewKubeclient().
+		WithNamespace(zvol.OpenEBSNamespace).
+ Get(req.GetVolumeId(), getOptions) + + if err != nil { + return nil, nil, err + } + + return vol, &mountinfo, nil +} + +// NodePublishVolume publishes (mounts) the volume +// at the corresponding node at a given path +// +// This implements csi.NodeServer +func (ns *node) NodePublishVolume( + ctx context.Context, + req *csi.NodePublishVolumeRequest, +) (*csi.NodePublishVolumeResponse, error) { + + var ( + err error + ) + + if err = ns.validateNodePublishReq(req); err != nil { + return nil, err + } + + vol, mountInfo, err := GetVolAndMountInfo(req) + if err != nil { + goto PublishVolumeResponse + } + // Create the zfs volume and attempt mount operation on the requested path + if err = zvol.CreateAndMountZvol(vol, mountInfo); err != nil { + goto PublishVolumeResponse + } + +PublishVolumeResponse: + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &csi.NodePublishVolumeResponse{}, nil +} + +// NodeUnpublishVolume unpublishes (unmounts) the volume +// from the corresponding node from the given path +// +// This implements csi.NodeServer +func (ns *node) NodeUnpublishVolume( + ctx context.Context, + req *csi.NodeUnpublishVolumeRequest, +) (*csi.NodeUnpublishVolumeResponse, error) { + + var ( + err error + vol *apis.ZFSVolume + currentMounts []string + ) + + if err = ns.validateNodeUnpublishReq(req); err != nil { + return nil, err + } + + targetPath := req.GetTargetPath() + volumeID := req.GetVolumeId() + + getOptions := metav1.GetOptions{} + vol, err = builder.NewKubeclient(). + WithNamespace(zvol.OpenEBSNamespace). + Get(volumeID, getOptions) + + if err != nil { + return nil, err + } + + zfsvolume := vol.Spec.PoolName + "/" + vol.Name + devpath := zvol.ZFS_DEVPATH + zfsvolume + currentMounts, err = zvol.GetMounts(devpath) + if err != nil { + return nil, err + } else if len(currentMounts) == 0 { + goto NodeUnpublishResponse + } else if len(currentMounts) == 1 { + if currentMounts[0] != targetPath { + return nil, status.Error(codes.Internal, "device not mounted at right path") + } + } else { + logrus.Errorf( + "can not unmount, more than one mounts for volume:%s path %s mounts: %v", + volumeID, targetPath, currentMounts, + ) + return nil, status.Error(codes.Internal, "device not mounted at rightpath") + } + + if vol, err = zvol.GetZFSVolume(volumeID); (err != nil) || (vol == nil) { + goto NodeUnpublishResponse + } + + if err = zvol.UmountVolume(vol, req.GetTargetPath()); err != nil { + goto NodeUnpublishResponse + } + +NodeUnpublishResponse: + logrus.Infof("hostpath: volume %s path: %s has been unmounted.", + volumeID, targetPath) + + return &csi.NodeUnpublishVolumeResponse{}, nil +} + +// NodeGetInfo returns node details +// +// This implements csi.NodeServer +func (ns *node) NodeGetInfo( + ctx context.Context, + req *csi.NodeGetInfoRequest, +) (*csi.NodeGetInfoResponse, error) { + + return &csi.NodeGetInfoResponse{ + NodeId: ns.driver.config.NodeID, + }, nil +} + +// NodeGetCapabilities returns capabilities supported +// by this node service +// +// This implements csi.NodeServer +func (ns *node) NodeGetCapabilities( + ctx context.Context, + req *csi.NodeGetCapabilitiesRequest, +) (*csi.NodeGetCapabilitiesResponse, error) { + + return &csi.NodeGetCapabilitiesResponse{ + Capabilities: []*csi.NodeServiceCapability{ + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_UNKNOWN, + }, + }, + }, + }, + }, nil +} + +// TODO +// This needs to be implemented +// +// NodeStageVolume mounts the volume 
on the staging +// path +// +// This implements csi.NodeServer +func (ns *node) NodeStageVolume( + ctx context.Context, + req *csi.NodeStageVolumeRequest, +) (*csi.NodeStageVolumeResponse, error) { + + return &csi.NodeStageVolumeResponse{}, nil +} + +// NodeUnstageVolume unmounts the volume from +// the staging path +// +// This implements csi.NodeServer +func (ns *node) NodeUnstageVolume( + ctx context.Context, + req *csi.NodeUnstageVolumeRequest, +) (*csi.NodeUnstageVolumeResponse, error) { + + return &csi.NodeUnstageVolumeResponse{}, nil +} + +// TODO +// Verify if this needs to be implemented +// +// NodeExpandVolume resizes the filesystem if required +// +// If ControllerExpandVolumeResponse returns true in +// node_expansion_required then FileSystemResizePending +// condition will be added to PVC and NodeExpandVolume +// operation will be queued on kubelet +// +// This implements csi.NodeServer +func (ns *node) NodeExpandVolume( + ctx context.Context, + req *csi.NodeExpandVolumeRequest, +) (*csi.NodeExpandVolumeResponse, error) { + + return nil, nil +} + +// NodeGetVolumeStats returns statistics for the +// given volume +// +// This implements csi.NodeServer +func (ns *node) NodeGetVolumeStats( + ctx context.Context, + in *csi.NodeGetVolumeStatsRequest, +) (*csi.NodeGetVolumeStatsResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +func (ns *node) validateNodePublishReq( + req *csi.NodePublishVolumeRequest, +) error { + if req.GetVolumeCapability() == nil { + return status.Error(codes.InvalidArgument, + "Volume capability missing in request") + } + + if len(req.GetVolumeId()) == 0 { + return status.Error(codes.InvalidArgument, + "Volume ID missing in request") + } + return nil +} + +func (ns *node) validateNodeUnpublishReq( + req *csi.NodeUnpublishVolumeRequest, +) error { + if req.GetVolumeId() == "" { + return status.Error(codes.InvalidArgument, + "Volume ID missing in request") + } + + if req.GetTargetPath() == "" { + return status.Error(codes.InvalidArgument, + "Target path missing in request") + } + return nil +} diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go new file mode 100644 index 000000000..5f87c26fa --- /dev/null +++ b/pkg/driver/controller.go @@ -0,0 +1,375 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
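NodePublishVolume and NodeUnpublishVolume rely entirely on the two validate helpers above for argument checking, so a small in-package test along these lines (hypothetical, not part of this patch) captures the expected failure modes:

package driver

import (
	"testing"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func TestValidateNodeUnpublishReq(t *testing.T) {
	ns := &node{driver: &CSIDriver{}}

	// A request without a volume id must be rejected with InvalidArgument.
	err := ns.validateNodeUnpublishReq(&csi.NodeUnpublishVolumeRequest{
		TargetPath: "/var/lib/kubelet/pods/fake/volumes/fake/mount",
	})
	if err == nil {
		t.Fatalf("expected error for missing volume id, got nil")
	}

	// Likewise for a request without a target path.
	err = ns.validateNodeUnpublishReq(&csi.NodeUnpublishVolumeRequest{
		VolumeId: "pvc-1234",
	})
	if err == nil {
		t.Fatalf("expected error for missing target path, got nil")
	}
}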
+*/ + +package driver + +import ( + "fmt" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/openebs/zfs-localpv/pkg/builder" + errors "github.com/openebs/zfs-localpv/pkg/common/errors" + csipayload "github.com/openebs/zfs-localpv/pkg/response" + zvol "github.com/openebs/zfs-localpv/pkg/zfs" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// controller is the server implementation +// for CSI Controller +type controller struct { + driver *CSIDriver + capabilities []*csi.ControllerServiceCapability +} + +// NewController returns a new instance +// of CSI controller +func NewController(d *CSIDriver) csi.ControllerServer { + return &controller{ + driver: d, + capabilities: newControllerCapabilities(), + } +} + +// SupportedVolumeCapabilityAccessModes contains the list of supported access +// modes for the volume +var SupportedVolumeCapabilityAccessModes = []*csi.VolumeCapability_AccessMode{ + &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, +} + +// CreateVolume provisions a volume +func (cs *controller) CreateVolume( + ctx context.Context, + req *csi.CreateVolumeRequest, +) (*csi.CreateVolumeResponse, error) { + + logrus.Infof("received request to create volume {%s} vol{%v}", req.GetName(), req) + var err error + + if err = cs.validateVolumeCreateReq(req); err != nil { + return nil, err + } + + volName := req.GetName() + size := req.GetCapacityRange().RequiredBytes + bs := req.GetParameters()["blocksize"] + compression := req.GetParameters()["compression"] + dedup := req.GetParameters()["dedup"] + pool := req.GetParameters()["poolname"] + tp := req.GetParameters()["thinprovision"] + + volObj, err := builder.NewBuilder(). + WithName(volName). + WithCapacity(strconv.FormatInt(int64(size), 10)). + WithBlockSize(bs). + WithPoolName(pool). + WithDedup(dedup). + WithThinProv(tp). + WithCompression(compression).Build() + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + err = zvol.ProvisionVolume(size, volObj) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return csipayload.NewCreateVolumeResponseBuilder(). + WithName(volName). + WithCapacity(size). 
+ Build(), nil +} + +// DeleteVolume deletes the specified volume +func (cs *controller) DeleteVolume( + ctx context.Context, + req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { + + logrus.Infof("received request to delete volume {%s}", req.VolumeId) + + var ( + err error + ) + + if err = cs.validateDeleteVolumeReq(req); err != nil { + return nil, err + } + + volumeID := req.GetVolumeId() + + // verify if the volume has already been deleted + vol, err := zvol.GetVolume(volumeID) + if vol != nil && vol.DeletionTimestamp != nil { + goto deleteResponse + } + + // Delete the corresponding ZV CR + err = zvol.DeleteVolume(volumeID) + if err != nil { + return nil, errors.Wrapf( + err, + "failed to handle delete volume request for {%s}", + volumeID, + ) + } +deleteResponse: + return csipayload.NewDeleteVolumeResponseBuilder().Build(), nil +} + +// TODO Implementation will be taken up later + +// ValidateVolumeCapabilities validates the capabilities +// required to create a new volume +// This implements csi.ControllerServer +func (cs *controller) ValidateVolumeCapabilities( + ctx context.Context, + req *csi.ValidateVolumeCapabilitiesRequest, +) (*csi.ValidateVolumeCapabilitiesResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerGetCapabilities fetches controller capabilities +// +// This implements csi.ControllerServer +func (cs *controller) ControllerGetCapabilities( + ctx context.Context, + req *csi.ControllerGetCapabilitiesRequest, +) (*csi.ControllerGetCapabilitiesResponse, error) { + + resp := &csi.ControllerGetCapabilitiesResponse{ + Capabilities: cs.capabilities, + } + + return resp, nil +} + +// ControllerExpandVolume resizes previously provisioned volume +// +// This implements csi.ControllerServer +func (cs *controller) ControllerExpandVolume( + ctx context.Context, + req *csi.ControllerExpandVolumeRequest, +) (*csi.ControllerExpandVolumeResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// CreateSnapshot creates a snapshot for given volume +// +// This implements csi.ControllerServer +func (cs *controller) CreateSnapshot( + ctx context.Context, + req *csi.CreateSnapshotRequest, +) (*csi.CreateSnapshotResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// DeleteSnapshot deletes given snapshot +// +// This implements csi.ControllerServer +func (cs *controller) DeleteSnapshot( + ctx context.Context, + req *csi.DeleteSnapshotRequest, +) (*csi.DeleteSnapshotResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// ListSnapshots lists all snapshots for the +// given volume +// +// This implements csi.ControllerServer +func (cs *controller) ListSnapshots( + ctx context.Context, + req *csi.ListSnapshotsRequest, +) (*csi.ListSnapshotsResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerUnpublishVolume removes a previously +// attached volume from the given node +// +// This implements csi.ControllerServer +func (cs *controller) ControllerUnpublishVolume( + ctx context.Context, + req *csi.ControllerUnpublishVolumeRequest, +) (*csi.ControllerUnpublishVolumeResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerPublishVolume attaches given volume +// at the specified node +// +// This implements csi.ControllerServer +func (cs *controller) ControllerPublishVolume( + ctx context.Context, + req *csi.ControllerPublishVolumeRequest, +) (*csi.ControllerPublishVolumeResponse, error) { + + return 
nil, status.Error(codes.Unimplemented, "") +} + +// GetCapacity return the capacity of the +// given volume +// +// This implements csi.ControllerServer +func (cs *controller) GetCapacity( + ctx context.Context, + req *csi.GetCapacityRequest, +) (*csi.GetCapacityResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// ListVolumes lists all the volumes +// +// This implements csi.ControllerServer +func (cs *controller) ListVolumes( + ctx context.Context, + req *csi.ListVolumesRequest, +) (*csi.ListVolumesResponse, error) { + + return nil, status.Error(codes.Unimplemented, "") +} + +// validateCapabilities validates if provided capabilities +// are supported by this driver +func validateCapabilities(caps []*csi.VolumeCapability) bool { + + for _, cap := range caps { + if !IsSupportedVolumeCapabilityAccessMode(cap.AccessMode.Mode) { + return false + } + } + return true +} + +func (cs *controller) validateDeleteVolumeReq(req *csi.DeleteVolumeRequest) error { + volumeID := req.GetVolumeId() + if volumeID == "" { + return status.Error( + codes.InvalidArgument, + "failed to handle delete volume request: missing volume id", + ) + } + + err := cs.validateRequest( + csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + ) + if err != nil { + return errors.Wrapf( + err, + "failed to handle delete volume request for {%s} : validation failed", + volumeID, + ) + } + return nil +} + +// IsSupportedVolumeCapabilityAccessMode valides the requested access mode +func IsSupportedVolumeCapabilityAccessMode( + accessMode csi.VolumeCapability_AccessMode_Mode, +) bool { + + for _, access := range SupportedVolumeCapabilityAccessModes { + if accessMode == access.Mode { + return true + } + } + return false +} + +// newControllerCapabilities returns a list +// of this controller's capabilities +func newControllerCapabilities() []*csi.ControllerServiceCapability { + fromType := func( + cap csi.ControllerServiceCapability_RPC_Type, + ) *csi.ControllerServiceCapability { + return &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: cap, + }, + }, + } + } + + var capabilities []*csi.ControllerServiceCapability + for _, cap := range []csi.ControllerServiceCapability_RPC_Type{ + csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + } { + capabilities = append(capabilities, fromType(cap)) + } + return capabilities +} + +// validateRequest validates if the requested service is +// supported by the driver +func (cs *controller) validateRequest( + c csi.ControllerServiceCapability_RPC_Type, +) error { + + for _, cap := range cs.capabilities { + if c == cap.GetRpc().GetType() { + return nil + } + } + + return status.Error( + codes.InvalidArgument, + fmt.Sprintf("failed to validate request: {%s} is not supported", c), + ) +} + +func (cs *controller) validateVolumeCreateReq(req *csi.CreateVolumeRequest) error { + err := cs.validateRequest( + csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + ) + if err != nil { + return errors.Wrapf( + err, + "failed to handle create volume request for {%s}", + req.GetName(), + ) + } + + if req.GetName() == "" { + return status.Error( + codes.InvalidArgument, + "failed to handle create volume request: missing volume name", + ) + } + + volCapabilities := req.GetVolumeCapabilities() + if volCapabilities == nil { + return status.Error( + codes.InvalidArgument, + "failed to handle create volume request: missing volume capabilities", + ) + } + return nil +} diff --git 
a/pkg/driver/driver.go b/pkg/driver/driver.go new file mode 100644 index 000000000..1c146da22 --- /dev/null +++ b/pkg/driver/driver.go @@ -0,0 +1,104 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "github.com/Sirupsen/logrus" + "github.com/container-storage-interface/spec/lib/go/csi" + config "github.com/openebs/zfs-localpv/pkg/config" +) + +// volume can only be published once as +// read/write on a single node, at any +// given time +var supportedAccessMode = &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, +} + +// TODO check if this can be renamed to Base +// +// CSIDriver defines a common data structure +// for drivers +type CSIDriver struct { + // TODO change the field names to make it + // readable + config *config.Config + ids csi.IdentityServer + ns csi.NodeServer + cs csi.ControllerServer + + cap []*csi.VolumeCapability_AccessMode +} + +// GetVolumeCapabilityAccessModes fetches the access +// modes on which the volume can be exposed +func GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode { + supported := []csi.VolumeCapability_AccessMode_Mode{ + csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + } + + var vcams []*csi.VolumeCapability_AccessMode + for _, vcam := range supported { + logrus.Infof("enabling volume access mode: %s", vcam.String()) + vcams = append(vcams, newVolumeCapabilityAccessMode(vcam)) + } + return vcams +} + +func newVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode { + return &csi.VolumeCapability_AccessMode{Mode: mode} +} + +// New returns a new driver instance +func New(config *config.Config) *CSIDriver { + driver := &CSIDriver{ + config: config, + cap: GetVolumeCapabilityAccessModes(), + } + + switch config.PluginType { + case "controller": + driver.cs = NewController(driver) + + case "agent": + // Start monitor goroutine to monitor the + // ZfsVolume CR. If there is any event + // related to the volume like destroy or + // property change, handle it accordingly. + + driver.ns = NewNode(driver) + } + + // Identity server is common to both node and + // controller, it is required to register, + // share capabilities and probe the corresponding + // driver + driver.ids = NewIdentity(driver) + return driver +} + +// Run starts the CSI plugin by communicating +// over the given endpoint +func (d *CSIDriver) Run() error { + // Initialize and start listening on grpc server + s := NewNonBlockingGRPCServer(d.config.Endpoint, d.ids, d.cs, d.ns) + + s.Start() + s.Wait() + + return nil +} diff --git a/pkg/driver/grpc.go b/pkg/driver/grpc.go new file mode 100644 index 000000000..82f8e438f --- /dev/null +++ b/pkg/driver/grpc.go @@ -0,0 +1,170 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
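Putting config and driver together, the entry point (cmd/main.go in this patch, not shown in this hunk) presumably wires things up roughly like the sketch below; the driver name, endpoint and node id values are placeholders, and the real binary reads them from flags.

package main

import (
	"log"

	"github.com/openebs/zfs-localpv/pkg/config"
	"github.com/openebs/zfs-localpv/pkg/driver"
)

func main() {
	cfg := config.Default()
	cfg.DriverName = "zfs.csi.openebs.io" // assumed plugin name, see deploy/zfs-operator.yaml
	cfg.PluginType = "agent"              // or "controller" for the provisioning side
	cfg.Endpoint = "unix:///plugin/csi.sock"
	cfg.NodeID = "node-1"
	cfg.Version = "0.1.0"

	// New selects the node or controller server based on PluginType and
	// always attaches the identity server; Run blocks serving gRPC.
	if err := driver.New(cfg).Run(); err != nil {
		log.Fatalf("failed to run CSI driver: %v", err)
	}
}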
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "fmt" + "net" + "os" + "strings" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +// parseEndpoint should have a valid prefix(unix/tcp) to return a valid endpoint parts +func parseEndpoint(ep string) (string, string, error) { + if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") { + s := strings.SplitN(ep, "://", 2) + if s[1] != "" { + return s[0], s[1], nil + } + } + return "", "", fmt.Errorf("Invalid endpoint: %v", ep) +} + +// logGRPC logs all the grpc related errors, i.e the final errors +// which are returned to the grpc clients +func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + logrus.Infof("GRPC call: %s", info.FullMethod) + logrus.Infof("GRPC request: %s", protosanitizer.StripSecrets(req)) + resp, err := handler(ctx, req) + if err != nil { + logrus.Errorf("GRPC error: %v", err) + } else { + logrus.Infof("GRPC response: %s", protosanitizer.StripSecrets(resp)) + } + return resp, err +} + +// NonBlockingGRPCServer defines Non blocking GRPC server interfaces +type NonBlockingGRPCServer interface { + // Start services at the endpoint + Start() + + // Waits for the service to stop + Wait() + + // Stops the service gracefully + Stop() + + // Stops the service forcefully + ForceStop() +} + +// NewNonBlockingGRPCServer returns a new instance of NonBlockingGRPCServer +func NewNonBlockingGRPCServer(ep string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) NonBlockingGRPCServer { + return &nonBlockingGRPCServer{ + endpoint: ep, + idnty_server: ids, + ctrl_server: cs, + agent_server: ns} +} + +// NonBlocking server +// dont block the execution for a task to complete. +// use wait group to wait for all the tasks dispatched. +type nonBlockingGRPCServer struct { + wg sync.WaitGroup + server *grpc.Server + endpoint string + idnty_server csi.IdentityServer + ctrl_server csi.ControllerServer + agent_server csi.NodeServer +} + +// Start grpc server for serving CSI endpoints +func (s *nonBlockingGRPCServer) Start() { + + s.wg.Add(1) + + go s.serve(s.endpoint, s.idnty_server, s.ctrl_server, s.agent_server) + + return +} + +// Wait for the service to stop +func (s *nonBlockingGRPCServer) Wait() { + s.wg.Wait() +} + +// Stop the service forcefully +func (s *nonBlockingGRPCServer) Stop() { + s.server.GracefulStop() +} + +// ForceStop the service +func (s *nonBlockingGRPCServer) ForceStop() { + s.server.Stop() +} + +// serve starts serving requests at the provided endpoint based on the type of +// plugin. 
In this function all the csi related interfaces are provided by +// container-storage-interface +func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { + + proto, addr, err := parseEndpoint(endpoint) + if err != nil { + logrus.Fatal(err.Error()) + } + + // Clear off the addr if it is already present, this is done to remove stale + // entries, as this path is shared with the OS and will be the same + // everytime the plugin restarts, its possible that the last instance leaves + // a stale entry + if proto == "unix" { + addr = "/" + addr + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + logrus.Fatalf("Failed to remove %s, error: %s", addr, err.Error()) + } + } + + listener, err := net.Listen(proto, addr) + if err != nil { + logrus.Fatalf("Failed to listen: %v", err) + } + + opts := []grpc.ServerOption{ + grpc.UnaryInterceptor(logGRPC), + } + // Create a new grpc server, all the request from csi client to + // create/delete/... will hit this server + server := grpc.NewServer(opts...) + s.server = server + + if ids != nil { + csi.RegisterIdentityServer(server, ids) + } + if cs != nil { + csi.RegisterControllerServer(server, cs) + } + if ns != nil { + csi.RegisterNodeServer(server, ns) + } + + logrus.Infof("Listening for connections on address: %#v", listener.Addr()) + + // Start serving requests on the grpc server created + server.Serve(listener) + +} diff --git a/pkg/driver/identity.go b/pkg/driver/identity.go new file mode 100644 index 000000000..1e5b3e370 --- /dev/null +++ b/pkg/driver/identity.go @@ -0,0 +1,112 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
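The endpoint handling is the part that most often trips people up, so a small in-package test sketch (hypothetical) pins down what parseEndpoint accepts and returns:

package driver

import "testing"

func TestParseEndpoint(t *testing.T) {
	// A unix endpoint is split into its scheme and address parts.
	proto, addr, err := parseEndpoint("unix:///plugin/csi.sock")
	if err != nil || proto != "unix" || addr != "/plugin/csi.sock" {
		t.Fatalf("unexpected result: %q %q %v", proto, addr, err)
	}

	// Endpoints without a unix:// or tcp:// prefix are rejected.
	if _, _, err := parseEndpoint("/plugin/csi.sock"); err == nil {
		t.Fatalf("expected error for endpoint without a scheme")
	}
}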
+*/ + +package driver + +import ( + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/openebs/zfs-localpv/pkg/version" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// identity is the server implementation +// for CSI IdentityServer +type identity struct { + driver *CSIDriver +} + +// NewIdentity returns a new instance of CSI +// IdentityServer +func NewIdentity(d *CSIDriver) csi.IdentityServer { + return &identity{ + driver: d, + } +} + +// GetPluginInfo returns the version and name of +// this service +// +// This implements csi.IdentityServer +func (id *identity) GetPluginInfo( + ctx context.Context, + req *csi.GetPluginInfoRequest, +) (*csi.GetPluginInfoResponse, error) { + + if id.driver.config.DriverName == "" { + return nil, status.Error(codes.Unavailable, "missing driver name") + } + + if id.driver.config.Version == "" { + return nil, status.Error(codes.Unavailable, "missing driver version") + } + + return &csi.GetPluginInfoResponse{ + Name: id.driver.config.DriverName, + // TODO + // verify which version needs to be used: + // config.version or version.Current() + VendorVersion: version.Current(), + }, nil +} + +// TODO +// Need to implement this +// +// Probe checks if the plugin is running or not +// +// This implements csi.IdentityServer +func (id *identity) Probe( + ctx context.Context, + req *csi.ProbeRequest, +) (*csi.ProbeResponse, error) { + + return &csi.ProbeResponse{}, nil +} + +// GetPluginCapabilities returns supported capabilities +// of this plugin +// +// Currently it reports whether this plugin can serve +// the Controller interface. Controller interface methods +// are called dependant on this +// +// This implements csi.IdentityServer +func (id *identity) GetPluginCapabilities( + ctx context.Context, + req *csi.GetPluginCapabilitiesRequest, +) (*csi.GetPluginCapabilitiesResponse, error) { + + return &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS, + }, + }, + }, + }, + }, nil +} diff --git a/pkg/response/create.go b/pkg/response/create.go new file mode 100644 index 000000000..6c23222e6 --- /dev/null +++ b/pkg/response/create.go @@ -0,0 +1,64 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
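One contract of GetPluginInfo worth pinning down is that it refuses to answer until the driver name and version are configured; an in-package sketch (hypothetical) of that behaviour:

package driver

import (
	"context"
	"testing"

	"github.com/container-storage-interface/spec/lib/go/csi"
	config "github.com/openebs/zfs-localpv/pkg/config"
)

func TestGetPluginInfoWithoutDriverName(t *testing.T) {
	// config.Default() leaves DriverName empty, so the call must fail.
	id := NewIdentity(&CSIDriver{config: config.Default()})

	if _, err := id.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{}); err == nil {
		t.Fatalf("expected error when driver name is not set")
	}
}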
+*/ + +package v1alpha1 + +import ( + "github.com/container-storage-interface/spec/lib/go/csi" +) + +// CreateVolumeResponseBuilder helps building an +// instance of csi CreateVolumeResponse +type CreateVolumeResponseBuilder struct { + response *csi.CreateVolumeResponse +} + +// NewCreateVolumeResponseBuilder returns a new +// instance of CreateVolumeResponseBuilder +func NewCreateVolumeResponseBuilder() *CreateVolumeResponseBuilder { + return &CreateVolumeResponseBuilder{ + response: &csi.CreateVolumeResponse{ + Volume: &csi.Volume{}, + }, + } +} + +// WithName sets the name against the +// CreateVolumeResponse instance +func (b *CreateVolumeResponseBuilder) WithName(name string) *CreateVolumeResponseBuilder { + b.response.Volume.VolumeId = name + return b +} + +// WithName sets the capacity against the +// CreateVolumeResponse instance +func (b *CreateVolumeResponseBuilder) WithCapacity(capacity int64) *CreateVolumeResponseBuilder { + b.response.Volume.CapacityBytes = capacity + return b +} + +// WithContext sets the context against the +// CreateVolumeResponse instance +func (b *CreateVolumeResponseBuilder) WithContext(ctx map[string]string) *CreateVolumeResponseBuilder { + b.response.Volume.VolumeContext = ctx + return b +} + +// Build returns the constructed instance +// of csi CreateVolumeResponse +func (b *CreateVolumeResponseBuilder) Build() *csi.CreateVolumeResponse { + return b.response +} diff --git a/pkg/response/delete.go b/pkg/response/delete.go new file mode 100644 index 000000000..29a3d4015 --- /dev/null +++ b/pkg/response/delete.go @@ -0,0 +1,41 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/container-storage-interface/spec/lib/go/csi" +) + +// DeleteVolumeResponseBuilder helps building an +// instance of csi DeleteVolumeResponse +type DeleteVolumeResponseBuilder struct { + response *csi.DeleteVolumeResponse +} + +// NewDeleteVolumeResponseBuilder returns a new +// instance of DeleteVolumeResponseBuilder +func NewDeleteVolumeResponseBuilder() *DeleteVolumeResponseBuilder { + return &DeleteVolumeResponseBuilder{ + response: &csi.DeleteVolumeResponse{}, + } +} + +// Build returns the constructed instance +// of csi DeleteVolumeResponse +func (b *DeleteVolumeResponseBuilder) Build() *csi.DeleteVolumeResponse { + return b.response +} diff --git a/pkg/version/version.go b/pkg/version/version.go new file mode 100644 index 000000000..620067cb4 --- /dev/null +++ b/pkg/version/version.go @@ -0,0 +1,115 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
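Typical use of the builders above, mirroring how CreateVolume in pkg/driver/controller.go assembles its response (a hypothetical standalone snippet; the alias reflects the package name v1alpha1):

package main

import (
	"fmt"

	csipayload "github.com/openebs/zfs-localpv/pkg/response"
)

func main() {
	// The CSI volume id is simply the requested volume name; VolumeContext is
	// optional and not set by the current CreateVolume implementation.
	resp := csipayload.NewCreateVolumeResponseBuilder().
		WithName("pvc-1234").
		WithCapacity(4 * 1024 * 1024 * 1024). // capacity in bytes
		WithContext(map[string]string{"poolname": "zfspv-pool"}).
		Build()

	fmt.Println(resp.Volume.VolumeId, resp.Volume.CapacityBytes)
}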
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" +) + +var ( + // GitCommit that was compiled; filled in by + // the compiler. + GitCommit string + + // Version is the version of this repo; filled + // in by the compiler + Version string + + // VersionMeta is a pre-release marker for the + // version. If this is "" (empty string) then + // it means that it is a final release. Otherwise, + // this is a pre-release such as "dev" (in + // development), "beta", "rc1", etc. + VersionMeta string +) + +const ( + versionFile string = "/src/github.com/openebs/zfs-localpv/VERSION" + buildMetaFile string = "/src/github.com/openebs/zfs-localpv/BUILDMETA" +) + +// Current returns current version of csi driver +func Current() string { + return Get() +} + +// Get returns current version from global +// Version variable. If Version is unset then +// from VERSION file at the root of this repo. +func Get() string { + if Version != "" { + return Version + } + + path := filepath.Join(os.Getenv("GOPATH") + versionFile) + vBytes, err := ioutil.ReadFile(path) + if err != nil { + logrus.Errorf("failed to get version: %s", err.Error()) + return "" + } + + return strings.TrimSpace(string(vBytes)) +} + +// GetBuildMeta returns build type from +// global VersionMeta variable. If VersionMeta +// is unset then this is fetched from BUILDMETA +// file at the root of this repo. +func GetBuildMeta() string { + if VersionMeta != "" { + return "-" + VersionMeta + } + + path := filepath.Join(os.Getenv("GOPATH") + buildMetaFile) + vBytes, err := ioutil.ReadFile(path) + if err != nil { + logrus.Errorf("failed to get build version: %s", err.Error()) + return "" + } + + return "-" + strings.TrimSpace(string(vBytes)) +} + +// GetGitCommit returns Git commit SHA-1 from +// global GitCommit variable. If GitCommit is +// unset this calls Git directly. 
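Version, VersionMeta and GitCommit are meant to be injected at build time; the usual Go approach is -ldflags "-X ..." (an assumption here, the exact flags live in buildscripts/ which is outside this hunk). A tiny consumer of the package:

package main

import (
	"fmt"

	"github.com/openebs/zfs-localpv/pkg/version"
)

func main() {
	// Assumed build invocation (see buildscripts/build.sh for the real flags):
	//   go build -ldflags "-X github.com/openebs/zfs-localpv/pkg/version.Version=0.1.0"
	fmt.Println("version:", version.Current()+version.GetBuildMeta())
}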
+func GetGitCommit() string { + if GitCommit != "" { + return GitCommit + } + + cmd := exec.Command("git", "rev-parse", "--verify", "HEAD") + output, err := cmd.Output() + if err != nil { + logrus.Errorf("failed to get git commit: %s", err.Error()) + return "" + } + + return strings.TrimSpace(string(output)) +} + +// Verbose returns version details with git +// commit info +func Verbose() string { + return strings.Join([]string{Get(), GetGitCommit()[0:7]}, "-") +} diff --git a/pkg/zfs/mount.go b/pkg/zfs/mount.go new file mode 100644 index 000000000..20522693b --- /dev/null +++ b/pkg/zfs/mount.go @@ -0,0 +1,145 @@ +package zfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/kubernetes/pkg/util/mount" +) + +// FormatAndMountZvol formats and mounts the created volume to the desired mount path +func FormatAndMountZvol(devicePath string, mountInfo *apis.MountInfo) error { + mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()} + + err := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions) + if err != nil { + logrus.Errorf( + "zfspv: failed to mount volume %s [%s] to %s, error %v", + devicePath, mountInfo.FSType, mountInfo.MountPath, err, + ) + return err + } + + logrus.Infof("created zvol %v and mounted %v fs %v", devicePath, mountInfo.MountPath, mountInfo.FSType) + return nil +} + +// UmountVolume unmounts the volume and the corresponding mount path is removed +func UmountVolume(vol *apis.ZFSVolume, targetPath string, +) error { + mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()} + + _, _, err := mount.GetDeviceNameFromMount(mounter, targetPath) + if err != nil { + logrus.Errorf( + "zfspv umount volume: failed to get device from mnt: %s\nError: %v", + targetPath, err, + ) + return err + } + + if pathExists, pathErr := mount.PathExists(targetPath); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + logrus.Warningf( + "Warning: Unmount skipped because path does not exist: %v", + targetPath, + ) + return nil + } + + if err = mounter.Unmount(targetPath); err != nil { + logrus.Errorf( + "zfspv umount volume: failed to unmount: %s\nError: %v", + targetPath, err, + ) + return err + } + + if err := os.RemoveAll(targetPath); err != nil { + logrus.Errorf("zfspv: failed to remove mount path Error: %v", err) + return err + } + + logrus.Infof("umount done path %v", targetPath) + + return nil +} + +// GetMounts gets mountpoints for the specified volume +func GetMounts(devicepath string) ([]string, error) { + + var ( + currentMounts []string + err error + mountList []mount.MountPoint + ) + + dev, err := filepath.EvalSymlinks(devicepath) + if err != nil { + return nil, err + } + mounter := mount.New("") + // Get list of mounted paths present with the node + if mountList, err = mounter.List(); err != nil { + return nil, err + } + for _, mntInfo := range mountList { + if mntInfo.Device == dev { + currentMounts = append(currentMounts, mntInfo.Path) + } + } + return currentMounts, nil +} + +// CreateAndMountZvol creates the zfs Volume +// and mounts the disk to the specified path +func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error { + if len(mount.MountPath) == 0 { + return status.Error(codes.InvalidArgument, "mount path missing in 
request") + } + + if len(vol.Spec.OwnerNodeID) > 0 && + vol.Spec.OwnerNodeID != NodeID { + return status.Error(codes.Internal, "volume is owned by different node") + } + + devicePath, err := createZvol(vol) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = UpdateZvolInfo(vol) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + /* + * This check is the famous *Wall Of North* + * It will not let the volume to be mounted + * at more than two places. The volume should + * be unmounted before proceeding to the mount + * operation. + */ + currentMounts, err := GetMounts(devicePath) + if err != nil { + return err + } else if len(currentMounts) >= 1 { + logrus.Errorf( + "can not mount, more than one mounts for volume:%s dev %s mounts: %v", + vol.Name, devicePath, currentMounts, + ) + return status.Error(codes.Internal, "device already mounted") + } + err = FormatAndMountZvol(devicePath, mount) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + return err +} diff --git a/pkg/zfs/volume.go b/pkg/zfs/volume.go new file mode 100644 index 000000000..734985a5b --- /dev/null +++ b/pkg/zfs/volume.go @@ -0,0 +1,138 @@ +// Copyright © 2019 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zfs + +import ( + "github.com/Sirupsen/logrus" + "os" + + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" + "github.com/openebs/zfs-localpv/pkg/builder" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // OpenEBSNamespace is the environment variable to get openebs namespace + // + // This environment variable is set via kubernetes downward API + OpenEBSNamespaceKey string = "OPENEBS_NAMESPACE" + // ZFSFinalizer for the ZfsVolume CR + ZFSFinalizer string = "zfs.openebs.io/finalizer" + // ZFSNodeKey will be used to insert Label + // in ZfsVolume CR + ZFSNodeKey string = "kubernetes.io/nodename" +) + +var ( + // OpenEBSNamespace is openebs system namespace + OpenEBSNamespace string + + // NodeID is the NodeID of the node on which the pod is present + NodeID string +) + +func init() { + + OpenEBSNamespace = os.Getenv(OpenEBSNamespaceKey) + if OpenEBSNamespace == "" { + logrus.Fatalf("OPENEBS_NAMESPACE environment variable not set") + } + NodeID = os.Getenv("OPENEBS_NODE_ID") + if NodeID == "" && os.Getenv("OPENEBS_NODE_DRIVER") != "" { + logrus.Fatalf("NodeID environment variable not set") + } +} + +// ProvisionVolume creates a ZFSVolume(zv) CR, +// watcher for zvc is present in CSI agent +func ProvisionVolume( + size int64, + vol *apis.ZFSVolume, +) error { + + _, err := builder.NewKubeclient().WithNamespace(OpenEBSNamespace).Create(vol) + if err == nil { + logrus.Infof("provisioned volume %s", vol.Name) + } + + return err +} + +// GetVolume the corresponding ZFSVolume CR +func GetVolume(volumeID string) (*apis.ZFSVolume, error) { + return builder.NewKubeclient(). + WithNamespace(OpenEBSNamespace). 
+ Get(volumeID, metav1.GetOptions{}) +} + +// DeleteVolume deletes the corresponding ZFSVol CR +func DeleteVolume(volumeID string) (err error) { + err = builder.NewKubeclient().WithNamespace(OpenEBSNamespace).Delete(volumeID) + if err == nil { + logrus.Infof("deprovisioned volume %s", volumeID) + } + + return +} + +// GetVolList fetches the current Published Volume list +func GetVolList(volumeID string) (*apis.ZFSVolumeList, error) { + listOptions := v1.ListOptions{ + LabelSelector: ZFSNodeKey + "=" + NodeID, + } + + return builder.NewKubeclient(). + WithNamespace(OpenEBSNamespace).List(listOptions) + +} + +// GetZFSVolume fetches the current Published csi Volume +func GetZFSVolume(volumeID string) (*apis.ZFSVolume, error) { + getOptions := metav1.GetOptions{} + vol, err := builder.NewKubeclient(). + WithNamespace(OpenEBSNamespace).Get(volumeID, getOptions) + return vol, err +} + +// UpdateZvolInfo updates ZFSVolume CR with node id and finalizer +func UpdateZvolInfo(vol *apis.ZFSVolume) error { + finalizers := []string{ZFSFinalizer} + labels := map[string]string{ZFSNodeKey: NodeID} + + if vol.Finalizers != nil { + return nil + } + + newVol, err := builder.BuildFrom(vol). + WithNodename(NodeID). + WithFinalizer(finalizers). + WithLabels(labels).Build() + + if err != nil { + return err + } + + _, err = builder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(newVol) + return err +} + +// RemoveZvolFinalizer adds finalizer to ZFSVolume CR +func RemoveZvolFinalizer(vol *apis.ZFSVolume) error { + vol.Finalizers = nil + + _, err := builder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(vol) + return err +} diff --git a/pkg/zfs/zfs_util.go b/pkg/zfs/zfs_util.go new file mode 100644 index 000000000..adff3a030 --- /dev/null +++ b/pkg/zfs/zfs_util.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package zfs + +import ( + "os" + + "github.com/Sirupsen/logrus" + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1" + "k8s.io/kubernetes/pkg/util/mount" +) + +const ( + ZFS_DEVPATH = "/dev/zvol/" +) + +func PropertyChanged(oldVol *apis.ZFSVolume, newVol *apis.ZFSVolume) bool { + return oldVol.Spec.Compression != newVol.Spec.Compression || + oldVol.Spec.Dedup != newVol.Spec.Dedup || + oldVol.Spec.Capacity != newVol.Spec.Capacity +} + +// createZvol creates the zvol and returns the corresponding diskPath +// of the volume which gets created on the node +func createZvol(vol *apis.ZFSVolume) (string, error) { + var out []byte + zvol := vol.Spec.PoolName + "/" + vol.Name + devicePath := ZFS_DEVPATH + zvol + + if _, err := os.Stat(devicePath); os.IsNotExist(err) { + if vol.Spec.ThinProvision == "yes" { + out, err = mount.NewOsExec().Run( + "zfs", "create", + "-s", + "-V", vol.Spec.Capacity, + "-b", vol.Spec.BlockSize, + "-o", "compression="+vol.Spec.Compression, + "-o", "dedup="+vol.Spec.Dedup, + zvol, + ) + } else { + out, err = mount.NewOsExec().Run( + "zfs", "create", + "-V", vol.Spec.Capacity, + "-b", vol.Spec.BlockSize, + "-o", "compression="+vol.Spec.Compression, + "-o", "dedup="+vol.Spec.Dedup, + zvol, + ) + } + + if err != nil { + logrus.Errorf( + "zfs: could not create zvol %v vol %v error: %s", zvol, vol, string(out), + ) + return "", err + } + logrus.Infof("created zvol %s", zvol) + } else if err == nil { + logrus.Infof("using existing zvol %v", zvol) + } else { + return "", err + } + + return devicePath, nil +} + +// SetZvolProp sets the zvol property +func SetZvolProp(vol *apis.ZFSVolume) error { + var out []byte + var err error + zvol := vol.Spec.PoolName + "/" + vol.Name + devicePath := ZFS_DEVPATH + zvol + + if _, err = os.Stat(devicePath); err == nil { + // TODO(pawan) need to find a way to identify + // which property has changed + out, err = mount.NewOsExec().Run( + "zfs", "set", + "volsize="+vol.Spec.Capacity, + "compression="+vol.Spec.Compression, + "dedup="+vol.Spec.Dedup, + zvol, + ) + if err != nil { + logrus.Errorf( + "zfs: could not set property on zvol %v vol %v error: %s", zvol, vol, string(out), + ) + return err + } + logrus.Infof("property set on zvol %s", zvol) + } + + return err +} + +// DestroyZvol deletes the zvol +func DestroyZvol(vol *apis.ZFSVolume) error { + var out []byte + zvol := vol.Spec.PoolName + "/" + vol.Name + devicePath := ZFS_DEVPATH + zvol + + if _, err := os.Stat(devicePath); err == nil { + out, err = mount.NewOsExec().Run( + "zfs", "destroy", + "-R", + zvol, + ) + if err != nil { + logrus.Errorf( + "zfs: could not destroy zvol %v vol %v error: %s", zvol, vol, string(out), + ) + return err + } + logrus.Infof("destroyed zvol %s", zvol) + } + + return nil +}
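
The response builders in pkg/response/create.go and delete.go follow a small fluent pattern that a CSI CreateVolume/DeleteVolume handler can chain and Build(). A minimal usage sketch, assuming the package is imported by its directory path and aliased (the volume name, capacity and pool name below are illustrative only); note that the doc comment above WithCapacity in the patch still reads "WithName", a copy/paste slip:

package main

import (
	"fmt"

	// Import path assumed from the patch's directory layout; the package
	// itself is declared as v1alpha1, so an alias keeps call sites readable.
	response "github.com/openebs/zfs-localpv/pkg/response"
)

func main() {
	// Build a CreateVolumeResponse the way a CSI CreateVolume handler might.
	resp := response.NewCreateVolumeResponseBuilder().
		WithName("pvc-3f86f8d8").             // becomes csi.Volume.VolumeId
		WithCapacity(4 * 1024 * 1024 * 1024). // CapacityBytes
		WithContext(map[string]string{        // echoed back as VolumeContext
			"poolname": "zfspv-pool",
		}).
		Build()

	fmt.Println(resp.Volume.VolumeId, resp.Volume.CapacityBytes)

	// DeleteVolumeResponse carries no fields, so its builder is trivial.
	_ = response.NewDeleteVolumeResponseBuilder().Build()
}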
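
pkg/version/version.go falls back to reading the VERSION and BUILDMETA files under $GOPATH when the linker did not inject Version/VersionMeta. The patch builds the path with filepath.Join(os.Getenv("GOPATH") + versionFile), which concatenates first and only lets Join clean the result; passing the two parts as separate arguments is the conventional form and resolves to the same location. A small self-contained sketch of the fallback (the constant mirrors the patch):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

// versionFile mirrors the constant in the patch; it is resolved
// relative to GOPATH at runtime.
const versionFile = "/src/github.com/openebs/zfs-localpv/VERSION"

// readVersion returns the trimmed contents of the VERSION file,
// or "" if the file cannot be read.
func readVersion() string {
	// Passing the parts as separate arguments lets filepath.Join
	// insert the separator and clean the result.
	path := filepath.Join(os.Getenv("GOPATH"), versionFile)
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(b))
}

func main() {
	fmt.Println("version:", readVersion())
}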
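
GetMounts resolves the /dev/zvol symlink and walks the node's mount table, and CreateAndMountZvol refuses to proceed if any mountpoint is found (the "Wall Of North" comment speaks of more than two places, but the len(currentMounts) >= 1 check rejects any existing mount). A standalone sketch of the same device-to-mountpoint lookup using only the standard library and /proc/mounts, which is what the kubernetes mount helper reads underneath (device and pool names are illustrative):

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// mountpointsForDevice resolves the device symlink (e.g. /dev/zvol/pool/vol)
// and returns every mountpoint in /proc/mounts backed by that device.
func mountpointsForDevice(devicePath string) ([]string, error) {
	dev, err := filepath.EvalSymlinks(devicePath)
	if err != nil {
		return nil, err
	}

	f, err := os.Open("/proc/mounts")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var mounts []string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// /proc/mounts fields: device mountpoint fstype options dump pass
		fields := strings.Fields(scanner.Text())
		if len(fields) >= 2 && fields[0] == dev {
			mounts = append(mounts, fields[1])
		}
	}
	return mounts, scanner.Err()
}

func main() {
	// Device path is illustrative; it assumes a pool named "zfspv-pool".
	mounts, err := mountpointsForDevice("/dev/zvol/zfspv-pool/pvc-3f86f8d8")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if len(mounts) >= 1 {
		fmt.Println("refusing to mount, already mounted at:", mounts)
		return
	}
	fmt.Println("safe to format and mount")
}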
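
UmountVolume is deliberately idempotent: a target path that no longer exists is treated as already unmounted rather than as an error, which matters because NodeUnpublishVolume can be retried by the kubelet. A hedged sketch of the same pattern with syscall.Unmount standing in for the kubernetes mounter (target path illustrative):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// unmountIfPresent unmounts targetPath if it exists and then removes the
// directory. A missing path is not an error, so retries are harmless.
func unmountIfPresent(targetPath string) error {
	if _, err := os.Stat(targetPath); os.IsNotExist(err) {
		// Nothing to do; a retried NodeUnpublishVolume lands here.
		return nil
	} else if err != nil {
		return err
	}

	if err := syscall.Unmount(targetPath, 0); err != nil {
		return fmt.Errorf("unmount %s: %v", targetPath, err)
	}
	return os.RemoveAll(targetPath)
}

func main() {
	// Target path is illustrative.
	if err := unmountIfPresent("/var/lib/kubelet/pods/demo/volumes/pvc-3f86f8d8"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}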
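
On the node side the agent stamps every ZFSVolume CR with the zfs.openebs.io/finalizer finalizer and a kubernetes.io/nodename label, and GetVolList later selects only this node's CRs through that label. UpdateZvolInfo skips the update when a finalizer is already present, so retries are harmless (its counterpart's doc comment says "adds finalizer", but RemoveZvolFinalizer actually clears the list). A sketch of that stamping flow against the patch's builder package, with error handling trimmed and the volume name illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/openebs/zfs-localpv/pkg/builder"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	zfsFinalizer = "zfs.openebs.io/finalizer"
	zfsNodeKey   = "kubernetes.io/nodename"
)

func main() {
	ns := os.Getenv("OPENEBS_NAMESPACE")
	nodeID := os.Getenv("OPENEBS_NODE_ID")

	// Fetch the CR that the controller created for this volume.
	vol, err := builder.NewKubeclient().
		WithNamespace(ns).
		Get("pvc-3f86f8d8", metav1.GetOptions{}) // volume name is illustrative
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	// Skip the update if a previous (retried) call already stamped the CR;
	// this is the same guard UpdateZvolInfo uses.
	if vol.Finalizers != nil {
		return
	}

	newVol, err := builder.BuildFrom(vol).
		WithNodename(nodeID).
		WithFinalizer([]string{zfsFinalizer}).
		WithLabels(map[string]string{zfsNodeKey: nodeID}).
		Build()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	if _, err := builder.NewKubeclient().WithNamespace(ns).Update(newVol); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}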
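
createZvol shells out to the zfs CLI, and the only difference between thin and thick provisioning is the sparse (-s) flag; the resulting block device is expected to appear under /dev/zvol/<pool>/<volume>, which also doubles as the idempotency check. A standalone sketch with os/exec (pool name, size and properties are illustrative values, not defaults from the patch):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// zfsCreateArgs builds the argument list the way createZvol does:
// thin provisioning only adds the sparse (-s) flag.
func zfsCreateArgs(zvol, size, blockSize, compression, dedup string, thin bool) []string {
	args := []string{"create"}
	if thin {
		args = append(args, "-s")
	}
	args = append(args,
		"-V", size,
		"-b", blockSize,
		"-o", "compression="+compression,
		"-o", "dedup="+dedup,
		zvol,
	)
	return args
}

func main() {
	// Values below are illustrative; the real ones come from the ZFSVolume CR.
	zvol := "zfspv-pool/pvc-3f86f8d8"
	args := zfsCreateArgs(zvol, "4G", "4k", "off", "off", true)

	// Skip creation if the device node already exists (idempotent retries).
	if _, err := os.Stat("/dev/zvol/" + zvol); err == nil {
		fmt.Println("zvol already exists")
		return
	}

	out, err := exec.Command("zfs", args...).CombinedOutput()
	if err != nil {
		fmt.Fprintf(os.Stderr, "zfs create failed: %v: %s\n", err, out)
		return
	}
	fmt.Println("created", zvol)
}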
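
SetZvolProp reapplies volsize, compression and dedup in a single zfs set call whenever the watcher sees a property change (the TODO in the patch notes it cannot yet tell which property changed), and DestroyZvol passes -R so dependent snapshots and clones are destroyed along with the zvol. A small sketch of both invocations, mirroring the patch's command lines (dataset name and property values are illustrative):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// run executes a zfs subcommand and returns its combined output.
func run(args ...string) ([]byte, error) {
	return exec.Command("zfs", args...).CombinedOutput()
}

func main() {
	zvol := "zfspv-pool/pvc-3f86f8d8" // illustrative dataset name

	// Reapply the mutable properties in one call, as SetZvolProp does.
	if out, err := run("set", "volsize=8G", "compression=on", "dedup=off", zvol); err != nil {
		fmt.Fprintf(os.Stderr, "zfs set failed: %v: %s\n", err, out)
		return
	}

	// -R recursively destroys dependents (clones, snapshots) of the zvol.
	if out, err := run("destroy", "-R", zvol); err != nil {
		fmt.Fprintf(os.Stderr, "zfs destroy failed: %v: %s\n", err, out)
		return
	}
	fmt.Println("zvol resized and then destroyed (demo only)")
}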