From b3c86190b38c29346471166b33a25588996502a2 Mon Sep 17 00:00:00 2001
From: Michelle Au
Date: Wed, 14 Nov 2018 11:11:47 -0800
Subject: [PATCH 1/3] Update csi/v0 import to csi/

---
 pkg/connection/connection.go      | 6 +++---
 pkg/connection/connection_test.go | 6 +++---
 pkg/controller/framework_test.go  | 2 +-
 pkg/controller/util.go            | 2 +-
 pkg/controller/util_test.go       | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/pkg/connection/connection.go b/pkg/connection/connection.go
index 2cd13b1d2..0c0d02bf9 100644
--- a/pkg/connection/connection.go
+++ b/pkg/connection/connection.go
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/golang/glog"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -215,8 +215,8 @@ func (c *csiConnection) Detach(ctx context.Context, volumeID string, nodeID stri
 	client := csi.NewControllerClient(c.conn)
 
 	req := csi.ControllerUnpublishVolumeRequest{
-		VolumeId: volumeID,
-		NodeId:   nodeID,
+		VolumeId:                   volumeID,
+		NodeId:                     nodeID,
 		ControllerUnpublishSecrets: secrets,
 	}
 
diff --git a/pkg/connection/connection_test.go b/pkg/connection/connection_test.go
index 6d4b204e9..98bfa0adf 100644
--- a/pkg/connection/connection_test.go
+++ b/pkg/connection/connection_test.go
@@ -22,7 +22,7 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/golang/mock/gomock"
 	"github.com/golang/protobuf/proto"
 	"github.com/kubernetes-csi/csi-test/driver"
@@ -538,8 +538,8 @@ func TestDetachAttach(t *testing.T) {
 	}
 
 	secretsRequest := &csi.ControllerUnpublishVolumeRequest{
-		VolumeId: defaultVolumeID,
-		NodeId:   defaultNodeID,
+		VolumeId:                   defaultVolumeID,
+		NodeId:                     defaultNodeID,
 		ControllerUnpublishSecrets: map[string]string{"foo": "bar"},
 	}
 
diff --git a/pkg/controller/framework_test.go b/pkg/controller/framework_test.go
index 7576ad165..e0d3e2cd3 100644
--- a/pkg/controller/framework_test.go
+++ b/pkg/controller/framework_test.go
@@ -24,7 +24,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/golang/glog"
 	"github.com/kubernetes-csi/external-attacher/pkg/connection"
diff --git a/pkg/controller/util.go b/pkg/controller/util.go
index 0bf421080..55ed6647e 100644
--- a/pkg/controller/util.go
+++ b/pkg/controller/util.go
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"regexp"
 
-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1beta1"
diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go
index 79754b707..7af9500bc 100644
--- a/pkg/controller/util_test.go
+++ b/pkg/controller/util_test.go
@@ -4,7 +4,7 @@ import (
 	"reflect"
 	"testing"
-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

From 08b5d5b3a884bcb0f89b282206947cee955434fe Mon Sep 17 00:00:00 2001
From: Michelle Au
Date: Wed, 14 Nov 2018 11:25:32 -0800
Subject: [PATCH 2/3] Update vendor csi spec and csi-test to 1.0.0-rc2

---
 Gopkg.lock                                         | 165 +-
 Gopkg.toml                                         |   8 +-
 .../spec/.gitignore                                |   1 +
 .../spec/.travis.yml                               |   2 +-
 .../container-storage-interface/spec/CCLA.pdf      | Bin 0 -> 67545
bytes .../spec/CONTRIBUTING.md | 3 + .../container-storage-interface/spec/OWNERS | 8 +- .../spec/README.md | 2 +- .../container-storage-interface/spec/VERSION | 2 +- .../spec/csi.proto | 473 ++-- .../spec/lib/go/Makefile | 31 +- .../spec/lib/go/csi/{v0 => }/csi.pb.go | 1945 +++++++++------- .../container-storage-interface/spec/spec.md | 677 +++--- .../kubernetes-csi/csi-test/.gitignore | 6 + .../kubernetes-csi/csi-test/.travis.yml | 11 +- .../kubernetes-csi/csi-test/CONTRIBUTING.md | 22 + .../kubernetes-csi/csi-test/Gopkg.lock | 68 +- .../kubernetes-csi/csi-test/Gopkg.toml | 4 +- .../kubernetes-csi/csi-test/Makefile | 14 +- .../github.com/kubernetes-csi/csi-test/OWNERS | 4 + .../kubernetes-csi/csi-test/README.md | 17 +- .../kubernetes-csi/csi-test/SECURITY_CONTACTS | 14 + .../csi-test/cmd/csi-sanity/sanity_test.go | 1 + .../kubernetes-csi/csi-test/driver/driver.go | 77 +- .../csi-test/driver/driver.mock.go | 855 ++----- .../csi-test/hack/_apitest/api_test.go | 18 + .../csi-test/hack/_embedded/embedded_test.go | 42 + .../kubernetes-csi/csi-test/hack/e2e.sh | 32 +- .../kubernetes-csi/csi-test/mock/README.md | 22 +- .../csi-test/mock/cache/SnapshotCache.go | 89 + .../kubernetes-csi/csi-test/mock/main.go | 9 +- .../csi-test/mock/mocksecret.yaml | 4 + .../csi-test/mock/service/controller.go | 291 ++- .../csi-test/mock/service/identity.go | 9 +- .../csi-test/mock/service/node.go | 68 +- .../csi-test/mock/service/service.go | 60 +- .../csi-test/pkg/sanity/README.md | 45 +- .../csi-test/pkg/sanity/cleanup.go | 134 ++ .../csi-test/pkg/sanity/controller.go | 2011 ++++++++++++----- .../csi-test/pkg/sanity/identity.go | 114 +- .../csi-test/pkg/sanity/node.go | 820 ++++--- .../csi-test/pkg/sanity/sanity.go | 132 +- .../csi-test/pkg/sanity/tests.go | 56 + .../kubernetes-csi/csi-test/test/co_test.go | 91 +- .../csi-test/test/driver_test.go | 2 +- .../csi-test/utils/safegoroutinetester.go | 2 +- 46 files changed, 5028 insertions(+), 3433 deletions(-) create mode 100644 vendor/github.com/container-storage-interface/spec/CCLA.pdf rename vendor/github.com/container-storage-interface/spec/lib/go/csi/{v0 => }/csi.pb.go (69%) create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/OWNERS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go diff --git a/Gopkg.lock b/Gopkg.lock index c983df7b2..35d295a87 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,66 +2,51 @@ [[projects]] - digest = "1:cf4f5171128e62b46299b0a7cd79543f50e62f483d2ca9364e4957c7bbee7a38" name = "github.com/container-storage-interface/spec" - packages = ["lib/go/csi/v0"] - pruneopts = "" - revision = "2178fdeea87f1150a17a63252eee28d4d8141f72" - version = "v0.3.0" + packages = ["lib/go/csi"] + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" [[projects]] - digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" name = "github.com/davecgh/go-spew" packages = ["spew"] - pruneopts = "" revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" [[projects]] 
- digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22" name = "github.com/ghodss/yaml" packages = ["."] - pruneopts = "" revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" [[projects]] - digest = "1:0a3f6a0c68ab8f3d455f8892295503b179e571b7fefe47cc6c556405d1f83411" name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys", + "sortkeys" ] - pruneopts = "" revision = "1adfc126b41513cc696b209667c8656ea7aac67c" version = "v1.0.0" [[projects]] branch = "master" - digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a" name = "github.com/golang/glog" packages = ["."] - pruneopts = "" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] branch = "master" - digest = "1:b7677b91b9250563c6851dd5f2d8083972188bfe4f8fb7b61489a2f832f19b11" name = "github.com/golang/groupcache" packages = ["lru"] - pruneopts = "" revision = "66deaeb636dff1ac7d938ce666d090556056a4b0" [[projects]] - digest = "1:73a7106c799f98af4f3da7552906efc6a2570329f4cd2d2f5fb8f9d6c053ff2f" name = "github.com/golang/mock" packages = ["gomock"] - pruneopts = "" revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" version = "v1.1.1" [[projects]] - digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" name = "github.com/golang/protobuf" packages = [ "proto", @@ -70,151 +55,119 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers", + "ptypes/wrappers" ] - pruneopts = "" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] branch = "master" - digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6" name = "github.com/google/btree" packages = ["."] - pruneopts = "" revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" [[projects]] branch = "master" - digest = "1:754f77e9c839b24778a4b64422236d38515301d2baeb63113aa3edc42e6af692" name = "github.com/google/gofuzz" packages = ["."] - pruneopts = "" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] - digest = "1:2a131706ff80636629ab6373f2944569b8252ecc018cda8040931b05d32e3c16" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", "compiler", - "extensions", + "extensions" ] - pruneopts = "" revision = "ee43cbb60db7bd22502942cccbc39059117352ab" version = "v0.1.0" [[projects]] branch = "master" - digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8" name = "github.com/gregjones/httpcache" packages = [ ".", - "diskcache", + "diskcache" ] - pruneopts = "" revision = "9cad4c3443a7200dd6400aef47183728de563a38" [[projects]] branch = "master" - digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94" name = "github.com/hashicorp/golang-lru" packages = [ ".", - "simplelru", + "simplelru" ] - pruneopts = "" revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" [[projects]] - digest = "1:23bc0b496ba341c6e3ba24d6358ff4a40a704d9eb5f9a3bd8e8fbd57ad869013" name = "github.com/imdario/mergo" packages = ["."] - pruneopts = "" revision = "163f41321a19dd09362d4c63cc2489db2015f1f4" version = "0.3.2" [[projects]] - digest = "1:b79fc583e4dc7055ed86742e22164ac41bf8c0940722dbcb600f1a3ace1a8cb5" name = "github.com/json-iterator/go" packages = ["."] - pruneopts = "" revision = "1624edc4454b8682399def8740d46db5e4362ba4" version = "v1.1.5" [[projects]] - branch = "master" - digest = "1:81b4369f8c84cd71286978502e39bddb351aef515e88e0d566392efa71e1af8b" name = "github.com/kubernetes-csi/csi-test" packages = [ "driver", - "utils", + "utils" ] - pruneopts = "" - revision 
= "1bf94ed5c3afa2db7d3117f206f1b00249764790" + revision = "42947e04c4a0d2087448841a1dc3ccb20fb903b1" + version = "v1.0.0-rc2" [[projects]] branch = "master" - digest = "1:3162e91e0a20faee7756465f3816ecf389eb3d52850dff8218ca56a1f7060880" name = "github.com/kubernetes-csi/kubernetes-csi-migration-library" packages = [ ".", - "plugins", + "plugins" ] - pruneopts = "" revision = "edcf4b4169dcecadd06e071c4801e0373f14d7a2" [[projects]] - digest = "1:76a22f13ffa6d5d0b91beecdcec5c7651a42d3c5fcc12757e578808826fe4b0a" name = "github.com/modern-go/concurrent" packages = ["."] - pruneopts = "" revision = "938152ca6a933f501bb238954eebd3cbcbf489ff" version = "1.0.2" [[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" name = "github.com/modern-go/reflect2" packages = ["."] - pruneopts = "" revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" [[projects]] branch = "master" - digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc" name = "github.com/petar/GoLLRB" packages = ["llrb"] - pruneopts = "" revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" [[projects]] - digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f" name = "github.com/peterbourgon/diskv" packages = ["."] - pruneopts = "" revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" [[projects]] - digest = "1:261bc565833ef4f02121450d74eb88d5ae4bd74bfe5d0e862cddb8550ec35000" name = "github.com/spf13/pflag" packages = ["."] - pruneopts = "" revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" version = "v1.0.0" [[projects]] branch = "master" - digest = "1:79b763a59bc081a752605854f75ac04d4b8fba22bab9bbb11689efd2de255864" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - pruneopts = "" revision = "91a49db82a88618983a78a06c1cbd4e00ab749ab" [[projects]] branch = "master" - digest = "1:4a65e28058fde372f1febbf1bca01ee4aed7472569fd1bc81db9e91bf105f7c8" name = "golang.org/x/net" packages = [ "context", @@ -224,35 +177,29 @@ "idna", "internal/timeseries", "lex/httplex", - "trace", + "trace" ] - pruneopts = "" revision = "22ae77b79946ea320088417e4d50825671d82d57" [[projects]] branch = "master" - digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734" name = "golang.org/x/oauth2" packages = [ ".", - "internal", + "internal" ] - pruneopts = "" revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" [[projects]] branch = "master" - digest = "1:0a0c73aced706c77f4f128971976b0ee94db7bdcc95b6088bda9e72594598634" name = "golang.org/x/sys" packages = [ "unix", - "windows", + "windows" ] - pruneopts = "" revision = "dd2ff4accc098aceecb86b36eaa7829b2a17b1c9" [[projects]] - digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ "collate", @@ -268,22 +215,18 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable", + "unicode/rangetable" ] - pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" - digest = "1:8d6915fbd16d945a7e80b46b78fc75f0fadf7d30eb0a90badf36471b23bcd94f" name = "golang.org/x/time" packages = ["rate"] - pruneopts = "" revision = "26559e0f760e39c24d730d3224364aef164ee23f" [[projects]] - digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" packages = [ "internal", @@ -292,22 +235,18 @@ "internal/log", "internal/remote_api", "internal/urlfetch", - "urlfetch", + "urlfetch" ] - pruneopts = "" revision 
= "b1f26356af11148e710935ed1ac8a7f5702c7612" version = "v1.1.0" [[projects]] branch = "master" - digest = "1:02b227168a215a14f7f16af45ca649b7c1efc33919ce27a03996dfb54dcf556c" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - pruneopts = "" revision = "2c5e7ac708aaa719366570dd82bda44541ca2a63" [[projects]] - digest = "1:d2dc833c73202298c92b63a7e180e2b007b5a3c3c763e3b9fe1da249b5c7f5b9" name = "google.golang.org/grpc" packages = [ ".", @@ -334,30 +273,24 @@ "stats", "status", "tap", - "transport", + "transport" ] - pruneopts = "" revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655" version = "v1.10.0" [[projects]] - digest = "1:e5d1fb981765b6f7513f793a3fcaac7158408cca77f75f7311ac82cc88e9c445" name = "gopkg.in/inf.v0" packages = ["."] - pruneopts = "" revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" version = "v0.9.0" [[projects]] - digest = "1:5fe876313b07628905b2181e537faabe45032cb9c79c01b49b51c25a0a40040d" name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "" revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" version = "v2.1.1" [[projects]] - digest = "1:5f076f6f9c3ac4f2b99d79dc7974eabd3f51be35254aa0d8c4cf920fdb9c7ff8" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -390,14 +323,12 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1", + "storage/v1beta1" ] - pruneopts = "" revision = "357ec6384fa7e10d6ea160d2299a98ddfdc3ab3c" version = "kubernetes-1.12.1" [[projects]] - digest = "1:7aa037a4df5432be2820d164f378d7c22335e5cbba124e90e42114757ebd11ac" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -441,14 +372,12 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect", + "third_party/forked/golang/reflect" ] - pruneopts = "" revision = "6dd46049f39503a1fc8d65de4bd566829e95faff" version = "kubernetes-1.12.0" [[projects]] - digest = "1:5d4153d12c3aed2c90a94262520d2498d5afa4d692554af55e65a7c5af0bc399" name = "k8s.io/client-go" packages = [ "discovery", @@ -617,15 +546,13 @@ "util/homedir", "util/integer", "util/retry", - "util/workqueue", + "util/workqueue" ] - pruneopts = "" revision = "1638f8970cefaa404ff3a62950f88b08292b2696" version = "kubernetes-1.12.0" [[projects]] branch = "master" - digest = "1:d7c38f69d229b1475d7563256062736682b35e40c347e7479c35c7c3cf3acdc4" name = "k8s.io/csi-api" packages = [ "pkg/apis/csi/v1alpha1", @@ -638,65 +565,19 @@ "pkg/client/informers/externalversions/csi", "pkg/client/informers/externalversions/csi/v1alpha1", "pkg/client/informers/externalversions/internalinterfaces", - "pkg/client/listers/csi/v1alpha1", + "pkg/client/listers/csi/v1alpha1" ] - pruneopts = "" revision = "2966180a4e54fab57c98153a33cf018cc4017ba3" [[projects]] branch = "master" - digest = "1:9a648ff9eb89673d2870c22fc011ec5db0fcff6c4e5174a650298e51be71bbf1" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] - pruneopts = "" revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/container-storage-interface/spec/lib/go/csi/v0", - "github.com/davecgh/go-spew/spew", - "github.com/golang/glog", - "github.com/golang/mock/gomock", - "github.com/golang/protobuf/proto", - "github.com/kubernetes-csi/csi-test/driver", - "github.com/kubernetes-csi/kubernetes-csi-migration-library", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/connectivity", - "google.golang.org/grpc/status", - "k8s.io/api/core/v1", - "k8s.io/api/storage/v1", 
- "k8s.io/api/storage/v1beta1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/client-go/informers", - "k8s.io/client-go/informers/core/v1", - "k8s.io/client-go/informers/storage/v1beta1", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/listers/core/v1", - "k8s.io/client-go/listers/storage/v1beta1", - "k8s.io/client-go/rest", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/leaderelection", - "k8s.io/client-go/tools/leaderelection/resourcelock", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/util/workqueue", - "k8s.io/csi-api/pkg/apis/csi/v1alpha1", - "k8s.io/csi-api/pkg/client/clientset/versioned", - "k8s.io/csi-api/pkg/client/clientset/versioned/fake", - "k8s.io/csi-api/pkg/client/informers/externalversions", - "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1", - ] + inputs-digest = "4d95364159e4085dd476659e96deee915264cf580ba3c5aed230ab7581a1f60f" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index dca3df0ea..f3e950e28 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -2,7 +2,7 @@ [[constraint]] name = "github.com/container-storage-interface/spec" - version = "~0.3.0" + version = "1.0.0-rc2" [[constraint]] name = "github.com/golang/protobuf" @@ -12,13 +12,9 @@ branch = "master" name = "github.com/golang/glog" -#[[constraint]] -# name = "github.com/golang/mock" -# version = "1.0.0" - [[constraint]] - branch = "master" name = "github.com/kubernetes-csi/csi-test" + version = "1.0.0-rc2" [[constraint]] name = "google.golang.org/grpc" diff --git a/vendor/github.com/container-storage-interface/spec/.gitignore b/vendor/github.com/container-storage-interface/spec/.gitignore index 4f7ede45c..443a2c83d 100644 --- a/vendor/github.com/container-storage-interface/spec/.gitignore +++ b/vendor/github.com/container-storage-interface/spec/.gitignore @@ -1,3 +1,4 @@ *.tmp .DS_Store .build +*.swp diff --git a/vendor/github.com/container-storage-interface/spec/.travis.yml b/vendor/github.com/container-storage-interface/spec/.travis.yml index 15b11d3a5..65d1a6ab0 100644 --- a/vendor/github.com/container-storage-interface/spec/.travis.yml +++ b/vendor/github.com/container-storage-interface/spec/.travis.yml @@ -29,7 +29,7 @@ jobs: # Lang stage: Go - stage: lang language: go - go: 1.9.5 + go: 1.10.4 go_import_path: github.com/container-storage-interface/spec install: - make -C lib/go protoc diff --git a/vendor/github.com/container-storage-interface/spec/CCLA.pdf b/vendor/github.com/container-storage-interface/spec/CCLA.pdf new file mode 100644 index 0000000000000000000000000000000000000000..08a9f2a50d5e812a6c8355960fdf5a77912761b4 GIT binary patch literal 67545 zcmcG$1$3Rek}hawX7)BSb7E#YW@ct)<``mT=9rmdW`>xVnK5P_`<#2a@0)o&J-ybO zwYRpU5|w_J^jE1qnM_Vtl#Yp>1%|9|@6X;z(RI#b-vA6NfDvG8U;)F;4PcNmwlQ@w z1F(IN6aWmO=2lL|4j-|VzLT-Av7xPzF@TpB#?i^aSl=4P4Y*&^GJ5qF`fH`G02pNs zID6PS%W>nZGb|?4BmfpnL<_9->-t*Xtr_tJY1Z4=KH{yyE?2&ZFnp2yB&T0JinBZ6 z0k7Neql9;bkAg_vyX|~|MneJ4da3Q4iucvFQ0_TjvDeG}md?PQx_Wx&?eEf|`7)b! 
z&-gA)%PjadEN#r)aJT~6Av^tUG;ThJpHCc29MT<7JV8DYJP9^|E%c7@MYS}CHFLix z+mtr1tnIODHMM9 z<#x=z6#}@2`fwifl@L37OWWrj+&ed0zC<-w&J=cd^UXd_`ZCKFh&=8$TWi~Y68>#% zzkrxyrc`C*7uGJEK~!1nm2Fq_&hOc^8*|@{&^cbY(*dKjB31gpF$Y(&(BBLdtbpY`G{%H-0o_w zSoxOa&GiX!0nDxidA$9nt--}_x2{rjJm%OQ)#KlU!=avuFa8kKnJ@egaLrLCWD{5L z-!}0!1Dj3Xf+%^KaY(x#e;liHWnS4R-?BA_mkk^|6}~gw(wi;vnu)?`2_;4=?>Kzj zxB;3x=KdDCy16=Zn`=K}d!$I0!Px<~p<@-kq^-9y14)uG!08aDVej zRF7O@%3iPF&hnIht$S)c%KQS{p)6bW%i200Vn4z3Y{4|RgnxwGa|I~NFwDy!sZaTE zSs*Qp@W`l}&|-igP=jq)^`ldRk~HQd91oVO%0%fRJW{Yr2(_fSC${;-Z~x-ZiMThH zlOb#Vs<aUmSW>Gi@lA!-h*mNgXWwD_ggV?(yontr@p* zB-NSRP?7wy~Aj3;ru7Y5m> z@vbuAu|yIhJ>(ml!npqMK117-#&z#Lnm3W``7+;i2V;QmCUT(TD4mg|0 zEyCVZ`s21dOOZ_Ba(Wf6 zoy*h}R2wRuDCdT9rL&A1`EcX6%rXC1w&OF)jUQ9<^Yo4{j*p~Og}eI`(~#x$xyCdS zbRJfF_IiA|i^z%L1~{ki_nM9C-88{V#HkdVMIxgS$u5&8eO(#|uGB(*WCzrvw%;#< z^7H+Zt99WR^~+HSH~y1_n0w{r1kcI~-k*NBQi$g-coQDav?RQFY*8mf(oWZL5C*rVHU|BT*BkL6K`yzBpO~wyhz#pKR!JKjBv!Vy$uj|Prxv-*p8$AK z8-Lt-B^cc)OwJErn_fP6Zrd>ldEwJ17`%mD;pE10?pN?0RR^0&BYf;0w_c?Q(O_< zT(=m_Py2j7l0$#G*tpH{;1jMAW+zbZU5iD#Qk)EF3_u6mKo)3V&1&LhGv$zObG1$=w%=31kiAU?4J6N-hQ^ z$`AQv$xmN!EHeAOG76%3sgxY7QTodi)nXuBJcB32-EJ_{a|!rUHp~KzGi??DQ^jSZaK3(LrhQc%)ID7 zPqmX2{+gJuOoQp-)mhAdnSFla6;snW}9M@Qx<~ixd~z1GX9TL} zL0TRwEj#&Om&uq4%JeeF3`Zah_)`B=fPzQ?g!pnw!dgl}tu9egE|b+T z)9vc)eL<2-5OwMszyr3X*#|6>)E*p^N8H}fPe-L>+W~jmsLjRa!Bl4fY+X%Re1u>R zDqFXXQAtSbd05HPl1gPd4rF;**~jvy%Z%}Z@$7Muh1mWOZo0}(|KN(jd|NjhPmx}& zycv_j_+|H`8vu*r=dNJgnr2~_c5DJWwiH6UHZW}=l(v>73XGa#g3;_%|FLgzS*!rb zYfmKRA@<0)A%&Ms9W^ALQ0v2#1M|aZEk1V5pU^epzGX4^rH1xD;Til?>AuGEE`V`T z+H~uGF+{7y2GQ{$|MeR{K6jo%!3D$K~f5m>uh6@W%FyJef& z8D=Rw($2q{7M3CZqYJ~vyQAZY*0v_f)<;<5Gzckk8ezMRF4EMbKDQDjk{2lCqNJ41 zi~=-}{kjRyvC^71RB|D7ZD$F53vFh3a=vYm$#@diuqBpX9X4MJ6eC(`l`bj)y z7z0{$EG@K!KS8(YE0N?Oh8!x*_u!(SCqrBL{n{IjZ#g7(gO&Gj!jQQ=Qvy?$2Z!Xn z6Gc3fBCXMO_$iTQcRGP6^u%}R*E>#9(l?0CxxXkekL{NZq~c_`@`p= z%H!}~()6q6={+VgX{q>IrNI33FlTIv1>ZjZbQj+tbVs2&j)+L4Hn~^@JOX~)?|v)T z1TC75U@tz^!K&Xo+*nVs2t5R#2eRhBYCtWPV>@9b@hQ~{d}@TZkEhUR%2~}PE~cX- z=XRT@c&9bje4Azs^lJ7P;lX{po!vCxDQMr=@Orn4;whH1P796baCp9JjlxTR+HLn^ z)v?{}f6qD1-6$Lx!2C4?-H(7OlZpE4NGi|QDkj}k4rh;OFQ+swHIHkPD|#i=D)dkQ z;K1;Pm98HyO_MdlF~-poA4t_R@OWM#9AL0%FlE5tVP4cO8hJ|7!cfq`>gs&Lut_ks z9hgYOLpwbRXa!a-vKBv$m&>;P*nm#i^Vmv$&?HcH?DI@M!f7amhzRx8*08fix{UDwOSobID1(? z^&L4uUsY%+zSPiE&cbg&>bRa37iV%`Y|UEVv7azCa$-aX&dReHug_*0&6RtZZFdx! zuzM)ZI-$#_w1$Cbe{;u##dvv~*BvzK3&6Il z=%aFBxAxnz_V5!9Y&dw+v^TaYB078? zBBGJ?fc!DmGDvms<4=^@6)-G7179x@VD!t$!3%n|M@+=GmG4dWWXIneH#KX!m*i7_ zYa8i>bmMGDm=-^;d%i?^&`ND&_ z$8&!3qTA4#g?tVd02LA(-92ctBP)o5kv{;O!Xw|IM{R=0j(ACPkBzho0`BkkY=tDC z4uS_kKP+*TC5Do(s(78P5}R6pnP-;qET}{^8#~TXrYos3X70AXH@B)jSmc%|r&4k! 
zzXxc)=SUycbM74CEj#FX#Jo9CzTmlfnP`=(vA%B#pnv=fTmtgDx1Nb$p+$m@vW2k= z#;By$WQ)IeWh<17+O~Qy1@>usbE`yE>18@6HOwlKD;hI`D|NV-tCN3!7`|pyo7gPn0>Vj5m-ypD9w^2?}%FN-z_{nBd-=>JWpQE z^{Zd``+}oUPE@%C^$B1pKSB?jvtA!qo4@94m17+!R$KM$J0bn8+TAt8`Y)l-oJ+XC z+l2Iw-2}11tb#X89~(CPH$?*R)FgFB+ARjKeZNJ=?v4%@4Bd$jPFa*QZc0u1m!S-+ zm;2)@$bwjD^(EEUxIUg^gcX3MR6C(c90N28N~3 z;=av?LQVwtlfGM!!q8;CalWIz6H*Y}u6|!42-YtjrkhU~xh*f&pgf<%F5AIcw9{Nt zbE!+vE_GqLJbf|W<@5b znCJ;bqM`MN5nXQOJcSlgN-JKJgf5|Iq39Ghm2bU;o-2OWxts}oS7MOULodrhadfie zZB=#)Q0_B5uDh=1tYHnqqd?}MrlN?S0$A9 z4{H+;m0u`Y$#>%`yaKfmB-39(&nGjKvqHD}y%YEbQL$+Y=~#sYZKvm~w_TU$x}B$9 zwpTJTXGb2hT*h6xCq7rxqOn$~T+CH#PU{dgU4yjhL4Yz4J2+ruYsH`Tm24s%ku2`972Ixp5_J%LSP)fbF7$Tq$#oj7P@|^y-ZYnU zV=S6mQno>2Zgd#tP)HjXvLgbDC{V{@cBkgid$91>)oy$_$4$7G?5>FCuaaBh=Lp&s zWT_CoxwRA#WQB1=WHXuaWu}JkCs$Iz$5weM3_2{&6UR|W+ZGt)t4(doePHn>K1$JA zNm;+fJdO&^;BxDxNzhpO?5=a{k1^?1uaXU06z)aB0z42JW3O(Co?kMWkUDvM;hCn0G{|Nzf6{=k-yAMzFqQ%I3@>yGMcrVo=oy4!Je^Biek2aT;$>@FL{a83;?Krx>H zP=v?un1`F%i(%9id~Gy%T1O|by&6267wMn7zS^cKJt_1W9ZadpdmVv`$B<0I^^1T& zo!4H98xP9F16=1L8gu>zI1eGNBCMNp=#6|2$=9O0ByqR&I|!Vh)cYr`>54(Eqt+gf zQz8M6_Ss?4(JvK0Q8nR1`RcO1Eo&q+Q>5^diy~Ne*1r$ErB`b4c)6W+X9?p~X1RgA zzO;lk1C_wXB<8Nk$eiILHXP0e77~@qmAGoi$tFgGNbxd`Hc(1ZIgeKgb89`b5*wYTuE5 zZOLgRvJ&-_0oQ)QiBbLSXpo?d-5Pt}9J>NEy+C&ML{+q{ouHf0`AmnevgzpqlBUB5czZrhi(Pv;T$fEiqn@O8~ z-%P2bA3j0`fg z3^J1;u${^PC+}_4%$;qIy@x)f8xPA0mduV?9NoJy;(o;>_NAO7u5U*jMv!A3y8)Fr z9Uf|y$G*%FxH3WuuWoPZXal=gn#u#}rakJHn<&?fwllUf428o+rSso154JVs z`3oI+$RD-g89=cLyPsTAxPz@xgb(`$>Mps2bLYL`JOfT_PrrOTvGoatmgc3g2(RX( z+7HBiW-=hAc~JG05TF>B()X?!Mn~@^G>~mDXlsex7PF#vAs-?kInZI$V6E{(d9{d- z2&0@*kgwIiPz(sEVd%pQ^@@v-G87Uf*HxuM%F)+37@C!@gb?8jxQ5NU1`TEC1y9l9L0cz@1Gv3&Z9S32dull9td0BiMR8 zw$ZKEWRe}j5iYAj;SNk;{1)soH+)g`?n@;4C!M>=H^0ljGn{}`VUBooSZ^f+T#3jmtyP3%| zI{5()xxva_t})+GHt9N=8E zeF}-!BwnL*gA1!io}!m;9_WyfW$V(Mmd787yhjZ(CF)xw*SAFvKv5G)>bBHR#~5U9lE^W4ff`4>f||GQtkBJy+TO{j-xX*+PgOEIcBM5MtL*&Slsdo7L_s zLwJ6_-uCBhf?VG4oN67YNURA=W?^(9@mNi#LrhBgId>DEoC_Zs{U|IB0GOO8 z)GHL~Z~G0uw%LY;TN!k-2B`U3BLDsnDC+yBFPA|}Rp@D#OC3cA(iUP$0y-AIDa*5m zLx({~y)w}hYanQX58)GYfAPsIy-JXIt*M%J9+EzV6;^iGt)I+V@Bxvv9>G_+J1nn; zB~}r+u?yCxA6Lgpmt!FN6BdY5N!hFNRnm>$Y!F$cKvqMlG%u z2g+%aaGp3d2%bL9Ib%gUibW>I0x3M>CSoI!x`EDQ%$8H`5|C6N01Cev-11xJ$w>J7 zvZ+JcfZG(KDYe}*8u~BR(M(K3POREL!$MPm$iBv=k}L`mwX>tl&R~KGUl2kF>w{wk zLvq{H8$=ZHRf{x3)#c?wC5C)V+LGF3_|2*{RpTg)Kx*@U*k8WsceMU!OQPzif!Si@ z?9k)iPj(fyw#`8({n4L!g7t)43*;9I#0Urx-ajJSzLmX-L6Qv2hz>ceK^Ej^S4yvj#@pr3)?MBa2IwJ29ILL$~x6~Q4OU8eMv`AizQrX~j$01t&p z3e8ZUm42I~BUdrB6-NbvSovkxqQ@w3mi%5yomDYO7t(YR$d1Q&ot{J57wI6%Bo6@dy#!dEjcz& zDuf$0n>0Q(HDTfsd#*b)mJ9I-IO<;4rEn_Y>&=f>&mt+}JddSHC1tALEBg#?*_Sf# z{Lp;kC%loySb>Gsi$`X4RXr2mp$7EyGk!x$ur!Q8O6d&^gAh2Y4s&L^i;{Yq%quD# zRq=u+%Nk_p)*d;A$GM1(nTav#7&56##)JU+R%tQj0JYji%K3K4+X%oYLpx3AxY&?s z#bu%U)*`}!)`|p=HE!vv+h_R4X>O(Ig@cu-uB7uinL!=;K#I>e2f5rpb#EVm(7+U? 
z9xP16uAU#CBDJ`6PLS4D5U%*w5Q9~^N0tp`V<^v z*XQO+Y?5GFlj2$i0cB;NLuEd)Uw=^06L%Tm8MA*-%G zB^4dV{@SnjtzbrKHM_uJMinX3pLM;L)n(5i+%8N61P0%&JHd6Gw~$&kO=PsI;|V-m z&0xw`qc-UBgjQ4^2{3>{Z>_grC_MKg%{bgmI2MX*r0o$RR-HUzPiSP8gqzaj3a**k zjEN+`4(}GzV<`e{xi}|_aW`&Mt45$f%(HwPVNRIl5CpszZ_=zt-;_jFm!Kiw`Gc@; zRp#i;%yu2`gNuk0!503o zAH0?nzyi8*JRa!|QC?udkriXbV7Toqu9-#wIk+VPTR*IZf80(eNPjpH9e;^u=60>6(8`)$w^`NF)f%(@7C+BxupQ?~6DL9K zE4u`KbEjpuNZ=@asKN@pS@ql5YO5EHf<^=FwCPzb!Vz&_ao}=->g_5#TbXPUELDOz zJ$?|8EMCHBwyd`5uiKK1!AMy?)zIyo@$*L2QP&CY6cy7pSzh7(Y_)J{9wItN zm1&$wJMeaYqV}X7y+lx{2&YjGn2LrKnPv9YRTeT}q6X!u-6=H~W$Ewugzlc2>JjjO z3tzn-i)yDeCQ>0zX2uP8VR52lO@{hkM})143_Y;<;GhLbpar?0scDD!XA64e(EMt^ zHS7o{KzcY-ZTa}(@{^~!YUNVGBR8pk8jBIl)yhNnCX{C#T=f(e6(*H)Cq>aacYUgb zEY4w!kBhTkqLCKUg^gK?TjZmHhULR6G3JoF`?B@2tWbxiOll?PvrseAt^=^ZY&dZ?*NgBT!YWi+lqEQ& z*9*eoYVq~efAW*eT~*R+flpt4ydIf9bALrMUguV0SOBs(#1XrLle#g*jCB4u)wz59 z-4AKH8CE&{kXFI-Vq&n7=92OI1cm8hr@|dFR`O$Cj$&_7Ut+PwAzTglky&j0kM9m9 zD0VTjK}k+M7C!!ZaYme*rj!~)x9;RHB@Qkr1iVs%sbseZmU2$!W+o_+Q!&0nM!HS(B+y)m5+4 zJr&b;L(V9dA?u3D492O*!yK@pMN&)iqaxRuLp~XR*@@Jy{FilU7tjSD{ zjzK&wYk2qWw`-+-%?RgZT372EuE$o!^BvOUa)|~I$WtH(gmMDq-B!r8640<15=Rq6 z+Ji?H2P5*Z@Wy%KBq`@3q#AHI874^~h zC+eYD$F4%Z=19J@WLGKn8mV}S>R3g4-2VRIG8d5`u|z5m3Z~PsJ}q)HF^%%TV%1j- znqoU<>eL*@O@$>I-AB^Qrb3Nu2*|08@rj;R6!+2Y1OcY~F6{SppcfUB?QJjI=tVWG zPb>QlV|%yS+z|qWxB!JbRE!JolY$>L8k7&UgINkG7Cg~&3NE|2NVffnv7;B`+=S0y zxC`o}2ps`RcwXw!$TC;Q6t74-c_8)JECkOphaStUkg{Z3&aFPobGMJpx-cKhpzydd zZM0vSAjOLF#mC=oEOxRC8 zI;TqEM3`H26phi>Mgmibgy75Re!47d}HVUforrB~vQngD_7GZj>!&ah{ZfvAVL-6)OVR?s&+mx=aTDjL@ z1vTE$QEZa1CX?#{)6r4~oHYKz2C7egId(U_bbEXz#kAqF^1`lNI(E3%$$HyfSy2k_ z#wEnl1U&R@Z7~S6TW&X&(JHI+7!OQ=QMTs5u2$s_9}p^hA79H)=`G)Kv)P@h+8CtB z=obC{@z&@;Zq16zz%4e)3&b6s|BoKjHT7Db9vtb~v`Q9i<~`S9==W)HnqqcG>Vd;W zy~h1P^jB<_e8B_jp|t9>OKk3e*pVg}PAh-v#Xj0peq8@g!7#B}NWn4vLjaEx7Fr~j ziMgH^s6LDhEP3+)B8N9v8TKh7VJ0UEi<|OBYPo9D1na4(_0saE^aWf~WhrT{X4k6# zp`n+#@#?u+Kj$g009PH~FVbQ}<7mSV1EN3IJ=6TBT12uD{Reu|#t)eL!|fwV)VDoc zWOgFF$hYzz?T!q)pG z0KGO;lH88xphPr2^w($wHGNO8eobK2IISjQxjg$&7~@z?79+jDZHXDlaHlex+tE`3 z78+d}hw>zEXN%!>O?hb92e z-;;|&RGRs=joYfQ#m95dvEyw^tH(fip@*>tCVl*7c=0z~$;dsiV&Q%MeR-dSr1}`) z-5lCV4Le{l|6I&(WSnpC@Ky<|s-hVisfi0#h^K75`Tm;v3`B$m<6atYEAU0i6OsWVr@>9_gox4PRo{n5(+f zdW$?yJ#o`)R>hxJL!=VoiqwjQ9+XoeGY61~2x`7~Q0@p+@WU9nW9&{n=F|2S=V_^8 z_iDRTZl<*jsA^x%XhODIUM1Y-n(Br?F5ewA?DwXP5`K7ighz0=msNB3^RZ8*EQlrt zHQRF-R4RG<`FI)WhJbg+7ph1R}cgqfQacqR?JUOmZH(0;4t@QC&)hBUcqp)B?e6Z9|PisfqiP((8#W4uJwJvucuP;-mt=Wx;*J1PGJTEpf#`P>}dv1;EzS- z)>@+yI_Ji~8MJuw()g3nV7pl-t@e^mhv{wFW4k@4K|AcYf0AUW8DsC#)*Z(^C7t{C z=o#Qa=F69Q>GJk@@Wk}Cq?U)DZ?#J!v*T&lLIpq-;`l|hLPjs66!(}X6)B%W-wrv< zyNx!Ha|z~GZi&uvHKlWV-a%l0TwM%7j_#cf zgjXUSJ$7&gUQAt>TbUJm6R*%7E2X1ae0rxY>SEcT<-FoRs_BVc&df^i2XmXAo=zV& z#?amU`an9d*&07&pI05W_SwvE{m~u@7XR5(XsRRm>NUWiykzj5AiWc~=ERq;eEbTm z=Q8I^IB|D4+JDc;Syx5%C?XRfGtu<08|mt>bn}|K;gMe?r|u`m&cFz;ta;a2?X+w5 zj@zI5-HLYeQh%|E-e=Ng)T`v-pm5k;_{%r{K}M<2PsrXRKUT#`0y(O6&76m87(YwGCN}?6okNV}`bm@*$Da~P( zo^c5;ZqUv(-&b5{I5t}Dj2t>1z4EfohXFWU@5LYdp-&;-rIo`UQm7$yVghVinF7yy zVjr$bP~IDJaoz=+{6_n{oRXsGbWt~-pZgwK+$lQX&I#HdC|^xq8b9^RS=yW+UW{R= zu~gO@iy6BQJ&nv;X;s|~!?5TyTwR^@h3t)`&O6W$ykoUh<)HlGUi^!@@`vp3hxWk0 z!pg?_zg&zzc#%IUxqyNgjevk8AI1M(`okgl>(U1&1z$!1U-biPLCe5M%YrZGsAuQk ziZ3fDEbu{T0se6KPp7{@F65$S)^-l|Ce~)u_~MQhrg~QYYXkU$`1ot!|40mfODfiO zhX0ZOpi%y^6y#u}XJDgeXJBJwU}6O@s?*bxeVqTX{F^F6{vYWN@8wTt4SaG6e67FJ z`rFfAAP`fB4>HFGHRCVl1)rmX$sbP4pAV@? 
[ GIT binary patch for vendor/github.com/container-storage-interface/spec/CCLA.pdf (67545 bytes) omitted ]

diff --git a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md
index 8f33951fe..e96ebc792 100644
--- a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md
+++ b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md
@@ -1,6 +1,9 @@
 # How to Contribute
 
 CSI is under [Apache 2.0](LICENSE) and accepts contributions via GitHub pull requests.
+
+Contributions require signing an individual or Corporate CLA available [here](https://github.com/container-storage-interface/spec/blob/master/CCLA.pdf) which should be signed and mailed to the [mailing list]( https://groups.google.com/forum/#!topic/container-storage-interface-community/).
+
 This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted.
 
 ## Markdown style
diff --git a/vendor/github.com/container-storage-interface/spec/OWNERS b/vendor/github.com/container-storage-interface/spec/OWNERS
index b11f91910..7225bd014 100644
--- a/vendor/github.com/container-storage-interface/spec/OWNERS
+++ b/vendor/github.com/container-storage-interface/spec/OWNERS
@@ -3,8 +3,8 @@ approvers:
 - thockin # Representing Kubernetes
 - jieyu # Representing Mesos
 - jdef # Representing Mesos
- - cpuguy83 # Representing Docker
- - mycure # Representing Docker
- - julian-hj # Representing Cloud Foundry
- - paulcwarren # Representing Cloud Foundry
+ - anusha-ragunathan # Representing Docker
+ - ddebroy # Representing Docker
+ - julian-hj # Representing Cloud Foundry
+ - paulcwarren # Representing Cloud Foundry
 reviewers:
diff --git a/vendor/github.com/container-storage-interface/spec/README.md b/vendor/github.com/container-storage-interface/spec/README.md
index d270cedda..c686e423f 100644
--- a/vendor/github.com/container-storage-interface/spec/README.md
+++ b/vendor/github.com/container-storage-interface/spec/README.md
@@ -8,6 +8,6 @@ This project contains the CSI [specification](spec.md) and [protobuf](csi.proto)
 
 ### Container Orchestrators (CO)
 
-* [Cloud Foundry](https://github.com/cloudfoundry/csi-local-volume-release)
+* [Cloud Foundry](https://github.com/cloudfoundry/csi-plugins-release/blob/master/CSI_SUPPORT.md)
 * [Kubernetes](https://kubernetes-csi.github.io/docs/)
 * [Mesos](http://mesos.apache.org/documentation/latest/csi/)
diff --git a/vendor/github.com/container-storage-interface/spec/VERSION b/vendor/github.com/container-storage-interface/spec/VERSION
index 0d91a54c7..3eefcb9dd 100644
--- a/vendor/github.com/container-storage-interface/spec/VERSION
+++ b/vendor/github.com/container-storage-interface/spec/VERSION
@@ -1 +1 @@
-0.3.0
+1.0.0
diff --git a/vendor/github.com/container-storage-interface/spec/csi.proto b/vendor/github.com/container-storage-interface/spec/csi.proto
index 22cff40ca..d240b6682 100644
--- a/vendor/github.com/container-storage-interface/spec/csi.proto
+++ 
b/vendor/github.com/container-storage-interface/spec/csi.proto @@ -1,10 +1,18 @@ // Code generated by make; DO NOT EDIT. syntax = "proto3"; -package csi.v0; +package csi.v1; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + bool csi_secret = 1059; +} service Identity { rpc GetPluginInfo(GetPluginInfoRequest) returns (GetPluginInfoResponse) {} @@ -64,20 +72,12 @@ service Node { rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) returns (NodeUnpublishVolumeResponse) {} - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - rpc NodeGetId (NodeGetIdRequest) - returns (NodeGetIdResponse) { - option deprecated = true; - } + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) returns (NodeGetCapabilitiesResponse) {} - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. rpc NodeGetInfo (NodeGetInfoRequest) returns (NodeGetInfoResponse) {} } @@ -86,13 +86,13 @@ message GetPluginInfoRequest { } message GetPluginInfoResponse { - // The name MUST follow reverse domain name notation format - // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). - // It SHOULD include the plugin's host company name and the plugin - // name, to minimize the possibility of collisions. It MUST be 63 + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), underscores (_), - // dots (.), and alphanumerics between. This field is REQUIRED. + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. string name = 1; // This field is REQUIRED. Value of this field is opaque to the CO. @@ -108,7 +108,7 @@ message GetPluginCapabilitiesRequest { message GetPluginCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated PluginCapability capabilities = 2; + repeated PluginCapability capabilities = 1; } // Specifies a capability of the plugin. @@ -119,7 +119,7 @@ message PluginCapability { // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for // the ControllerService. Plugins SHOULD provide this capability. - // In rare cases certain plugins may wish to omit the + // In rare cases certain plugins MAY wish to omit the // ControllerService entirely from their implementation, but such // SHOULD NOT be the common case. // The presence of this capability determines whether the CO will @@ -127,13 +127,13 @@ message PluginCapability { // as specific RPCs as indicated by ControllerGetCapabilities. 
CONTROLLER_SERVICE = 1; - // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this - // plugin may not be equally accessible by all nodes in the + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the // cluster. The CO MUST use the topology information returned by // CreateVolumeRequest along with the topology information // returned by NodeGetInfo to ensure that a given volume is // accessible from a given node when scheduling workloads. - ACCESSIBILITY_CONSTRAINTS = 2; + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; } Type type = 1; } @@ -174,37 +174,53 @@ message CreateVolumeRequest { // The suggested name for the storage space. This field is REQUIRED. // It serves two purposes: // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. If `CreateVolume` fails, the volume may or may not - // be provisioned. In this case, the CO may call `CreateVolume` - // again, with the same name, to ensure the volume exists. The - // Plugin should ensure that multiple `CreateVolume` calls for the - // same name do not result in more than one piece of storage - // provisioned corresponding to that name. If a Plugin is unable to - // enforce idempotency, the CO's error recovery logic could result - // in multiple (unused) volumes being provisioned. + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). // 2) Suggested name - Some storage systems allow callers to specify // an identifier by which to refer to the newly provisioned // storage. If a storage system supports this, it can optionally // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 1; // This field is OPTIONAL. This allows the CO to specify the capacity // requirement of the volume to be provisioned. If not specified, the // Plugin MAY choose an implementation-defined capacity range. If // specified it MUST always be honored, even when creating volumes - // from a source; which may force some backends to internally extend + // from a source; which MAY force some backends to internally extend // the volume after creating it. - CapacityRange capacity_range = 2; - // The capabilities that the provisioned volume MUST have: the Plugin - // MUST provision a volume that could satisfy ALL of the - // capabilities specified in this list. 
The Plugin MUST assume that - // the CO MAY use the provisioned volume later with ANY of the - // capabilities specified in this list. This also enables the CO to do - // early validation: if ANY of the specified volume capabilities are - // not supported by the Plugin, the call SHALL fail. This field is - // REQUIRED. + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. repeated VolumeCapability volume_capabilities = 3; // Plugin specific parameters passed in as opaque key-value pairs. @@ -215,7 +231,7 @@ message CreateVolumeRequest { // Secrets required by plugin to complete volume creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_create_secrets = 5; + map secrets = 5 [(csi_secret) = true]; // If specified, the new volume will be pre-populated with data from // this source. This field is OPTIONAL. @@ -228,10 +244,10 @@ message CreateVolumeRequest { // topological accessibility information supported by the SP. // This field is OPTIONAL. // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. TopologyRequirement accessibility_requirements = 7; } @@ -243,11 +259,19 @@ message VolumeContentSource { // This field is REQUIRED. Plugin is REQUIRED to support creating // volume from snapshot if it supports the capability // CREATE_DELETE_SNAPSHOT. - string id = 1; + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; } oneof type { SnapshotSource snapshot = 1; + VolumeSource volume = 2; } } @@ -334,7 +358,7 @@ message CapacityRange { int64 limit_bytes = 2; } -// The information about a provisioned volume. +// Information about a specific volume. message Volume { // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is @@ -342,20 +366,32 @@ message Volume { // The value of this field MUST NOT be negative. int64 capacity_bytes = 1; - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. 
- string id = 2; - - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - map attributes = 3; + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; // If specified, indicates that the volume is not empty and is // pre-populated with data from the specified source. @@ -365,7 +401,7 @@ message Volume { // Specifies where (regions, zones, racks, etc.) the provisioned // volume is accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // An SP MAY specify multiple topologies to indicate the volume is // accessible from multiple locations. // COs MAY use this information along with the topology information @@ -373,7 +409,7 @@ message Volume { // from a given node when scheduling workloads. // This field is OPTIONAL. If it is not specified, the CO MAY assume // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available + // MAY schedule workloads referencing the volume on any available // node. // // Example 1: @@ -527,15 +563,18 @@ message TopologyRequirement { // A topological segment is a specific instance of a topological domain, // like "zone3", "rack3", etc. // For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated +// Valid keys have two segments: an OPTIONAL prefix and name, separated // by a slash (/), for example: "com.company.example/zone". -// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". 
-// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). // The key prefix SHOULD include the plugin's host company name and/or // the plugin name, to minimize the possibility of collisions with keys // from other plugins. @@ -558,7 +597,7 @@ message DeleteVolumeRequest { // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_delete_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteVolumeResponse { @@ -573,31 +612,44 @@ message ControllerPublishVolumeRequest { // field to match the node ID returned by `NodeGetInfo`. string node_id = 2; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 3; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. bool readonly = 4; // Secrets required by plugin to complete controller publish volume // request. This field is OPTIONAL. Refer to the // `Secrets Requirements` section on how to use this field. - map controller_publish_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message ControllerPublishVolumeResponse { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. - map publish_info = 1; + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; } message ControllerUnpublishVolumeRequest { // The ID of the volume. This field is REQUIRED. @@ -615,7 +667,7 @@ message ControllerUnpublishVolumeRequest { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_unpublish_secrets = 3; + map secrets = 3 [(csi_secret) = true]; } message ControllerUnpublishVolumeResponse { @@ -625,30 +677,52 @@ message ValidateVolumeCapabilitiesRequest { // The ID of the volume to check. This field is REQUIRED. string volume_id = 1; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities + // call SHALL return "confirmed" only if all the volume capabilities // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; + repeated VolumeCapability volume_capabilities = 3; - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - map volume_attributes = 3; + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - repeated Topology accessible_topology = 4; + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; } message ValidateVolumeCapabilitiesResponse { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. - bool supported = 1; + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; + + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; - // Message to the CO if `supported` above is false. This field is + // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. 
string message = 2; @@ -705,7 +779,7 @@ message GetCapacityRequest { // `accessible_topology`. This is the same as the // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. Topology accessible_topology = 3; } @@ -725,7 +799,7 @@ message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated ControllerServiceCapability capabilities = 2; + repeated ControllerServiceCapability capabilities = 1; } // Specifies a capability of the controller service. @@ -742,11 +816,15 @@ message ControllerServiceCapability { // CREATE_DELETE_SNAPSHOT MUST support creating volume from // snapshot. CREATE_DELETE_SNAPSHOT = 5; - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; } Type type = 1; @@ -764,12 +842,16 @@ message CreateSnapshotRequest { // The suggested name for the snapshot. This field is REQUIRED for // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 2; // Secrets required by plugin to complete snapshot creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map create_snapshot_secrets = 3; + map secrets = 3 [(csi_secret) = true]; // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. The Plugin is responsible for parsing and @@ -791,7 +873,7 @@ message CreateSnapshotResponse { Snapshot snapshot = 1; } -// The information about a provisioned snapshot. +// Information about a specific snapshot. message Snapshot { // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to @@ -802,11 +884,16 @@ message Snapshot { // zero means it is unspecified. int64 size_bytes = 1; - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - string id = 2; + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. 
+ string snapshot_id = 2; // Identity information for the source volume. Note that creating a // snapshot from a snapshot is not supported here so the source has to @@ -814,43 +901,13 @@ message Snapshot { string source_volume_id = 3; // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - int64 created_at = 4; - - // The status of a snapshot. - SnapshotStatus status = 5; -} + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; -// The status of a snapshot. -message SnapshotStatus { - enum Type { - UNKNOWN = 0; - // A snapshot is ready for use. - READY = 1; - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - UPLOADING = 2; - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - ERROR_UPLOADING = 3; - } - // This field is REQUIRED. - Type type = 1; - - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - string details = 2; + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; } message DeleteSnapshotRequest { // The ID of the snapshot to be deleted. @@ -860,7 +917,7 @@ message DeleteSnapshotRequest { // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map delete_snapshot_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteSnapshotResponse {} @@ -890,7 +947,8 @@ message ListSnapshotsRequest { // Identity information for a specific snapshot. This field is // OPTIONAL. It can be used to list only a specific snapshot. // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. + // and will not block if the snapshot is being processed after + // it is cut. string snapshot_id = 4; } @@ -918,28 +976,33 @@ message NodeStageVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the volume will be published. It MUST be an + // The path to which the volume MAY be staged. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. + // `staging_target_path` per volume. 
The CO SHALL ensure that the + // process serving the request has `read` and `write` permission to + // the path, and is able to create files or directories at the path + // if it does not exist. // This is a REQUIRED field. string staging_target_path = 3; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 4; // Secrets required by plugin to complete node stage volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_stage_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message NodeStageVolumeResponse { @@ -949,7 +1012,7 @@ message NodeUnstageVolumeRequest { // The ID of the volume. This field is REQUIRED. string volume_id = 1; - // The path at which the volume was published. It MUST be an absolute + // The path at which the volume was staged. It MUST be an absolute // path in the root filesystem of the process serving this request. // This is a REQUIRED field. string staging_target_path = 2; @@ -967,9 +1030,9 @@ message NodePublishVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the device was mounted by `NodeStageVolume`. + // The path to which the volume was staged by `NodeStageVolume`. // It MUST be an absolute path in the root filesystem of the process // serving this request. // It MUST be set if the Node Plugin implements the @@ -980,28 +1043,31 @@ message NodePublishVolumeRequest { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. + // The CO SHALL ensure that the process serving the request has + // `read` and `write` permissions to the path, and is able to create + // files or directories at the path if it does not exist. // This is a REQUIRED field. string target_path = 4; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 5; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. bool readonly = 6; // Secrets required by plugin to complete node publish volume request. // This field is OPTIONAL. 
Refer to the `Secrets Requirements` // section on how to use this field. - map node_publish_secrets = 7; + map secrets = 7 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. - map volume_attributes = 8; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; } message NodePublishVolumeResponse { @@ -1020,15 +1086,43 @@ message NodeUnpublishVolumeRequest { message NodeUnpublishVolumeResponse { // Intentionally empty. } -message NodeGetIdRequest { - // Intentionally empty. -} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; -message NodeGetIdResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. // This is a REQUIRED field. - string node_id = 1; + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; } message NodeGetCapabilitiesRequest { // Intentionally empty. @@ -1046,6 +1140,10 @@ message NodeServiceCapability { enum Type { UNKNOWN = 0; STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; } Type type = 1; @@ -1060,9 +1158,14 @@ message NodeGetInfoRequest { } message NodeGetInfoResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. string node_id = 1; // Maximum number of volumes that controller can publish to the node. @@ -1075,7 +1178,7 @@ message NodeGetInfoResponse { // Specifies where (regions, zones, racks, etc.) the node is // accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. 
// COs MAY use this information along with the topology information // returned in CreateVolumeResponse to ensure that a given volume is // accessible from a given node when scheduling workloads. diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile index a7443eae0..3b1c5eaba 100644 --- a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile +++ b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile @@ -58,14 +58,14 @@ $(PROTOC): PROTOC_GEN_GO_PKG := github.com/golang/protobuf/protoc-gen-go PROTOC_GEN_GO := protoc-gen-go $(PROTOC_GEN_GO): PROTOBUF_PKG := $(dir $(PROTOC_GEN_GO_PKG)) -$(PROTOC_GEN_GO): PROTOBUF_VERSION := v1.1.0 +$(PROTOC_GEN_GO): PROTOBUF_VERSION := v1.2.0 $(PROTOC_GEN_GO): mkdir -p $(dir $(GOPATH)/src/$(PROTOBUF_PKG)) test -d $(GOPATH)/src/$(PROTOBUF_PKG)/.git || git clone https://$(PROTOBUF_PKG) $(GOPATH)/src/$(PROTOBUF_PKG) (cd $(GOPATH)/src/$(PROTOBUF_PKG) && \ (test "$$(git describe --tags | head -1)" = "$(PROTOBUF_VERSION)" || \ (git fetch && git checkout tags/$(PROTOBUF_VERSION)))) - (cd $(GOPATH)/src/$(PROTOBUF_PKG) && go get -v -d ./...) && \ + (cd $(GOPATH)/src/$(PROTOBUF_PKG) && go get -v -d $$(go list -f '{{ .ImportPath }}' ./...)) && \ go build -o "$@" $(PROTOC_GEN_GO_PKG) @@ -83,18 +83,25 @@ export PATH := $(shell pwd):$(PATH) ## BUILD ## ######################################################################## CSI_PROTO := ../../csi.proto -CSI_PKG := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\);$$/\1/p'|tr '.' '/') -CSI_GO := $(CSI_PKG)/csi.pb.go +CSI_PKG_ROOT := github.com/container-storage-interface/spec +CSI_PKG_SUB := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\).v[0-9]\+;$$/\1/p'|tr '.' '/') +CSI_BUILD := $(CSI_PKG_SUB)/.build +CSI_GO := $(CSI_PKG_SUB)/csi.pb.go CSI_A := csi.a -CSI_GO_TMP := $(CSI_PKG)/.build/csi.pb.go +CSI_GO_TMP := $(CSI_BUILD)/$(CSI_PKG_ROOT)/csi.pb.go # This recipe generates the go language bindings to a temp area. +$(CSI_GO_TMP): HERE := $(shell pwd) +$(CSI_GO_TMP): PTYPES_PKG := github.com/golang/protobuf/ptypes $(CSI_GO_TMP): GO_OUT := plugins=grpc -$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers -$(CSI_GO_TMP): INCLUDE = -I$(PROTOC_TMP_DIR)/include +$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor +$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/wrappers.proto=$(PTYPES_PKG)/wrappers +$(CSI_GO_TMP): GO_OUT := $(GO_OUT):"$(HERE)/$(CSI_BUILD)" +$(CSI_GO_TMP): INCLUDE := -I$(GOPATH)/src -I$(HERE)/$(PROTOC_TMP_DIR)/include $(CSI_GO_TMP): $(CSI_PROTO) | $(PROTOC) $(PROTOC_GEN_GO) @mkdir -p "$(@D)" - $(PROTOC) -I "$( controller_create_secrets = 5; + map secrets = 5 [(csi_secret) = true]; // If specified, the new volume will be pre-populated with data from // this source. This field is OPTIONAL. @@ -669,10 +708,10 @@ message CreateVolumeRequest { // topological accessibility information supported by the SP. // This field is OPTIONAL. // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. 
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. TopologyRequirement accessibility_requirements = 7; } @@ -684,11 +723,19 @@ message VolumeContentSource { // This field is REQUIRED. Plugin is REQUIRED to support creating // volume from snapshot if it supports the capability // CREATE_DELETE_SNAPSHOT. - string id = 1; + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; } oneof type { SnapshotSource snapshot = 1; + VolumeSource volume = 2; } } @@ -775,7 +822,7 @@ message CapacityRange { int64 limit_bytes = 2; } -// The information about a provisioned volume. +// Information about a specific volume. message Volume { // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is @@ -783,20 +830,32 @@ message Volume { // The value of this field MUST NOT be negative. int64 capacity_bytes = 1; - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. - string id = 2; - - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - map attributes = 3; + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; // If specified, indicates that the volume is not empty and is // pre-populated with data from the specified source. @@ -806,7 +865,7 @@ message Volume { // Specifies where (regions, zones, racks, etc.) the provisioned // volume is accessible from. 
// A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // An SP MAY specify multiple topologies to indicate the volume is // accessible from multiple locations. // COs MAY use this information along with the topology information @@ -814,7 +873,7 @@ message Volume { // from a given node when scheduling workloads. // This field is OPTIONAL. If it is not specified, the CO MAY assume // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available + // MAY schedule workloads referencing the volume on any available // node. // // Example 1: @@ -968,15 +1027,18 @@ message TopologyRequirement { // A topological segment is a specific instance of a topological domain, // like "zone3", "rack3", etc. // For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated +// Valid keys have two segments: an OPTIONAL prefix and name, separated // by a slash (/), for example: "com.company.example/zone". -// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". -// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). // The key prefix SHOULD include the plugin's host company name and/or // the plugin name, to minimize the possibility of collisions with keys // from other plugins. @@ -1001,18 +1063,17 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| +| Source incompatible or not supported | 3 INVALID_ARGUMENT | Besides the general cases, this code MUST also be used to indicate when plugin supporting CREATE_DELETE_VOLUME cannot create a volume from the requested source (`SnapshotSource` or `VolumeSource`). Failure MAY be caused by not supporting the source (CO SHOULD NOT have provided that source) or incompatibility between `parameters` from the source and the ones requested for the new volume. More human-readable information SHOULD be provided in the gRPC `status.message` field if the problem is the source. | On source related issues, caller MUST use different parameters, a different source, or no source at all. | +| Source does not exist | 5 NOT_FOUND | Indicates that the specified source does not exist. | Caller MUST verify that the `volume_content_source` is correct, the source is accessible, and has not been deleted before retrying with exponential back off. 
| | Volume already exists but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified volume `name` already exists but is incompatible with the specified `capacity_range`, `volume_capabilities` or `parameters`. | Caller MUST fix the arguments or use a different `name` before retrying. | | Unable to provision in `accessible_topology` | 8 RESOURCE_EXHAUSTED | Indicates that although the `accessible_topology` field is valid, a new volume can not be provisioned with the specified topology constraints. More human-readable information MAY be provided in the gRPC `status.message` field. | Caller MUST ensure that whatever is preventing volumes from being provisioned in the specified location (e.g. quota issues) is addressed before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Unsupported `capacity_range` | 11 OUT_OF_RANGE | Indicates that the capacity range is not allowed by the Plugin, for example when trying to create a volume smaller than the source snapshot. More human-readable information MAY be provided in the gRPC `status.message` field. | Caller MUST fix the capacity range before retrying. | -| Call not implemented | 12 UNIMPLEMENTED | CreateVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `DeleteVolume` A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_VOLUME` capability. This RPC will be called by the CO to deprovision a volume. -If successful, the storage space associated with the volume MUST be released and all the data in the volume SHALL NOT be accessible anymore. This operation MUST be idempotent. If a volume corresponding to the specified `volume_id` does not exist or the artifacts associated with the volume do not exist anymore, the Plugin MUST reply `0 OK`. @@ -1026,7 +1087,7 @@ message DeleteVolumeRequest { // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_delete_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteVolumeResponse { @@ -1043,8 +1104,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume in use | 9 FAILED_PRECONDITION | Indicates that the volume corresponding to the specified `volume_id` could not be deleted because it is in use by another resource. | Caller SHOULD ensure that there are no other resources using the volume, and then retry with exponential back off. 
| -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | DeleteVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ControllerPublishVolume` @@ -1071,31 +1130,44 @@ message ControllerPublishVolumeRequest { // field to match the node ID returned by `NodeGetInfo`. string node_id = 2; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 3; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. bool readonly = 4; // Secrets required by plugin to complete controller publish volume // request. This field is OPTIONAL. Refer to the // `Secrets Requirements` section on how to use this field. - map controller_publish_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message ControllerPublishVolumeResponse { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. - map publish_info = 1; + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; } ``` @@ -1112,8 +1184,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the node corresponding to the specified `volume_id` but is incompatible with the specified `volume_capability` or `readonly` flag . | Caller MUST fix the arguments before retying. | | Volume published to another node | 9 FAILED_PRECONDITION | Indicates that a volume corresponding to the specified `volume_id` has already been published at another node and does not have MULTI_NODE volume capability. If this error code is returned, the Plugin SHOULD specify the `node_id` of the node at which the volume is published as part of the gRPC `status.message`. | Caller SHOULD ensure the specified volume is not published at any other node before retrying with exponential back off. | | Max volumes attached | 8 RESOURCE_EXHAUSTED | Indicates that the maximum supported number of volumes that can be attached to the specified node are already attached. Therefore, this operation will fail until at least one of the existing attached volumes is detached from the node. | Caller MUST ensure that the number of volumes already attached to the node is less then the maximum supported number of volumes before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | ControllerPublishVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ControllerUnpublishVolume` @@ -1146,7 +1216,7 @@ message ControllerUnpublishVolumeRequest { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_unpublish_secrets = 3; + map secrets = 3 [(csi_secret) = true]; } message ControllerUnpublishVolumeResponse { @@ -1164,46 +1234,69 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Node does not exist | 5 NOT_FOUND | Indicates that a node corresponding to the specified `node_id` does not exist. 
| Caller MUST verify that the `node_id` is correct and that the node is available and has not been terminated or deleted before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | ControllerUnpublishVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ValidateVolumeCapabilities` A Controller Plugin MUST implement this RPC call. This RPC will be called by the CO to check if a pre-provisioned volume has all the capabilities that the CO wants. -This RPC call SHALL return `supported` only if all the volume capabilities specified in the request are supported. +This RPC call SHALL return `confirmed` only if all the volume capabilities specified in the request are supported (see caveat below). This operation MUST be idempotent. +NOTE: Older plugins will parse but likely not "process" newer fields that MAY be present in capability-validation messages (and sub-messages) sent by a CO that is communicating using a newer, backwards-compatible version of the CSI protobufs. +Therefore, the CO SHALL reconcile successful capability-validation responses by comparing the validated capabilities with those that it had originally requested. + ```protobuf message ValidateVolumeCapabilitiesRequest { // The ID of the volume to check. This field is REQUIRED. string volume_id = 1; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities + // call SHALL return "confirmed" only if all the volume capabilities // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; + repeated VolumeCapability volume_capabilities = 3; - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - map volume_attributes = 3; + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - repeated Topology accessible_topology = 4; + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. 
Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; } message ValidateVolumeCapabilitiesResponse { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. - bool supported = 1; + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; - // Message to the CO if `supported` above is false. This field is + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; + + // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. string message = 2; @@ -1225,6 +1318,8 @@ The CO MUST implement the specified error recovery behavior when it encounters t A Controller Plugin MUST implement this RPC call if it has `LIST_VOLUMES` capability. The Plugin SHALL return the information about all the volumes that it knows about. +If volumes are created and/or deleted while the CO is concurrently paging through `ListVolumes` results then it is possible that the CO MAY either witness duplicate volumes in the list, not witness existing volumes, or both. +The CO SHALL NOT expect a consistent "view" of all volumes when paging through the volume list via multiple calls to `ListVolumes`. ```protobuf message ListVolumesRequest { @@ -1298,7 +1393,7 @@ message GetCapacityRequest { // `accessible_topology`. This is the same as the // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. Topology accessible_topology = 3; } @@ -1329,7 +1424,7 @@ message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated ControllerServiceCapability capabilities = 2; + repeated ControllerServiceCapability capabilities = 1; } // Specifies a capability of the controller service. @@ -1346,11 +1441,15 @@ message ControllerServiceCapability { // CREATE_DELETE_SNAPSHOT MUST support creating volume from // snapshot. CREATE_DELETE_SNAPSHOT = 5; - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. 
+ CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; } Type type = 1; @@ -1373,17 +1472,43 @@ A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_SNAPSH This RPC will be called by the CO to create a new snapshot from a source volume on behalf of a user. This operation MUST be idempotent. -If a snapshot corresponding to the specified snapshot `name` is already successfully cut and uploaded (if upload is part of the process) and is compatible with the specified `source_volume_id` and `parameters` in the `CreateSnapshotRequest`, the Plugin MUST reply `0 OK` with the corresponding `CreateSnapshotResponse`. +If a snapshot corresponding to the specified snapshot `name` is successfully cut and ready to use (meaning it MAY be specified as a `volume_content_source` in a `CreateVolumeRequest`), the Plugin MUST reply `0 OK` with the corresponding `CreateSnapshotResponse`. If an error occurs before a snapshot is cut, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. -For plugins that implement snapshot uploads, `CreateSnapshot` SHOULD return `10 ABORTED`, a gRPC code that indicates the operation is pending for snapshot, during the snapshot uploading processs. -If an error occurs during the uploading process, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. +For plugins that supports snapshot post processing such as uploading, `CreateSnapshot` SHOULD return `0 OK` and `ready_to_use` SHOULD be set to `false` after the snapshot is cut but still being processed. +CO SHOULD then reissue the same `CreateSnapshotRequest` periodically until boolean `ready_to_use` flips to `true` indicating the snapshot has been "processed" and is ready to use to create new volumes. +If an error occurs during the process, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. A snapshot MAY be used as the source to provision a new volume. -A CreateVolumeRequest message may specify an OPTIONAL source snapshot parameter. +A CreateVolumeRequest message MAY specify an OPTIONAL source snapshot parameter. Reverting a snapshot, where data in the original volume is erased and replaced with data in the snapshot, is an advanced functionality not every storage system can support and therefore is currently out of scope. +##### The ready_to_use Parameter + +Some SPs MAY "process" the snapshot after the snapshot is cut, for example, maybe uploading the snapshot somewhere after the snapshot is cut. +The post-cut process MAY be a long process that could take hours. +The CO MAY freeze the application using the source volume before taking the snapshot. +The purpose of `freeze` is to ensure the application data is in consistent state. +When `freeze` is performed, the container is paused and the application is also paused. +When `thaw` is performed, the container and the application start running again. +During the snapshot processing phase, since the snapshot is already cut, a `thaw` operation can be performed so application can start running without waiting for the process to complete. +The `ready_to_use` parameter of the snapshot will become `true` after the process is complete. + +For SPs that do not do additional processing after cut, the `ready_to_use` parameter SHOULD be `true` after the snapshot is cut. +`thaw` can be done when the `ready_to_use` parameter is `true` in this case. 
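The following is not part of the vendored patch itself; it is an illustrative CO-side sketch of the polling pattern that the new `ready_to_use` field enables, written against the `csi` Go bindings this patch series vendors. The helper name, package name, and poll interval are assumptions for illustration, not spec requirements.

```go
// Illustrative sketch only (not part of the vendored spec): a CO-side helper
// that re-issues the same idempotent CreateSnapshot request until the SP
// reports ready_to_use = true.
package snapshotpoll // hypothetical helper package

import (
	"context"
	"fmt"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

// waitForSnapshot repeatedly calls CreateSnapshot with identical arguments
// (the call is idempotent) and returns once the snapshot is ready to use as
// a volume_content_source.
func waitForSnapshot(ctx context.Context, conn *grpc.ClientConn, sourceVolumeID, name string) (*csi.Snapshot, error) {
	client := csi.NewControllerClient(conn)
	req := &csi.CreateSnapshotRequest{
		SourceVolumeId: sourceVolumeID,
		Name:           name, // REQUIRED for idempotency
	}
	for {
		rsp, err := client.CreateSnapshot(ctx, req)
		if err != nil {
			// A gRPC error means the cut or post-processing failed; per the
			// spec text above, the CO SHOULD clean up explicitly (e.g. by
			// calling DeleteSnapshot).
			return nil, fmt.Errorf("CreateSnapshot(%q): %v", name, err)
		}
		if snap := rsp.GetSnapshot(); snap.GetReadyToUse() {
			return snap, nil
		}
		// The snapshot is cut but still being processed: the application can
		// be thawed now, and we keep polling until ready_to_use flips to true.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(10 * time.Second): // assumed poll interval
		}
	}
}
```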
+ +The `ready_to_use` parameter provides guidance to the CO on when it can "thaw" the application in the process of snapshotting. +If the cloud provider or storage system needs to process the snapshot after the snapshot is cut, the `ready_to_use` parameter returned by CreateSnapshot SHALL be `false`. +CO MAY continue to call CreateSnapshot while waiting for the process to complete until `ready_to_use` becomes `true`. +Note that CreateSnapshot no longer blocks after the snapshot is cut. + +A gRPC error code SHALL be returned if an error occurs during any stage of the snapshotting process. +A CO SHOULD explicitly delete snapshots when an error occurs. + +Based on this information, CO can issue repeated (idemponent) calls to CreateSnapshot, monitor the response, and make decisions. +Note that CreateSnapshot is a synchronous call and it MUST block until the snapshot is cut. + ```protobuf message CreateSnapshotRequest { // The ID of the source volume to be snapshotted. @@ -1392,12 +1517,16 @@ message CreateSnapshotRequest { // The suggested name for the snapshot. This field is REQUIRED for // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 2; // Secrets required by plugin to complete snapshot creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map create_snapshot_secrets = 3; + map secrets = 3 [(csi_secret) = true]; // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. The Plugin is responsible for parsing and @@ -1419,7 +1548,7 @@ message CreateSnapshotResponse { Snapshot snapshot = 1; } -// The information about a provisioned snapshot. +// Information about a specific snapshot. message Snapshot { // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to @@ -1430,11 +1559,16 @@ message Snapshot { // zero means it is unspecified. int64 size_bytes = 1; - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - string id = 2; + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; // Identity information for the source volume. Note that creating a // snapshot from a snapshot is not supported here so the source has to @@ -1442,43 +1576,13 @@ message Snapshot { string source_volume_id = 3; // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - int64 created_at = 4; - - // The status of a snapshot. - SnapshotStatus status = 5; -} + // system. 
This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; -// The status of a snapshot. -message SnapshotStatus { - enum Type { - UNKNOWN = 0; - // A snapshot is ready for use. - READY = 1; - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - UPLOADING = 2; - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - ERROR_UPLOADING = 3; - } - // This field is REQUIRED. - Type type = 1; - - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - string details = 2; + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; } ``` @@ -1491,16 +1595,14 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Snapshot already exists but is incompatible | 6 ALREADY_EXISTS | Indicates that a snapshot corresponding to the specified snapshot `name` already exists but is incompatible with the specified `volume_id`. | Caller MUST fix the arguments or use a different `name` before retrying. | -| Operation pending for snapshot | 10 ABORTED | Indicates that there is a already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | CreateSnapshot call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` to discover Plugin capabilities. | -| Not enough space to create snapshot | 13 RESOURCE_EXHAUSTED | There is not enough space on the storage system to handle the create snapshot request. | Caller should fail this request. Future calls to CreateSnapshot may succeed if space is freed up. | +| Operation pending for snapshot | 10 ABORTED | Indicates that there is already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. 
The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | +| Not enough space to create snapshot | 13 RESOURCE_EXHAUSTED | There is not enough space on the storage system to handle the create snapshot request. | Caller SHOULD fail this request. Future calls to CreateSnapshot MAY succeed if space is freed up. | #### `DeleteSnapshot` A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_SNAPSHOT` capability. This RPC will be called by the CO to delete a snapshot. -If successful, the storage space associated with the snapshot MUST be released and all the data in the snapshot SHALL NOT be accessible anymore. This operation MUST be idempotent. If a snapshot corresponding to the specified `snapshot_id` does not exist or the artifacts associated with the snapshot do not exist anymore, the Plugin MUST reply `0 OK`. @@ -1514,7 +1616,7 @@ message DeleteSnapshotRequest { // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map delete_snapshot_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteSnapshotResponse {} @@ -1530,7 +1632,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Snapshot in use | 9 FAILED_PRECONDITION | Indicates that the snapshot corresponding to the specified `snapshot_id` could not be deleted because it is in use by another resource. | Caller SHOULD ensure that there are no other resources using the snapshot, and then retry with exponential back off. | | Operation pending for snapshot | 10 ABORTED | Indicates that there is already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | DeleteSnapshot call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` to discover Plugin capabilities. | #### `ListSnapshots` @@ -1538,6 +1639,8 @@ The CO MUST implement the specified error recovery behavior when it encounters t A Controller Plugin MUST implement this RPC call if it has `LIST_SNAPSHOTS` capability. The Plugin SHALL return the information about all snapshots on the storage system within the given parameters regardless of how they were created. `ListSnapshots` SHALL NOT list a snapshot that is being created but has not been cut successfully yet. +If snapshots are created and/or deleted while the CO is concurrently paging through `ListSnapshots` results then it is possible that the CO MAY either witness duplicate snapshots in the list, not witness existing snapshots, or both. 
+The CO SHALL NOT expect a consistent "view" of all snapshots when paging through the snapshot list via multiple calls to `ListSnapshots`. ```protobuf // List all snapshots on the storage system regardless of how they were @@ -1566,7 +1669,8 @@ message ListSnapshotsRequest { // Identity information for a specific snapshot. This field is // OPTIONAL. It can be used to list only a specific snapshot. // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. + // and will not block if the snapshot is being processed after + // it is cut. string snapshot_id = 4; } @@ -1621,40 +1725,10 @@ If a `CreateSnapshot` operation times out before the snapshot is cut, leaving th 2. The CO takes no further action regarding the timed out RPC, a snapshot is possibly leaked and the operator/user is expected to clean up. It is NOT REQUIRED for a controller plugin to implement the `LIST_SNAPSHOTS` capability if it supports the `CREATE_DELETE_SNAPSHOT` capability: the onus is upon the CO to take into consideration the full range of plugin capabilities before deciding how to proceed in the above scenario. -A controller plugin COULD implement the `LIST_SNAPSHOTS` capability and call it repeatedly with the `snapshot_id` as a filter to query whether the uploading process is complete or not if it needs to upload a snapshot after it is being cut. -##### Snapshot Statuses - -A snapshot could have the following statusus: UPLOADING, READY, and ERROR. - -Some cloud providers will upload the snapshot to a location in the cloud (i.e., an object store) after the snapshot is cut. -Uploading may be a long process that could take hours. -If a `freeze` operation was done on the application before taking the snapshot, it could be a long time before the application can be running again if we wait until the upload is complete to `thaw` the application. -The purpose of `freeze` is to ensure the application data is in consistent state. -When `freeze` is performed, the container is paused and the application is also paused. -When `thaw` is performed, the container and the application start running again. -During the snapshot uploading phase, since the snapshot is already cut, a `thaw` operation can be performed so application can start running without waiting for the upload to complete. -The status of the snapshot will become `READY` after the upload is complete. - -For cloud providers and storage systems that don't have the uploading process, the status should be `READY` after the snapshot is cut. -`thaw` can be done when the status is `READY` in this case. - -A `CREATING` status is not included here because CreateSnapshot is synchronous and will block until the snapshot is cut. - -`ERROR` is a terminal snapshot status. -A CO SHOULD explicitly delete snapshots in this status. - -The SnapshotStatus parameter provides guidance to the CO on what action can be taken in the process of snapshotting. -Based on this information, CO can issue repeated (idemponent) calls to CreateSnapshot, monitor the response, and make decisions. -Note that CreateSnapshot is a synchronous call and it must block until the snapshot is cut. -If the cloud provider or storage system does not need to upload the snapshot after it is cut, the status returned by CreateSnapshot SHALL be `READY`. -If the cloud provider or storage system needs to upload the snapshot after the snapshot is cut, the status returned by CreateSnapshot SHALL be `UPLOADING`. 
-CO MAY continue to call CreateSnapshot while waiting for the upload to complete until the status becomes `READY`. -Note that CreateSnapshot no longer blocks after the snapshot is cut. - -Alternatively, ListSnapshots can be called repeatedly with snapshot_id as filtering to wait for the upload to complete. ListSnapshots SHALL return with current information regarding the snapshots on the storage system. -When upload is complete, the status of the snapshot from ListSnapshots SHALL become `READY`. +When processing is complete, the `ready_to_use` parameter of the snapshot from ListSnapshots SHALL become `true`. +The downside of calling ListSnapshots is that ListSnapshots will not return a gRPC error code if an error occurs during the processing. So calling CreateSnapshot repeatedly is the preferred way to check if the processing is complete. ### Node Service RPC @@ -1684,28 +1758,33 @@ message NodeStageVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the volume will be published. It MUST be an + // The path to which the volume MAY be staged. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. + // `staging_target_path` per volume. The CO SHALL ensure that the + // process serving the request has `read` and `write` permission to + // the path, and is able to create files or directories at the path + // if it does not exist. // This is a REQUIRED field. string staging_target_path = 3; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 4; // Secrets required by plugin to complete node stage volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_stage_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message NodeStageVolumeResponse { @@ -1723,7 +1802,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the specified `staging_target_path` but is incompatible with the specified `volume_capability` flag. | Caller MUST fix the arguments before retying. 
| -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Exceeds capabilities | 9 FAILED_PRECONDITION | Indicates that the CO has exceeded the volume's capabilities because the volume does not have MULTI_NODE capability. | Caller MAY choose to call `ValidateVolumeCapabilities` to validate the volume capabilities, or wait for the volume to be unpublished on the node. | #### `NodeUnstageVolume` @@ -1751,7 +1829,7 @@ message NodeUnstageVolumeRequest { // The ID of the volume. This field is REQUIRED. string volume_id = 1; - // The path at which the volume was published. It MUST be an absolute + // The path at which the volume was staged. It MUST be an absolute // path in the root filesystem of the process serving this request. // This is a REQUIRED field. string staging_target_path = 2; @@ -1771,7 +1849,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | #### RPC Interactions and Reference Counting `NodeStageVolume`, `NodeUnstageVolume`, `NodePublishVolume`, `NodeUnpublishVolume` @@ -1802,7 +1879,7 @@ The following table shows what the Plugin SHOULD return when receiving a second | MULTI_NODE | OK (idempotent) | ALREADY_EXISTS | OK | OK | | Non MULTI_NODE | OK (idempotent) | ALREADY_EXISTS | FAILED_PRECONDITION | FAILED_PRECONDITION| -(`Tn`: target path of the n-th `NodePublishVolume`, `Pn`: other arguments of the n-th `NodePublishVolume` except `node_publish_secrets`) +(`Tn`: target path of the n-th `NodePublishVolume`, `Pn`: other arguments of the n-th `NodePublishVolume` except `secrets`) ```protobuf message NodePublishVolumeRequest { @@ -1814,9 +1891,9 @@ message NodePublishVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. 
This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the device was mounted by `NodeStageVolume`. + // The path to which the volume was staged by `NodeStageVolume`. // It MUST be an absolute path in the root filesystem of the process // serving this request. // It MUST be set if the Node Plugin implements the @@ -1827,28 +1904,31 @@ message NodePublishVolumeRequest { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. + // The CO SHALL ensure that the process serving the request has + // `read` and `write` permissions to the path, and is able to create + // files or directories at the path if it does not exist. // This is a REQUIRED field. string target_path = 4; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 5; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. bool readonly = 6; // Secrets required by plugin to complete node publish volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_publish_secrets = 7; + map secrets = 7 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. - map volume_attributes = 8; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; } message NodePublishVolumeResponse { @@ -1866,7 +1946,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the specified `target_path` but is incompatible with the specified `volume_capability` or `readonly` flag. | Caller MUST fix the arguments before retying. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. 
| Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Exceeds capabilities | 9 FAILED_PRECONDITION | Indicates that the CO has exceeded the volume's capabilities because the volume does not have MULTI_NODE capability. | Caller MAY choose to call `ValidateVolumeCapabilities` to validate the volume capabilities, or wait for the volume to be unpublished on the node. | | Staging target path not set | 9 FAILED_PRECONDITION | Indicates that `STAGE_UNSTAGE_VOLUME` capability is set but no `staging_target_path` was set. | Caller MUST make sure call to `NodeStageVolume` is made and returns success before retrying with valid `staging_target_path`. | @@ -1910,41 +1989,68 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -#### `NodeGetId` +#### `NodeGetVolumeStats` -`NodeGetId` RPC call is deprecated. -Users of this RPC call SHOULD use `NodeGetInfo`. +A Node plugin MUST implement this RPC call if it has GET_VOLUME_STATS node capability. +`NodeGetVolumeStats` RPC call returns the volume capacity statistics available for the volume. + +If the volume is being used in `BlockVolume` mode then `used` and `available` MAY be omitted from `usage` field of `NodeGetVolumeStatsResponse`. +Similarly, inode information MAY be omitted from `NodeGetVolumeStatsResponse` when unavailable. -A Node Plugin MUST implement this RPC call if the plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability. -The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. -The CO SHOULD call this RPC for the node at which it wants to place the workload. -The result of this call will be used by CO in `ControllerPublishVolume`. ```protobuf -message NodeGetIdRequest { - // Intentionally empty. -} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; -message NodeGetIdResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. // This is a REQUIRED field. - string node_id = 1; + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. 
+ repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; } ``` -##### NodeGetId Errors +##### NodeGetVolumeStats Errors -If the plugin is unable to complete the NodeGetId call successfully, it MUST return a non-ok gRPC code in the gRPC status. +If the plugin is unable to complete the `NodeGetVolumeStats` call successfully, it MUST return a non-ok gRPC code in the gRPC status. If the conditions defined below are encountered, the plugin MUST return the specified gRPC error code. The CO MUST implement the specified error recovery behavior when it encounters the gRPC error code. -Condition | gRPC Code | Description | Recovery Behavior -| --- | --- | --- | --- | -| Call not implemented | 12 UNIMPLEMENTED | NodeGetId call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | + +| Condition | gRPC Code | Description | Recovery Behavior | +|-----------|-----------|-------------|-------------------| +| Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist on specified `volume_path`. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible on specified `volume_path` and has not been deleted before retrying with exponential back off. | #### `NodeGetCapabilities` @@ -1968,6 +2074,10 @@ message NodeServiceCapability { enum Type { UNKNOWN = 0; STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; } Type type = 1; @@ -1990,6 +2100,8 @@ If the plugin is unable to complete the NodeGetCapabilities call successfully, i A Node Plugin MUST implement this RPC call if the plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability. The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. The CO SHOULD call this RPC for the node at which it wants to place the workload. +The CO MAY call this RPC more than once for a given node. +The SP SHALL NOT expect the CO to call this RPC more than once. The result of this call will be used by CO in `ControllerPublishVolume`. ```protobuf @@ -1997,9 +2109,14 @@ message NodeGetInfoRequest { } message NodeGetInfoResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. 
+ // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. string node_id = 1; // Maximum number of volumes that controller can publish to the node. @@ -2012,7 +2129,7 @@ message NodeGetInfoResponse { // Specifies where (regions, zones, racks, etc.) the node is // accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // COs MAY use this information along with the topology information // returned in CreateVolumeResponse to ensure that a given volume is // accessible from a given node when scheduling workloads. @@ -2033,14 +2150,8 @@ message NodeGetInfoResponse { ##### NodeGetInfo Errors If the plugin is unable to complete the NodeGetInfo call successfully, it MUST return a non-ok gRPC code in the gRPC status. -If the conditions defined below are encountered, the plugin MUST return the specified gRPC error code. The CO MUST implement the specified error recovery behavior when it encounters the gRPC error code. -Condition | gRPC Code | Description | Recovery Behavior -| --- | --- | --- | --- | -| Call not implemented | 12 UNIMPLEMENTED | NodeGetInfo call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | - - ## Protocol ### Connectivity @@ -2051,7 +2162,7 @@ Condition | gRPC Code | Description | Recovery Behavior Support for OPTIONAL RPCs is reported by the `ControllerGetCapabilities` and `NodeGetCapabilities` RPC calls. * The CO SHALL provide the listen-address for the Plugin by way of the `CSI_ENDPOINT` environment variable. Plugin components SHALL create, bind, and listen for RPCs on the specified listen address. - * Only UNIX Domain Sockets may be used as endpoints. + * Only UNIX Domain Sockets MAY be used as endpoints. This will likely change in a future version of this specification to support non-UNIX platforms. * All supported RPC services MUST be available at the listen address of the Plugin. @@ -2060,7 +2171,7 @@ Condition | gRPC Code | Description | Recovery Behavior * The CO operator and Plugin Supervisor SHOULD take steps to ensure that any and all communication between the CO and Plugin Service are secured according to best practices. * Communication between a CO and a Plugin SHALL be transported over UNIX Domain Sockets. * gRPC is compatible with UNIX Domain Sockets; it is the responsibility of the CO operator and Plugin Supervisor to properly secure access to the Domain Socket using OS filesystem ACLs and/or other OS-specific security context tooling. - * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets must provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). + * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets MUST provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). Proxy components transporting communication over IP networks SHALL be responsible for securing communications over such networks. * Both the CO and Plugin SHOULD avoid accidental leakage of sensitive information (such as redacting such information from log files). 
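Not part of the vendored patch itself — a minimal Go sketch of the `CSI_ENDPOINT` / UNIX Domain Socket contract described in the Connectivity section above, using the generated `csi` service registration helpers. The `Serve` helper, its signature, and the `unix://` prefix handling are assumptions for illustration only; securing the socket remains the operator's and Plugin Supervisor's responsibility.

```go
// Illustrative sketch only: how an SP might bind its gRPC services to the
// listen address supplied via CSI_ENDPOINT, assuming ids/cs/ns are the
// plugin's own implementations of the generated service interfaces.
package endpoint // hypothetical

import (
	"fmt"
	"net"
	"os"
	"strings"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

// Serve listens on the UNIX domain socket named by CSI_ENDPOINT and exposes
// all supported CSI services at that single listen address.
func Serve(ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) error {
	ep := os.Getenv("CSI_ENDPOINT")
	if ep == "" {
		return fmt.Errorf("CSI_ENDPOINT is not set")
	}
	// Accept both "unix:///path/to.sock" and a bare filesystem path (assumed
	// convention; only UNIX Domain Sockets are allowed as endpoints).
	path := strings.TrimPrefix(ep, "unix://")
	// Remove a stale socket left over from a previous run.
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return err
	}
	lis, err := net.Listen("unix", path)
	if err != nil {
		return err
	}
	srv := grpc.NewServer()
	csi.RegisterIdentityServer(srv, ids)
	csi.RegisterControllerServer(srv, cs)
	csi.RegisterNodeServer(srv, ns)
	return srv.Serve(lis)
}
```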
@@ -2105,8 +2216,8 @@ Condition | gRPC Code | Description | Recovery Behavior * Variables defined by this specification SHALL be identifiable by their `CSI_` name prefix. * Configuration properties not defined by the CSI specification SHALL NOT use the same `CSI_` name prefix; this prefix is reserved for common configuration properties defined by the CSI specification. -* The Plugin Supervisor SHOULD supply all recommended CSI environment variables to a Plugin. -* The Plugin Supervisor SHALL supply all required CSI environment variables to a Plugin. +* The Plugin Supervisor SHOULD supply all RECOMMENDED CSI environment variables to a Plugin. +* The Plugin Supervisor SHALL supply all REQUIRED CSI environment variables to a Plugin. ##### `CSI_ENDPOINT` @@ -2141,8 +2252,8 @@ Supervised plugins MAY be isolated and/or resource-bounded. ##### Available Services * Plugin Packages MAY support all or a subset of CSI services; service combinations MAY be configurable at runtime by the Plugin Supervisor. - * A plugin must know the "mode" in which it is operating (e.g. node, controller, or both). - * This specification does not dictate the mechanism by which mode of operation must be discovered, and instead places that burden upon the SP. + * A plugin MUST know the "mode" in which it is operating (e.g. node, controller, or both). + * This specification does not dictate the mechanism by which mode of operation MUST be discovered, and instead places that burden upon the SP. * Misconfigured plugin software SHOULD fail-fast with an OS-appropriate error code. ##### Linux Capabilities @@ -2158,7 +2269,7 @@ Supervised plugins MAY be isolated and/or resource-bounded. ##### Cgroup Isolation * A Plugin MAY be constrained by cgroups. -* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin may access requisite devices. +* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin MAY access requisite devices. * A Plugin Supervisor MAY define resource limits for a Plugin. ##### Resource Requirements diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore index 984ec0fbb..81c985c4d 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.gitignore +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -11,3 +11,9 @@ *.out bin/mock cmd/csi-sanity/csi-sanity + +# JetBrains GoLand +.idea + +# Vim +*.swp diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml index 9d636c7f6..7a8171919 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -1,14 +1,15 @@ language: go +sudo: required +services: + - docker matrix: include: - - go: 1.x + - go: 1.10.3 script: -- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 -- go vet $(go list ./... | grep -v vendor) -- go test $(go list ./... 
| grep -v vendor | grep -v "cmd/csi-sanity") -- ./hack/e2e.sh +- make test after_success: - if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + make container docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; make push; fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md new file mode 100644 index 000000000..41b73b76e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
+ +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock index 2737ba719..443ad9700 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock @@ -2,18 +2,23 @@ [[projects]] + digest = "1:26ee2356254e58b9872ba736f66aff1c54a26f08c7d16afbf49695131a87d454" name = "github.com/container-storage-interface/spec" - packages = ["lib/go/csi/v0"] - revision = "2178fdeea87f1150a17a63252eee28d4d8141f72" - version = "v0.3.0" + packages = ["lib/go/csi"] + pruneopts = "UT" + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" [[projects]] + digest = "1:bc38c7c481812e178d85160472e231c5e1c9a7f5845d67e23ee4e706933c10d8" name = "github.com/golang/mock" packages = ["gomock"] + pruneopts = "UT" revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" version = "v1.1.1" [[projects]] + digest = "1:588beb9f80d2b0afddf05663b32d01c867da419458b560471d81cca0286e76b8" name = "github.com/golang/protobuf" packages = [ "proto", @@ -22,12 +27,14 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers" + "ptypes/wrappers", ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" [[projects]] + digest = "1:72f35d3e412bc67b121e15ea4c88a3b3da8bcbc2264339e7ffa4a1865799840c" name = "github.com/onsi/ginkgo" packages = [ ".", @@ -47,12 +54,14 @@ "reporters/stenographer", "reporters/stenographer/support/go-colorable", "reporters/stenographer/support/go-isatty", - "types" + "types", ] + pruneopts = "UT" revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf" version = "v1.5.0" [[projects]] + digest = "1:d0c2c4e2d0006cd28c220a549cda1de8e67abc65ed4c572421492bbf0492ceaf" name = "github.com/onsi/gomega" packages = [ ".", @@ -66,25 +75,31 @@ "matchers/support/goraph/edge", "matchers/support/goraph/node", "matchers/support/goraph/util", - "types" + "types", ] + pruneopts = "UT" revision = "62bff4df71bdbc266561a0caee19f0594b17c240" version = "v1.4.0" [[projects]] + digest = "1:9e9193aa51197513b3abcb108970d831fbcf40ef96aa845c4f03276e1fa316d2" name = "github.com/sirupsen/logrus" packages = ["."] + pruneopts = "UT" revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" version = "v1.0.5" [[projects]] branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" name = "golang.org/x/crypto" packages = ["ssh/terminal"] + pruneopts = "UT" revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" [[projects]] branch = "master" + digest = "1:0bb2e6ef036484991ed446a6c698698b8901766981d4d22cc8e53fedb09709ac" name = "golang.org/x/net" packages = [ "context", @@ -96,20 +111,24 @@ "http2/hpack", "idna", "internal/timeseries", - "trace" + "trace", ] + pruneopts = "UT" revision = "1e491301e022f8f977054da4c2d852decd59571f" [[projects]] branch = "master" + digest = "1:8fbfc6ea1a8a078697633be97f07dd83a83d32a96959d42195464c13c25be374" name = "golang.org/x/sys" packages = [ "unix", - "windows" + "windows", ] + pruneopts = "UT" revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" [[projects]] + digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" name = "golang.org/x/text" packages = [ "collate", @@ -137,18 +156,22 @@ "unicode/bidi", 
"unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" + digest = "1:601e63e7d4577f907118bec825902505291918859d223bce015539e79f1160e3" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] + pruneopts = "UT" revision = "32ee49c4dd805befd833990acba36cb75042378c" [[projects]] + digest = "1:7a977fdcd5abff03e94f92e7b374ef37e91c7c389581e5c4348fa98616e6c6be" name = "google.golang.org/grpc" packages = [ ".", @@ -176,20 +199,39 @@ "stats", "status", "tap", - "transport" + "transport", ] + pruneopts = "UT" revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" version = "v1.12.2" [[projects]] + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" name = "gopkg.in/yaml.v2" packages = ["."] + pruneopts = "UT" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "5dd480018adbb94025564b74bad8dd269cc516183b7b428317f6dd04b07726f4" + input-imports = [ + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/mock/gomock", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes", + "github.com/golang/protobuf/ptypes/wrappers", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/sirupsen/logrus", + "golang.org/x/net/context", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/connectivity", + "google.golang.org/grpc/reflection", + "google.golang.org/grpc/status", + "gopkg.in/yaml.v2", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml index e73127854..4e0836d08 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml @@ -27,7 +27,7 @@ [[constraint]] name = "github.com/container-storage-interface/spec" - version = "~0.3.0" + version = "v1.0.0-rc2" [[constraint]] name = "github.com/golang/mock" @@ -35,7 +35,7 @@ [[constraint]] name = "github.com/golang/protobuf" - version = "v1.1.0" + version = "v1.2.0" [[constraint]] name = "github.com/onsi/ginkgo" diff --git a/vendor/github.com/kubernetes-csi/csi-test/Makefile b/vendor/github.com/kubernetes-csi/csi-test/Makefile index b31541f62..7fb42c877 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/Makefile +++ b/vendor/github.com/kubernetes-csi/csi-test/Makefile @@ -38,5 +38,15 @@ container: $(APP) push: container docker push $(IMAGE_NAME):$(IMAGE_VERSION) -.PHONY: all clean container push - +test: $(APP) + files=$$(find ./ -name '*.go' | grep -v '^./vendor' ); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi + go vet $$(go list ./... | grep -v vendor) + go test $$(go list ./... 
| grep -v vendor | grep -v "cmd/csi-sanity") + ./hack/e2e.sh + +.PHONY: all clean container push test diff --git a/vendor/github.com/kubernetes-csi/csi-test/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/OWNERS new file mode 100644 index 000000000..a780cce61 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/OWNERS @@ -0,0 +1,4 @@ +approvers: +- saad-ali +- lpabon +- pohly diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md index f6891ae78..36dce60ba 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -12,7 +12,7 @@ CO developers can use this framework to create drivers based on the ### Mock driver for testing We also provide a container called `quay.io/k8scsi/mock-driver:canary` which can be used as an in-memory mock driver. -It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.2.0`. +It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.3.0`. You will need to setup the environment variable `CSI_ENDPOINT` for the mock driver to know where to create the unix domain socket. @@ -25,5 +25,18 @@ CSI driver. ### Note -* Master is for CSI v0.3.0. Please see the branches for other CSI releases. +* Master is for CSI v0.4.0. Please see the branches for other CSI releases. * Only Golang 1.9+ supported. See [gRPC issue](https://github.com/grpc/grpc-go/issues/711#issuecomment-326626790) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS new file mode 100644 index 000000000..00e28e4eb --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali +lpabon diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go index a4f4707a8..4b2d352cc 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go @@ -41,6 +41,7 @@ func init() { flag.StringVar(&config.StagingPath, prefix+"stagingdir", os.TempDir()+"/csi", "Mount point for NodeStage if staging is supported") flag.StringVar(&config.SecretsFile, prefix+"secrets", "", "CSI secrets file") flag.Int64Var(&config.TestVolumeSize, prefix+"testvolumesize", sanity.DefTestVolumeSize, "Base volume size used for provisioned volumes") + flag.StringVar(&config.TestVolumeParametersFile, prefix+"testvolumeparameters", "", "YAML file of volume parameters for provisioned volumes") flag.Parse() } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go index 462118570..01224a3ac 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -14,20 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi/v0 IdentityServer,ControllerServer,NodeServer +//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi IdentityServer,ControllerServer,NodeServer package driver import ( - context "context" + "context" + "encoding/json" "errors" + "fmt" "net" "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) @@ -58,6 +60,8 @@ type CSICreds struct { ControllerUnpublishVolumeSecret string NodeStageVolumeSecret string NodePublishVolumeSecret string + CreateSnapshotSecret string + DeleteSnapshotSecret string } type CSIDriver struct { @@ -100,7 +104,7 @@ func (c *CSIDriver) Start(l net.Listener) error { // Create a new grpc server c.server = grpc.NewServer( - grpc.UnaryInterceptor(c.authInterceptor), + grpc.UnaryInterceptor(c.callInterceptor), ) // Register Mock servers @@ -155,25 +159,54 @@ func (c *CSIDriver) SetDefaultCreds() { ControllerUnpublishVolumeSecret: "secretval4", NodeStageVolumeSecret: "secretval5", NodePublishVolumeSecret: "secretval6", + CreateSnapshotSecret: "secretval7", + DeleteSnapshotSecret: "secretval8", } } -func (c *CSIDriver) authInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + err := c.authInterceptor(req) + if err != nil { + logGRPC(info.FullMethod, req, nil, err) + return nil, err + } + rsp, err := handler(ctx, req) + logGRPC(info.FullMethod, req, rsp, err) + return rsp, err +} + +func (c *CSIDriver) authInterceptor(req interface{}) error { if c.creds != nil { authenticated, authErr := isAuthenticated(req, c.creds) if !authenticated { if 
authErr == ErrNoCredentials { - return nil, status.Error(codes.InvalidArgument, authErr.Error()) + return status.Error(codes.InvalidArgument, authErr.Error()) } if authErr == ErrAuthFailed { - return nil, status.Error(codes.Unauthenticated, authErr.Error()) + return status.Error(codes.Unauthenticated, authErr.Error()) } } } + return nil +} - h, err := handler(ctx, req) - - return h, err +func logGRPC(method string, request, reply interface{}, err error) { + // Log JSON with the request and response for easier parsing + logMessage := struct { + Method string + Request interface{} + Response interface{} + Error string + }{ + Method: method, + Request: request, + Response: reply, + } + if err != nil { + logMessage.Error = err.Error() + } + msg, _ := json.Marshal(logMessage) + fmt.Printf("gRPCCall: %s\n", msg) } func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { @@ -190,33 +223,45 @@ func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { return authenticateNodeStageVolume(r, creds) case *csi.NodePublishVolumeRequest: return authenticateNodePublishVolume(r, creds) + case *csi.CreateSnapshotRequest: + return authenticateCreateSnapshot(r, creds) + case *csi.DeleteSnapshotRequest: + return authenticateDeleteSnapshot(r, creds) default: return true, nil } } func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerCreateSecrets(), creds.CreateVolumeSecret) + return credsCheck(req.GetSecrets(), creds.CreateVolumeSecret) } func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerDeleteSecrets(), creds.DeleteVolumeSecret) + return credsCheck(req.GetSecrets(), creds.DeleteVolumeSecret) } func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerPublishSecrets(), creds.ControllerPublishVolumeSecret) + return credsCheck(req.GetSecrets(), creds.ControllerPublishVolumeSecret) } func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerUnpublishSecrets(), creds.ControllerUnpublishVolumeSecret) + return credsCheck(req.GetSecrets(), creds.ControllerUnpublishVolumeSecret) } func authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetNodeStageSecrets(), creds.NodeStageVolumeSecret) + return credsCheck(req.GetSecrets(), creds.NodeStageVolumeSecret) } func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetNodePublishSecrets(), creds.NodePublishVolumeSecret) + return credsCheck(req.GetSecrets(), creds.NodePublishVolumeSecret) +} + +func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateSnapshotSecret) +} + +func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret) } func credsCheck(secrets map[string]string, secretVal string) (bool, error) { diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go index abd7d6106..c54acaad5 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go +++ 
b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -1,849 +1,354 @@ -// Automatically generated by MockGen. DO NOT EDIT! -// Source: ../vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/container-storage-interface/spec/lib/go/csi (interfaces: IdentityServer,ControllerServer,NodeServer) +// Package driver is a generated GoMock package. package driver import ( - v0 "github.com/container-storage-interface/spec/lib/go/csi/v0" + context "context" + csi "github.com/container-storage-interface/spec/lib/go/csi" gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + reflect "reflect" ) -// Mock of isPluginCapability_Type interface -type MockisPluginCapability_Type struct { - ctrl *gomock.Controller - recorder *_MockisPluginCapability_TypeRecorder -} - -// Recorder for MockisPluginCapability_Type (not exported) -type _MockisPluginCapability_TypeRecorder struct { - mock *MockisPluginCapability_Type -} - -func NewMockisPluginCapability_Type(ctrl *gomock.Controller) *MockisPluginCapability_Type { - mock := &MockisPluginCapability_Type{ctrl: ctrl} - mock.recorder = &_MockisPluginCapability_TypeRecorder{mock} - return mock -} - -func (_m *MockisPluginCapability_Type) EXPECT() *_MockisPluginCapability_TypeRecorder { - return _m.recorder -} - -func (_m *MockisPluginCapability_Type) isPluginCapability_Type() { - _m.ctrl.Call(_m, "isPluginCapability_Type") -} - -func (_mr *_MockisPluginCapability_TypeRecorder) isPluginCapability_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isPluginCapability_Type") -} - -// Mock of isVolumeContentSource_Type interface -type MockisVolumeContentSource_Type struct { - ctrl *gomock.Controller - recorder *_MockisVolumeContentSource_TypeRecorder -} - -// Recorder for MockisVolumeContentSource_Type (not exported) -type _MockisVolumeContentSource_TypeRecorder struct { - mock *MockisVolumeContentSource_Type -} - -func NewMockisVolumeContentSource_Type(ctrl *gomock.Controller) *MockisVolumeContentSource_Type { - mock := &MockisVolumeContentSource_Type{ctrl: ctrl} - mock.recorder = &_MockisVolumeContentSource_TypeRecorder{mock} - return mock -} - -func (_m *MockisVolumeContentSource_Type) EXPECT() *_MockisVolumeContentSource_TypeRecorder { - return _m.recorder -} - -func (_m *MockisVolumeContentSource_Type) isVolumeContentSource_Type() { - _m.ctrl.Call(_m, "isVolumeContentSource_Type") -} - -func (_mr *_MockisVolumeContentSource_TypeRecorder) isVolumeContentSource_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isVolumeContentSource_Type") -} - -// Mock of isVolumeCapability_AccessType interface -type MockisVolumeCapability_AccessType struct { - ctrl *gomock.Controller - recorder *_MockisVolumeCapability_AccessTypeRecorder -} - -// Recorder for MockisVolumeCapability_AccessType (not exported) -type _MockisVolumeCapability_AccessTypeRecorder struct { - mock *MockisVolumeCapability_AccessType -} - -func NewMockisVolumeCapability_AccessType(ctrl *gomock.Controller) *MockisVolumeCapability_AccessType { - mock := &MockisVolumeCapability_AccessType{ctrl: ctrl} - mock.recorder = &_MockisVolumeCapability_AccessTypeRecorder{mock} - return mock -} - -func (_m *MockisVolumeCapability_AccessType) EXPECT() *_MockisVolumeCapability_AccessTypeRecorder { - return _m.recorder -} - -func (_m *MockisVolumeCapability_AccessType) isVolumeCapability_AccessType() { - _m.ctrl.Call(_m, 
"isVolumeCapability_AccessType") -} - -func (_mr *_MockisVolumeCapability_AccessTypeRecorder) isVolumeCapability_AccessType() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isVolumeCapability_AccessType") -} - -// Mock of isControllerServiceCapability_Type interface -type MockisControllerServiceCapability_Type struct { - ctrl *gomock.Controller - recorder *_MockisControllerServiceCapability_TypeRecorder -} - -// Recorder for MockisControllerServiceCapability_Type (not exported) -type _MockisControllerServiceCapability_TypeRecorder struct { - mock *MockisControllerServiceCapability_Type -} - -func NewMockisControllerServiceCapability_Type(ctrl *gomock.Controller) *MockisControllerServiceCapability_Type { - mock := &MockisControllerServiceCapability_Type{ctrl: ctrl} - mock.recorder = &_MockisControllerServiceCapability_TypeRecorder{mock} - return mock -} - -func (_m *MockisControllerServiceCapability_Type) EXPECT() *_MockisControllerServiceCapability_TypeRecorder { - return _m.recorder -} - -func (_m *MockisControllerServiceCapability_Type) isControllerServiceCapability_Type() { - _m.ctrl.Call(_m, "isControllerServiceCapability_Type") -} - -func (_mr *_MockisControllerServiceCapability_TypeRecorder) isControllerServiceCapability_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isControllerServiceCapability_Type") -} - -// Mock of isNodeServiceCapability_Type interface -type MockisNodeServiceCapability_Type struct { - ctrl *gomock.Controller - recorder *_MockisNodeServiceCapability_TypeRecorder -} - -// Recorder for MockisNodeServiceCapability_Type (not exported) -type _MockisNodeServiceCapability_TypeRecorder struct { - mock *MockisNodeServiceCapability_Type -} - -func NewMockisNodeServiceCapability_Type(ctrl *gomock.Controller) *MockisNodeServiceCapability_Type { - mock := &MockisNodeServiceCapability_Type{ctrl: ctrl} - mock.recorder = &_MockisNodeServiceCapability_TypeRecorder{mock} - return mock -} - -func (_m *MockisNodeServiceCapability_Type) EXPECT() *_MockisNodeServiceCapability_TypeRecorder { - return _m.recorder -} - -func (_m *MockisNodeServiceCapability_Type) isNodeServiceCapability_Type() { - _m.ctrl.Call(_m, "isNodeServiceCapability_Type") -} - -func (_mr *_MockisNodeServiceCapability_TypeRecorder) isNodeServiceCapability_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isNodeServiceCapability_Type") -} - -// Mock of IdentityClient interface -type MockIdentityClient struct { - ctrl *gomock.Controller - recorder *_MockIdentityClientRecorder -} - -// Recorder for MockIdentityClient (not exported) -type _MockIdentityClientRecorder struct { - mock *MockIdentityClient -} - -func NewMockIdentityClient(ctrl *gomock.Controller) *MockIdentityClient { - mock := &MockIdentityClient{ctrl: ctrl} - mock.recorder = &_MockIdentityClientRecorder{mock} - return mock -} - -func (_m *MockIdentityClient) EXPECT() *_MockIdentityClientRecorder { - return _m.recorder -} - -func (_m *MockIdentityClient) GetPluginInfo(ctx context.Context, in *v0.GetPluginInfoRequest, opts ...grpc.CallOption) (*v0.GetPluginInfoResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetPluginInfo", _s...) - ret0, _ := ret[0].(*v0.GetPluginInfoResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityClientRecorder) GetPluginInfo(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) 
- return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginInfo", _s...) -} - -func (_m *MockIdentityClient) GetPluginCapabilities(ctx context.Context, in *v0.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*v0.GetPluginCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetPluginCapabilities", _s...) - ret0, _ := ret[0].(*v0.GetPluginCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityClientRecorder) GetPluginCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginCapabilities", _s...) -} - -func (_m *MockIdentityClient) Probe(ctx context.Context, in *v0.ProbeRequest, opts ...grpc.CallOption) (*v0.ProbeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "Probe", _s...) - ret0, _ := ret[0].(*v0.ProbeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityClientRecorder) Probe(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "Probe", _s...) -} - -// Mock of IdentityServer interface +// MockIdentityServer is a mock of IdentityServer interface type MockIdentityServer struct { ctrl *gomock.Controller - recorder *_MockIdentityServerRecorder + recorder *MockIdentityServerMockRecorder } -// Recorder for MockIdentityServer (not exported) -type _MockIdentityServerRecorder struct { +// MockIdentityServerMockRecorder is the mock recorder for MockIdentityServer +type MockIdentityServerMockRecorder struct { mock *MockIdentityServer } +// NewMockIdentityServer creates a new mock instance func NewMockIdentityServer(ctrl *gomock.Controller) *MockIdentityServer { mock := &MockIdentityServer{ctrl: ctrl} - mock.recorder = &_MockIdentityServerRecorder{mock} - return mock -} - -func (_m *MockIdentityServer) EXPECT() *_MockIdentityServerRecorder { - return _m.recorder -} - -func (_m *MockIdentityServer) GetPluginInfo(_param0 context.Context, _param1 *v0.GetPluginInfoRequest) (*v0.GetPluginInfoResponse, error) { - ret := _m.ctrl.Call(_m, "GetPluginInfo", _param0, _param1) - ret0, _ := ret[0].(*v0.GetPluginInfoResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityServerRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginInfo", arg0, arg1) -} - -func (_m *MockIdentityServer) GetPluginCapabilities(_param0 context.Context, _param1 *v0.GetPluginCapabilitiesRequest) (*v0.GetPluginCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "GetPluginCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.GetPluginCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityServerRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginCapabilities", arg0, arg1) -} - -func (_m *MockIdentityServer) Probe(_param0 context.Context, _param1 *v0.ProbeRequest) (*v0.ProbeResponse, error) { - ret := _m.ctrl.Call(_m, "Probe", _param0, _param1) - ret0, _ := ret[0].(*v0.ProbeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityServerRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { - return 
_mr.mock.ctrl.RecordCall(_mr.mock, "Probe", arg0, arg1) -} - -// Mock of ControllerClient interface -type MockControllerClient struct { - ctrl *gomock.Controller - recorder *_MockControllerClientRecorder -} - -// Recorder for MockControllerClient (not exported) -type _MockControllerClientRecorder struct { - mock *MockControllerClient -} - -func NewMockControllerClient(ctrl *gomock.Controller) *MockControllerClient { - mock := &MockControllerClient{ctrl: ctrl} - mock.recorder = &_MockControllerClientRecorder{mock} + mock.recorder = &MockIdentityServerMockRecorder{mock} return mock } -func (_m *MockControllerClient) EXPECT() *_MockControllerClientRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { + return m.recorder } -func (_m *MockControllerClient) CreateVolume(ctx context.Context, in *v0.CreateVolumeRequest, opts ...grpc.CallOption) (*v0.CreateVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "CreateVolume", _s...) - ret0, _ := ret[0].(*v0.CreateVolumeResponse) +// GetPluginCapabilities mocks base method +func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerClientRecorder) CreateVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateVolume", _s...) +// GetPluginCapabilities indicates an expected call of GetPluginCapabilities +func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) } -func (_m *MockControllerClient) DeleteVolume(ctx context.Context, in *v0.DeleteVolumeRequest, opts ...grpc.CallOption) (*v0.DeleteVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "DeleteVolume", _s...) - ret0, _ := ret[0].(*v0.DeleteVolumeResponse) +// GetPluginInfo mocks base method +func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerClientRecorder) DeleteVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteVolume", _s...) 
+// GetPluginInfo indicates an expected call of GetPluginInfo +func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) } -func (_m *MockControllerClient) ControllerPublishVolume(ctx context.Context, in *v0.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*v0.ControllerPublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ControllerPublishVolume", _s...) - ret0, _ := ret[0].(*v0.ControllerPublishVolumeResponse) +// Probe mocks base method +func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *csi.ProbeRequest) (*csi.ProbeResponse, error) { + ret := m.ctrl.Call(m, "Probe", arg0, arg1) + ret0, _ := ret[0].(*csi.ProbeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerClientRecorder) ControllerPublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerPublishVolume", _s...) -} - -func (_m *MockControllerClient) ControllerUnpublishVolume(ctx context.Context, in *v0.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*v0.ControllerUnpublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ControllerUnpublishVolume", _s...) - ret0, _ := ret[0].(*v0.ControllerUnpublishVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerUnpublishVolume", _s...) -} - -func (_m *MockControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *v0.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*v0.ValidateVolumeCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ValidateVolumeCapabilities", _s...) - ret0, _ := ret[0].(*v0.ValidateVolumeCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ValidateVolumeCapabilities", _s...) -} - -func (_m *MockControllerClient) ListVolumes(ctx context.Context, in *v0.ListVolumesRequest, opts ...grpc.CallOption) (*v0.ListVolumesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ListVolumes", _s...) - ret0, _ := ret[0].(*v0.ListVolumesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 +// Probe indicates an expected call of Probe +func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) } -func (_mr *_MockControllerClientRecorder) ListVolumes(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListVolumes", _s...) 
-} - -func (_m *MockControllerClient) GetCapacity(ctx context.Context, in *v0.GetCapacityRequest, opts ...grpc.CallOption) (*v0.GetCapacityResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetCapacity", _s...) - ret0, _ := ret[0].(*v0.GetCapacityResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) GetCapacity(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetCapacity", _s...) -} - -func (_m *MockControllerClient) ControllerGetCapabilities(ctx context.Context, in *v0.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*v0.ControllerGetCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ControllerGetCapabilities", _s...) - ret0, _ := ret[0].(*v0.ControllerGetCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ControllerGetCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerGetCapabilities", _s...) -} - -func (_m *MockControllerClient) CreateSnapshot(ctx context.Context, in *v0.CreateSnapshotRequest, opts ...grpc.CallOption) (*v0.CreateSnapshotResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "CreateSnapshot", _s...) - ret0, _ := ret[0].(*v0.CreateSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) CreateSnapshot(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateSnapshot", _s...) -} - -func (_m *MockControllerClient) DeleteSnapshot(ctx context.Context, in *v0.DeleteSnapshotRequest, opts ...grpc.CallOption) (*v0.DeleteSnapshotResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "DeleteSnapshot", _s...) - ret0, _ := ret[0].(*v0.DeleteSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) DeleteSnapshot(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteSnapshot", _s...) -} - -func (_m *MockControllerClient) ListSnapshots(ctx context.Context, in *v0.ListSnapshotsRequest, opts ...grpc.CallOption) (*v0.ListSnapshotsResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ListSnapshots", _s...) - ret0, _ := ret[0].(*v0.ListSnapshotsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ListSnapshots(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListSnapshots", _s...) 
-} - -// Mock of ControllerServer interface +// MockControllerServer is a mock of ControllerServer interface type MockControllerServer struct { ctrl *gomock.Controller - recorder *_MockControllerServerRecorder + recorder *MockControllerServerMockRecorder } -// Recorder for MockControllerServer (not exported) -type _MockControllerServerRecorder struct { +// MockControllerServerMockRecorder is the mock recorder for MockControllerServer +type MockControllerServerMockRecorder struct { mock *MockControllerServer } +// NewMockControllerServer creates a new mock instance func NewMockControllerServer(ctrl *gomock.Controller) *MockControllerServer { mock := &MockControllerServer{ctrl: ctrl} - mock.recorder = &_MockControllerServerRecorder{mock} + mock.recorder = &MockControllerServerMockRecorder{mock} return mock } -func (_m *MockControllerServer) EXPECT() *_MockControllerServerRecorder { - return _m.recorder -} - -func (_m *MockControllerServer) CreateVolume(_param0 context.Context, _param1 *v0.CreateVolumeRequest) (*v0.CreateVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "CreateVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.CreateVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateVolume", arg0, arg1) -} - -func (_m *MockControllerServer) DeleteVolume(_param0 context.Context, _param1 *v0.DeleteVolumeRequest) (*v0.DeleteVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "DeleteVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.DeleteVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteVolume", arg0, arg1) -} - -func (_m *MockControllerServer) ControllerPublishVolume(_param0 context.Context, _param1 *v0.ControllerPublishVolumeRequest) (*v0.ControllerPublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "ControllerPublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.ControllerPublishVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) ControllerPublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerPublishVolume", arg0, arg1) -} - -func (_m *MockControllerServer) ControllerUnpublishVolume(_param0 context.Context, _param1 *v0.ControllerUnpublishVolumeRequest) (*v0.ControllerUnpublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "ControllerUnpublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.ControllerUnpublishVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerUnpublishVolume", arg0, arg1) +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder { + return m.recorder } -func (_m *MockControllerServer) ValidateVolumeCapabilities(_param0 context.Context, _param1 *v0.ValidateVolumeCapabilitiesRequest) (*v0.ValidateVolumeCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "ValidateVolumeCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.ValidateVolumeCapabilitiesResponse) +// ControllerGetCapabilities mocks base method +func (m *MockControllerServer) 
ControllerGetCapabilities(arg0 context.Context, arg1 *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerGetCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ValidateVolumeCapabilities", arg0, arg1) +// ControllerGetCapabilities indicates an expected call of ControllerGetCapabilities +func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) } -func (_m *MockControllerServer) ListVolumes(_param0 context.Context, _param1 *v0.ListVolumesRequest) (*v0.ListVolumesResponse, error) { - ret := _m.ctrl.Call(_m, "ListVolumes", _param0, _param1) - ret0, _ := ret[0].(*v0.ListVolumesResponse) +// ControllerPublishVolume mocks base method +func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerPublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListVolumes", arg0, arg1) +// ControllerPublishVolume indicates an expected call of ControllerPublishVolume +func (mr *MockControllerServerMockRecorder) ControllerPublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerPublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerPublishVolume), arg0, arg1) } -func (_m *MockControllerServer) GetCapacity(_param0 context.Context, _param1 *v0.GetCapacityRequest) (*v0.GetCapacityResponse, error) { - ret := _m.ctrl.Call(_m, "GetCapacity", _param0, _param1) - ret0, _ := ret[0].(*v0.GetCapacityResponse) +// ControllerUnpublishVolume mocks base method +func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerUnpublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetCapacity", arg0, arg1) +// ControllerUnpublishVolume indicates an expected call of ControllerUnpublishVolume +func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) } -func (_m *MockControllerServer) ControllerGetCapabilities(_param0 context.Context, _param1 *v0.ControllerGetCapabilitiesRequest) (*v0.ControllerGetCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "ControllerGetCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.ControllerGetCapabilitiesResponse) +// CreateSnapshot mocks base method +func (m 
*MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateSnapshotResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerGetCapabilities", arg0, arg1) -} - -func (_m *MockControllerServer) CreateSnapshot(_param0 context.Context, _param1 *v0.CreateSnapshotRequest) (*v0.CreateSnapshotResponse, error) { - ret := _m.ctrl.Call(_m, "CreateSnapshot", _param0, _param1) - ret0, _ := ret[0].(*v0.CreateSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateSnapshot", arg0, arg1) -} - -func (_m *MockControllerServer) DeleteSnapshot(_param0 context.Context, _param1 *v0.DeleteSnapshotRequest) (*v0.DeleteSnapshotResponse, error) { - ret := _m.ctrl.Call(_m, "DeleteSnapshot", _param0, _param1) - ret0, _ := ret[0].(*v0.DeleteSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteSnapshot", arg0, arg1) -} - -func (_m *MockControllerServer) ListSnapshots(_param0 context.Context, _param1 *v0.ListSnapshotsRequest) (*v0.ListSnapshotsResponse, error) { - ret := _m.ctrl.Call(_m, "ListSnapshots", _param0, _param1) - ret0, _ := ret[0].(*v0.ListSnapshotsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListSnapshots", arg0, arg1) -} - -// Mock of NodeClient interface -type MockNodeClient struct { - ctrl *gomock.Controller - recorder *_MockNodeClientRecorder -} - -// Recorder for MockNodeClient (not exported) -type _MockNodeClientRecorder struct { - mock *MockNodeClient -} - -func NewMockNodeClient(ctrl *gomock.Controller) *MockNodeClient { - mock := &MockNodeClient{ctrl: ctrl} - mock.recorder = &_MockNodeClientRecorder{mock} - return mock -} - -func (_m *MockNodeClient) EXPECT() *_MockNodeClientRecorder { - return _m.recorder +// CreateSnapshot indicates an expected call of CreateSnapshot +func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) } -func (_m *MockNodeClient) NodeStageVolume(ctx context.Context, in *v0.NodeStageVolumeRequest, opts ...grpc.CallOption) (*v0.NodeStageVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeStageVolume", _s...) 
- ret0, _ := ret[0].(*v0.NodeStageVolumeResponse) +// CreateVolume mocks base method +func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { + ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeStageVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeStageVolume", _s...) +// CreateVolume indicates an expected call of CreateVolume +func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) } -func (_m *MockNodeClient) NodeUnstageVolume(ctx context.Context, in *v0.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*v0.NodeUnstageVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeUnstageVolume", _s...) - ret0, _ := ret[0].(*v0.NodeUnstageVolumeResponse) +// DeleteSnapshot mocks base method +func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteSnapshotResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeUnstageVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnstageVolume", _s...) +// DeleteSnapshot indicates an expected call of DeleteSnapshot +func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) } -func (_m *MockNodeClient) NodePublishVolume(ctx context.Context, in *v0.NodePublishVolumeRequest, opts ...grpc.CallOption) (*v0.NodePublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodePublishVolume", _s...) - ret0, _ := ret[0].(*v0.NodePublishVolumeResponse) +// DeleteVolume mocks base method +func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { + ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodePublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodePublishVolume", _s...) 
+// DeleteVolume indicates an expected call of DeleteVolume +func (mr *MockControllerServerMockRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockControllerServer)(nil).DeleteVolume), arg0, arg1) } -func (_m *MockNodeClient) NodeUnpublishVolume(ctx context.Context, in *v0.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*v0.NodeUnpublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeUnpublishVolume", _s...) - ret0, _ := ret[0].(*v0.NodeUnpublishVolumeResponse) +// GetCapacity mocks base method +func (m *MockControllerServer) GetCapacity(arg0 context.Context, arg1 *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { + ret := m.ctrl.Call(m, "GetCapacity", arg0, arg1) + ret0, _ := ret[0].(*csi.GetCapacityResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeUnpublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnpublishVolume", _s...) +// GetCapacity indicates an expected call of GetCapacity +func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) } -func (_m *MockNodeClient) NodeGetId(ctx context.Context, in *v0.NodeGetIdRequest, opts ...grpc.CallOption) (*v0.NodeGetIdResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeGetId", _s...) - ret0, _ := ret[0].(*v0.NodeGetIdResponse) +// ListSnapshots mocks base method +func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) + ret0, _ := ret[0].(*csi.ListSnapshotsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeGetId(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetId", _s...) +// ListSnapshots indicates an expected call of ListSnapshots +func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) } -func (_m *MockNodeClient) NodeGetCapabilities(ctx context.Context, in *v0.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*v0.NodeGetCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeGetCapabilities", _s...) - ret0, _ := ret[0].(*v0.NodeGetCapabilitiesResponse) +// ListVolumes mocks base method +func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { + ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) + ret0, _ := ret[0].(*csi.ListVolumesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeGetCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) 
- return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetCapabilities", _s...) +// ListVolumes indicates an expected call of ListVolumes +func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1) } -func (_m *MockNodeClient) NodeGetInfo(ctx context.Context, in *v0.NodeGetInfoRequest, opts ...grpc.CallOption) (*v0.NodeGetInfoResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeGetInfo", _s...) - ret0, _ := ret[0].(*v0.NodeGetInfoResponse) +// ValidateVolumeCapabilities mocks base method +func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ValidateVolumeCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeGetInfo(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetInfo", _s...) +// ValidateVolumeCapabilities indicates an expected call of ValidateVolumeCapabilities +func (mr *MockControllerServerMockRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ValidateVolumeCapabilities), arg0, arg1) } -// Mock of NodeServer interface +// MockNodeServer is a mock of NodeServer interface type MockNodeServer struct { ctrl *gomock.Controller - recorder *_MockNodeServerRecorder + recorder *MockNodeServerMockRecorder } -// Recorder for MockNodeServer (not exported) -type _MockNodeServerRecorder struct { +// MockNodeServerMockRecorder is the mock recorder for MockNodeServer +type MockNodeServerMockRecorder struct { mock *MockNodeServer } +// NewMockNodeServer creates a new mock instance func NewMockNodeServer(ctrl *gomock.Controller) *MockNodeServer { mock := &MockNodeServer{ctrl: ctrl} - mock.recorder = &_MockNodeServerRecorder{mock} + mock.recorder = &MockNodeServerMockRecorder{mock} return mock } -func (_m *MockNodeServer) EXPECT() *_MockNodeServerRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { + return m.recorder } -func (_m *MockNodeServer) NodeStageVolume(_param0 context.Context, _param1 *v0.NodeStageVolumeRequest) (*v0.NodeStageVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodeStageVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeStageVolumeResponse) +// NodeGetCapabilities mocks base method +func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeStageVolume", arg0, arg1) +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeServerMockRecorder) 
NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) } -func (_m *MockNodeServer) NodeUnstageVolume(_param0 context.Context, _param1 *v0.NodeUnstageVolumeRequest) (*v0.NodeUnstageVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodeUnstageVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeUnstageVolumeResponse) +// NodeGetInfo mocks base method +func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnstageVolume", arg0, arg1) +// NodeGetInfo indicates an expected call of NodeGetInfo +func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) } -func (_m *MockNodeServer) NodePublishVolume(_param0 context.Context, _param1 *v0.NodePublishVolumeRequest) (*v0.NodePublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodePublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodePublishVolumeResponse) +// NodeGetVolumeStats mocks base method +func (m *MockNodeServer) NodeGetVolumeStats(arg0 context.Context, arg1 *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + ret := m.ctrl.Call(m, "NodeGetVolumeStats", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetVolumeStatsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodePublishVolume", arg0, arg1) +// NodeGetVolumeStats indicates an expected call of NodeGetVolumeStats +func (mr *MockNodeServerMockRecorder) NodeGetVolumeStats(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetVolumeStats", reflect.TypeOf((*MockNodeServer)(nil).NodeGetVolumeStats), arg0, arg1) } -func (_m *MockNodeServer) NodeUnpublishVolume(_param0 context.Context, _param1 *v0.NodeUnpublishVolumeRequest) (*v0.NodeUnpublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodeUnpublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeUnpublishVolumeResponse) +// NodePublishVolume mocks base method +func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodePublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodePublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnpublishVolume", arg0, arg1) +// NodePublishVolume indicates an expected call of NodePublishVolume +func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) } -func (_m *MockNodeServer) NodeGetId(_param0 context.Context, _param1 *v0.NodeGetIdRequest) 
(*v0.NodeGetIdResponse, error) { - ret := _m.ctrl.Call(_m, "NodeGetId", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeGetIdResponse) +// NodeStageVolume mocks base method +func (m *MockNodeServer) NodeStageVolume(arg0 context.Context, arg1 *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeStageVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeGetId(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetId", arg0, arg1) +// NodeStageVolume indicates an expected call of NodeStageVolume +func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) } -func (_m *MockNodeServer) NodeGetCapabilities(_param0 context.Context, _param1 *v0.NodeGetCapabilitiesRequest) (*v0.NodeGetCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "NodeGetCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeGetCapabilitiesResponse) +// NodeUnpublishVolume mocks base method +func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnpublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetCapabilities", arg0, arg1) +// NodeUnpublishVolume indicates an expected call of NodeUnpublishVolume +func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) } -func (_m *MockNodeServer) NodeGetInfo(_param0 context.Context, _param1 *v0.NodeGetInfoRequest) (*v0.NodeGetInfoResponse, error) { - ret := _m.ctrl.Call(_m, "NodeGetInfo", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeGetInfoResponse) +// NodeUnstageVolume mocks base method +func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnstageVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetInfo", arg0, arg1) +// NodeUnstageVolume indicates an expected call of NodeUnstageVolume +func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go new file mode 100644 index 000000000..10ea5f353 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go @@ -0,0 +1,18 @@ +package apitest + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" +) + +func TestMyDriver(t *testing.T) { + config 
:= &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + sanity.Test(t, config) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go new file mode 100644 index 000000000..bca267cb7 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go @@ -0,0 +1,42 @@ +package embedded + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestMyDriverGinkgo(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CSI Sanity Test Suite") +} + +// The test suite into which the sanity tests get embedded may already +// have before/after suite functions. There can only be one such +// function. Here we define empty ones because then Ginkgo +// will start complaining at runtime when invoking the embedded case +// in hack/e2e.sh if a PR adds back such functions in the sanity test +// code. +var _ = BeforeSuite(func() {}) +var _ = AfterSuite(func() {}) + +var _ = Describe("MyCSIDriver", func() { + Context("Config A", func() { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + BeforeEach(func() {}) + + AfterEach(func() {}) + + Describe("CSI Driver Test Suite", func() { + sanity.GinkgoTest(config) + }) + }) +}) diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh index 81f3a02eb..baf4c3045 100755 --- a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh @@ -11,10 +11,10 @@ CSI_MOCK_VERSION="master" # See https://github.com/grpc/grpc/blob/master/doc/naming.md runTest() { - CSI_ENDPOINT=$1 mock & + CSI_ENDPOINT=$1 ./bin/mock & local pid=$! - csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? kill -9 $pid if [ $ret -ne 0 ] ; then @@ -24,10 +24,10 @@ runTest() runTestWithCreds() { - CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true mock & + CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true ./bin/mock & local pid=$! - csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? kill -9 $pid if [ $ret -ne 0 ] ; then @@ -35,7 +35,26 @@ runTestWithCreds() fi } -go install ./mock || exit 1 +runTestAPI() +{ + CSI_ENDPOINT=$1 ./bin/mock & + local pid=$! + + GOCACHE=off go test -v ./hack/_apitest/api_test.go; ret=$? + + if [ $ret -ne 0 ] ; then + exit $ret + fi + + GOCACHE=off go test -v ./hack/_embedded/embedded_test.go; ret=$? 
+ kill -9 $pid + + if [ $ret -ne 0 ] ; then + exit $ret + fi +} + +make cd cmd/csi-sanity make clean install || exit 1 @@ -47,4 +66,7 @@ rm -f $UDS runTestWithCreds "${UDS}" "${UDS}" rm -f $UDS +runTestAPI "${UDS}" +rm -f $UDS + exit 0 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md index d35e2d26e..8274aa2c6 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md @@ -1,2 +1,22 @@ # Mock CSI Driver -Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock` +Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock`. +It can be used for testing of Container Orchestrators that implement client side +of CSI interface. + +``` +Usage of mock: + -disable-attach + Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability. + -name string + CSI driver name. (default "io.kubernetes.storage.mock") +``` + +It prints all received CSI messages to stdout encoded as json, so a test can check that +CO sent the right CSI message. + +Example of such output: + +``` +gRPCCall: {"Method":"/csi.v0.Controller/ControllerGetCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":6}}},{"Type":{"Rpc":{"type":5}}},{"Type":{"Rpc":{"type":2}}}]},"Error":""} +gRPCCall: {"Method":"/csi.v0.Controller/ControllerPublishVolume","Request":{"volume_id":"12","node_id":"some-fake-node-id","volume_capability":{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}},"Response":null,"Error":"rpc error: code = NotFound desc = Not matching Node ID some-fake-node-id to Mock Node ID io.kubernetes.storage.mock"} +``` diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go new file mode 100644 index 000000000..89835e11f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go @@ -0,0 +1,89 @@ +package cache + +import ( + "strings" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +type SnapshotCache interface { + Add(snapshot Snapshot) + + Delete(i int) + + List(ready bool) []csi.Snapshot + + FindSnapshot(k, v string) (int, Snapshot) +} + +type Snapshot struct { + Name string + Parameters map[string]string + SnapshotCSI csi.Snapshot +} + +type snapshotCache struct { + snapshotsRWL sync.RWMutex + snapshots []Snapshot +} + +func NewSnapshotCache() SnapshotCache { + return &snapshotCache{ + snapshots: make([]Snapshot, 0), + } +} + +func (snap *snapshotCache) Add(snapshot Snapshot) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + snap.snapshots = append(snap.snapshots, snapshot) +} + +func (snap *snapshotCache) Delete(i int) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + copy(snap.snapshots[i:], snap.snapshots[i+1:]) + snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] +} + +func (snap *snapshotCache) List(ready bool) []csi.Snapshot { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshots := make([]csi.Snapshot, 0) + for _, v := range snap.snapshots { + if v.SnapshotCSI.GetReadyToUse() { + snapshots = append(snapshots, v.SnapshotCSI) + } + } + + return snapshots +} + +func (snap *snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshotIdx := -1 + 
for i, vi := range snap.snapshots { + switch k { + case "id": + if strings.EqualFold(v, vi.SnapshotCSI.GetSnapshotId()) { + return i, vi + } + case "sourceVolumeId": + if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { + return i, vi + } + case "name": + if vi.Name == v { + return i, vi + } + } + } + + return snapshotIdx, Snapshot{} +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go index d66d1881d..486d383be 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go @@ -16,6 +16,7 @@ limitations under the License. package main import ( + "flag" "fmt" "net" "os" @@ -28,6 +29,12 @@ import ( ) func main() { + var config service.Config + flag.BoolVar(&config.DisableAttach, "disable-attach", false, "Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability.") + flag.StringVar(&config.DriverName, "name", service.Name, "CSI driver name.") + flag.Int64Var(&config.AttachLimit, "attach-limit", 0, "number of attachable volumes on a node") + flag.Parse() + endpoint := os.Getenv("CSI_ENDPOINT") if len(endpoint) == 0 { fmt.Println("CSI_ENDPOINT must be defined and must be a path") @@ -39,7 +46,7 @@ func main() { } // Create mock driver - s := service.New() + s := service.New(config) servers := &driver.CSIDriverServers{ Controller: s, Identity: s, diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml index 4bc9c578a..e7c9f20d8 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml @@ -10,3 +10,7 @@ NodeStageVolumeSecret: secretKey: secretval5 NodePublishVolumeSecret: secretKey: secretval6 +CreateSnapshotSecret: + secretKey: secretval7 +DeleteSnapshotSecret: + secretKey: secretval8 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go index d4a44f849..eace79f8c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go @@ -4,6 +4,7 @@ import ( "fmt" "math" "path" + "reflect" "strconv" log "github.com/sirupsen/logrus" @@ -11,11 +12,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" ) const ( MaxStorageCapacity = tib + ReadOnlyKey = "readonly" ) func (s *service) CreateVolume( @@ -60,7 +62,7 @@ func (s *service) CreateVolume( s.volsRWL.Lock() defer s.volsRWL.Unlock() s.vols = append(s.vols, v) - MockVolumes[v.Id] = Volume{ + MockVolumes[v.GetVolumeId()] = Volume{ VolumeCSI: v, NodeID: "", ISStaged: false, @@ -106,6 +108,10 @@ func (s *service) ControllerPublishVolume( req *csi.ControllerPublishVolumeRequest) ( *csi.ControllerPublishVolumeResponse, error) { + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + if len(req.VolumeId) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") } @@ -134,22 +140,49 @@ func (s *service) ControllerPublishVolume( devPathKey := path.Join(req.NodeId, "dev") // Check to see if the volume is already published. 
- if device := v.Attributes[devPathKey]; device != "" { + if device := v.VolumeContext[devPathKey]; device != "" { + var volRo bool + var roVal string + if ro, ok := v.VolumeContext[ReadOnlyKey]; ok { + roVal = ro + } + + if roVal == "true" { + volRo = true + } else { + volRo = false + } + + // Check if readonly flag is compatible with the publish request. + if req.GetReadonly() != volRo { + return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") + } + return &csi.ControllerPublishVolumeResponse{ - PublishInfo: map[string]string{ - "device": device, + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, }, }, nil } + var roVal string + if req.GetReadonly() { + roVal = "true" + } else { + roVal = "false" + } + // Publish the volume. device := "/dev/mock" - v.Attributes[devPathKey] = device + v.VolumeContext[devPathKey] = device + v.VolumeContext[ReadOnlyKey] = roVal s.vols[i] = v return &csi.ControllerPublishVolumeResponse{ - PublishInfo: map[string]string{ - "device": device, + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, }, }, nil } @@ -159,6 +192,10 @@ func (s *service) ControllerUnpublishVolume( req *csi.ControllerUnpublishVolumeRequest) ( *csi.ControllerUnpublishVolumeResponse, error) { + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + if len(req.VolumeId) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") } @@ -186,12 +223,13 @@ func (s *service) ControllerUnpublishVolume( devPathKey := path.Join(nodeID, "dev") // Check to see if the volume is already unpublished. - if v.Attributes[devPathKey] == "" { + if v.VolumeContext[devPathKey] == "" { return &csi.ControllerUnpublishVolumeResponse{}, nil } // Unpublish the volume. 
- delete(v.Attributes, devPathKey) + delete(v.VolumeContext, devPathKey) + delete(v.VolumeContext, ReadOnlyKey) s.vols[i] = v return &csi.ControllerUnpublishVolumeResponse{}, nil @@ -214,7 +252,11 @@ func (s *service) ValidateVolumeCapabilities( } return &csi.ValidateVolumeCapabilitiesResponse{ - Supported: true, + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: req.GetVolumeContext(), + VolumeCapabilities: req.GetVolumeCapabilities(), + Parameters: req.GetParameters(), + }, }, nil } @@ -308,51 +350,228 @@ func (s *service) ControllerGetCapabilities( req *csi.ControllerGetCapabilitiesRequest) ( *csi.ControllerGetCapabilitiesResponse, error) { - return &csi.ControllerGetCapabilitiesResponse{ - Capabilities: []*csi.ControllerServiceCapability{ - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, - }, + caps := []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, }, }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, }, }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, - }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, }, }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, - }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, }, }, }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + } + + if !s.config.DisableAttach { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: caps, }, nil } func (s *service) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { - return nil, status.Error(codes.InvalidArgument, "Not Implemented") + // Check arguments + if len(req.GetName()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") + } + if len(req.GetSourceVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") + } + + // Check to see if the snapshot already exists. 
+ if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { + // Requested snapshot name already exists + if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) + } + return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil + } + + // Create the snapshot and add it to the service's in-mem snapshot slice. + snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) + s.snapshots.Add(snapshot) + + return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil } func (s *service) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { - return nil, status.Error(codes.InvalidArgument, "Not Implemented") + + // If the snapshot is not specified, return error + if len(req.SnapshotId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") + } + + // If the snapshot does not exist then return an idempotent response. + i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) + if i < 0 { + return &csi.DeleteSnapshotResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + s.snapshots.Delete(i) + log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + return &csi.DeleteSnapshotResponse{}, nil } func (s *service) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - return nil, status.Error(codes.InvalidArgument, "Not Implemented") + + // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. + if len(req.GetSnapshotId()) != 0 { + return getSnapshotById(s, req) + } + + // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. + if len(req.GetSourceVolumeId()) != 0 { + return getSnapshotByVolumeId(s, req) + } + + // case 3: no parameter is set, so we return all the snapshots. + return getAllSnapshots(s, req) +} + +func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSnapshotId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + + if len(req.GetSourceVolumeId()) != 0 { + if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { + return &csi.ListSnapshotsResponse{}, nil + } + } + + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSourceVolumeId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Copy the mock snapshots into a new slice in order to avoid + // locking the service's snapshot slice for the duration of the + // ListSnapshots RPC. 
+ readyToUse := true + snapshots := s.snapshots.List(readyToUse) + + var ( + ulenSnapshots = int32(len(snapshots)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenSnapshots { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(snapshots)=%d", + startingToken, ulenSnapshots) + } + + // Discern the number of remaining entries. + rem := ulenSnapshots - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListSnapshotsResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListSnapshotsResponse_Entry{ + Snapshot: &snapshots[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenSnapshots { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListSnapshotsResponse{ + Entries: entries, + NextToken: nextToken, + }, nil } diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go index c83daea5f..7e8735a93 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go @@ -3,7 +3,8 @@ package service import ( "golang.org/x/net/context" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" ) func (s *service) GetPluginInfo( @@ -12,7 +13,7 @@ func (s *service) GetPluginInfo( *csi.GetPluginInfoResponse, error) { return &csi.GetPluginInfoResponse{ - Name: Name, + Name: s.config.DriverName, VendorVersion: VendorVersion, Manifest: Manifest, }, nil @@ -23,7 +24,9 @@ func (s *service) Probe( req *csi.ProbeRequest) ( *csi.ProbeResponse, error) { - return &csi.ProbeResponse{}, nil + return &csi.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, nil } func (s *service) GetPluginCapabilities( diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go index 0321c7405..886a219a7 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go @@ -8,7 +8,7 @@ import ( "golang.org/x/net/context" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" ) func (s *service) NodeStageVolume( @@ -16,11 +16,15 @@ func (s *service) NodeStageVolume( req *csi.NodeStageVolumeRequest) ( *csi.NodeStageVolumeResponse, error) { - device, ok := req.PublishInfo["device"] + device, ok := req.PublishContext["device"] if !ok { - return nil, status.Error( - codes.InvalidArgument, - "stage volume info 'device' key required") + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } } if len(req.GetVolumeId()) == 0 { @@ -48,14 +52,14 @@ func (s *service) NodeStageVolume( nodeStgPathKey := path.Join(s.nodeID, 
req.StagingTargetPath) // Check to see if the volume has already been staged. - if v.Attributes[nodeStgPathKey] != "" { + if v.VolumeContext[nodeStgPathKey] != "" { // TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS" // if the capabilities don't match. return &csi.NodeStageVolumeResponse{}, nil } // Stage the volume. - v.Attributes[nodeStgPathKey] = device + v.VolumeContext[nodeStgPathKey] = device s.vols[i] = v return &csi.NodeStageVolumeResponse{}, nil @@ -87,12 +91,12 @@ func (s *service) NodeUnstageVolume( nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) // Check to see if the volume has already been unstaged. - if v.Attributes[nodeStgPathKey] == "" { + if v.VolumeContext[nodeStgPathKey] == "" { return &csi.NodeUnstageVolumeResponse{}, nil } // Unpublish the volume. - delete(v.Attributes, nodeStgPathKey) + delete(v.VolumeContext, nodeStgPathKey) s.vols[i] = v return &csi.NodeUnstageVolumeResponse{}, nil @@ -103,11 +107,15 @@ func (s *service) NodePublishVolume( req *csi.NodePublishVolumeRequest) ( *csi.NodePublishVolumeResponse, error) { - device, ok := req.PublishInfo["device"] + device, ok := req.PublishContext["device"] if !ok { - return nil, status.Error( - codes.InvalidArgument, - "publish volume info 'device' key required") + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } } if len(req.GetVolumeId()) == 0 { @@ -135,7 +143,7 @@ func (s *service) NodePublishVolume( nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) // Check to see if the volume has already been published. - if v.Attributes[nodeMntPathKey] != "" { + if v.VolumeContext[nodeMntPathKey] != "" { // Requests marked Readonly fail due to volumes published by // the Mock driver supporting only RW mode. @@ -148,9 +156,9 @@ func (s *service) NodePublishVolume( // Publish the volume. if req.GetStagingTargetPath() != "" { - v.Attributes[nodeMntPathKey] = req.GetStagingTargetPath() + v.VolumeContext[nodeMntPathKey] = req.GetStagingTargetPath() } else { - v.Attributes[nodeMntPathKey] = device + v.VolumeContext[nodeMntPathKey] = device } s.vols[i] = v @@ -182,27 +190,17 @@ func (s *service) NodeUnpublishVolume( nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) // Check to see if the volume has already been unpublished. - if v.Attributes[nodeMntPathKey] == "" { + if v.VolumeContext[nodeMntPathKey] == "" { return &csi.NodeUnpublishVolumeResponse{}, nil } // Unpublish the volume. 
- delete(v.Attributes, nodeMntPathKey) + delete(v.VolumeContext, nodeMntPathKey) s.vols[i] = v return &csi.NodeUnpublishVolumeResponse{}, nil } -func (s *service) NodeGetId( - ctx context.Context, - req *csi.NodeGetIdRequest) ( - *csi.NodeGetIdResponse, error) { - - return &csi.NodeGetIdResponse{ - NodeId: s.nodeID, - }, nil -} - func (s *service) NodeGetCapabilities( ctx context.Context, req *csi.NodeGetCapabilitiesRequest) ( @@ -230,7 +228,17 @@ func (s *service) NodeGetCapabilities( func (s *service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - return &csi.NodeGetInfoResponse{ + csiNodeResponse := &csi.NodeGetInfoResponse{ NodeId: s.nodeID, - }, nil + } + if s.config.AttachLimit > 0 { + csiNodeResponse.MaxVolumesPerNode = s.config.AttachLimit + } + return csiNodeResponse, nil +} + +func (s *service) NodeGetVolumeStats(ctx context.Context, + req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + return &csi.NodeGetVolumeStatsResponse{}, nil + } diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go index dccad79cd..2254ccb83 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go @@ -6,8 +6,11 @@ import ( "sync" "sync/atomic" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-test/mock/cache" "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes" ) const ( @@ -15,7 +18,7 @@ const ( Name = "io.kubernetes.storage.mock" // VendorVersion is the version returned by GetPluginInfo. - VendorVersion = "0.2.0" + VendorVersion = "0.3.0" ) // Manifest is the SP's manifest. @@ -23,6 +26,12 @@ var Manifest = map[string]string{ "url": "https://github.com/kubernetes-csi/csi-test/mock", } +type Config struct { + DisableAttach bool + DriverName string + AttachLimit int64 +} + // Service is the CSI Mock service provider. type Service interface { csi.ControllerServer @@ -32,10 +41,13 @@ type Service interface { type service struct { sync.Mutex - nodeID string - vols []csi.Volume - volsRWL sync.RWMutex - volsNID uint64 + nodeID string + vols []csi.Volume + volsRWL sync.RWMutex + volsNID uint64 + snapshots cache.SnapshotCache + snapshotsNID uint64 + config Config } type Volume struct { @@ -51,14 +63,23 @@ type Volume struct { var MockVolumes map[string]Volume // New returns a new Service. 
-func New() Service { - s := &service{nodeID: Name} +func New(config Config) Service { + s := &service{ + nodeID: config.DriverName, + config: config, + } + s.snapshots = cache.NewSnapshotCache() s.vols = []csi.Volume{ s.newVolume("Mock Volume 1", gib100), s.newVolume("Mock Volume 2", gib100), s.newVolume("Mock Volume 3", gib100), } MockVolumes = map[string]Volume{} + + s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"})) + return s } @@ -73,8 +94,8 @@ const ( func (s *service) newVolume(name string, capcity int64) csi.Volume { return csi.Volume{ - Id: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), - Attributes: map[string]string{"name": name}, + VolumeId: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), + VolumeContext: map[string]string{"name": name}, CapacityBytes: capcity, } } @@ -91,11 +112,11 @@ func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) { for i, vi := range s.vols { switch k { case "id": - if strings.EqualFold(v, vi.Id) { + if strings.EqualFold(v, vi.GetVolumeId()) { return i, vi } case "name": - if n, ok := vi.Attributes["name"]; ok && strings.EqualFold(v, n) { + if n, ok := vi.VolumeContext["name"]; ok && strings.EqualFold(v, n) { return i, vi } } @@ -109,3 +130,18 @@ func (s *service) findVolByName( return s.findVol("name", name) } + +func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { + + ptime := ptypes.TimestampNow() + return cache.Snapshot{ + Name: name, + Parameters: parameters, + SnapshotCSI: csi.Snapshot{ + SnapshotId: fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)), + CreationTime: ptime, + SourceVolumeId: sourceVolumeId, + ReadyToUse: true, + }, + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md index 747744ea4..fd30f1922 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md @@ -13,13 +13,50 @@ Golang `TestXXX` functions. For example: ```go func TestMyDriver(t *testing.T) { - // Setup the full driver and its environment - ... setup driver ... + // Setup the full driver and its environment + ... setup driver ... + config := &sanity.Config{ + TargetPath: ... + StagingPath: ... + Address: endpoint, + } - // Now call the test suite - sanity.Test(t, driverEndpointAddress, "/mnt") + + // Now call the test suite + sanity.Test(t, config) } ``` +Only one such test function is supported because under the hood a +Ginkgo test suite gets constructed and executed by the call. + +Alternatively, the tests can also be embedded inside a Ginkgo test +suite. In that case it is possible to define multiple tests with +different configurations: + +```go +var _ = Describe("MyCSIDriver", func () { + Context("Config A", func () { + var config &sanity.Config + + BeforeEach(func() { + //... setup driver and config... + }) + + AfterEach(func() { + //...tear down driver... 
+ }) + + Describe("CSI sanity", func() { + sanity.GinkgoTest(config) + }) + }) + + Context("Config B", func () { + // other configs + }) +}) +``` + ## Command line program Please see [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go new file mode 100644 index 000000000..65a30334f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + "context" + "log" + + "github.com/container-storage-interface/spec/lib/go/csi" + + . "github.com/onsi/ginkgo" +) + +// VolumeInfo keeps track of the information needed to delete a volume. +type VolumeInfo struct { + // Node on which the volume was published, empty if none + // or publishing is not supported. + NodeID string + + // Volume ID assigned by CreateVolume. + VolumeID string +} + +// Cleanup keeps track of resources, in particular volumes, which need +// to be freed when testing is done. +type Cleanup struct { + Context *SanityContext + ControllerClient csi.ControllerClient + NodeClient csi.NodeClient + ControllerPublishSupported bool + NodeStageSupported bool + + // Maps from volume name to the node ID for which the volume + // is published and the volume ID. + volumes map[string]VolumeInfo +} + +// RegisterVolume adds or updates an entry for the volume with the +// given name. +func (cl *Cleanup) RegisterVolume(name string, info VolumeInfo) { + if cl.volumes == nil { + cl.volumes = make(map[string]VolumeInfo) + } + cl.volumes[name] = info +} + +// MaybeRegisterVolume adds or updates an entry for the volume with +// the given name if CreateVolume was successful. +func (cl *Cleanup) MaybeRegisterVolume(name string, vol *csi.CreateVolumeResponse, err error) { + if err == nil && vol.GetVolume().GetVolumeId() != "" { + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + } +} + +// UnregisterVolume removes the entry for the volume with the +// given name, thus preventing all cleanup operations for it. +func (cl *Cleanup) UnregisterVolume(name string) { + if cl.volumes != nil { + delete(cl.volumes, name) + } +} + +// DeleteVolumes stops using the registered volumes and tries to delete all of them. 
+func (cl *Cleanup) DeleteVolumes() { + if cl.volumes == nil { + return + } + logger := log.New(GinkgoWriter, "cleanup: ", 0) + ctx := context.Background() + + for name, info := range cl.volumes { + logger.Printf("deleting %s = %s", name, info.VolumeID) + if _, err := cl.NodeClient.NodeUnpublishVolume( + ctx, + &csi.NodeUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + TargetPath: cl.Context.Config.TargetPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnpublishVolume: %s", err) + } + + if cl.NodeStageSupported { + if _, err := cl.NodeClient.NodeUnstageVolume( + ctx, + &csi.NodeUnstageVolumeRequest{ + VolumeId: info.VolumeID, + StagingTargetPath: cl.Context.Config.StagingPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnstageVolume: %s", err) + } + } + + if cl.ControllerPublishSupported && info.NodeID != "" { + if _, err := cl.ControllerClient.ControllerUnpublishVolume( + ctx, + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + NodeId: info.NodeID, + Secrets: cl.Context.Secrets.ControllerUnpublishVolumeSecret, + }, + ); err != nil { + logger.Printf("warning: ControllerUnpublishVolume: %s", err) + } + } + + if _, err := cl.ControllerClient.DeleteVolume( + ctx, + &csi.DeleteVolumeRequest{ + VolumeId: info.VolumeID, + Secrets: cl.Context.Secrets.DeleteVolumeSecret, + }, + ); err != nil { + logger.Printf("error: DeleteVolume: %s", err) + } + + cl.UnregisterVolume(name) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 0fb22392c..830d6cdb4 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -17,13 +17,15 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi" + + "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -34,18 +36,27 @@ const ( // provisioned volumes. 10GB by default, can be overridden by // setting Config.TestVolumeSize. 
DefTestVolumeSize int64 = 10 * 1024 * 1024 * 1024 + + MaxNameLength int = 128 ) -func TestVolumeSize() int64 { - if config.TestVolumeSize > 0 { - return config.TestVolumeSize +func TestVolumeSize(sc *SanityContext) int64 { + if sc.Config.TestVolumeSize > 0 { + return sc.Config.TestVolumeSize } return DefTestVolumeSize } func verifyVolumeInfo(v *csi.Volume) { Expect(v).NotTo(BeNil()) - Expect(v.GetId()).NotTo(BeEmpty()) + Expect(v.GetVolumeId()).NotTo(BeEmpty()) +} + +func verifySnapshotInfo(snapshot *csi.Snapshot) { + Expect(snapshot).NotTo(BeNil()) + Expect(snapshot.GetSnapshotId()).NotTo(BeEmpty()) + Expect(snapshot.GetSourceVolumeId()).NotTo(BeEmpty()) + Expect(snapshot.GetCreationTime()).NotTo(BeZero()) } func isControllerCapabilitySupported( @@ -69,249 +80,922 @@ func isControllerCapabilitySupported( return false } -var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { +var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { var ( c csi.ControllerClient + n csi.NodeClient + + cl *Cleanup ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - }) + c = csi.NewControllerClient(sc.Conn) + n = csi.NewNodeClient(sc.Conn) - It("should return appropriate capabilities", func() { - caps, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{}) + cl = &Cleanup{ + NodeClient: n, + ControllerClient: c, + Context: sc, + } + }) - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) + AfterEach(func() { + cl.DeleteVolumes() + }) - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + Describe("ControllerGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.ControllerGetCapabilities( + context.Background(), + &csi.ControllerGetCapabilitiesRequest{}) - switch cap.GetRpc().GetType() { - case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: - case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: - case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: - case csi.ControllerServiceCapability_RPC_GET_CAPACITY: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: + case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: + case csi.ControllerServiceCapability_RPC_GET_CAPACITY: + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: + case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } } - } + }) }) -}) -var _ = Describe("GetCapacity [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + Describe("GetCapacity", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { + Skip("GetCapacity not supported") + } + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + It("should return capacity (no optional values added)", func() { + _, err := c.GetCapacity( + context.Background(), + &csi.GetCapacityRequest{}) + 
Expect(err).NotTo(HaveOccurred()) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { - Skip("GetCapacity not supported") - } + // Since capacity is int64 we will not be checking it + // The value of zero is a possible value. + }) }) - It("should return capacity (no optional values added)", func() { - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{}) - Expect(err).NotTo(HaveOccurred()) + Describe("ListVolumes", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { + Skip("ListVolumes not supported") + } + }) - // Since capacity is int64 we will not be checking it - // The value of zero is a possible value. - }) -}) + It("should return appropriate values (no optional values added)", func() { + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) -var _ = Describe("ListVolumes [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + for _, vol := range vols.GetEntries() { + verifyVolumeInfo(vol.GetVolume()) + } + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + // TODO: Add test to test for tokens - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { - Skip("ListVolumes not supported") - } + // TODO: Add test which checks list of volume is there when created, + // and not there when deleted. }) - It("should return appropriate values (no optional values added)", func() { - vols, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(vols).NotTo(BeNil()) + Describe("CreateVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("CreateVolume not supported") + } + }) + + It("should fail when no name is provided", func() { + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume("", vol, err) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capabilities are provided", func() { + name := uniqueString("sanity-controller-create-no-volume-capabilities") + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume(name, vol, err) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-single-no-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: 
sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - for _, vol := range vols.GetEntries() { - verifyVolumeInfo(vol.GetVolume()) - } - }) + By("cleaning up deleting the volume") - // TODO: Add test to test for tokens + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-single-with-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: TestVolumeSize(sc), + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + if serverError, ok := status.FromError(err); ok && + (serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented) { + Skip("Required bytes not supported") + } + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize(sc))) - // TODO: Add test which checks list of volume is there when created, - // and not there when deleted. 
-}) + By("cleaning up deleting the volume") -var _ = Describe("CreateVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should not fail when requesting to create a volume with already existing name and same capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice") + size := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + vol2, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol2).NotTo(BeNil()) + Expect(vol2.GetVolume()).NotTo(BeNil()) + Expect(vol2.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + Expect(vol1.GetVolume().GetVolumeId()).To(Equal(vol2.GetVolume().GetVolumeId())) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should fail when requesting to create a volume with already existing name and different capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice-different") + size1 := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + 
Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + size2 := 2 * TestVolumeSize(sc) + + _, err = c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size2, + LimitBytes: size2, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + It("should not fail when creating volume with maximum-length name", func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("CreateVolume not supported") - } + nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' + } + name := string(nameBytes) + By("creating a volume") + size := TestVolumeSize(sc) + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when no name is provided", func() { + Describe("DeleteVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("DeleteVolume not supported") + } + }) - req := &csi.CreateVolumeRequest{} + It("should fail when no volume id is provided", func() { - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - _, err := c.CreateVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + It("should succeed when an invalid volume id is used", func() { + + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: "reallyfakevolumeid", + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a volume") + name := uniqueString("sanity-controller-create-appropriate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // Delete Volume + By("deleting a volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when no volume capabilities are provided", func() { + Describe("ValidateVolumeCapabilities", func() { + It("should fail when no volume id is provided", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capabilities are provided", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-validate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ValidateVolumeCapabilities + By("validating volume capabilities") + valivolcap, err := c.ValidateVolumeCapabilities( + 
context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(valivolcap).NotTo(BeNil()) - req := &csi.CreateVolumeRequest{ - Name: "name", - } + // If confirmation is provided then it is REQUIRED to provide + // the volume capabilities + if valivolcap.GetConfirmed() != nil { + Expect(valivolcap.GetConfirmed().GetVolumeCapabilities()).NotTo(BeEmpty()) + } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + By("cleaning up deleting the volume") - _, err := c.CreateVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the requested volume does not exist", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "some-vol-id", + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }, + ) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) }) - It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + Describe("ControllerPublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerPublishVolume not supported") + } + }) - By("creating a volume") - name := "sanity" + It("should fail when no volume id is provided", func() { - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Secrets: sc.Secrets.ControllerPublishVolumeSecret, }, - }, - } + ) + Expect(err).To(HaveOccurred()) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - vol, err := c.CreateVolume(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + It("should fail when no node id is provided", func() { - By("cleaning up deleting the volume") + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + Secrets: 
sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capability is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + NodeId: "fakenode", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-publish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + By("cleaning up unpublishing the volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) - }) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) - It("should return 
appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { + It("should fail when the volume does not exist", func() { - By("creating a volume") - name := "sanity" + By("calling controller publish on a non-existent volume") - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "some-vol-id", + NodeId: "some-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should fail when the node does not exist", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-wrong-node") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: TestVolumeSize(), - }, - } + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: "some-fake-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) - vol, err := c.CreateVolume(context.Background(), req) - if serverError, ok := status.FromError(err); ok { - if serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented { - Skip("Required bytes not supported") - } else { - Expect(err).NotTo(HaveOccurred()) - } - } else { + By("cleaning up deleting the volume") + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: 
sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the volume is already published but is incompatible", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-published-incompatible") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize())) - } - By("cleaning up deleting the volume") + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } - - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } - - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) - }) - It("should not fail when requesting to create a volume with already exisiting name and same capacity.", func() { + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - By("creating a volume") - name := "sanity" - size := TestVolumeSize() + // ControllerPublishVolume + By("calling controllerpublish on that volume") - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -319,358 +1003,443 @@ var _ = Describe("CreateVolume [Controller Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - } + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).NotTo(HaveOccurred()) + Expect(conpubvol).NotTo(BeNil()) - vol1, err := c.CreateVolume(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(vol1).NotTo(BeNil()) - Expect(vol1.GetVolume()).NotTo(BeNil()) - Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + // Publish again with different attributes. 
+ pubReq.Readonly = true - req2 := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - } + conpubvol, err = c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) - if secrets != nil { - req2.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - vol2, err := c.CreateVolume(context.Background(), req2) - Expect(err).NotTo(HaveOccurred()) - Expect(vol2).NotTo(BeNil()) - Expect(vol2.GetVolume()).NotTo(BeNil()) - Expect(vol2.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) - Expect(vol1.GetVolume().GetId()).To(Equal(vol2.GetVolume().GetId())) + By("cleaning up unpublishing the volume") - By("cleaning up deleting the volume") + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol1.GetVolume().GetId(), - } + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when requesting to create a volume with already exisiting name and different capacity.", func() { - By("creating a volume") - name := "sanity" - size1 := TestVolumeSize() + Describe("ControllerUnpublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerUnpublishVolume not supported") + } + }) - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + It("should fail when no volume id is provided", func() { + + _, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-unpublish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ 
+ Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size1, - LimitBytes: size1, - }, - } - - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - vol1, err := c.CreateVolume(context.Background(), req) - Expect(err).ToNot(HaveOccurred()) - Expect(vol1).NotTo(BeNil()) - Expect(vol1.GetVolume()).NotTo(BeNil()) - Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - size2 := 2 * TestVolumeSize() - - req2 := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size2, - LimitBytes: size2, - }, - } - - if secrets != nil { - req2.ControllerCreateSecrets = secrets.CreateVolumeSecret - } - - _, err = c.CreateVolume(context.Background(), req2) - Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - - By("cleaning up deleting the volume") - - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol1.GetVolume().GetId(), - } + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + // ControllerUnpublishVolume + By("calling controllerunpublish on that volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) + _, 
err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) }) -var _ = Describe("DeleteVolume [Controller Server]", func() { +var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("DeleteVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { + Skip("ListSnapshots not supported") } }) - It("should fail when no volume id is provided", func() { - - req := &csi.DeleteVolumeRequest{} + It("should return appropriate values (no optional values added)", func() { + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + for _, snapshot := range snapshots.GetEntries() { + verifySnapshotInfo(snapshot.GetSnapshot()) } + }) - _, err := c.DeleteVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + It("should return snapshots that match the specify snapshot id", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - It("should succeed when an invalid volume id is used", func() { + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) - req := &csi.DeleteVolumeRequest{ - VolumeId: "reallyfakevolumeid", - } + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SnapshotId: snapshot.GetSnapshot().GetSnapshotId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(BeNumerically("==", 1)) + verifySnapshotInfo(snapshots.GetEntries()[0].GetSnapshot()) + Expect(snapshots.GetEntries()[0].GetSnapshot().GetSnapshotId()).To(Equal(snapshot.GetSnapshot().GetSnapshotId())) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return empty when the specify snapshot id is not exist", func() { - _, err := c.DeleteVolume(context.Background(), req) + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SnapshotId: "none-exist-id"}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) - It("should 
return appropriate values (no optional values added)", func() { + It("should return snapshots that match the specify source volume id)", func() { - // Create Volume First By("creating a volume") - name := "sanity" + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - createReq := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) - if secrets != nil { - createReq.ControllerCreateSecrets = secrets.CreateVolumeSecret + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SourceVolumeId: snapshot.GetSnapshot().GetSourceVolumeId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + for _, snap := range snapshots.GetEntries() { + verifySnapshotInfo(snap.GetSnapshot()) + Expect(snap.GetSnapshot().GetSourceVolumeId()).To(Equal(snapshot.GetSnapshot().GetSourceVolumeId())) } - vol, err := c.CreateVolume(context.Background(), createReq) - + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - // Delete Volume - By("deleting a volume") - - req := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + It("should return empty when the specify source volume id is not exist", func() { - _, err = c.DeleteVolume(context.Background(), req) + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SourceVolumeId: "none-exist-volume-id"}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) -}) -var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + It("check the presence of new snapshots in the snapshot list", func() { + // List Snapshots before creating new snapshots. 
+ snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - BeforeEach(func() { - c = csi.NewControllerClient(conn) - }) + totalSnapshots := len(snapshots.GetEntries()) + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - It("should fail when no volume id is provided", func() { + By("creating a snapshot") + snapReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-3", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) - _, err := c.ValidateVolumeCapabilities( + snapshots, err = c.ListSnapshots( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots + 1)) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - It("should fail when no volume capabilities are provided", func() { + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) - _, err := c.ValidateVolumeCapabilities( + // List snapshots and check if the deleted snapshot exists in the snapshot list. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots)) }) - It("should return appropriate values (no optional values added)", func() { + It("should return next token when a limited number of entries are requested", func() { + // minSnapshotCount is the minimum number of snapshots expected to exist, + // based on which paginated snapshot listing is performed. + minSnapshotCount := 5 + // maxEntried is the maximum entries in list snapshot request. + maxEntries := 2 + // currentTotalVols is the total number of volumes at a given time. It + // is used to verify that all the snapshots have been listed. + currentTotalSnapshots := 0 + + // Get the number of existing volumes. 
+ snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - // Create Volume First - By("creating a single node writer volume") - name := "sanity" + initialTotalSnapshots := len(snapshots.GetEntries()) + currentTotalSnapshots = initialTotalSnapshots - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } + createVols := make([]*csi.Volume, 0) + createSnapshots := make([]*csi.Snapshot, 0) + + // Ensure minimum minVolCount volumes exist. + if initialTotalSnapshots < minSnapshotCount { + + By("creating required new volumes") + requiredSnapshots := minSnapshotCount - initialTotalSnapshots + + for i := 1; i <= requiredSnapshots; i++ { + volReq := MakeCreateVolumeReq(sc, "volume"+strconv.Itoa(i)) + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + Expect(volume).NotTo(BeNil()) + createVols = append(createVols, volume.GetVolume()) + + snapReq := MakeCreateSnapshotReq(sc, "snapshot"+strconv.Itoa(i), volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + createSnapshots = append(createSnapshots, snapshot.GetSnapshot()) + } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + // Update the current total snapshots count. + currentTotalSnapshots += requiredSnapshots } - vol, err := c.CreateVolume(context.Background(), req) + // Request list snapshots with max entries maxEntries. + snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{ + MaxEntries: int32(maxEntries), + }) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + Expect(snapshots).NotTo(BeNil()) + + nextToken := snapshots.GetNextToken() + + Expect(len(snapshots.GetEntries())).To(Equal(maxEntries)) - // ValidateVolumeCapabilities - By("validating volume capabilities") - valivolcap, err := c.ValidateVolumeCapabilities( + // Request list snapshots with starting_token and no max entries. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: vol.GetVolume().GetId(), - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, + &csi.ListSnapshotsRequest{ + StartingToken: nextToken, }) Expect(err).NotTo(HaveOccurred()) - Expect(valivolcap).NotTo(BeNil()) - Expect(valivolcap.GetSupported()).To(BeTrue()) + Expect(snapshots).NotTo(BeNil()) - By("cleaning up deleting the volume") + // Ensure that all the remaining entries are returned at once. 
+ Expect(len(snapshots.GetEntries())).To(Equal(currentTotalSnapshots - maxEntries)) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + if initialTotalSnapshots < minSnapshotCount { - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the snapshots") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) + for _, snap := range createSnapshots { + delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + } + + By("cleaning up deleting the volumes") + + for _, vol := range createVols { + delVolReq := MakeDeleteVolumeReq(sc, vol.GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + } + } }) + }) -var _ = Describe("ControllerPublishVolume [Controller Server]", func() { +var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient - n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerPublishVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("DeleteSnapshot not supported") } }) - It("should fail when no volume id is provided", func() { + It("should fail when no snapshot id is provided", func() { - req := &csi.ControllerPublishVolumeRequest{} + req := &csi.DeleteSnapshotRequest{} - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.Secrets = sc.Secrets.DeleteSnapshotSecret } - _, err := c.ControllerPublishVolume(context.Background(), req) + _, err := c.DeleteSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) @@ -678,212 +1447,196 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no node id is provided", func() { + It("should succeed when an invalid snapshot id is used", func() { + + req := MakeDeleteSnapshotReq(sc, "reallyfakesnapshotid") + _, err := c.DeleteSnapshot(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "DeleteSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + // Create Snapshot First + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "DeleteSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), 
delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) +}) + +var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityContext) { + var ( + c csi.ControllerClient + ) + + BeforeEach(func() { + c = csi.NewControllerClient(sc.Conn) + + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("CreateSnapshot not supported") + } + }) + + It("should fail when no name is provided", func() { - req := &csi.ControllerPublishVolumeRequest{ - VolumeId: "id", + req := &csi.CreateSnapshotRequest{ + SourceVolumeId: "testId", } - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret } - _, err := c.ControllerPublishVolume(context.Background(), req) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no volume capability is provided", func() { + It("should fail when no source volume id is provided", func() { - req := &csi.ControllerPublishVolumeRequest{ - VolumeId: "id", - NodeId: "fakenode", + req := &csi.CreateSnapshotRequest{ + Name: "name", } - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret } - _, err := c.ControllerPublishVolume(context.Background(), req) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should return appropriate values (no optional values added)", func() { - - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } - - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + It("should not fail when requesting to create a snapshot with already existing name and same SourceVolumeId.", func() { - vol, err := c.CreateVolume(context.Background(), req) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - - // ControllerPublishVolume - By("calling controllerpublish on that volume") - - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: 
&csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - } - - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret - } + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - - By("cleaning up unpublishing the volume") - - unpubReq := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - } - - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } - - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } - - _, err = c.DeleteVolume(context.Background(), delReq) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) -}) - -var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - n csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerUnpublishVolume not supported") - } - }) + It("should fail when requesting to create a snapshot with already existing name and different SourceVolumeId.", func() { - It("should fail when no volume id is provided", func() { + By("creating a volume") + volume, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-2")) + Expect(err).ToNot(HaveOccurred()) - req := &csi.ControllerUnpublishVolumeRequest{} + By("creating a snapshot with the created volume source id") + req1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), req1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - if secrets != nil { - req.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + volume2, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3")) + Expect(err).ToNot(HaveOccurred()) - _, err := c.ControllerUnpublishVolume(context.Background(), req) + By("creating a snapshot with the same name but different volume source id") + req2 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume2.GetVolume().GetVolumeId(), nil) + _, err = c.CreateSnapshot(context.Background(), req2) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) - 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) }) - It("should return appropriate values (no optional values added)", func() { + It("should not fail when creating snapshot with maximum-length name", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, + nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' } + name := string(nameBytes) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, name, volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - vol, err := c.CreateVolume(context.Background(), req) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + }) +}) - // ControllerPublishVolume - By("calling controllerpublish on that volume") +func MakeCreateVolumeReq(sc *SanityContext, name string) *csi.CreateVolumeRequest { + size1 := TestVolumeSize(sc) - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -891,45 +1644,55 @@ var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - } + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: 
size1, + LimitBytes: size1, + }, + Parameters: sc.Config.TestVolumeParameters, + } - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret - } + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateVolumeSecret + } - conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) + return req +} - // ControllerUnpublishVolume - By("calling controllerunpublish on that volume") +func MakeCreateSnapshotReq(sc *SanityContext, name, sourceVolumeId string, parameters map[string]string) *csi.CreateSnapshotRequest { + req := &csi.CreateSnapshotRequest{ + Name: name, + SourceVolumeId: sourceVolumeId, + Parameters: parameters, + } - unpubReq := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - } + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + return req +} - conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) +func MakeDeleteSnapshotReq(sc *SanityContext, id string) *csi.DeleteSnapshotRequest { + delSnapReq := &csi.DeleteSnapshotRequest{ + SnapshotId: id, + } - By("cleaning up deleting the volume") + if sc.Secrets != nil { + delSnapReq.Secrets = sc.Secrets.DeleteSnapshotSecret + } - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + return delSnapReq +} - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } +func MakeDeleteVolumeReq(sc *SanityContext, id string) *csi.DeleteVolumeRequest { + delVolReq := &csi.DeleteVolumeRequest{ + VolumeId: id, + } - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) - }) -}) + if sc.Secrets != nil { + delVolReq.Secrets = sc.Secrets.DeleteVolumeSecret + } + + return delVolReq +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index cb5aad48a..c1a5eb7ef 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -17,91 +17,83 @@ limitations under the License. package sanity import ( + "context" "fmt" "regexp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = Describe("GetPluginCapabilities [Identity Service]", func() { +var _ = DescribeSanity("Identity Service", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) - It("should return appropriate capabilities", func() { - req := &csi.GetPluginCapabilitiesRequest{} - res, err := c.GetPluginCapabilities(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("checking successful response") - Expect(res.GetCapabilities()).NotTo(BeNil()) - for _, cap := range res.GetCapabilities() { - switch cap.GetService().GetType() { - case csi.PluginCapability_Service_CONTROLLER_SERVICE: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + Describe("GetPluginCapabilities", func() { + It("should return appropriate capabilities", func() { + req := &csi.GetPluginCapabilitiesRequest{} + res, err := c.GetPluginCapabilities(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("checking successful response") + Expect(res.GetCapabilities()).NotTo(BeNil()) + for _, cap := range res.GetCapabilities() { + switch cap.GetService().GetType() { + case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + } } - } - }) - -}) + }) -var _ = Describe("Probe [Identity Service]", func() { - var ( - c csi.IdentityClient - ) - - BeforeEach(func() { - c = csi.NewIdentityClient(conn) }) - It("should return appropriate information", func() { - req := &csi.ProbeRequest{} - res, err := c.Probe(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying return status") - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code() == codes.FailedPrecondition || - serverError.Code() == codes.OK).To(BeTrue()) - }) -}) - -var _ = Describe("GetPluginInfo [Identity Server]", func() { - var ( - c csi.IdentityClient - ) - - BeforeEach(func() { - c = csi.NewIdentityClient(conn) + Describe("Probe", func() { + It("should return appropriate information", func() { + req := &csi.ProbeRequest{} + res, err := c.Probe(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying return status") + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code() == codes.FailedPrecondition || + serverError.Code() == codes.OK).To(BeTrue()) + + if res.GetReady() != nil { + Expect(res.GetReady().GetValue() == true || + res.GetReady().GetValue() == false).To(BeTrue()) + } + }) }) - It("should return appropriate information", func() { - req := &csi.GetPluginInfoRequest{} - res, err := c.GetPluginInfo(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying name size and characters") - Expect(res.GetName()).ToNot(HaveLen(0)) - Expect(len(res.GetName())).To(BeNumerically("<=", 63)) - Expect(regexp. - MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). 
- MatchString(res.GetName())).To(BeTrue()) + Describe("GetPluginInfo", func() { + It("should return appropriate information", func() { + req := &csi.GetPluginInfoRequest{} + res, err := c.GetPluginInfo(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying name size and characters") + Expect(res.GetName()).ToNot(HaveLen(0)) + Expect(len(res.GetName())).To(BeNumerically("<=", 63)) + Expect(regexp. + MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). + MatchString(res.GetName())).To(BeTrue()) + }) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index d57621dec..9bd9194b0 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -17,13 +17,13 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -38,7 +38,6 @@ func isNodeCapabilitySupported(c csi.NodeClient, &csi.NodeGetCapabilitiesRequest{}) Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) for _, cap := range caps.GetCapabilities() { Expect(cap.GetRpc()).NotTo(BeNil()) @@ -49,521 +48,470 @@ func isNodeCapabilitySupported(c csi.NodeClient, return false } -var _ = Describe("NodeGetCapabilities [Node Server]", func() { - var ( - c csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should return appropriate capabilities", func() { - caps, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{}) - - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) +func isPluginCapabilitySupported(c csi.IdentityClient, + capType csi.PluginCapability_Service_Type, +) bool { - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + caps, err := c.GetPluginCapabilities( + context.Background(), + &csi.GetPluginCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) - switch cap.GetRpc().GetType() { - case csi.NodeServiceCapability_RPC_UNKNOWN: - case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) - } + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetService()).NotTo(BeNil()) + if cap.GetService().GetType() == capType { + return true } - }) -}) + } + return false +} -var _ = Describe("NodeGetId [Node Server]", func() { +var _ = DescribeSanity("Node Service", func(sc *SanityContext) { var ( - c csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should return appropriate values", func() { - nid, err := c.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) + cl *Cleanup + c csi.NodeClient + s csi.ControllerClient - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - }) -}) - -var _ = Describe("NodePublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient 
controllerPublishSupported bool nodeStageSupported bool ) BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) + c = csi.NewNodeClient(sc.Conn) + s = csi.NewControllerClient(sc.Conn) + controllerPublishSupported = isControllerCapabilitySupported( s, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) + err := createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } + cl = &Cleanup{ + Context: sc, + NodeClient: c, + ControllerClient: s, + ControllerPublishSupported: controllerPublishSupported, + NodeStageSupported: nodeStageSupported, + } }) - It("should fail when no volume id is provided", func() { - - req := &csi.NodePublishVolumeRequest{} - - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret - } + AfterEach(func() { + cl.DeleteVolumes() + }) - _, err := c.NodePublishVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + Describe("NodeGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.NodeServiceCapability_RPC_UNKNOWN: + case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + case csi.NodeServiceCapability_RPC_GET_VOLUME_STATS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } + } + }) }) - It("should fail when no target path is provided", func() { + Describe("NodeGetInfo", func() { + var ( + i csi.IdentityClient + accessibilityConstraintSupported bool + ) - req := &csi.NodePublishVolumeRequest{ - VolumeId: "id", - } + BeforeEach(func() { + i = csi.NewIdentityClient(sc.Conn) + accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS) + }) - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret - } + It("should return approproate values", func() { + ninfo, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) - _, err := c.NodePublishVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) + Expect(ninfo).NotTo(BeNil()) + Expect(ninfo.GetNodeId()).NotTo(BeEmpty()) + Expect(ninfo.GetMaxVolumesPerNode()).NotTo(BeNumerically("<", 0)) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + if accessibilityConstraintSupported { + Expect(ninfo.GetAccessibleTopology()).NotTo(BeNil()) + } + }) }) - It("should fail when no volume capability is provided", func() { - - req := &csi.NodePublishVolumeRequest{ - VolumeId: "id", - TargetPath: config.TargetPath, - } - - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret - } + Describe("NodePublishVolume", func() { + It("should fail when no volume id is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Secrets: 
sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - _, err := c.NodePublishVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + It("should fail when no target path is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) - }) -}) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) -var _ = Describe("NodeUnpublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - nodeStageSupported bool - ) + It("should fail when no volume capability is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + TargetPath: sc.Config.TargetPath, + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) - if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no volume id is provided", func() { + Describe("NodeUnpublishVolume", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - It("should fail when no target path is provided", func() { + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should fail when no target path is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) -}) -// TODO: Tests for NodeStageVolume/NodeUnstageVolume -func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controllerPublishSupported, nodeStageSupported bool) { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } + Describe("NodeStageVolume", func() { + var ( + device string + ) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeStageVolume not supported") + } - vol, err := s.CreateVolume(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + device = "/dev/mock" + }) - By("getting a node id") - nid, err := c.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - var conpubvol *csi.ControllerPublishVolumeResponse - if controllerPublishSupported { - By("controller publishing volume") - - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + It("should fail when no volume id is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - } + ) + Expect(err).To(HaveOccurred()) - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - conpubvol, err = s.ControllerPublishVolume(context.Background(), pubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - } - // NodeStageVolume - if nodeStageSupported { - By("node staging volume") - nodeStageVolReq := &csi.NodeStageVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + It("should fail when no staging target path is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: "id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: 
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - StagingTargetPath: config.StagingPath, - } - if controllerPublishSupported { - nodeStageVolReq.PublishInfo = conpubvol.GetPublishInfo() - } - if secrets != nil { - nodeStageVolReq.NodeStageSecrets = secrets.NodeStageVolumeSecret - } - nodestagevol, err := c.NodeStageVolume( - context.Background(), nodeStageVolReq) - Expect(err).NotTo(HaveOccurred()) - Expect(nodestagevol).NotTo(BeNil()) - } - // NodePublishVolume - By("publishing the volume on a node") - nodepubvolRequest := &csi.NodePublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - TargetPath: config.TargetPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - } - if nodeStageSupported { - nodepubvolRequest.StagingTargetPath = config.StagingPath - } - if controllerPublishSupported { - nodepubvolRequest.PublishInfo = conpubvol.GetPublishInfo() - } - if secrets != nil { - nodepubvolRequest.NodePublishSecrets = secrets.NodePublishVolumeSecret - } - nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) - Expect(err).NotTo(HaveOccurred()) - Expect(nodepubvol).NotTo(BeNil()) + ) + Expect(err).To(HaveOccurred()) - // NodeUnpublishVolume - By("cleaning up calling nodeunpublish") - nodeunpubvol, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - TargetPath: config.TargetPath, + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) - if nodeStageSupported { - By("cleaning up calling nodeunstage") - nodeunstagevol, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - StagingTargetPath: config.StagingPath, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunstagevol).NotTo(BeNil()) - } - - if controllerPublishSupported { - By("cleaning up calling controllerunpublishing") + It("should fail when no volume capability is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: "id", + StagingTargetPath: sc.Config.StagingPath, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - unpubReq := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + }) - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + Describe("NodeUnstageVolume", func() { + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeUnstageVolume not supported") + } + }) - controllerunpubvol, err := s.ControllerUnpublishVolume(context.Background(), unpubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(controllerunpubvol).NotTo(BeNil()) - } + It("should fail when no 
volume id is provided", func() { - By("cleaning up deleting the volume") + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + }) + Expect(err).To(HaveOccurred()) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + It("should fail when no staging target path is provided", func() { - _, err = s.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) -} + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) -var _ = Describe("NodeStageVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - nodeStageSupported bool - device string - ) - - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - device = "/dev/mock" - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) - if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } else { - Skip("NodeStageVolume not supported") - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no volume id is provided", func() { + It("should work", func() { + name := uniqueString("sanity-node-full") - req := &csi.NodeStageVolumeRequest{ - StagingTargetPath: config.StagingPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + // Create Volume First + By("creating a single node writer volume") + vol, err := s.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, }, + Secrets: sc.Secrets.CreateVolumeSecret, }, - PublishInfo: map[string]string{ - "device": device, - }, - } - - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret - } - - _, err := c.NodeStageVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - It("should fail when no staging target path is provided", func() { + By("getting a node id") + nid, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - 
req := &csi.NodeStageVolumeRequest{ - VolumeId: "id", - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + var conpubvol *csi.ControllerPublishVolumeResponse + if controllerPublishSupported { + By("controller publishing volume") + + conpubvol, err = s.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + } + // NodeStageVolume + if nodeStageSupported { + By("node staging volume") + nodestagevol, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + StagingTargetPath: sc.Config.StagingPath, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - }, - PublishInfo: map[string]string{ - "device": device, - }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodestagevol).NotTo(BeNil()) } - - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret + // NodePublishVolume + By("publishing the volume on a node") + var stagingPath string + if nodeStageSupported { + stagingPath = sc.Config.StagingPath } - - _, err := c.NodeStageVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should fail when no volume capability is provided", func() { - - req := &csi.NodeStageVolumeRequest{ - VolumeId: "id", - StagingTargetPath: config.StagingPath, - PublishInfo: map[string]string{ - "device": device, + nodepubvol, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, + StagingTargetPath: stagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodePublishVolumeSecret, }, - } - - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret - } - - _, err := c.NodeStageVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) - }) -}) + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodepubvol).NotTo(BeNil()) -var _ = Describe("NodeUnstageVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - nodeStageSupported bool - ) + // NodeUnpublishVolume + By("cleaning up calling nodeunpublish") + nodeunpubvol, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunpubvol).NotTo(BeNil()) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) + By("cleaning up calling nodeunstage") + nodeunstagevol, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + StagingTargetPath: sc.Config.StagingPath, + }, + ) Expect(err).NotTo(HaveOccurred()) - } else { - Skip("NodeUnstageVolume not supported") + Expect(nodeunstagevol).NotTo(BeNil()) } - }) - - It("should fail when no volume id is provided", func() { - - _, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - StagingTargetPath: config.StagingPath, - }) - Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + if controllerPublishSupported { + By("cleaning up calling controllerunpublishing") + + controllerunpubvol, err := s.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(controllerunpubvol).NotTo(BeNil()) + } - It("should fail when no staging target path is provided", func() { + By("cleaning up deleting the volume") - _, err := c.NodeUnstageVolume( + _, err = s.DeleteVolume( context.Background(), - &csi.NodeUnstageVolumeRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index 58d63b702..e3c1684ed 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -17,10 +17,10 @@ limitations under the License. 
package sanity import ( + "crypto/rand" "fmt" "io/ioutil" "os" - "sync" "testing" "github.com/kubernetes-csi/csi-test/utils" @@ -40,58 +40,108 @@ type CSISecrets struct { ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` + CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` + DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` } -var ( - config *Config - conn *grpc.ClientConn - lock sync.Mutex - secrets *CSISecrets -) - -// Config provides the configuration for the sanity tests +// Config provides the configuration for the sanity tests. It +// needs to be initialized by the user of the sanity package. type Config struct { - TargetPath string - StagingPath string - Address string - SecretsFile string - TestVolumeSize int64 + TargetPath string + StagingPath string + Address string + SecretsFile string + + TestVolumeSize int64 + TestVolumeParametersFile string + TestVolumeParameters map[string]string +} + +// SanityContext holds the variables that each test can depend on. It +// gets initialized before each test block runs. +type SanityContext struct { + Config *Config + Conn *grpc.ClientConn + Secrets *CSISecrets + + connAddress string } -// Test will test the CSI driver at the specified address +// Test will test the CSI driver at the specified address by +// setting up a Ginkgo suite and running it. func Test(t *testing.T, reqConfig *Config) { - lock.Lock() - defer lock.Unlock() + path := reqConfig.TestVolumeParametersFile + if len(path) != 0 { + yamlFile, err := ioutil.ReadFile(path) + if err != nil { + panic(fmt.Sprintf("failed to read file %q: %v", path, err)) + } + err = yaml.Unmarshal(yamlFile, &reqConfig.TestVolumeParameters) + if err != nil { + panic(fmt.Sprintf("error unmarshaling yaml: %v", err)) + } + } - config = reqConfig + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) RegisterFailHandler(Fail) RunSpecs(t, "CSI Driver Test Suite") } -var _ = BeforeSuite(func() { +func GinkgoTest(reqConfig *Config) { + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) +} + +func (sc *SanityContext) setup() { var err error - if len(config.SecretsFile) > 0 { - secrets, err = loadSecrets(config.SecretsFile) + if len(sc.Config.SecretsFile) > 0 { + sc.Secrets, err = loadSecrets(sc.Config.SecretsFile) Expect(err).NotTo(HaveOccurred()) + } else { + sc.Secrets = &CSISecrets{} } - By("connecting to CSI driver") - conn, err = utils.Connect(config.Address) - Expect(err).NotTo(HaveOccurred()) + // It is possible that a test sets sc.Config.Address + // dynamically (and differently!) in a BeforeEach, so only + // reuse the connection if the address is still the same. 
+ if sc.Conn == nil || sc.connAddress != sc.Config.Address { + By("connecting to CSI driver") + sc.Conn, err = utils.Connect(sc.Config.Address) + Expect(err).NotTo(HaveOccurred()) + sc.connAddress = sc.Config.Address + } else { + By(fmt.Sprintf("reusing connection to CSI driver at %s", sc.connAddress)) + } By("creating mount and staging directories") - err = createMountTargetLocation(config.TargetPath) + err = createMountTargetLocation(sc.Config.TargetPath) Expect(err).NotTo(HaveOccurred()) - if len(config.StagingPath) > 0 { - err = createMountTargetLocation(config.StagingPath) + if len(sc.Config.StagingPath) > 0 { + err = createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } -}) +} -var _ = AfterSuite(func() { - conn.Close() -}) +func (sc *SanityContext) teardown() { + // We intentionally do not close the connection to the CSI + // driver here because the large amount of connection attempts + // caused test failures + // (https://github.com/kubernetes-csi/csi-test/issues/101). We + // could fix this with retries + // (https://github.com/kubernetes-csi/csi-test/pull/97) but + // that requires more discussion, so instead we just connect + // once per process instead of once per test case. This was + // also said to be faster + // (https://github.com/kubernetes-csi/csi-test/pull/98). +} func createMountTargetLocation(targetPath string) error { fileInfo, err := os.Stat(targetPath) @@ -122,3 +172,23 @@ func loadSecrets(path string) (*CSISecrets, error) { return &creds, nil } + +var uniqueSuffix = "-" + pseudoUUID() + +// pseudoUUID returns a unique string generated from random +// bytes, empty string in case of error. +func pseudoUUID() string { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + // Shouldn't happen?! + return "" + } + return fmt.Sprintf("%08X-%08X", b[0:4], b[4:8]) +} + +// uniqueString returns a unique string by appending a random +// number. In case of an error, just the prefix is returned, so it +// alone should already be fairly unique. +func uniqueString(prefix string) string { + return prefix + uniqueSuffix +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go new file mode 100644 index 000000000..47763b752 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + . "github.com/onsi/ginkgo" +) + +type test struct { + text string + body func(*SanityContext) +} + +var tests []test + +// DescribeSanity must be used instead of the usual Ginkgo Describe to +// register a test block. The difference is that the body function +// will be called multiple times with the right context (when +// setting up a Ginkgo suite or a testing.T test, with the right +// configuration). 
+func DescribeSanity(text string, body func(*SanityContext)) bool { + tests = append(tests, test{text, body}) + return true +} + +// registerTestsInGinkgo invokes the actual Gingko Describe +// for the tests registered earlier with DescribeSanity. +func registerTestsInGinkgo(sc *SanityContext) { + for _, test := range tests { + Describe(test.text, func() { + BeforeEach(func() { + sc.setup() + }) + + test.body(sc) + + AfterEach(func() { + sc.teardown() + }) + }) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go index d4e5dfc38..03b0f052c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go @@ -16,13 +16,16 @@ limitations under the License. package test import ( + "context" + "fmt" + "reflect" "testing" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - gomock "github.com/golang/mock/gomock" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" mock_driver "github.com/kubernetes-csi/csi-test/driver" mock_utils "github.com/kubernetes-csi/csi-test/utils" - "golang.org/x/net/context" ) func TestPluginInfoResponse(t *testing.T) { @@ -58,6 +61,24 @@ func TestPluginInfoResponse(t *testing.T) { } } +type pbMatcher struct { + x proto.Message +} + +func (p pbMatcher) Matches(x interface{}) bool { + y := x.(proto.Message) + return proto.Equal(p.x, y) +} + +func (p pbMatcher) String() string { + return fmt.Sprintf("pb equal to %v", p.x) +} + +func pbMatch(x interface{}) gomock.Matcher { + v := x.(proto.Message) + return &pbMatcher{v} +} + func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup mock @@ -79,7 +100,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup expectation // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value - driver.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, nil).Times(1) + driver.EXPECT().GetPluginInfo(gomock.Any(), pbMatch(in)).Return(out, nil).Times(1) // Create a new RPC server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ @@ -103,3 +124,65 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { t.Errorf("Unknown name: %s\n", name) } } + +func TestGRPCAttach(t *testing.T) { + + // Setup mock + m := gomock.NewController(&mock_utils.SafeGoroutineTester{}) + defer m.Finish() + driver := mock_driver.NewMockControllerServer(m) + + // Setup input + defaultVolumeID := "myname" + defaultNodeID := "MyNodeID" + defaultCaps := &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + } + publishVolumeInfo := map[string]string{ + "first": "foo", + "second": "bar", + "third": "baz", + } + defaultRequest := &csi.ControllerPublishVolumeRequest{ + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + VolumeCapability: defaultCaps, + Readonly: false, + } + + // Setup mock outout + out := &csi.ControllerPublishVolumeResponse{ + PublishContext: publishVolumeInfo, + } + + // Setup expectation + // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value + driver.EXPECT().ControllerPublishVolume(gomock.Any(), pbMatch(defaultRequest)).Return(out, nil).Times(1) + + // Create a new RPC + server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ + Controller: 
driver, + }) + conn, err := server.Nexus() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer server.Close() + + // Make call + c := csi.NewControllerClient(conn) + r, err := c.ControllerPublishVolume(context.Background(), defaultRequest) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + + info := r.GetPublishContext() + if !reflect.DeepEqual(info, publishVolumeInfo) { + t.Errorf("Invalid publish info: %v", info) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go index 4b0122b6c..ae8c33675 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go @@ -21,7 +21,7 @@ import ( "sync" "testing" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/csi-test/utils" "google.golang.org/grpc" "google.golang.org/grpc/reflection" diff --git a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go index c89a5cf1d..3baf96723 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go +++ b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go @@ -29,7 +29,7 @@ type SafeGoroutineTester struct{} // Errorf prints the error to the screen then panics func (s *SafeGoroutineTester) Errorf(format string, args ...interface{}) { - fmt.Printf(format, args) + fmt.Printf(format, args...) panic("MOCK TEST ERROR") } From afb5c1497874ac25341c53feb77fe9655908e8c8 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Wed, 14 Nov 2018 11:32:57 -0800 Subject: [PATCH 3/3] Update external-attacher to use v1.0.0-rc2 csi spec --- pkg/connection/connection.go | 22 +++++++++++----------- pkg/connection/connection_test.go | 28 ++++++++++++++-------------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/connection/connection.go b/pkg/connection/connection.go index 0c0d02bf9..6ab54550c 100644 --- a/pkg/connection/connection.go +++ b/pkg/connection/connection.go @@ -192,32 +192,32 @@ func (c *csiConnection) SupportsPluginControllerService(ctx context.Context) (bo return false, nil } -func (c *csiConnection) Attach(ctx context.Context, volumeID string, readOnly bool, nodeID string, caps *csi.VolumeCapability, attributes, secrets map[string]string) (metadata map[string]string, detached bool, err error) { +func (c *csiConnection) Attach(ctx context.Context, volumeID string, readOnly bool, nodeID string, caps *csi.VolumeCapability, context, secrets map[string]string) (metadata map[string]string, detached bool, err error) { client := csi.NewControllerClient(c.conn) req := csi.ControllerPublishVolumeRequest{ - VolumeId: volumeID, - NodeId: nodeID, - VolumeCapability: caps, - Readonly: readOnly, - VolumeAttributes: attributes, - ControllerPublishSecrets: secrets, + VolumeId: volumeID, + NodeId: nodeID, + VolumeCapability: caps, + Readonly: readOnly, + VolumeContext: context, + Secrets: secrets, } rsp, err := client.ControllerPublishVolume(ctx, &req) if err != nil { return nil, isFinalError(err), err } - return rsp.PublishInfo, false, nil + return rsp.PublishContext, false, nil } func (c *csiConnection) Detach(ctx context.Context, volumeID string, nodeID string, secrets map[string]string) (detached bool, err error) { client := csi.NewControllerClient(c.conn) req := 
csi.ControllerUnpublishVolumeRequest{ - VolumeId: volumeID, - NodeId: nodeID, - ControllerUnpublishSecrets: secrets, + VolumeId: volumeID, + NodeId: nodeID, + Secrets: secrets, } _, err = client.ControllerUnpublishVolume(ctx, &req) diff --git a/pkg/connection/connection_test.go b/pkg/connection/connection_test.go index 98bfa0adf..4ebbaabae 100644 --- a/pkg/connection/connection_test.go +++ b/pkg/connection/connection_test.go @@ -374,15 +374,15 @@ func TestAttach(t *testing.T) { VolumeId: defaultVolumeID, NodeId: defaultNodeID, VolumeCapability: defaultCaps, - VolumeAttributes: map[string]string{"foo": "bar"}, + VolumeContext: map[string]string{"foo": "bar"}, Readonly: false, } secretsRequest := &csi.ControllerPublishVolumeRequest{ - VolumeId: defaultVolumeID, - NodeId: defaultNodeID, - VolumeCapability: defaultCaps, - ControllerPublishSecrets: map[string]string{"foo": "bar"}, - Readonly: false, + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + VolumeCapability: defaultCaps, + Secrets: map[string]string{"foo": "bar"}, + Readonly: false, } tests := []struct { @@ -407,7 +407,7 @@ func TestAttach(t *testing.T) { caps: defaultCaps, input: defaultRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -432,7 +432,7 @@ func TestAttach(t *testing.T) { readonly: true, input: readOnlyRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -468,7 +468,7 @@ func TestAttach(t *testing.T) { attributes: map[string]string{"foo": "bar"}, input: attributesRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -482,7 +482,7 @@ func TestAttach(t *testing.T) { secrets: map[string]string{"foo": "bar"}, input: secretsRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -519,7 +519,7 @@ func TestAttach(t *testing.T) { t.Errorf("test %q: got error: %v", test.name, err) } if err == nil && !reflect.DeepEqual(publishInfo, test.expectedInfo) { - t.Errorf("got unexpected PublishInfo: %+v", publishInfo) + t.Errorf("got unexpected PublishContext: %+v", publishInfo) } if detached != test.expectDetached { t.Errorf("test %q: expected detached=%v, got %v", test.name, test.expectDetached, detached) @@ -538,9 +538,9 @@ func TestDetachAttach(t *testing.T) { } secretsRequest := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: defaultVolumeID, - NodeId: defaultNodeID, - ControllerUnpublishSecrets: map[string]string{"foo": "bar"}, + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + Secrets: map[string]string{"foo": "bar"}, } tests := []struct {