diff --git a/Gopkg.lock b/Gopkg.lock index c983df7b2..35d295a87 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,66 +2,51 @@ [[projects]] - digest = "1:cf4f5171128e62b46299b0a7cd79543f50e62f483d2ca9364e4957c7bbee7a38" name = "github.com/container-storage-interface/spec" - packages = ["lib/go/csi/v0"] - pruneopts = "" - revision = "2178fdeea87f1150a17a63252eee28d4d8141f72" - version = "v0.3.0" + packages = ["lib/go/csi"] + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" [[projects]] - digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" name = "github.com/davecgh/go-spew" packages = ["spew"] - pruneopts = "" revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" [[projects]] - digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22" name = "github.com/ghodss/yaml" packages = ["."] - pruneopts = "" revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" [[projects]] - digest = "1:0a3f6a0c68ab8f3d455f8892295503b179e571b7fefe47cc6c556405d1f83411" name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys", + "sortkeys" ] - pruneopts = "" revision = "1adfc126b41513cc696b209667c8656ea7aac67c" version = "v1.0.0" [[projects]] branch = "master" - digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a" name = "github.com/golang/glog" packages = ["."] - pruneopts = "" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] branch = "master" - digest = "1:b7677b91b9250563c6851dd5f2d8083972188bfe4f8fb7b61489a2f832f19b11" name = "github.com/golang/groupcache" packages = ["lru"] - pruneopts = "" revision = "66deaeb636dff1ac7d938ce666d090556056a4b0" [[projects]] - digest = "1:73a7106c799f98af4f3da7552906efc6a2570329f4cd2d2f5fb8f9d6c053ff2f" name = "github.com/golang/mock" packages = ["gomock"] - pruneopts = "" revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" version = "v1.1.1" [[projects]] - digest = 
"1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" name = "github.com/golang/protobuf" packages = [ "proto", @@ -70,151 +55,119 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers", + "ptypes/wrappers" ] - pruneopts = "" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] branch = "master" - digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6" name = "github.com/google/btree" packages = ["."] - pruneopts = "" revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" [[projects]] branch = "master" - digest = "1:754f77e9c839b24778a4b64422236d38515301d2baeb63113aa3edc42e6af692" name = "github.com/google/gofuzz" packages = ["."] - pruneopts = "" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] - digest = "1:2a131706ff80636629ab6373f2944569b8252ecc018cda8040931b05d32e3c16" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", "compiler", - "extensions", + "extensions" ] - pruneopts = "" revision = "ee43cbb60db7bd22502942cccbc39059117352ab" version = "v0.1.0" [[projects]] branch = "master" - digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8" name = "github.com/gregjones/httpcache" packages = [ ".", - "diskcache", + "diskcache" ] - pruneopts = "" revision = "9cad4c3443a7200dd6400aef47183728de563a38" [[projects]] branch = "master" - digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94" name = "github.com/hashicorp/golang-lru" packages = [ ".", - "simplelru", + "simplelru" ] - pruneopts = "" revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" [[projects]] - digest = "1:23bc0b496ba341c6e3ba24d6358ff4a40a704d9eb5f9a3bd8e8fbd57ad869013" name = "github.com/imdario/mergo" packages = ["."] - pruneopts = "" revision = "163f41321a19dd09362d4c63cc2489db2015f1f4" version = "0.3.2" [[projects]] - digest = "1:b79fc583e4dc7055ed86742e22164ac41bf8c0940722dbcb600f1a3ace1a8cb5" name = "github.com/json-iterator/go" 
packages = ["."] - pruneopts = "" revision = "1624edc4454b8682399def8740d46db5e4362ba4" version = "v1.1.5" [[projects]] - branch = "master" - digest = "1:81b4369f8c84cd71286978502e39bddb351aef515e88e0d566392efa71e1af8b" name = "github.com/kubernetes-csi/csi-test" packages = [ "driver", - "utils", + "utils" ] - pruneopts = "" - revision = "1bf94ed5c3afa2db7d3117f206f1b00249764790" + revision = "42947e04c4a0d2087448841a1dc3ccb20fb903b1" + version = "v1.0.0-rc2" [[projects]] branch = "master" - digest = "1:3162e91e0a20faee7756465f3816ecf389eb3d52850dff8218ca56a1f7060880" name = "github.com/kubernetes-csi/kubernetes-csi-migration-library" packages = [ ".", - "plugins", + "plugins" ] - pruneopts = "" revision = "edcf4b4169dcecadd06e071c4801e0373f14d7a2" [[projects]] - digest = "1:76a22f13ffa6d5d0b91beecdcec5c7651a42d3c5fcc12757e578808826fe4b0a" name = "github.com/modern-go/concurrent" packages = ["."] - pruneopts = "" revision = "938152ca6a933f501bb238954eebd3cbcbf489ff" version = "1.0.2" [[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" name = "github.com/modern-go/reflect2" packages = ["."] - pruneopts = "" revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" [[projects]] branch = "master" - digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc" name = "github.com/petar/GoLLRB" packages = ["llrb"] - pruneopts = "" revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" [[projects]] - digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f" name = "github.com/peterbourgon/diskv" packages = ["."] - pruneopts = "" revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" [[projects]] - digest = "1:261bc565833ef4f02121450d74eb88d5ae4bd74bfe5d0e862cddb8550ec35000" name = "github.com/spf13/pflag" packages = ["."] - pruneopts = "" revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" version = "v1.0.0" [[projects]] branch = "master" - digest = 
"1:79b763a59bc081a752605854f75ac04d4b8fba22bab9bbb11689efd2de255864" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - pruneopts = "" revision = "91a49db82a88618983a78a06c1cbd4e00ab749ab" [[projects]] branch = "master" - digest = "1:4a65e28058fde372f1febbf1bca01ee4aed7472569fd1bc81db9e91bf105f7c8" name = "golang.org/x/net" packages = [ "context", @@ -224,35 +177,29 @@ "idna", "internal/timeseries", "lex/httplex", - "trace", + "trace" ] - pruneopts = "" revision = "22ae77b79946ea320088417e4d50825671d82d57" [[projects]] branch = "master" - digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734" name = "golang.org/x/oauth2" packages = [ ".", - "internal", + "internal" ] - pruneopts = "" revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" [[projects]] branch = "master" - digest = "1:0a0c73aced706c77f4f128971976b0ee94db7bdcc95b6088bda9e72594598634" name = "golang.org/x/sys" packages = [ "unix", - "windows", + "windows" ] - pruneopts = "" revision = "dd2ff4accc098aceecb86b36eaa7829b2a17b1c9" [[projects]] - digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ "collate", @@ -268,22 +215,18 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable", + "unicode/rangetable" ] - pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" - digest = "1:8d6915fbd16d945a7e80b46b78fc75f0fadf7d30eb0a90badf36471b23bcd94f" name = "golang.org/x/time" packages = ["rate"] - pruneopts = "" revision = "26559e0f760e39c24d730d3224364aef164ee23f" [[projects]] - digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" packages = [ "internal", @@ -292,22 +235,18 @@ "internal/log", "internal/remote_api", "internal/urlfetch", - "urlfetch", + "urlfetch" ] - pruneopts = "" revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" version = "v1.1.0" [[projects]] branch = 
"master" - digest = "1:02b227168a215a14f7f16af45ca649b7c1efc33919ce27a03996dfb54dcf556c" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - pruneopts = "" revision = "2c5e7ac708aaa719366570dd82bda44541ca2a63" [[projects]] - digest = "1:d2dc833c73202298c92b63a7e180e2b007b5a3c3c763e3b9fe1da249b5c7f5b9" name = "google.golang.org/grpc" packages = [ ".", @@ -334,30 +273,24 @@ "stats", "status", "tap", - "transport", + "transport" ] - pruneopts = "" revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655" version = "v1.10.0" [[projects]] - digest = "1:e5d1fb981765b6f7513f793a3fcaac7158408cca77f75f7311ac82cc88e9c445" name = "gopkg.in/inf.v0" packages = ["."] - pruneopts = "" revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" version = "v0.9.0" [[projects]] - digest = "1:5fe876313b07628905b2181e537faabe45032cb9c79c01b49b51c25a0a40040d" name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "" revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" version = "v2.1.1" [[projects]] - digest = "1:5f076f6f9c3ac4f2b99d79dc7974eabd3f51be35254aa0d8c4cf920fdb9c7ff8" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -390,14 +323,12 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1", + "storage/v1beta1" ] - pruneopts = "" revision = "357ec6384fa7e10d6ea160d2299a98ddfdc3ab3c" version = "kubernetes-1.12.1" [[projects]] - digest = "1:7aa037a4df5432be2820d164f378d7c22335e5cbba124e90e42114757ebd11ac" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -441,14 +372,12 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect", + "third_party/forked/golang/reflect" ] - pruneopts = "" revision = "6dd46049f39503a1fc8d65de4bd566829e95faff" version = "kubernetes-1.12.0" [[projects]] - digest = "1:5d4153d12c3aed2c90a94262520d2498d5afa4d692554af55e65a7c5af0bc399" name = "k8s.io/client-go" packages = [ "discovery", @@ -617,15 +546,13 @@ "util/homedir", "util/integer", 
"util/retry", - "util/workqueue", + "util/workqueue" ] - pruneopts = "" revision = "1638f8970cefaa404ff3a62950f88b08292b2696" version = "kubernetes-1.12.0" [[projects]] branch = "master" - digest = "1:d7c38f69d229b1475d7563256062736682b35e40c347e7479c35c7c3cf3acdc4" name = "k8s.io/csi-api" packages = [ "pkg/apis/csi/v1alpha1", @@ -638,65 +565,19 @@ "pkg/client/informers/externalversions/csi", "pkg/client/informers/externalversions/csi/v1alpha1", "pkg/client/informers/externalversions/internalinterfaces", - "pkg/client/listers/csi/v1alpha1", + "pkg/client/listers/csi/v1alpha1" ] - pruneopts = "" revision = "2966180a4e54fab57c98153a33cf018cc4017ba3" [[projects]] branch = "master" - digest = "1:9a648ff9eb89673d2870c22fc011ec5db0fcff6c4e5174a650298e51be71bbf1" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] - pruneopts = "" revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/container-storage-interface/spec/lib/go/csi/v0", - "github.com/davecgh/go-spew/spew", - "github.com/golang/glog", - "github.com/golang/mock/gomock", - "github.com/golang/protobuf/proto", - "github.com/kubernetes-csi/csi-test/driver", - "github.com/kubernetes-csi/kubernetes-csi-migration-library", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/connectivity", - "google.golang.org/grpc/status", - "k8s.io/api/core/v1", - "k8s.io/api/storage/v1", - "k8s.io/api/storage/v1beta1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/client-go/informers", - "k8s.io/client-go/informers/core/v1", - "k8s.io/client-go/informers/storage/v1beta1", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/kubernetes/scheme", - 
"k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/listers/core/v1", - "k8s.io/client-go/listers/storage/v1beta1", - "k8s.io/client-go/rest", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/leaderelection", - "k8s.io/client-go/tools/leaderelection/resourcelock", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/util/workqueue", - "k8s.io/csi-api/pkg/apis/csi/v1alpha1", - "k8s.io/csi-api/pkg/client/clientset/versioned", - "k8s.io/csi-api/pkg/client/clientset/versioned/fake", - "k8s.io/csi-api/pkg/client/informers/externalversions", - "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1", - ] + inputs-digest = "4d95364159e4085dd476659e96deee915264cf580ba3c5aed230ab7581a1f60f" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index dca3df0ea..f3e950e28 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -2,7 +2,7 @@ [[constraint]] name = "github.com/container-storage-interface/spec" - version = "~0.3.0" + version = "1.0.0-rc2" [[constraint]] name = "github.com/golang/protobuf" @@ -12,13 +12,9 @@ branch = "master" name = "github.com/golang/glog" -#[[constraint]] -# name = "github.com/golang/mock" -# version = "1.0.0" - [[constraint]] - branch = "master" name = "github.com/kubernetes-csi/csi-test" + version = "1.0.0-rc2" [[constraint]] name = "google.golang.org/grpc" diff --git a/pkg/connection/connection.go b/pkg/connection/connection.go index 2cd13b1d2..6ab54550c 100644 --- a/pkg/connection/connection.go +++ b/pkg/connection/connection.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/glog" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -192,23 +192,23 @@ func (c *csiConnection) SupportsPluginControllerService(ctx context.Context) (bo return false, nil } -func (c *csiConnection) Attach(ctx context.Context, 
volumeID string, readOnly bool, nodeID string, caps *csi.VolumeCapability, attributes, secrets map[string]string) (metadata map[string]string, detached bool, err error) { +func (c *csiConnection) Attach(ctx context.Context, volumeID string, readOnly bool, nodeID string, caps *csi.VolumeCapability, context, secrets map[string]string) (metadata map[string]string, detached bool, err error) { client := csi.NewControllerClient(c.conn) req := csi.ControllerPublishVolumeRequest{ - VolumeId: volumeID, - NodeId: nodeID, - VolumeCapability: caps, - Readonly: readOnly, - VolumeAttributes: attributes, - ControllerPublishSecrets: secrets, + VolumeId: volumeID, + NodeId: nodeID, + VolumeCapability: caps, + Readonly: readOnly, + VolumeContext: context, + Secrets: secrets, } rsp, err := client.ControllerPublishVolume(ctx, &req) if err != nil { return nil, isFinalError(err), err } - return rsp.PublishInfo, false, nil + return rsp.PublishContext, false, nil } func (c *csiConnection) Detach(ctx context.Context, volumeID string, nodeID string, secrets map[string]string) (detached bool, err error) { @@ -217,7 +217,7 @@ func (c *csiConnection) Detach(ctx context.Context, volumeID string, nodeID stri req := csi.ControllerUnpublishVolumeRequest{ VolumeId: volumeID, NodeId: nodeID, - ControllerUnpublishSecrets: secrets, + Secrets: secrets, } _, err = client.ControllerUnpublishVolume(ctx, &req) diff --git a/pkg/connection/connection_test.go b/pkg/connection/connection_test.go index 6d4b204e9..4ebbaabae 100644 --- a/pkg/connection/connection_test.go +++ b/pkg/connection/connection_test.go @@ -22,7 +22,7 @@ import ( "reflect" "testing" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/mock/gomock" "github.com/golang/protobuf/proto" "github.com/kubernetes-csi/csi-test/driver" @@ -374,15 +374,15 @@ func TestAttach(t *testing.T) { VolumeId: defaultVolumeID, NodeId: defaultNodeID, VolumeCapability: 
defaultCaps, - VolumeAttributes: map[string]string{"foo": "bar"}, + VolumeContext: map[string]string{"foo": "bar"}, Readonly: false, } secretsRequest := &csi.ControllerPublishVolumeRequest{ - VolumeId: defaultVolumeID, - NodeId: defaultNodeID, - VolumeCapability: defaultCaps, - ControllerPublishSecrets: map[string]string{"foo": "bar"}, - Readonly: false, + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + VolumeCapability: defaultCaps, + Secrets: map[string]string{"foo": "bar"}, + Readonly: false, } tests := []struct { @@ -407,7 +407,7 @@ func TestAttach(t *testing.T) { caps: defaultCaps, input: defaultRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -432,7 +432,7 @@ func TestAttach(t *testing.T) { readonly: true, input: readOnlyRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -468,7 +468,7 @@ func TestAttach(t *testing.T) { attributes: map[string]string{"foo": "bar"}, input: attributesRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -482,7 +482,7 @@ func TestAttach(t *testing.T) { secrets: map[string]string{"foo": "bar"}, input: secretsRequest, output: &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, + PublishContext: publishVolumeInfo, }, expectError: false, expectedInfo: publishVolumeInfo, @@ -519,7 +519,7 @@ func TestAttach(t *testing.T) { t.Errorf("test %q: got error: %v", test.name, err) } if err == nil && !reflect.DeepEqual(publishInfo, test.expectedInfo) { - t.Errorf("got unexpected PublishInfo: %+v", publishInfo) + t.Errorf("got unexpected PublishContext: %+v", publishInfo) } if detached != test.expectDetached { t.Errorf("test 
%q: expected detached=%v, got %v", test.name, test.expectDetached, detached) @@ -540,7 +540,7 @@ func TestDetachAttach(t *testing.T) { secretsRequest := &csi.ControllerUnpublishVolumeRequest{ VolumeId: defaultVolumeID, NodeId: defaultNodeID, - ControllerUnpublishSecrets: map[string]string{"foo": "bar"}, + Secrets: map[string]string{"foo": "bar"}, } tests := []struct { diff --git a/pkg/controller/framework_test.go b/pkg/controller/framework_test.go index 7576ad165..e0d3e2cd3 100644 --- a/pkg/controller/framework_test.go +++ b/pkg/controller/framework_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/davecgh/go-spew/spew" "github.com/golang/glog" "github.com/kubernetes-csi/external-attacher/pkg/connection" diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 0bf421080..55ed6647e 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -21,7 +21,7 @@ import ( "fmt" "regexp" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/glog" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index 79754b707..7af9500bc 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/vendor/github.com/container-storage-interface/spec/.gitignore b/vendor/github.com/container-storage-interface/spec/.gitignore index 4f7ede45c..443a2c83d 100644 --- a/vendor/github.com/container-storage-interface/spec/.gitignore +++ b/vendor/github.com/container-storage-interface/spec/.gitignore @@ -1,3 +1,4 @@ *.tmp .DS_Store 
.build +*.swp diff --git a/vendor/github.com/container-storage-interface/spec/.travis.yml b/vendor/github.com/container-storage-interface/spec/.travis.yml index 15b11d3a5..65d1a6ab0 100644 --- a/vendor/github.com/container-storage-interface/spec/.travis.yml +++ b/vendor/github.com/container-storage-interface/spec/.travis.yml @@ -29,7 +29,7 @@ jobs: # Lang stage: Go - stage: lang language: go - go: 1.9.5 + go: 1.10.4 go_import_path: github.com/container-storage-interface/spec install: - make -C lib/go protoc diff --git a/vendor/github.com/container-storage-interface/spec/CCLA.pdf b/vendor/github.com/container-storage-interface/spec/CCLA.pdf new file mode 100644 index 000000000..08a9f2a50 Binary files /dev/null and b/vendor/github.com/container-storage-interface/spec/CCLA.pdf differ diff --git a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md index 8f33951fe..e96ebc792 100644 --- a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md +++ b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md @@ -1,6 +1,9 @@ # How to Contribute CSI is under [Apache 2.0](LICENSE) and accepts contributions via GitHub pull requests. + +Contributions require signing an individual or Corporate CLA available [here](https://github.com/container-storage-interface/spec/blob/master/CCLA.pdf) which should be signed and mailed to the [mailing list]( https://groups.google.com/forum/#!topic/container-storage-interface-community/). + This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted. 
## Markdown style diff --git a/vendor/github.com/container-storage-interface/spec/OWNERS b/vendor/github.com/container-storage-interface/spec/OWNERS index b11f91910..7225bd014 100644 --- a/vendor/github.com/container-storage-interface/spec/OWNERS +++ b/vendor/github.com/container-storage-interface/spec/OWNERS @@ -3,8 +3,8 @@ approvers: - thockin # Representing Kubernetes - jieyu # Representing Mesos - jdef # Representing Mesos - - cpuguy83 # Representing Docker - - mycure # Representing Docker - - julian-hj # Representing Cloud Foundry - - paulcwarren # Representing Cloud Foundry + - anusha-ragunathan # Representing Docker + - ddebroy # Representing Docker + - julian-hj # Representing Cloud Foundry + - paulcwarren # Representing Cloud Foundry reviewers: diff --git a/vendor/github.com/container-storage-interface/spec/README.md b/vendor/github.com/container-storage-interface/spec/README.md index d270cedda..c686e423f 100644 --- a/vendor/github.com/container-storage-interface/spec/README.md +++ b/vendor/github.com/container-storage-interface/spec/README.md @@ -8,6 +8,6 @@ This project contains the CSI [specification](spec.md) and [protobuf](csi.proto) ### Container Orchestrators (CO) -* [Cloud Foundry](https://github.com/cloudfoundry/csi-local-volume-release) +* [Cloud Foundry](https://github.com/cloudfoundry/csi-plugins-release/blob/master/CSI_SUPPORT.md) * [Kubernetes](https://kubernetes-csi.github.io/docs/) * [Mesos](http://mesos.apache.org/documentation/latest/csi/) diff --git a/vendor/github.com/container-storage-interface/spec/VERSION b/vendor/github.com/container-storage-interface/spec/VERSION index 0d91a54c7..3eefcb9dd 100644 --- a/vendor/github.com/container-storage-interface/spec/VERSION +++ b/vendor/github.com/container-storage-interface/spec/VERSION @@ -1 +1 @@ -0.3.0 +1.0.0 diff --git a/vendor/github.com/container-storage-interface/spec/csi.proto b/vendor/github.com/container-storage-interface/spec/csi.proto index 22cff40ca..d240b6682 100644 --- 
a/vendor/github.com/container-storage-interface/spec/csi.proto +++ b/vendor/github.com/container-storage-interface/spec/csi.proto @@ -1,10 +1,18 @@ // Code generated by make; DO NOT EDIT. syntax = "proto3"; -package csi.v0; +package csi.v1; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + bool csi_secret = 1059; +} service Identity { rpc GetPluginInfo(GetPluginInfoRequest) returns (GetPluginInfoResponse) {} @@ -64,20 +72,12 @@ service Node { rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) returns (NodeUnpublishVolumeResponse) {} - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - rpc NodeGetId (NodeGetIdRequest) - returns (NodeGetIdResponse) { - option deprecated = true; - } + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) returns (NodeGetCapabilitiesResponse) {} - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. rpc NodeGetInfo (NodeGetInfoRequest) returns (NodeGetInfoResponse) {} } @@ -86,13 +86,13 @@ message GetPluginInfoRequest { } message GetPluginInfoResponse { - // The name MUST follow reverse domain name notation format - // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). - // It SHOULD include the plugin's host company name and the plugin - // name, to minimize the possibility of collisions. It MUST be 63 + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). 
It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), underscores (_), - // dots (.), and alphanumerics between. This field is REQUIRED. + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. string name = 1; // This field is REQUIRED. Value of this field is opaque to the CO. @@ -108,7 +108,7 @@ message GetPluginCapabilitiesRequest { message GetPluginCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated PluginCapability capabilities = 2; + repeated PluginCapability capabilities = 1; } // Specifies a capability of the plugin. @@ -119,7 +119,7 @@ message PluginCapability { // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for // the ControllerService. Plugins SHOULD provide this capability. - // In rare cases certain plugins may wish to omit the + // In rare cases certain plugins MAY wish to omit the // ControllerService entirely from their implementation, but such // SHOULD NOT be the common case. // The presence of this capability determines whether the CO will @@ -127,13 +127,13 @@ message PluginCapability { // as specific RPCs as indicated by ControllerGetCapabilities. CONTROLLER_SERVICE = 1; - // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this - // plugin may not be equally accessible by all nodes in the + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the // cluster. The CO MUST use the topology information returned by // CreateVolumeRequest along with the topology information // returned by NodeGetInfo to ensure that a given volume is // accessible from a given node when scheduling workloads. 
- ACCESSIBILITY_CONSTRAINTS = 2; + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; } Type type = 1; } @@ -174,37 +174,53 @@ message CreateVolumeRequest { // The suggested name for the storage space. This field is REQUIRED. // It serves two purposes: // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. If `CreateVolume` fails, the volume may or may not - // be provisioned. In this case, the CO may call `CreateVolume` - // again, with the same name, to ensure the volume exists. The - // Plugin should ensure that multiple `CreateVolume` calls for the - // same name do not result in more than one piece of storage - // provisioned corresponding to that name. If a Plugin is unable to - // enforce idempotency, the CO's error recovery logic could result - // in multiple (unused) volumes being provisioned. + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). // 2) Suggested name - Some storage systems allow callers to specify // an identifier by which to refer to the newly provisioned // storage. If a storage system supports this, it can optionally // use this name as the identifier for the new volume. 
+ // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 1; // This field is OPTIONAL. This allows the CO to specify the capacity // requirement of the volume to be provisioned. If not specified, the // Plugin MAY choose an implementation-defined capacity range. If // specified it MUST always be honored, even when creating volumes - // from a source; which may force some backends to internally extend + // from a source; which MAY force some backends to internally extend // the volume after creating it. - CapacityRange capacity_range = 2; - // The capabilities that the provisioned volume MUST have: the Plugin - // MUST provision a volume that could satisfy ALL of the - // capabilities specified in this list. The Plugin MUST assume that - // the CO MAY use the provisioned volume later with ANY of the - // capabilities specified in this list. This also enables the CO to do - // early validation: if ANY of the specified volume capabilities are - // not supported by the Plugin, the call SHALL fail. This field is - // REQUIRED. + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. 
+ // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. repeated VolumeCapability volume_capabilities = 3; // Plugin specific parameters passed in as opaque key-value pairs. @@ -215,7 +231,7 @@ message CreateVolumeRequest { // Secrets required by plugin to complete volume creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_create_secrets = 5; + map secrets = 5 [(csi_secret) = true]; // If specified, the new volume will be pre-populated with data from // this source. This field is OPTIONAL. @@ -228,10 +244,10 @@ message CreateVolumeRequest { // topological accessibility information supported by the SP. // This field is OPTIONAL. // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. TopologyRequirement accessibility_requirements = 7; } @@ -243,11 +259,19 @@ message VolumeContentSource { // This field is REQUIRED. Plugin is REQUIRED to support creating // volume from snapshot if it supports the capability // CREATE_DELETE_SNAPSHOT. - string id = 1; + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. 
+ string volume_id = 1; } oneof type { SnapshotSource snapshot = 1; + VolumeSource volume = 2; } } @@ -334,7 +358,7 @@ message CapacityRange { int64 limit_bytes = 2; } -// The information about a provisioned volume. +// Information about a specific volume. message Volume { // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is @@ -342,20 +366,32 @@ message Volume { // The value of this field MUST NOT be negative. int64 capacity_bytes = 1; - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. - string id = 2; - - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - map attributes = 3; + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. 
+ // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; // If specified, indicates that the volume is not empty and is // pre-populated with data from the specified source. @@ -365,7 +401,7 @@ message Volume { // Specifies where (regions, zones, racks, etc.) the provisioned // volume is accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // An SP MAY specify multiple topologies to indicate the volume is // accessible from multiple locations. // COs MAY use this information along with the topology information @@ -373,7 +409,7 @@ message Volume { // from a given node when scheduling workloads. // This field is OPTIONAL. If it is not specified, the CO MAY assume // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available + // MAY schedule workloads referencing the volume on any available // node. // // Example 1: @@ -527,15 +563,18 @@ message TopologyRequirement { // A topological segment is a specific instance of a topological domain, // like "zone3", "rack3", etc. // For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated +// Valid keys have two segments: an OPTIONAL prefix and name, separated // by a slash (/), for example: "com.company.example/zone". 
-// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". -// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). // The key prefix SHOULD include the plugin's host company name and/or // the plugin name, to minimize the possibility of collisions with keys // from other plugins. @@ -558,7 +597,7 @@ message DeleteVolumeRequest { // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_delete_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteVolumeResponse { @@ -573,31 +612,44 @@ message ControllerPublishVolumeRequest { // field to match the node ID returned by `NodeGetInfo`. string node_id = 2; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. 
VolumeCapability volume_capability = 3; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. bool readonly = 4; // Secrets required by plugin to complete controller publish volume // request. This field is OPTIONAL. Refer to the // `Secrets Requirements` section on how to use this field. - map controller_publish_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message ControllerPublishVolumeResponse { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. - map publish_info = 1; + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; } message ControllerUnpublishVolumeRequest { // The ID of the volume. This field is REQUIRED. @@ -615,7 +667,7 @@ message ControllerUnpublishVolumeRequest { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_unpublish_secrets = 3; + map secrets = 3 [(csi_secret) = true]; } message ControllerUnpublishVolumeResponse { @@ -625,30 +677,52 @@ message ValidateVolumeCapabilitiesRequest { // The ID of the volume to check. This field is REQUIRED. string volume_id = 1; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities + // call SHALL return "confirmed" only if all the volume capabilities // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; + repeated VolumeCapability volume_capabilities = 3; - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - map volume_attributes = 3; + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. 
- repeated Topology accessible_topology = 4; + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; } message ValidateVolumeCapabilitiesResponse { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. - bool supported = 1; + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; + + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; - // Message to the CO if `supported` above is false. This field is + // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. string message = 2; @@ -705,7 +779,7 @@ message GetCapacityRequest { // `accessible_topology`. This is the same as the // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. 
Topology accessible_topology = 3; } @@ -725,7 +799,7 @@ message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated ControllerServiceCapability capabilities = 2; + repeated ControllerServiceCapability capabilities = 1; } // Specifies a capability of the controller service. @@ -742,11 +816,15 @@ message ControllerServiceCapability { // CREATE_DELETE_SNAPSHOT MUST support creating volume from // snapshot. CREATE_DELETE_SNAPSHOT = 5; - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; } Type type = 1; @@ -764,12 +842,16 @@ message CreateSnapshotRequest { // The suggested name for the snapshot. This field is REQUIRED for // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 2; // Secrets required by plugin to complete snapshot creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map create_snapshot_secrets = 3; + map secrets = 3 [(csi_secret) = true]; // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. 
The Plugin is responsible for parsing and @@ -791,7 +873,7 @@ message CreateSnapshotResponse { Snapshot snapshot = 1; } -// The information about a provisioned snapshot. +// Information about a specific snapshot. message Snapshot { // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to @@ -802,11 +884,16 @@ message Snapshot { // zero means it is unspecified. int64 size_bytes = 1; - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - string id = 2; + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; // Identity information for the source volume. Note that creating a // snapshot from a snapshot is not supported here so the source has to @@ -814,43 +901,13 @@ message Snapshot { string source_volume_id = 3; // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - int64 created_at = 4; - - // The status of a snapshot. - SnapshotStatus status = 5; -} + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; -// The status of a snapshot. -message SnapshotStatus { - enum Type { - UNKNOWN = 0; - // A snapshot is ready for use. 
- READY = 1; - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - UPLOADING = 2; - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - ERROR_UPLOADING = 3; - } - // This field is REQUIRED. - Type type = 1; - - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - string details = 2; + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; } message DeleteSnapshotRequest { // The ID of the snapshot to be deleted. @@ -860,7 +917,7 @@ message DeleteSnapshotRequest { // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map delete_snapshot_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteSnapshotResponse {} @@ -890,7 +947,8 @@ message ListSnapshotsRequest { // Identity information for a specific snapshot. This field is // OPTIONAL. It can be used to list only a specific snapshot. // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. + // and will not block if the snapshot is being processed after + // it is cut. 
string snapshot_id = 4; } @@ -918,28 +976,33 @@ message NodeStageVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the volume will be published. It MUST be an + // The path to which the volume MAY be staged. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. + // `staging_target_path` per volume. The CO SHALL ensure that the + // process serving the request has `read` and `write` permission to + // the path, and is able to create files or directories at the path + // if it does not exist. // This is a REQUIRED field. string staging_target_path = 3; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 4; // Secrets required by plugin to complete node stage volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_stage_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message NodeStageVolumeResponse { @@ -949,7 +1012,7 @@ message NodeUnstageVolumeRequest { // The ID of the volume. This field is REQUIRED. 
string volume_id = 1; - // The path at which the volume was published. It MUST be an absolute + // The path at which the volume was staged. It MUST be an absolute // path in the root filesystem of the process serving this request. // This is a REQUIRED field. string staging_target_path = 2; @@ -967,9 +1030,9 @@ message NodePublishVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the device was mounted by `NodeStageVolume`. + // The path to which the volume was staged by `NodeStageVolume`. // It MUST be an absolute path in the root filesystem of the process // serving this request. // It MUST be set if the Node Plugin implements the @@ -980,28 +1043,31 @@ message NodePublishVolumeRequest { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. + // The CO SHALL ensure that the process serving the request has + // `read` and `write` permissions to the path, and is able to create + // files or directories at the path if it does not exist. // This is a REQUIRED field. string target_path = 4; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 5; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. 
+ // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. bool readonly = 6; // Secrets required by plugin to complete node publish volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_publish_secrets = 7; + map secrets = 7 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. - map volume_attributes = 8; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; } message NodePublishVolumeResponse { @@ -1020,15 +1086,43 @@ message NodeUnpublishVolumeRequest { message NodeUnpublishVolumeResponse { // Intentionally empty. } -message NodeGetIdRequest { - // Intentionally empty. -} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; -message NodeGetIdResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. // This is a REQUIRED field. - string node_id = 1; + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. 
This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; } message NodeGetCapabilitiesRequest { // Intentionally empty. @@ -1046,6 +1140,10 @@ message NodeServiceCapability { enum Type { UNKNOWN = 0; STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; } Type type = 1; @@ -1060,9 +1158,14 @@ message NodeGetInfoRequest { } message NodeGetInfoResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. string node_id = 1; // Maximum number of volumes that controller can publish to the node. @@ -1075,7 +1178,7 @@ message NodeGetInfoResponse { // Specifies where (regions, zones, racks, etc.) the node is // accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // COs MAY use this information along with the topology information // returned in CreateVolumeResponse to ensure that a given volume is // accessible from a given node when scheduling workloads. 
diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile index a7443eae0..3b1c5eaba 100644 --- a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile +++ b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile @@ -58,14 +58,14 @@ $(PROTOC): PROTOC_GEN_GO_PKG := github.com/golang/protobuf/protoc-gen-go PROTOC_GEN_GO := protoc-gen-go $(PROTOC_GEN_GO): PROTOBUF_PKG := $(dir $(PROTOC_GEN_GO_PKG)) -$(PROTOC_GEN_GO): PROTOBUF_VERSION := v1.1.0 +$(PROTOC_GEN_GO): PROTOBUF_VERSION := v1.2.0 $(PROTOC_GEN_GO): mkdir -p $(dir $(GOPATH)/src/$(PROTOBUF_PKG)) test -d $(GOPATH)/src/$(PROTOBUF_PKG)/.git || git clone https://$(PROTOBUF_PKG) $(GOPATH)/src/$(PROTOBUF_PKG) (cd $(GOPATH)/src/$(PROTOBUF_PKG) && \ (test "$$(git describe --tags | head -1)" = "$(PROTOBUF_VERSION)" || \ (git fetch && git checkout tags/$(PROTOBUF_VERSION)))) - (cd $(GOPATH)/src/$(PROTOBUF_PKG) && go get -v -d ./...) && \ + (cd $(GOPATH)/src/$(PROTOBUF_PKG) && go get -v -d $$(go list -f '{{ .ImportPath }}' ./...)) && \ go build -o "$@" $(PROTOC_GEN_GO_PKG) @@ -83,18 +83,25 @@ export PATH := $(shell pwd):$(PATH) ## BUILD ## ######################################################################## CSI_PROTO := ../../csi.proto -CSI_PKG := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\);$$/\1/p'|tr '.' '/') -CSI_GO := $(CSI_PKG)/csi.pb.go +CSI_PKG_ROOT := github.com/container-storage-interface/spec +CSI_PKG_SUB := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\).v[0-9]\+;$$/\1/p'|tr '.' '/') +CSI_BUILD := $(CSI_PKG_SUB)/.build +CSI_GO := $(CSI_PKG_SUB)/csi.pb.go CSI_A := csi.a -CSI_GO_TMP := $(CSI_PKG)/.build/csi.pb.go +CSI_GO_TMP := $(CSI_BUILD)/$(CSI_PKG_ROOT)/csi.pb.go # This recipe generates the go language bindings to a temp area. 
+$(CSI_GO_TMP): HERE := $(shell pwd) +$(CSI_GO_TMP): PTYPES_PKG := github.com/golang/protobuf/ptypes $(CSI_GO_TMP): GO_OUT := plugins=grpc -$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers -$(CSI_GO_TMP): INCLUDE = -I$(PROTOC_TMP_DIR)/include +$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor +$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/wrappers.proto=$(PTYPES_PKG)/wrappers +$(CSI_GO_TMP): GO_OUT := $(GO_OUT):"$(HERE)/$(CSI_BUILD)" +$(CSI_GO_TMP): INCLUDE := -I$(GOPATH)/src -I$(HERE)/$(PROTOC_TMP_DIR)/include $(CSI_GO_TMP): $(CSI_PROTO) | $(PROTOC) $(PROTOC_GEN_GO) @mkdir -p "$(@D)" - $(PROTOC) -I "$( controller_create_secrets = 5; + map secrets = 5 [(csi_secret) = true]; // If specified, the new volume will be pre-populated with data from // this source. This field is OPTIONAL. @@ -669,10 +708,10 @@ message CreateVolumeRequest { // topological accessibility information supported by the SP. // This field is OPTIONAL. // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. TopologyRequirement accessibility_requirements = 7; } @@ -684,11 +723,19 @@ message VolumeContentSource { // This field is REQUIRED. Plugin is REQUIRED to support creating // volume from snapshot if it supports the capability // CREATE_DELETE_SNAPSHOT. - string id = 1; + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. 
Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; } oneof type { SnapshotSource snapshot = 1; + VolumeSource volume = 2; } } @@ -775,7 +822,7 @@ message CapacityRange { int64 limit_bytes = 2; } -// The information about a provisioned volume. +// Information about a specific volume. message Volume { // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is @@ -783,20 +830,32 @@ message Volume { // The value of this field MUST NOT be negative. int64 capacity_bytes = 1; - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. - string id = 2; - - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - map attributes = 3; + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. 
SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; // If specified, indicates that the volume is not empty and is // pre-populated with data from the specified source. @@ -806,7 +865,7 @@ message Volume { // Specifies where (regions, zones, racks, etc.) the provisioned // volume is accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // An SP MAY specify multiple topologies to indicate the volume is // accessible from multiple locations. // COs MAY use this information along with the topology information @@ -814,7 +873,7 @@ message Volume { // from a given node when scheduling workloads. // This field is OPTIONAL. If it is not specified, the CO MAY assume // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available + // MAY schedule workloads referencing the volume on any available // node. // // Example 1: @@ -968,15 +1027,18 @@ message TopologyRequirement { // A topological segment is a specific instance of a topological domain, // like "zone3", "rack3", etc. 
// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated +// Valid keys have two segments: an OPTIONAL prefix and name, separated // by a slash (/), for example: "com.company.example/zone". -// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". -// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). // The key prefix SHOULD include the plugin's host company name and/or // the plugin name, to minimize the possibility of collisions with keys // from other plugins. @@ -1001,18 +1063,17 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| +| Source incompatible or not supported | 3 INVALID_ARGUMENT | Besides the general cases, this code MUST also be used to indicate when plugin supporting CREATE_DELETE_VOLUME cannot create a volume from the requested source (`SnapshotSource` or `VolumeSource`). 
Failure MAY be caused by not supporting the source (CO SHOULD NOT have provided that source) or incompatibility between `parameters` from the source and the ones requested for the new volume. More human-readable information SHOULD be provided in the gRPC `status.message` field if the problem is the source. | On source related issues, caller MUST use different parameters, a different source, or no source at all. | +| Source does not exist | 5 NOT_FOUND | Indicates that the specified source does not exist. | Caller MUST verify that the `volume_content_source` is correct, the source is accessible, and has not been deleted before retrying with exponential back off. | | Volume already exists but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified volume `name` already exists but is incompatible with the specified `capacity_range`, `volume_capabilities` or `parameters`. | Caller MUST fix the arguments or use a different `name` before retrying. | | Unable to provision in `accessible_topology` | 8 RESOURCE_EXHAUSTED | Indicates that although the `accessible_topology` field is valid, a new volume can not be provisioned with the specified topology constraints. More human-readable information MAY be provided in the gRPC `status.message` field. | Caller MUST ensure that whatever is preventing volumes from being provisioned in the specified location (e.g. quota issues) is addressed before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. 
The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Unsupported `capacity_range` | 11 OUT_OF_RANGE | Indicates that the capacity range is not allowed by the Plugin, for example when trying to create a volume smaller than the source snapshot. More human-readable information MAY be provided in the gRPC `status.message` field. | Caller MUST fix the capacity range before retrying. | -| Call not implemented | 12 UNIMPLEMENTED | CreateVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `DeleteVolume` A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_VOLUME` capability. This RPC will be called by the CO to deprovision a volume. -If successful, the storage space associated with the volume MUST be released and all the data in the volume SHALL NOT be accessible anymore. This operation MUST be idempotent. If a volume corresponding to the specified `volume_id` does not exist or the artifacts associated with the volume do not exist anymore, the Plugin MUST reply `0 OK`. @@ -1026,7 +1087,7 @@ message DeleteVolumeRequest { // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. 
- map controller_delete_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteVolumeResponse { @@ -1043,8 +1104,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume in use | 9 FAILED_PRECONDITION | Indicates that the volume corresponding to the specified `volume_id` could not be deleted because it is in use by another resource. | Caller SHOULD ensure that there are no other resources using the volume, and then retry with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | DeleteVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ControllerPublishVolume` @@ -1071,31 +1130,44 @@ message ControllerPublishVolumeRequest { // field to match the node ID returned by `NodeGetInfo`. string node_id = 2; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. 
+ // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 3; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. bool readonly = 4; // Secrets required by plugin to complete controller publish volume // request. This field is OPTIONAL. Refer to the // `Secrets Requirements` section on how to use this field. - map controller_publish_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message ControllerPublishVolumeResponse { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. - map publish_info = 1; + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; } ``` @@ -1112,8 +1184,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the node corresponding to the specified `volume_id` but is incompatible with the specified `volume_capability` or `readonly` flag . | Caller MUST fix the arguments before retying. | | Volume published to another node | 9 FAILED_PRECONDITION | Indicates that a volume corresponding to the specified `volume_id` has already been published at another node and does not have MULTI_NODE volume capability. If this error code is returned, the Plugin SHOULD specify the `node_id` of the node at which the volume is published as part of the gRPC `status.message`. | Caller SHOULD ensure the specified volume is not published at any other node before retrying with exponential back off. | | Max volumes attached | 8 RESOURCE_EXHAUSTED | Indicates that the maximum supported number of volumes that can be attached to the specified node are already attached. Therefore, this operation will fail until at least one of the existing attached volumes is detached from the node. | Caller MUST ensure that the number of volumes already attached to the node is less then the maximum supported number of volumes before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. 
The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | ControllerPublishVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ControllerUnpublishVolume` @@ -1146,7 +1216,7 @@ message ControllerUnpublishVolumeRequest { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_unpublish_secrets = 3; + map secrets = 3 [(csi_secret) = true]; } message ControllerUnpublishVolumeResponse { @@ -1164,46 +1234,69 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Node does not exist | 5 NOT_FOUND | Indicates that a node corresponding to the specified `node_id` does not exist. | Caller MUST verify that the `node_id` is correct and that the node is available and has not been terminated or deleted before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. 
However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | ControllerUnpublishVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ValidateVolumeCapabilities` A Controller Plugin MUST implement this RPC call. This RPC will be called by the CO to check if a pre-provisioned volume has all the capabilities that the CO wants. -This RPC call SHALL return `supported` only if all the volume capabilities specified in the request are supported. +This RPC call SHALL return `confirmed` only if all the volume capabilities specified in the request are supported (see caveat below). This operation MUST be idempotent. +NOTE: Older plugins will parse but likely not "process" newer fields that MAY be present in capability-validation messages (and sub-messages) sent by a CO that is communicating using a newer, backwards-compatible version of the CSI protobufs. +Therefore, the CO SHALL reconcile successful capability-validation responses by comparing the validated capabilities with those that it had originally requested. + ```protobuf message ValidateVolumeCapabilitiesRequest { // The ID of the volume to check. This field is REQUIRED. string volume_id = 1; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + // The capabilities that the CO wants to check for the volume. 
This - // call SHALL return "supported" only if all the volume capabilities + // call SHALL return "confirmed" only if all the volume capabilities // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; + repeated VolumeCapability volume_capabilities = 3; - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - map volume_attributes = 3; + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - repeated Topology accessible_topology = 4; + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; } message ValidateVolumeCapabilitiesResponse { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. - bool supported = 1; + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; - // Message to the CO if `supported` above is false. This field is + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. 
This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; + + // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. string message = 2; @@ -1225,6 +1318,8 @@ The CO MUST implement the specified error recovery behavior when it encounters t A Controller Plugin MUST implement this RPC call if it has `LIST_VOLUMES` capability. The Plugin SHALL return the information about all the volumes that it knows about. +If volumes are created and/or deleted while the CO is concurrently paging through `ListVolumes` results then it is possible that the CO MAY either witness duplicate volumes in the list, not witness existing volumes, or both. +The CO SHALL NOT expect a consistent "view" of all volumes when paging through the volume list via multiple calls to `ListVolumes`. ```protobuf message ListVolumesRequest { @@ -1298,7 +1393,7 @@ message GetCapacityRequest { // `accessible_topology`. This is the same as the // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. Topology accessible_topology = 3; } @@ -1329,7 +1424,7 @@ message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. 
- repeated ControllerServiceCapability capabilities = 2; + repeated ControllerServiceCapability capabilities = 1; } // Specifies a capability of the controller service. @@ -1346,11 +1441,15 @@ message ControllerServiceCapability { // CREATE_DELETE_SNAPSHOT MUST support creating volume from // snapshot. CREATE_DELETE_SNAPSHOT = 5; - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; } Type type = 1; @@ -1373,17 +1472,43 @@ A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_SNAPSH This RPC will be called by the CO to create a new snapshot from a source volume on behalf of a user. This operation MUST be idempotent. -If a snapshot corresponding to the specified snapshot `name` is already successfully cut and uploaded (if upload is part of the process) and is compatible with the specified `source_volume_id` and `parameters` in the `CreateSnapshotRequest`, the Plugin MUST reply `0 OK` with the corresponding `CreateSnapshotResponse`. +If a snapshot corresponding to the specified snapshot `name` is successfully cut and ready to use (meaning it MAY be specified as a `volume_content_source` in a `CreateVolumeRequest`), the Plugin MUST reply `0 OK` with the corresponding `CreateSnapshotResponse`. If an error occurs before a snapshot is cut, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. 
-For plugins that implement snapshot uploads, `CreateSnapshot` SHOULD return `10 ABORTED`, a gRPC code that indicates the operation is pending for snapshot, during the snapshot uploading processs. -If an error occurs during the uploading process, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. +For plugins that supports snapshot post processing such as uploading, `CreateSnapshot` SHOULD return `0 OK` and `ready_to_use` SHOULD be set to `false` after the snapshot is cut but still being processed. +CO SHOULD then reissue the same `CreateSnapshotRequest` periodically until boolean `ready_to_use` flips to `true` indicating the snapshot has been "processed" and is ready to use to create new volumes. +If an error occurs during the process, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. A snapshot MAY be used as the source to provision a new volume. -A CreateVolumeRequest message may specify an OPTIONAL source snapshot parameter. +A CreateVolumeRequest message MAY specify an OPTIONAL source snapshot parameter. Reverting a snapshot, where data in the original volume is erased and replaced with data in the snapshot, is an advanced functionality not every storage system can support and therefore is currently out of scope. +##### The ready_to_use Parameter + +Some SPs MAY "process" the snapshot after the snapshot is cut, for example, maybe uploading the snapshot somewhere after the snapshot is cut. +The post-cut process MAY be a long process that could take hours. +The CO MAY freeze the application using the source volume before taking the snapshot. +The purpose of `freeze` is to ensure the application data is in consistent state. +When `freeze` is performed, the container is paused and the application is also paused. +When `thaw` is performed, the container and the application start running again. 
+During the snapshot processing phase, since the snapshot is already cut, a `thaw` operation can be performed so application can start running without waiting for the process to complete. +The `ready_to_use` parameter of the snapshot will become `true` after the process is complete. + +For SPs that do not do additional processing after cut, the `ready_to_use` parameter SHOULD be `true` after the snapshot is cut. +`thaw` can be done when the `ready_to_use` parameter is `true` in this case. + +The `ready_to_use` parameter provides guidance to the CO on when it can "thaw" the application in the process of snapshotting. +If the cloud provider or storage system needs to process the snapshot after the snapshot is cut, the `ready_to_use` parameter returned by CreateSnapshot SHALL be `false`. +CO MAY continue to call CreateSnapshot while waiting for the process to complete until `ready_to_use` becomes `true`. +Note that CreateSnapshot no longer blocks after the snapshot is cut. + +A gRPC error code SHALL be returned if an error occurs during any stage of the snapshotting process. +A CO SHOULD explicitly delete snapshots when an error occurs. + +Based on this information, CO can issue repeated (idempotent) calls to CreateSnapshot, monitor the response, and make decisions. +Note that CreateSnapshot is a synchronous call and it MUST block until the snapshot is cut. + ```protobuf message CreateSnapshotRequest { // The ID of the source volume to be snapshotted. @@ -1392,12 +1517,16 @@ message CreateSnapshotRequest { // The suggested name for the snapshot. This field is REQUIRED for // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 2; // Secrets required by plugin to complete snapshot creation request. // This field is OPTIONAL. 
Refer to the `Secrets Requirements` // section on how to use this field. - map create_snapshot_secrets = 3; + map secrets = 3 [(csi_secret) = true]; // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. The Plugin is responsible for parsing and @@ -1419,7 +1548,7 @@ message CreateSnapshotResponse { Snapshot snapshot = 1; } -// The information about a provisioned snapshot. +// Information about a specific snapshot. message Snapshot { // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to @@ -1430,11 +1559,16 @@ message Snapshot { // zero means it is unspecified. int64 size_bytes = 1; - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - string id = 2; + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; // Identity information for the source volume. Note that creating a // snapshot from a snapshot is not supported here so the source has to @@ -1442,43 +1576,13 @@ message Snapshot { string source_volume_id = 3; // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. 
- int64 created_at = 4; - - // The status of a snapshot. - SnapshotStatus status = 5; -} + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; -// The status of a snapshot. -message SnapshotStatus { - enum Type { - UNKNOWN = 0; - // A snapshot is ready for use. - READY = 1; - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - UPLOADING = 2; - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - ERROR_UPLOADING = 3; - } - // This field is REQUIRED. - Type type = 1; - - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - string details = 2; + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; } ``` @@ -1491,16 +1595,14 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Snapshot already exists but is incompatible | 6 ALREADY_EXISTS | Indicates that a snapshot corresponding to the specified snapshot `name` already exists but is incompatible with the specified `volume_id`. | Caller MUST fix the arguments or use a different `name` before retrying. 
| -| Operation pending for snapshot | 10 ABORTED | Indicates that there is a already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | CreateSnapshot call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` to discover Plugin capabilities. | -| Not enough space to create snapshot | 13 RESOURCE_EXHAUSTED | There is not enough space on the storage system to handle the create snapshot request. | Caller should fail this request. Future calls to CreateSnapshot may succeed if space is freed up. | +| Operation pending for snapshot | 10 ABORTED | Indicates that there is already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. 
| +| Not enough space to create snapshot | 13 RESOURCE_EXHAUSTED | There is not enough space on the storage system to handle the create snapshot request. | Caller SHOULD fail this request. Future calls to CreateSnapshot MAY succeed if space is freed up. | #### `DeleteSnapshot` A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_SNAPSHOT` capability. This RPC will be called by the CO to delete a snapshot. -If successful, the storage space associated with the snapshot MUST be released and all the data in the snapshot SHALL NOT be accessible anymore. This operation MUST be idempotent. If a snapshot corresponding to the specified `snapshot_id` does not exist or the artifacts associated with the snapshot do not exist anymore, the Plugin MUST reply `0 OK`. @@ -1514,7 +1616,7 @@ message DeleteSnapshotRequest { // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map delete_snapshot_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteSnapshotResponse {} @@ -1530,7 +1632,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Snapshot in use | 9 FAILED_PRECONDITION | Indicates that the snapshot corresponding to the specified `snapshot_id` could not be deleted because it is in use by another resource. | Caller SHOULD ensure that there are no other resources using the snapshot, and then retry with exponential back off. | | Operation pending for snapshot | 10 ABORTED | Indicates that there is already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. 
However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | DeleteSnapshot call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` to discover Plugin capabilities. | #### `ListSnapshots` @@ -1538,6 +1639,8 @@ The CO MUST implement the specified error recovery behavior when it encounters t A Controller Plugin MUST implement this RPC call if it has `LIST_SNAPSHOTS` capability. The Plugin SHALL return the information about all snapshots on the storage system within the given parameters regardless of how they were created. `ListSnapshots` SHALL NOT list a snapshot that is being created but has not been cut successfully yet. +If snapshots are created and/or deleted while the CO is concurrently paging through `ListSnapshots` results then it is possible that the CO MAY either witness duplicate snapshots in the list, not witness existing snapshots, or both. +The CO SHALL NOT expect a consistent "view" of all snapshots when paging through the snapshot list via multiple calls to `ListSnapshots`. ```protobuf // List all snapshots on the storage system regardless of how they were @@ -1566,7 +1669,8 @@ message ListSnapshotsRequest { // Identity information for a specific snapshot. This field is // OPTIONAL. It can be used to list only a specific snapshot. // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. + // and will not block if the snapshot is being processed after + // it is cut. 
string snapshot_id = 4; } @@ -1621,40 +1725,10 @@ If a `CreateSnapshot` operation times out before the snapshot is cut, leaving th 2. The CO takes no further action regarding the timed out RPC, a snapshot is possibly leaked and the operator/user is expected to clean up. It is NOT REQUIRED for a controller plugin to implement the `LIST_SNAPSHOTS` capability if it supports the `CREATE_DELETE_SNAPSHOT` capability: the onus is upon the CO to take into consideration the full range of plugin capabilities before deciding how to proceed in the above scenario. -A controller plugin COULD implement the `LIST_SNAPSHOTS` capability and call it repeatedly with the `snapshot_id` as a filter to query whether the uploading process is complete or not if it needs to upload a snapshot after it is being cut. -##### Snapshot Statuses - -A snapshot could have the following statusus: UPLOADING, READY, and ERROR. - -Some cloud providers will upload the snapshot to a location in the cloud (i.e., an object store) after the snapshot is cut. -Uploading may be a long process that could take hours. -If a `freeze` operation was done on the application before taking the snapshot, it could be a long time before the application can be running again if we wait until the upload is complete to `thaw` the application. -The purpose of `freeze` is to ensure the application data is in consistent state. -When `freeze` is performed, the container is paused and the application is also paused. -When `thaw` is performed, the container and the application start running again. -During the snapshot uploading phase, since the snapshot is already cut, a `thaw` operation can be performed so application can start running without waiting for the upload to complete. -The status of the snapshot will become `READY` after the upload is complete. - -For cloud providers and storage systems that don't have the uploading process, the status should be `READY` after the snapshot is cut. 
-`thaw` can be done when the status is `READY` in this case. - -A `CREATING` status is not included here because CreateSnapshot is synchronous and will block until the snapshot is cut. - -`ERROR` is a terminal snapshot status. -A CO SHOULD explicitly delete snapshots in this status. - -The SnapshotStatus parameter provides guidance to the CO on what action can be taken in the process of snapshotting. -Based on this information, CO can issue repeated (idemponent) calls to CreateSnapshot, monitor the response, and make decisions. -Note that CreateSnapshot is a synchronous call and it must block until the snapshot is cut. -If the cloud provider or storage system does not need to upload the snapshot after it is cut, the status returned by CreateSnapshot SHALL be `READY`. -If the cloud provider or storage system needs to upload the snapshot after the snapshot is cut, the status returned by CreateSnapshot SHALL be `UPLOADING`. -CO MAY continue to call CreateSnapshot while waiting for the upload to complete until the status becomes `READY`. -Note that CreateSnapshot no longer blocks after the snapshot is cut. - -Alternatively, ListSnapshots can be called repeatedly with snapshot_id as filtering to wait for the upload to complete. ListSnapshots SHALL return with current information regarding the snapshots on the storage system. -When upload is complete, the status of the snapshot from ListSnapshots SHALL become `READY`. +When processing is complete, the `ready_to_use` parameter of the snapshot from ListSnapshots SHALL become `true`. +The downside of calling ListSnapshots is that ListSnapshots will not return a gRPC error code if an error occurs during the processing. So calling CreateSnapshot repeatedly is the preferred way to check if the processing is complete. 
### Node Service RPC @@ -1684,28 +1758,33 @@ message NodeStageVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the volume will be published. It MUST be an + // The path to which the volume MAY be staged. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. + // `staging_target_path` per volume. The CO SHALL ensure that the + // process serving the request has `read` and `write` permission to + // the path, and is able to create files or directories at the path + // if it does not exist. // This is a REQUIRED field. string staging_target_path = 3; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 4; // Secrets required by plugin to complete node stage volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_stage_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
+ map volume_context = 6; } message NodeStageVolumeResponse { @@ -1723,7 +1802,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the specified `staging_target_path` but is incompatible with the specified `volume_capability` flag. | Caller MUST fix the arguments before retying. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Exceeds capabilities | 9 FAILED_PRECONDITION | Indicates that the CO has exceeded the volume's capabilities because the volume does not have MULTI_NODE capability. | Caller MAY choose to call `ValidateVolumeCapabilities` to validate the volume capabilities, or wait for the volume to be unpublished on the node. | #### `NodeUnstageVolume` @@ -1751,7 +1829,7 @@ message NodeUnstageVolumeRequest { // The ID of the volume. This field is REQUIRED. 
string volume_id = 1; - // The path at which the volume was published. It MUST be an absolute + // The path at which the volume was staged. It MUST be an absolute // path in the root filesystem of the process serving this request. // This is a REQUIRED field. string staging_target_path = 2; @@ -1771,7 +1849,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. 
| #### RPC Interactions and Reference Counting `NodeStageVolume`, `NodeUnstageVolume`, `NodePublishVolume`, `NodeUnpublishVolume` @@ -1802,7 +1879,7 @@ The following table shows what the Plugin SHOULD return when receiving a second | MULTI_NODE | OK (idempotent) | ALREADY_EXISTS | OK | OK | | Non MULTI_NODE | OK (idempotent) | ALREADY_EXISTS | FAILED_PRECONDITION | FAILED_PRECONDITION| -(`Tn`: target path of the n-th `NodePublishVolume`, `Pn`: other arguments of the n-th `NodePublishVolume` except `node_publish_secrets`) +(`Tn`: target path of the n-th `NodePublishVolume`, `Pn`: other arguments of the n-th `NodePublishVolume` except `secrets`) ```protobuf message NodePublishVolumeRequest { @@ -1814,9 +1891,9 @@ message NodePublishVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the device was mounted by `NodeStageVolume`. + // The path to which the volume was staged by `NodeStageVolume`. // It MUST be an absolute path in the root filesystem of the process // serving this request. // It MUST be set if the Node Plugin implements the @@ -1827,28 +1904,31 @@ message NodePublishVolumeRequest { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. + // The CO SHALL ensure that the process serving the request has + // `read` and `write` permissions to the path, and is able to create + // files or directories at the path if it does not exist. // This is a REQUIRED field. string target_path = 4; - // The capability of the volume the CO expects the volume to have. 
+ // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 5; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. bool readonly = 6; // Secrets required by plugin to complete node publish volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_publish_secrets = 7; + map secrets = 7 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. - map volume_attributes = 8; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; } message NodePublishVolumeResponse { @@ -1866,7 +1946,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the specified `target_path` but is incompatible with the specified `volume_capability` or `readonly` flag. | Caller MUST fix the arguments before retying. 
| -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Exceeds capabilities | 9 FAILED_PRECONDITION | Indicates that the CO has exceeded the volume's capabilities because the volume does not have MULTI_NODE capability. | Caller MAY choose to call `ValidateVolumeCapabilities` to validate the volume capabilities, or wait for the volume to be unpublished on the node. | | Staging target path not set | 9 FAILED_PRECONDITION | Indicates that `STAGE_UNSTAGE_VOLUME` capability is set but no `staging_target_path` was set. | Caller MUST make sure call to `NodeStageVolume` is made and returns success before retrying with valid `staging_target_path`. | @@ -1910,41 +1989,68 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. 
In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -#### `NodeGetId` +#### `NodeGetVolumeStats` -`NodeGetId` RPC call is deprecated. -Users of this RPC call SHOULD use `NodeGetInfo`. +A Node plugin MUST implement this RPC call if it has GET_VOLUME_STATS node capability. +`NodeGetVolumeStats` RPC call returns the volume capacity statistics available for the volume. + +If the volume is being used in `BlockVolume` mode then `used` and `available` MAY be omitted from `usage` field of `NodeGetVolumeStatsResponse`. +Similarly, inode information MAY be omitted from `NodeGetVolumeStatsResponse` when unavailable. -A Node Plugin MUST implement this RPC call if the plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability. -The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. -The CO SHOULD call this RPC for the node at which it wants to place the workload. -The result of this call will be used by CO in `ControllerPublishVolume`. ```protobuf -message NodeGetIdRequest { - // Intentionally empty. -} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; -message NodeGetIdResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. 
// This is a REQUIRED field. - string node_id = 1; + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; } ``` -##### NodeGetId Errors +##### NodeGetVolumeStats Errors -If the plugin is unable to complete the NodeGetId call successfully, it MUST return a non-ok gRPC code in the gRPC status. +If the plugin is unable to complete the `NodeGetVolumeStats` call successfully, it MUST return a non-ok gRPC code in the gRPC status. If the conditions defined below are encountered, the plugin MUST return the specified gRPC error code. The CO MUST implement the specified error recovery behavior when it encounters the gRPC error code. -Condition | gRPC Code | Description | Recovery Behavior -| --- | --- | --- | --- | -| Call not implemented | 12 UNIMPLEMENTED | NodeGetId call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | + +| Condition | gRPC Code | Description | Recovery Behavior | +|-----------|-----------|-------------|-------------------| +| Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist on specified `volume_path`. 
| Caller MUST verify that the `volume_id` is correct and that the volume is accessible on specified `volume_path` and has not been deleted before retrying with exponential back off. | #### `NodeGetCapabilities` @@ -1968,6 +2074,10 @@ message NodeServiceCapability { enum Type { UNKNOWN = 0; STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; } Type type = 1; @@ -1990,6 +2100,8 @@ If the plugin is unable to complete the NodeGetCapabilities call successfully, i A Node Plugin MUST implement this RPC call if the plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability. The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. The CO SHOULD call this RPC for the node at which it wants to place the workload. +The CO MAY call this RPC more than once for a given node. +The SP SHALL NOT expect the CO to call this RPC more than once. The result of this call will be used by CO in `ControllerPublishVolume`. ```protobuf @@ -1997,9 +2109,14 @@ message NodeGetInfoRequest { } message NodeGetInfoResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. string node_id = 1; // Maximum number of volumes that controller can publish to the node. @@ -2012,7 +2129,7 @@ message NodeGetInfoResponse { // Specifies where (regions, zones, racks, etc.) 
the node is // accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // COs MAY use this information along with the topology information // returned in CreateVolumeResponse to ensure that a given volume is // accessible from a given node when scheduling workloads. @@ -2033,14 +2150,8 @@ message NodeGetInfoResponse { ##### NodeGetInfo Errors If the plugin is unable to complete the NodeGetInfo call successfully, it MUST return a non-ok gRPC code in the gRPC status. -If the conditions defined below are encountered, the plugin MUST return the specified gRPC error code. The CO MUST implement the specified error recovery behavior when it encounters the gRPC error code. -Condition | gRPC Code | Description | Recovery Behavior -| --- | --- | --- | --- | -| Call not implemented | 12 UNIMPLEMENTED | NodeGetInfo call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | - - ## Protocol ### Connectivity @@ -2051,7 +2162,7 @@ Condition | gRPC Code | Description | Recovery Behavior Support for OPTIONAL RPCs is reported by the `ControllerGetCapabilities` and `NodeGetCapabilities` RPC calls. * The CO SHALL provide the listen-address for the Plugin by way of the `CSI_ENDPOINT` environment variable. Plugin components SHALL create, bind, and listen for RPCs on the specified listen address. - * Only UNIX Domain Sockets may be used as endpoints. + * Only UNIX Domain Sockets MAY be used as endpoints. This will likely change in a future version of this specification to support non-UNIX platforms. * All supported RPC services MUST be available at the listen address of the Plugin. 
@@ -2060,7 +2171,7 @@ Condition | gRPC Code | Description | Recovery Behavior * The CO operator and Plugin Supervisor SHOULD take steps to ensure that any and all communication between the CO and Plugin Service are secured according to best practices. * Communication between a CO and a Plugin SHALL be transported over UNIX Domain Sockets. * gRPC is compatible with UNIX Domain Sockets; it is the responsibility of the CO operator and Plugin Supervisor to properly secure access to the Domain Socket using OS filesystem ACLs and/or other OS-specific security context tooling. - * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets must provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). + * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets MUST provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). Proxy components transporting communication over IP networks SHALL be responsible for securing communications over such networks. * Both the CO and Plugin SHOULD avoid accidental leakage of sensitive information (such as redacting such information from log files). @@ -2105,8 +2216,8 @@ Condition | gRPC Code | Description | Recovery Behavior * Variables defined by this specification SHALL be identifiable by their `CSI_` name prefix. * Configuration properties not defined by the CSI specification SHALL NOT use the same `CSI_` name prefix; this prefix is reserved for common configuration properties defined by the CSI specification. -* The Plugin Supervisor SHOULD supply all recommended CSI environment variables to a Plugin. -* The Plugin Supervisor SHALL supply all required CSI environment variables to a Plugin. +* The Plugin Supervisor SHOULD supply all RECOMMENDED CSI environment variables to a Plugin. 
+* The Plugin Supervisor SHALL supply all REQUIRED CSI environment variables to a Plugin. ##### `CSI_ENDPOINT` @@ -2141,8 +2252,8 @@ Supervised plugins MAY be isolated and/or resource-bounded. ##### Available Services * Plugin Packages MAY support all or a subset of CSI services; service combinations MAY be configurable at runtime by the Plugin Supervisor. - * A plugin must know the "mode" in which it is operating (e.g. node, controller, or both). - * This specification does not dictate the mechanism by which mode of operation must be discovered, and instead places that burden upon the SP. + * A plugin MUST know the "mode" in which it is operating (e.g. node, controller, or both). + * This specification does not dictate the mechanism by which mode of operation MUST be discovered, and instead places that burden upon the SP. * Misconfigured plugin software SHOULD fail-fast with an OS-appropriate error code. ##### Linux Capabilities @@ -2158,7 +2269,7 @@ Supervised plugins MAY be isolated and/or resource-bounded. ##### Cgroup Isolation * A Plugin MAY be constrained by cgroups. -* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin may access requisite devices. +* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin MAY access requisite devices. * A Plugin Supervisor MAY define resource limits for a Plugin. 
##### Resource Requirements diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore index 984ec0fbb..81c985c4d 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.gitignore +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -11,3 +11,9 @@ *.out bin/mock cmd/csi-sanity/csi-sanity + +# JetBrains GoLand +.idea + +# Vim +*.swp diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml index 9d636c7f6..7a8171919 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -1,14 +1,15 @@ language: go +sudo: required +services: + - docker matrix: include: - - go: 1.x + - go: 1.10.3 script: -- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 -- go vet $(go list ./... | grep -v vendor) -- go test $(go list ./... | grep -v vendor | grep -v "cmd/csi-sanity") -- ./hack/e2e.sh +- make test after_success: - if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + make container docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; make push; fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md new file mode 100644 index 000000000..41b73b76e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
+ +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock index 2737ba719..443ad9700 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock @@ -2,18 +2,23 @@ [[projects]] + digest = "1:26ee2356254e58b9872ba736f66aff1c54a26f08c7d16afbf49695131a87d454" name = "github.com/container-storage-interface/spec" - packages = ["lib/go/csi/v0"] - revision = "2178fdeea87f1150a17a63252eee28d4d8141f72" - version = "v0.3.0" + packages = ["lib/go/csi"] + pruneopts = "UT" + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" [[projects]] + digest = "1:bc38c7c481812e178d85160472e231c5e1c9a7f5845d67e23ee4e706933c10d8" name = "github.com/golang/mock" packages = ["gomock"] + pruneopts = "UT" revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" version = "v1.1.1" [[projects]] + digest = "1:588beb9f80d2b0afddf05663b32d01c867da419458b560471d81cca0286e76b8" name = "github.com/golang/protobuf" packages = [ "proto", @@ -22,12 +27,14 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers" + "ptypes/wrappers", ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" [[projects]] + digest = "1:72f35d3e412bc67b121e15ea4c88a3b3da8bcbc2264339e7ffa4a1865799840c" name = "github.com/onsi/ginkgo" packages = [ ".", @@ -47,12 +54,14 @@ "reporters/stenographer", "reporters/stenographer/support/go-colorable", "reporters/stenographer/support/go-isatty", - "types" + "types", ] + pruneopts = "UT" revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf" version = "v1.5.0" [[projects]] + digest = 
"1:d0c2c4e2d0006cd28c220a549cda1de8e67abc65ed4c572421492bbf0492ceaf" name = "github.com/onsi/gomega" packages = [ ".", @@ -66,25 +75,31 @@ "matchers/support/goraph/edge", "matchers/support/goraph/node", "matchers/support/goraph/util", - "types" + "types", ] + pruneopts = "UT" revision = "62bff4df71bdbc266561a0caee19f0594b17c240" version = "v1.4.0" [[projects]] + digest = "1:9e9193aa51197513b3abcb108970d831fbcf40ef96aa845c4f03276e1fa316d2" name = "github.com/sirupsen/logrus" packages = ["."] + pruneopts = "UT" revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" version = "v1.0.5" [[projects]] branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" name = "golang.org/x/crypto" packages = ["ssh/terminal"] + pruneopts = "UT" revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" [[projects]] branch = "master" + digest = "1:0bb2e6ef036484991ed446a6c698698b8901766981d4d22cc8e53fedb09709ac" name = "golang.org/x/net" packages = [ "context", @@ -96,20 +111,24 @@ "http2/hpack", "idna", "internal/timeseries", - "trace" + "trace", ] + pruneopts = "UT" revision = "1e491301e022f8f977054da4c2d852decd59571f" [[projects]] branch = "master" + digest = "1:8fbfc6ea1a8a078697633be97f07dd83a83d32a96959d42195464c13c25be374" name = "golang.org/x/sys" packages = [ "unix", - "windows" + "windows", ] + pruneopts = "UT" revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" [[projects]] + digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" name = "golang.org/x/text" packages = [ "collate", @@ -137,18 +156,22 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" + digest = "1:601e63e7d4577f907118bec825902505291918859d223bce015539e79f1160e3" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] + pruneopts = "UT" revision = 
"32ee49c4dd805befd833990acba36cb75042378c" [[projects]] + digest = "1:7a977fdcd5abff03e94f92e7b374ef37e91c7c389581e5c4348fa98616e6c6be" name = "google.golang.org/grpc" packages = [ ".", @@ -176,20 +199,39 @@ "stats", "status", "tap", - "transport" + "transport", ] + pruneopts = "UT" revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" version = "v1.12.2" [[projects]] + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" name = "gopkg.in/yaml.v2" packages = ["."] + pruneopts = "UT" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "5dd480018adbb94025564b74bad8dd269cc516183b7b428317f6dd04b07726f4" + input-imports = [ + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/mock/gomock", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes", + "github.com/golang/protobuf/ptypes/wrappers", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/sirupsen/logrus", + "golang.org/x/net/context", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/connectivity", + "google.golang.org/grpc/reflection", + "google.golang.org/grpc/status", + "gopkg.in/yaml.v2", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml index e73127854..4e0836d08 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml @@ -27,7 +27,7 @@ [[constraint]] name = "github.com/container-storage-interface/spec" - version = "~0.3.0" + version = "v1.0.0-rc2" [[constraint]] name = "github.com/golang/mock" @@ -35,7 +35,7 @@ [[constraint]] name = "github.com/golang/protobuf" - version = "v1.1.0" + version = "v1.2.0" [[constraint]] name = "github.com/onsi/ginkgo" diff --git 
a/vendor/github.com/kubernetes-csi/csi-test/Makefile b/vendor/github.com/kubernetes-csi/csi-test/Makefile index b31541f62..7fb42c877 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/Makefile +++ b/vendor/github.com/kubernetes-csi/csi-test/Makefile @@ -38,5 +38,15 @@ container: $(APP) push: container docker push $(IMAGE_NAME):$(IMAGE_VERSION) -.PHONY: all clean container push - +test: $(APP) + files=$$(find ./ -name '*.go' | grep -v '^./vendor' ); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi + go vet $$(go list ./... | grep -v vendor) + go test $$(go list ./... | grep -v vendor | grep -v "cmd/csi-sanity") + ./hack/e2e.sh + +.PHONY: all clean container push test diff --git a/vendor/github.com/kubernetes-csi/csi-test/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/OWNERS new file mode 100644 index 000000000..a780cce61 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/OWNERS @@ -0,0 +1,4 @@ +approvers: +- saad-ali +- lpabon +- pohly diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md index f6891ae78..36dce60ba 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -12,7 +12,7 @@ CO developers can use this framework to create drivers based on the ### Mock driver for testing We also provide a container called `quay.io/k8scsi/mock-driver:canary` which can be used as an in-memory mock driver. -It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.2.0`. +It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.3.0`. You will need to setup the environment variable `CSI_ENDPOINT` for the mock driver to know where to create the unix domain socket. @@ -25,5 +25,18 @@ CSI driver. ### Note -* Master is for CSI v0.3.0. 
Please see the branches for other CSI releases. +* Master is for CSI v0.4.0. Please see the branches for other CSI releases. * Only Golang 1.9+ supported. See [gRPC issue](https://github.com/grpc/grpc-go/issues/711#issuecomment-326626790) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS new file mode 100644 index 000000000..00e28e4eb --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali +lpabon diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go index a4f4707a8..4b2d352cc 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go @@ -41,6 +41,7 @@ func init() { flag.StringVar(&config.StagingPath, prefix+"stagingdir", os.TempDir()+"/csi", "Mount point for NodeStage if staging is supported") flag.StringVar(&config.SecretsFile, prefix+"secrets", "", "CSI secrets file") flag.Int64Var(&config.TestVolumeSize, prefix+"testvolumesize", sanity.DefTestVolumeSize, "Base volume size used for provisioned volumes") + flag.StringVar(&config.TestVolumeParametersFile, prefix+"testvolumeparameters", "", "YAML file of volume parameters for provisioned volumes") flag.Parse() } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go index 462118570..01224a3ac 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -14,20 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi/v0 IdentityServer,ControllerServer,NodeServer +//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi IdentityServer,ControllerServer,NodeServer package driver import ( - context "context" + "context" + "encoding/json" "errors" + "fmt" "net" "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) @@ -58,6 +60,8 @@ type CSICreds struct { ControllerUnpublishVolumeSecret string NodeStageVolumeSecret string NodePublishVolumeSecret string + CreateSnapshotSecret string + DeleteSnapshotSecret string } type CSIDriver struct { @@ -100,7 +104,7 @@ func (c *CSIDriver) Start(l net.Listener) error { // Create a new grpc server c.server = grpc.NewServer( - grpc.UnaryInterceptor(c.authInterceptor), + grpc.UnaryInterceptor(c.callInterceptor), ) // Register Mock servers @@ -155,25 +159,54 @@ func (c *CSIDriver) SetDefaultCreds() { ControllerUnpublishVolumeSecret: "secretval4", NodeStageVolumeSecret: "secretval5", NodePublishVolumeSecret: "secretval6", + CreateSnapshotSecret: "secretval7", + DeleteSnapshotSecret: "secretval8", } } -func (c *CSIDriver) authInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + err := c.authInterceptor(req) + if err != nil { + logGRPC(info.FullMethod, req, nil, err) + return nil, err + } + rsp, err := handler(ctx, req) + logGRPC(info.FullMethod, req, rsp, err) + return rsp, err +} + +func (c *CSIDriver) authInterceptor(req interface{}) error 
{ if c.creds != nil { authenticated, authErr := isAuthenticated(req, c.creds) if !authenticated { if authErr == ErrNoCredentials { - return nil, status.Error(codes.InvalidArgument, authErr.Error()) + return status.Error(codes.InvalidArgument, authErr.Error()) } if authErr == ErrAuthFailed { - return nil, status.Error(codes.Unauthenticated, authErr.Error()) + return status.Error(codes.Unauthenticated, authErr.Error()) } } } + return nil +} - h, err := handler(ctx, req) - - return h, err +func logGRPC(method string, request, reply interface{}, err error) { + // Log JSON with the request and response for easier parsing + logMessage := struct { + Method string + Request interface{} + Response interface{} + Error string + }{ + Method: method, + Request: request, + Response: reply, + } + if err != nil { + logMessage.Error = err.Error() + } + msg, _ := json.Marshal(logMessage) + fmt.Printf("gRPCCall: %s\n", msg) } func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { @@ -190,33 +223,45 @@ func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { return authenticateNodeStageVolume(r, creds) case *csi.NodePublishVolumeRequest: return authenticateNodePublishVolume(r, creds) + case *csi.CreateSnapshotRequest: + return authenticateCreateSnapshot(r, creds) + case *csi.DeleteSnapshotRequest: + return authenticateDeleteSnapshot(r, creds) default: return true, nil } } func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerCreateSecrets(), creds.CreateVolumeSecret) + return credsCheck(req.GetSecrets(), creds.CreateVolumeSecret) } func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerDeleteSecrets(), creds.DeleteVolumeSecret) + return credsCheck(req.GetSecrets(), creds.DeleteVolumeSecret) } func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) { - 
return credsCheck(req.GetControllerPublishSecrets(), creds.ControllerPublishVolumeSecret) + return credsCheck(req.GetSecrets(), creds.ControllerPublishVolumeSecret) } func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerUnpublishSecrets(), creds.ControllerUnpublishVolumeSecret) + return credsCheck(req.GetSecrets(), creds.ControllerUnpublishVolumeSecret) } func authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetNodeStageSecrets(), creds.NodeStageVolumeSecret) + return credsCheck(req.GetSecrets(), creds.NodeStageVolumeSecret) } func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetNodePublishSecrets(), creds.NodePublishVolumeSecret) + return credsCheck(req.GetSecrets(), creds.NodePublishVolumeSecret) +} + +func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateSnapshotSecret) +} + +func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret) } func credsCheck(secrets map[string]string, secretVal string) (bool, error) { diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go index abd7d6106..c54acaad5 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -1,849 +1,354 @@ -// Automatically generated by MockGen. DO NOT EDIT! -// Source: ../vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/container-storage-interface/spec/lib/go/csi (interfaces: IdentityServer,ControllerServer,NodeServer) +// Package driver is a generated GoMock package. package driver import ( - v0 "github.com/container-storage-interface/spec/lib/go/csi/v0" + context "context" + csi "github.com/container-storage-interface/spec/lib/go/csi" gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + reflect "reflect" ) -// Mock of isPluginCapability_Type interface -type MockisPluginCapability_Type struct { - ctrl *gomock.Controller - recorder *_MockisPluginCapability_TypeRecorder -} - -// Recorder for MockisPluginCapability_Type (not exported) -type _MockisPluginCapability_TypeRecorder struct { - mock *MockisPluginCapability_Type -} - -func NewMockisPluginCapability_Type(ctrl *gomock.Controller) *MockisPluginCapability_Type { - mock := &MockisPluginCapability_Type{ctrl: ctrl} - mock.recorder = &_MockisPluginCapability_TypeRecorder{mock} - return mock -} - -func (_m *MockisPluginCapability_Type) EXPECT() *_MockisPluginCapability_TypeRecorder { - return _m.recorder -} - -func (_m *MockisPluginCapability_Type) isPluginCapability_Type() { - _m.ctrl.Call(_m, "isPluginCapability_Type") -} - -func (_mr *_MockisPluginCapability_TypeRecorder) isPluginCapability_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isPluginCapability_Type") -} - -// Mock of isVolumeContentSource_Type interface -type MockisVolumeContentSource_Type struct { - ctrl *gomock.Controller - recorder *_MockisVolumeContentSource_TypeRecorder -} - -// Recorder for MockisVolumeContentSource_Type (not exported) -type _MockisVolumeContentSource_TypeRecorder struct { - mock *MockisVolumeContentSource_Type -} - -func NewMockisVolumeContentSource_Type(ctrl *gomock.Controller) *MockisVolumeContentSource_Type { - mock := &MockisVolumeContentSource_Type{ctrl: ctrl} - mock.recorder = &_MockisVolumeContentSource_TypeRecorder{mock} - return mock -} 
- -func (_m *MockisVolumeContentSource_Type) EXPECT() *_MockisVolumeContentSource_TypeRecorder { - return _m.recorder -} - -func (_m *MockisVolumeContentSource_Type) isVolumeContentSource_Type() { - _m.ctrl.Call(_m, "isVolumeContentSource_Type") -} - -func (_mr *_MockisVolumeContentSource_TypeRecorder) isVolumeContentSource_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isVolumeContentSource_Type") -} - -// Mock of isVolumeCapability_AccessType interface -type MockisVolumeCapability_AccessType struct { - ctrl *gomock.Controller - recorder *_MockisVolumeCapability_AccessTypeRecorder -} - -// Recorder for MockisVolumeCapability_AccessType (not exported) -type _MockisVolumeCapability_AccessTypeRecorder struct { - mock *MockisVolumeCapability_AccessType -} - -func NewMockisVolumeCapability_AccessType(ctrl *gomock.Controller) *MockisVolumeCapability_AccessType { - mock := &MockisVolumeCapability_AccessType{ctrl: ctrl} - mock.recorder = &_MockisVolumeCapability_AccessTypeRecorder{mock} - return mock -} - -func (_m *MockisVolumeCapability_AccessType) EXPECT() *_MockisVolumeCapability_AccessTypeRecorder { - return _m.recorder -} - -func (_m *MockisVolumeCapability_AccessType) isVolumeCapability_AccessType() { - _m.ctrl.Call(_m, "isVolumeCapability_AccessType") -} - -func (_mr *_MockisVolumeCapability_AccessTypeRecorder) isVolumeCapability_AccessType() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isVolumeCapability_AccessType") -} - -// Mock of isControllerServiceCapability_Type interface -type MockisControllerServiceCapability_Type struct { - ctrl *gomock.Controller - recorder *_MockisControllerServiceCapability_TypeRecorder -} - -// Recorder for MockisControllerServiceCapability_Type (not exported) -type _MockisControllerServiceCapability_TypeRecorder struct { - mock *MockisControllerServiceCapability_Type -} - -func NewMockisControllerServiceCapability_Type(ctrl *gomock.Controller) *MockisControllerServiceCapability_Type { - mock := 
&MockisControllerServiceCapability_Type{ctrl: ctrl} - mock.recorder = &_MockisControllerServiceCapability_TypeRecorder{mock} - return mock -} - -func (_m *MockisControllerServiceCapability_Type) EXPECT() *_MockisControllerServiceCapability_TypeRecorder { - return _m.recorder -} - -func (_m *MockisControllerServiceCapability_Type) isControllerServiceCapability_Type() { - _m.ctrl.Call(_m, "isControllerServiceCapability_Type") -} - -func (_mr *_MockisControllerServiceCapability_TypeRecorder) isControllerServiceCapability_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isControllerServiceCapability_Type") -} - -// Mock of isNodeServiceCapability_Type interface -type MockisNodeServiceCapability_Type struct { - ctrl *gomock.Controller - recorder *_MockisNodeServiceCapability_TypeRecorder -} - -// Recorder for MockisNodeServiceCapability_Type (not exported) -type _MockisNodeServiceCapability_TypeRecorder struct { - mock *MockisNodeServiceCapability_Type -} - -func NewMockisNodeServiceCapability_Type(ctrl *gomock.Controller) *MockisNodeServiceCapability_Type { - mock := &MockisNodeServiceCapability_Type{ctrl: ctrl} - mock.recorder = &_MockisNodeServiceCapability_TypeRecorder{mock} - return mock -} - -func (_m *MockisNodeServiceCapability_Type) EXPECT() *_MockisNodeServiceCapability_TypeRecorder { - return _m.recorder -} - -func (_m *MockisNodeServiceCapability_Type) isNodeServiceCapability_Type() { - _m.ctrl.Call(_m, "isNodeServiceCapability_Type") -} - -func (_mr *_MockisNodeServiceCapability_TypeRecorder) isNodeServiceCapability_Type() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "isNodeServiceCapability_Type") -} - -// Mock of IdentityClient interface -type MockIdentityClient struct { - ctrl *gomock.Controller - recorder *_MockIdentityClientRecorder -} - -// Recorder for MockIdentityClient (not exported) -type _MockIdentityClientRecorder struct { - mock *MockIdentityClient -} - -func NewMockIdentityClient(ctrl *gomock.Controller) 
*MockIdentityClient { - mock := &MockIdentityClient{ctrl: ctrl} - mock.recorder = &_MockIdentityClientRecorder{mock} - return mock -} - -func (_m *MockIdentityClient) EXPECT() *_MockIdentityClientRecorder { - return _m.recorder -} - -func (_m *MockIdentityClient) GetPluginInfo(ctx context.Context, in *v0.GetPluginInfoRequest, opts ...grpc.CallOption) (*v0.GetPluginInfoResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetPluginInfo", _s...) - ret0, _ := ret[0].(*v0.GetPluginInfoResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityClientRecorder) GetPluginInfo(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginInfo", _s...) -} - -func (_m *MockIdentityClient) GetPluginCapabilities(ctx context.Context, in *v0.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*v0.GetPluginCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetPluginCapabilities", _s...) - ret0, _ := ret[0].(*v0.GetPluginCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityClientRecorder) GetPluginCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginCapabilities", _s...) -} - -func (_m *MockIdentityClient) Probe(ctx context.Context, in *v0.ProbeRequest, opts ...grpc.CallOption) (*v0.ProbeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "Probe", _s...) 
- ret0, _ := ret[0].(*v0.ProbeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityClientRecorder) Probe(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "Probe", _s...) -} - -// Mock of IdentityServer interface +// MockIdentityServer is a mock of IdentityServer interface type MockIdentityServer struct { ctrl *gomock.Controller - recorder *_MockIdentityServerRecorder + recorder *MockIdentityServerMockRecorder } -// Recorder for MockIdentityServer (not exported) -type _MockIdentityServerRecorder struct { +// MockIdentityServerMockRecorder is the mock recorder for MockIdentityServer +type MockIdentityServerMockRecorder struct { mock *MockIdentityServer } +// NewMockIdentityServer creates a new mock instance func NewMockIdentityServer(ctrl *gomock.Controller) *MockIdentityServer { mock := &MockIdentityServer{ctrl: ctrl} - mock.recorder = &_MockIdentityServerRecorder{mock} - return mock -} - -func (_m *MockIdentityServer) EXPECT() *_MockIdentityServerRecorder { - return _m.recorder -} - -func (_m *MockIdentityServer) GetPluginInfo(_param0 context.Context, _param1 *v0.GetPluginInfoRequest) (*v0.GetPluginInfoResponse, error) { - ret := _m.ctrl.Call(_m, "GetPluginInfo", _param0, _param1) - ret0, _ := ret[0].(*v0.GetPluginInfoResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityServerRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginInfo", arg0, arg1) -} - -func (_m *MockIdentityServer) GetPluginCapabilities(_param0 context.Context, _param1 *v0.GetPluginCapabilitiesRequest) (*v0.GetPluginCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "GetPluginCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.GetPluginCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityServerRecorder) 
GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPluginCapabilities", arg0, arg1) -} - -func (_m *MockIdentityServer) Probe(_param0 context.Context, _param1 *v0.ProbeRequest) (*v0.ProbeResponse, error) { - ret := _m.ctrl.Call(_m, "Probe", _param0, _param1) - ret0, _ := ret[0].(*v0.ProbeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockIdentityServerRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Probe", arg0, arg1) -} - -// Mock of ControllerClient interface -type MockControllerClient struct { - ctrl *gomock.Controller - recorder *_MockControllerClientRecorder -} - -// Recorder for MockControllerClient (not exported) -type _MockControllerClientRecorder struct { - mock *MockControllerClient -} - -func NewMockControllerClient(ctrl *gomock.Controller) *MockControllerClient { - mock := &MockControllerClient{ctrl: ctrl} - mock.recorder = &_MockControllerClientRecorder{mock} + mock.recorder = &MockIdentityServerMockRecorder{mock} return mock } -func (_m *MockControllerClient) EXPECT() *_MockControllerClientRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { + return m.recorder } -func (_m *MockControllerClient) CreateVolume(ctx context.Context, in *v0.CreateVolumeRequest, opts ...grpc.CallOption) (*v0.CreateVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "CreateVolume", _s...) 
- ret0, _ := ret[0].(*v0.CreateVolumeResponse) +// GetPluginCapabilities mocks base method +func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerClientRecorder) CreateVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateVolume", _s...) +// GetPluginCapabilities indicates an expected call of GetPluginCapabilities +func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) } -func (_m *MockControllerClient) DeleteVolume(ctx context.Context, in *v0.DeleteVolumeRequest, opts ...grpc.CallOption) (*v0.DeleteVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "DeleteVolume", _s...) - ret0, _ := ret[0].(*v0.DeleteVolumeResponse) +// GetPluginInfo mocks base method +func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerClientRecorder) DeleteVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteVolume", _s...) 
+// GetPluginInfo indicates an expected call of GetPluginInfo +func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) } -func (_m *MockControllerClient) ControllerPublishVolume(ctx context.Context, in *v0.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*v0.ControllerPublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ControllerPublishVolume", _s...) - ret0, _ := ret[0].(*v0.ControllerPublishVolumeResponse) +// Probe mocks base method +func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *csi.ProbeRequest) (*csi.ProbeResponse, error) { + ret := m.ctrl.Call(m, "Probe", arg0, arg1) + ret0, _ := ret[0].(*csi.ProbeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerClientRecorder) ControllerPublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerPublishVolume", _s...) -} - -func (_m *MockControllerClient) ControllerUnpublishVolume(ctx context.Context, in *v0.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*v0.ControllerUnpublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ControllerUnpublishVolume", _s...) - ret0, _ := ret[0].(*v0.ControllerUnpublishVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerUnpublishVolume", _s...) 
-} - -func (_m *MockControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *v0.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*v0.ValidateVolumeCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ValidateVolumeCapabilities", _s...) - ret0, _ := ret[0].(*v0.ValidateVolumeCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ValidateVolumeCapabilities", _s...) -} - -func (_m *MockControllerClient) ListVolumes(ctx context.Context, in *v0.ListVolumesRequest, opts ...grpc.CallOption) (*v0.ListVolumesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ListVolumes", _s...) - ret0, _ := ret[0].(*v0.ListVolumesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 +// Probe indicates an expected call of Probe +func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) } -func (_mr *_MockControllerClientRecorder) ListVolumes(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListVolumes", _s...) -} - -func (_m *MockControllerClient) GetCapacity(ctx context.Context, in *v0.GetCapacityRequest, opts ...grpc.CallOption) (*v0.GetCapacityResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetCapacity", _s...) 
- ret0, _ := ret[0].(*v0.GetCapacityResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) GetCapacity(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetCapacity", _s...) -} - -func (_m *MockControllerClient) ControllerGetCapabilities(ctx context.Context, in *v0.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*v0.ControllerGetCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ControllerGetCapabilities", _s...) - ret0, _ := ret[0].(*v0.ControllerGetCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ControllerGetCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerGetCapabilities", _s...) -} - -func (_m *MockControllerClient) CreateSnapshot(ctx context.Context, in *v0.CreateSnapshotRequest, opts ...grpc.CallOption) (*v0.CreateSnapshotResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "CreateSnapshot", _s...) - ret0, _ := ret[0].(*v0.CreateSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) CreateSnapshot(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateSnapshot", _s...) -} - -func (_m *MockControllerClient) DeleteSnapshot(ctx context.Context, in *v0.DeleteSnapshotRequest, opts ...grpc.CallOption) (*v0.DeleteSnapshotResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "DeleteSnapshot", _s...) 
- ret0, _ := ret[0].(*v0.DeleteSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) DeleteSnapshot(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteSnapshot", _s...) -} - -func (_m *MockControllerClient) ListSnapshots(ctx context.Context, in *v0.ListSnapshotsRequest, opts ...grpc.CallOption) (*v0.ListSnapshotsResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ListSnapshots", _s...) - ret0, _ := ret[0].(*v0.ListSnapshotsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerClientRecorder) ListSnapshots(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListSnapshots", _s...) -} - -// Mock of ControllerServer interface +// MockControllerServer is a mock of ControllerServer interface type MockControllerServer struct { ctrl *gomock.Controller - recorder *_MockControllerServerRecorder + recorder *MockControllerServerMockRecorder } -// Recorder for MockControllerServer (not exported) -type _MockControllerServerRecorder struct { +// MockControllerServerMockRecorder is the mock recorder for MockControllerServer +type MockControllerServerMockRecorder struct { mock *MockControllerServer } +// NewMockControllerServer creates a new mock instance func NewMockControllerServer(ctrl *gomock.Controller) *MockControllerServer { mock := &MockControllerServer{ctrl: ctrl} - mock.recorder = &_MockControllerServerRecorder{mock} + mock.recorder = &MockControllerServerMockRecorder{mock} return mock } -func (_m *MockControllerServer) EXPECT() *_MockControllerServerRecorder { - return _m.recorder -} - -func (_m *MockControllerServer) CreateVolume(_param0 context.Context, _param1 *v0.CreateVolumeRequest) 
(*v0.CreateVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "CreateVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.CreateVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateVolume", arg0, arg1) -} - -func (_m *MockControllerServer) DeleteVolume(_param0 context.Context, _param1 *v0.DeleteVolumeRequest) (*v0.DeleteVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "DeleteVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.DeleteVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteVolume", arg0, arg1) -} - -func (_m *MockControllerServer) ControllerPublishVolume(_param0 context.Context, _param1 *v0.ControllerPublishVolumeRequest) (*v0.ControllerPublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "ControllerPublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.ControllerPublishVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) ControllerPublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerPublishVolume", arg0, arg1) -} - -func (_m *MockControllerServer) ControllerUnpublishVolume(_param0 context.Context, _param1 *v0.ControllerUnpublishVolumeRequest) (*v0.ControllerUnpublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "ControllerUnpublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.ControllerUnpublishVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerUnpublishVolume", arg0, arg1) +// EXPECT returns an object that allows the caller 
to indicate expected use +func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder { + return m.recorder } -func (_m *MockControllerServer) ValidateVolumeCapabilities(_param0 context.Context, _param1 *v0.ValidateVolumeCapabilitiesRequest) (*v0.ValidateVolumeCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "ValidateVolumeCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.ValidateVolumeCapabilitiesResponse) +// ControllerGetCapabilities mocks base method +func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerGetCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ValidateVolumeCapabilities", arg0, arg1) +// ControllerGetCapabilities indicates an expected call of ControllerGetCapabilities +func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) } -func (_m *MockControllerServer) ListVolumes(_param0 context.Context, _param1 *v0.ListVolumesRequest) (*v0.ListVolumesResponse, error) { - ret := _m.ctrl.Call(_m, "ListVolumes", _param0, _param1) - ret0, _ := ret[0].(*v0.ListVolumesResponse) +// ControllerPublishVolume mocks base method +func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerPublishVolumeResponse) ret1, _ := 
ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListVolumes", arg0, arg1) +// ControllerPublishVolume indicates an expected call of ControllerPublishVolume +func (mr *MockControllerServerMockRecorder) ControllerPublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerPublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerPublishVolume), arg0, arg1) } -func (_m *MockControllerServer) GetCapacity(_param0 context.Context, _param1 *v0.GetCapacityRequest) (*v0.GetCapacityResponse, error) { - ret := _m.ctrl.Call(_m, "GetCapacity", _param0, _param1) - ret0, _ := ret[0].(*v0.GetCapacityResponse) +// ControllerUnpublishVolume mocks base method +func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerUnpublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetCapacity", arg0, arg1) +// ControllerUnpublishVolume indicates an expected call of ControllerUnpublishVolume +func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) } -func (_m *MockControllerServer) ControllerGetCapabilities(_param0 context.Context, _param1 *v0.ControllerGetCapabilitiesRequest) (*v0.ControllerGetCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "ControllerGetCapabilities", _param0, _param1) - ret0, _ := 
ret[0].(*v0.ControllerGetCapabilitiesResponse) +// CreateSnapshot mocks base method +func (m *MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateSnapshotResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockControllerServerRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "ControllerGetCapabilities", arg0, arg1) -} - -func (_m *MockControllerServer) CreateSnapshot(_param0 context.Context, _param1 *v0.CreateSnapshotRequest) (*v0.CreateSnapshotResponse, error) { - ret := _m.ctrl.Call(_m, "CreateSnapshot", _param0, _param1) - ret0, _ := ret[0].(*v0.CreateSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateSnapshot", arg0, arg1) -} - -func (_m *MockControllerServer) DeleteSnapshot(_param0 context.Context, _param1 *v0.DeleteSnapshotRequest) (*v0.DeleteSnapshotResponse, error) { - ret := _m.ctrl.Call(_m, "DeleteSnapshot", _param0, _param1) - ret0, _ := ret[0].(*v0.DeleteSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteSnapshot", arg0, arg1) -} - -func (_m *MockControllerServer) ListSnapshots(_param0 context.Context, _param1 *v0.ListSnapshotsRequest) (*v0.ListSnapshotsResponse, error) { - ret := _m.ctrl.Call(_m, "ListSnapshots", _param0, _param1) - ret0, _ := ret[0].(*v0.ListSnapshotsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockControllerServerRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, 
"ListSnapshots", arg0, arg1) -} - -// Mock of NodeClient interface -type MockNodeClient struct { - ctrl *gomock.Controller - recorder *_MockNodeClientRecorder -} - -// Recorder for MockNodeClient (not exported) -type _MockNodeClientRecorder struct { - mock *MockNodeClient -} - -func NewMockNodeClient(ctrl *gomock.Controller) *MockNodeClient { - mock := &MockNodeClient{ctrl: ctrl} - mock.recorder = &_MockNodeClientRecorder{mock} - return mock -} - -func (_m *MockNodeClient) EXPECT() *_MockNodeClientRecorder { - return _m.recorder +// CreateSnapshot indicates an expected call of CreateSnapshot +func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) } -func (_m *MockNodeClient) NodeStageVolume(ctx context.Context, in *v0.NodeStageVolumeRequest, opts ...grpc.CallOption) (*v0.NodeStageVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeStageVolume", _s...) - ret0, _ := ret[0].(*v0.NodeStageVolumeResponse) +// CreateVolume mocks base method +func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { + ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeStageVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeStageVolume", _s...) 
+// CreateVolume indicates an expected call of CreateVolume +func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) } -func (_m *MockNodeClient) NodeUnstageVolume(ctx context.Context, in *v0.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*v0.NodeUnstageVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeUnstageVolume", _s...) - ret0, _ := ret[0].(*v0.NodeUnstageVolumeResponse) +// DeleteSnapshot mocks base method +func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteSnapshotResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeUnstageVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnstageVolume", _s...) +// DeleteSnapshot indicates an expected call of DeleteSnapshot +func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) } -func (_m *MockNodeClient) NodePublishVolume(ctx context.Context, in *v0.NodePublishVolumeRequest, opts ...grpc.CallOption) (*v0.NodePublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodePublishVolume", _s...) 
- ret0, _ := ret[0].(*v0.NodePublishVolumeResponse) +// DeleteVolume mocks base method +func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { + ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodePublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodePublishVolume", _s...) +// DeleteVolume indicates an expected call of DeleteVolume +func (mr *MockControllerServerMockRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockControllerServer)(nil).DeleteVolume), arg0, arg1) } -func (_m *MockNodeClient) NodeUnpublishVolume(ctx context.Context, in *v0.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*v0.NodeUnpublishVolumeResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeUnpublishVolume", _s...) - ret0, _ := ret[0].(*v0.NodeUnpublishVolumeResponse) +// GetCapacity mocks base method +func (m *MockControllerServer) GetCapacity(arg0 context.Context, arg1 *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { + ret := m.ctrl.Call(m, "GetCapacity", arg0, arg1) + ret0, _ := ret[0].(*csi.GetCapacityResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeUnpublishVolume(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnpublishVolume", _s...) 
+// GetCapacity indicates an expected call of GetCapacity +func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) } -func (_m *MockNodeClient) NodeGetId(ctx context.Context, in *v0.NodeGetIdRequest, opts ...grpc.CallOption) (*v0.NodeGetIdResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeGetId", _s...) - ret0, _ := ret[0].(*v0.NodeGetIdResponse) +// ListSnapshots mocks base method +func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) + ret0, _ := ret[0].(*csi.ListSnapshotsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeGetId(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetId", _s...) +// ListSnapshots indicates an expected call of ListSnapshots +func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) } -func (_m *MockNodeClient) NodeGetCapabilities(ctx context.Context, in *v0.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*v0.NodeGetCapabilitiesResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeGetCapabilities", _s...) 
- ret0, _ := ret[0].(*v0.NodeGetCapabilitiesResponse) +// ListVolumes mocks base method +func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { + ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) + ret0, _ := ret[0].(*csi.ListVolumesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeGetCapabilities(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetCapabilities", _s...) +// ListVolumes indicates an expected call of ListVolumes +func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1) } -func (_m *MockNodeClient) NodeGetInfo(ctx context.Context, in *v0.NodeGetInfoRequest, opts ...grpc.CallOption) (*v0.NodeGetInfoResponse, error) { - _s := []interface{}{ctx, in} - for _, _x := range opts { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "NodeGetInfo", _s...) - ret0, _ := ret[0].(*v0.NodeGetInfoResponse) +// ValidateVolumeCapabilities mocks base method +func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ValidateVolumeCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeClientRecorder) NodeGetInfo(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetInfo", _s...) 
+// ValidateVolumeCapabilities indicates an expected call of ValidateVolumeCapabilities +func (mr *MockControllerServerMockRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ValidateVolumeCapabilities), arg0, arg1) } -// Mock of NodeServer interface +// MockNodeServer is a mock of NodeServer interface type MockNodeServer struct { ctrl *gomock.Controller - recorder *_MockNodeServerRecorder + recorder *MockNodeServerMockRecorder } -// Recorder for MockNodeServer (not exported) -type _MockNodeServerRecorder struct { +// MockNodeServerMockRecorder is the mock recorder for MockNodeServer +type MockNodeServerMockRecorder struct { mock *MockNodeServer } +// NewMockNodeServer creates a new mock instance func NewMockNodeServer(ctrl *gomock.Controller) *MockNodeServer { mock := &MockNodeServer{ctrl: ctrl} - mock.recorder = &_MockNodeServerRecorder{mock} + mock.recorder = &MockNodeServerMockRecorder{mock} return mock } -func (_m *MockNodeServer) EXPECT() *_MockNodeServerRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { + return m.recorder } -func (_m *MockNodeServer) NodeStageVolume(_param0 context.Context, _param1 *v0.NodeStageVolumeRequest) (*v0.NodeStageVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodeStageVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeStageVolumeResponse) +// NodeGetCapabilities mocks base method +func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeStageVolume(arg0, arg1 
interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeStageVolume", arg0, arg1) +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) } -func (_m *MockNodeServer) NodeUnstageVolume(_param0 context.Context, _param1 *v0.NodeUnstageVolumeRequest) (*v0.NodeUnstageVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodeUnstageVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeUnstageVolumeResponse) +// NodeGetInfo mocks base method +func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnstageVolume", arg0, arg1) +// NodeGetInfo indicates an expected call of NodeGetInfo +func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) } -func (_m *MockNodeServer) NodePublishVolume(_param0 context.Context, _param1 *v0.NodePublishVolumeRequest) (*v0.NodePublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodePublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodePublishVolumeResponse) +// NodeGetVolumeStats mocks base method +func (m *MockNodeServer) NodeGetVolumeStats(arg0 context.Context, arg1 *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + ret := m.ctrl.Call(m, "NodeGetVolumeStats", arg0, arg1) + ret0, _ := 
ret[0].(*csi.NodeGetVolumeStatsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodePublishVolume", arg0, arg1) +// NodeGetVolumeStats indicates an expected call of NodeGetVolumeStats +func (mr *MockNodeServerMockRecorder) NodeGetVolumeStats(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetVolumeStats", reflect.TypeOf((*MockNodeServer)(nil).NodeGetVolumeStats), arg0, arg1) } -func (_m *MockNodeServer) NodeUnpublishVolume(_param0 context.Context, _param1 *v0.NodeUnpublishVolumeRequest) (*v0.NodeUnpublishVolumeResponse, error) { - ret := _m.ctrl.Call(_m, "NodeUnpublishVolume", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeUnpublishVolumeResponse) +// NodePublishVolume mocks base method +func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodePublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodePublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUnpublishVolume", arg0, arg1) +// NodePublishVolume indicates an expected call of NodePublishVolume +func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) } -func (_m *MockNodeServer) NodeGetId(_param0 context.Context, _param1 *v0.NodeGetIdRequest) (*v0.NodeGetIdResponse, error) { - ret := _m.ctrl.Call(_m, "NodeGetId", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeGetIdResponse) +// NodeStageVolume mocks base method +func (m *MockNodeServer) NodeStageVolume(arg0 
context.Context, arg1 *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeStageVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeGetId(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetId", arg0, arg1) +// NodeStageVolume indicates an expected call of NodeStageVolume +func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) } -func (_m *MockNodeServer) NodeGetCapabilities(_param0 context.Context, _param1 *v0.NodeGetCapabilitiesRequest) (*v0.NodeGetCapabilitiesResponse, error) { - ret := _m.ctrl.Call(_m, "NodeGetCapabilities", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeGetCapabilitiesResponse) +// NodeUnpublishVolume mocks base method +func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnpublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetCapabilities", arg0, arg1) +// NodeUnpublishVolume indicates an expected call of NodeUnpublishVolume +func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) } -func (_m *MockNodeServer) NodeGetInfo(_param0 context.Context, _param1 *v0.NodeGetInfoRequest) (*v0.NodeGetInfoResponse, error) { - ret := _m.ctrl.Call(_m, 
"NodeGetInfo", _param0, _param1) - ret0, _ := ret[0].(*v0.NodeGetInfoResponse) +// NodeUnstageVolume mocks base method +func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnstageVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNodeServerRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeGetInfo", arg0, arg1) +// NodeUnstageVolume indicates an expected call of NodeUnstageVolume +func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go new file mode 100644 index 000000000..10ea5f353 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go @@ -0,0 +1,18 @@ +package apitest + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" +) + +func TestMyDriver(t *testing.T) { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + sanity.Test(t, config) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go new file mode 100644 index 000000000..bca267cb7 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go @@ -0,0 +1,42 @@ +package embedded + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestMyDriverGinkgo(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CSI Sanity Test Suite") +} + +// The test suite into which the sanity tests get embedded may already +// have before/after suite functions. There can only be one such +// function. Here we define empty ones because then Ginkgo +// will start complaining at runtime when invoking the embedded case +// in hack/e2e.sh if a PR adds back such functions in the sanity test +// code. +var _ = BeforeSuite(func() {}) +var _ = AfterSuite(func() {}) + +var _ = Describe("MyCSIDriver", func() { + Context("Config A", func() { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + BeforeEach(func() {}) + + AfterEach(func() {}) + + Describe("CSI Driver Test Suite", func() { + sanity.GinkgoTest(config) + }) + }) +}) diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh index 81f3a02eb..baf4c3045 100755 --- a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh @@ -11,10 +11,10 @@ CSI_MOCK_VERSION="master" # See https://github.com/grpc/grpc/blob/master/doc/naming.md runTest() { - CSI_ENDPOINT=$1 mock & + CSI_ENDPOINT=$1 ./bin/mock & local pid=$! - csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? kill -9 $pid if [ $ret -ne 0 ] ; then @@ -24,10 +24,10 @@ runTest() runTestWithCreds() { - CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true mock & + CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true ./bin/mock & local pid=$! - csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? 
kill -9 $pid if [ $ret -ne 0 ] ; then @@ -35,7 +35,26 @@ runTestWithCreds() fi } -go install ./mock || exit 1 +runTestAPI() +{ + CSI_ENDPOINT=$1 ./bin/mock & + local pid=$! + + GOCACHE=off go test -v ./hack/_apitest/api_test.go; ret=$? + + if [ $ret -ne 0 ] ; then + exit $ret + fi + + GOCACHE=off go test -v ./hack/_embedded/embedded_test.go; ret=$? + kill -9 $pid + + if [ $ret -ne 0 ] ; then + exit $ret + fi +} + +make cd cmd/csi-sanity make clean install || exit 1 @@ -47,4 +66,7 @@ rm -f $UDS runTestWithCreds "${UDS}" "${UDS}" rm -f $UDS +runTestAPI "${UDS}" +rm -f $UDS + exit 0 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md index d35e2d26e..8274aa2c6 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md @@ -1,2 +1,22 @@ # Mock CSI Driver -Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock` +Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock`. +It can be used for testing of Container Orchestrators that implement client side +of CSI interface. + +``` +Usage of mock: + -disable-attach + Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability. + -name string + CSI driver name. (default "io.kubernetes.storage.mock") +``` + +It prints all received CSI messages to stdout encoded as json, so a test can check that +CO sent the right CSI message. 
+ +Example of such output: + +``` +gRPCCall: {"Method":"/csi.v0.Controller/ControllerGetCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":6}}},{"Type":{"Rpc":{"type":5}}},{"Type":{"Rpc":{"type":2}}}]},"Error":""} +gRPCCall: {"Method":"/csi.v0.Controller/ControllerPublishVolume","Request":{"volume_id":"12","node_id":"some-fake-node-id","volume_capability":{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}},"Response":null,"Error":"rpc error: code = NotFound desc = Not matching Node ID some-fake-node-id to Mock Node ID io.kubernetes.storage.mock"} +``` diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go new file mode 100644 index 000000000..89835e11f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go @@ -0,0 +1,89 @@ +package cache + +import ( + "strings" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +type SnapshotCache interface { + Add(snapshot Snapshot) + + Delete(i int) + + List(ready bool) []csi.Snapshot + + FindSnapshot(k, v string) (int, Snapshot) +} + +type Snapshot struct { + Name string + Parameters map[string]string + SnapshotCSI csi.Snapshot +} + +type snapshotCache struct { + snapshotsRWL sync.RWMutex + snapshots []Snapshot +} + +func NewSnapshotCache() SnapshotCache { + return &snapshotCache{ + snapshots: make([]Snapshot, 0), + } +} + +func (snap *snapshotCache) Add(snapshot Snapshot) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + snap.snapshots = append(snap.snapshots, snapshot) +} + +func (snap *snapshotCache) Delete(i int) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + copy(snap.snapshots[i:], snap.snapshots[i+1:]) + snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] +} + +func (snap *snapshotCache) List(ready bool) 
[]csi.Snapshot { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshots := make([]csi.Snapshot, 0) + for _, v := range snap.snapshots { + if v.SnapshotCSI.GetReadyToUse() { + snapshots = append(snapshots, v.SnapshotCSI) + } + } + + return snapshots +} + +func (snap *snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshotIdx := -1 + for i, vi := range snap.snapshots { + switch k { + case "id": + if strings.EqualFold(v, vi.SnapshotCSI.GetSnapshotId()) { + return i, vi + } + case "sourceVolumeId": + if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { + return i, vi + } + case "name": + if vi.Name == v { + return i, vi + } + } + } + + return snapshotIdx, Snapshot{} +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go index d66d1881d..486d383be 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go @@ -16,6 +16,7 @@ limitations under the License. 
package main import ( + "flag" "fmt" "net" "os" @@ -28,6 +29,12 @@ import ( ) func main() { + var config service.Config + flag.BoolVar(&config.DisableAttach, "disable-attach", false, "Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability.") + flag.StringVar(&config.DriverName, "name", service.Name, "CSI driver name.") + flag.Int64Var(&config.AttachLimit, "attach-limit", 0, "number of attachable volumes on a node") + flag.Parse() + endpoint := os.Getenv("CSI_ENDPOINT") if len(endpoint) == 0 { fmt.Println("CSI_ENDPOINT must be defined and must be a path") @@ -39,7 +46,7 @@ func main() { } // Create mock driver - s := service.New() + s := service.New(config) servers := &driver.CSIDriverServers{ Controller: s, Identity: s, diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml index 4bc9c578a..e7c9f20d8 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml @@ -10,3 +10,7 @@ NodeStageVolumeSecret: secretKey: secretval5 NodePublishVolumeSecret: secretKey: secretval6 +CreateSnapshotSecret: + secretKey: secretval7 +DeleteSnapshotSecret: + secretKey: secretval8 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go index d4a44f849..eace79f8c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go @@ -4,6 +4,7 @@ import ( "fmt" "math" "path" + "reflect" "strconv" log "github.com/sirupsen/logrus" @@ -11,11 +12,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" ) const ( MaxStorageCapacity = tib + ReadOnlyKey = "readonly" ) func (s *service) CreateVolume( @@ -60,7 
+62,7 @@ func (s *service) CreateVolume( s.volsRWL.Lock() defer s.volsRWL.Unlock() s.vols = append(s.vols, v) - MockVolumes[v.Id] = Volume{ + MockVolumes[v.GetVolumeId()] = Volume{ VolumeCSI: v, NodeID: "", ISStaged: false, @@ -106,6 +108,10 @@ func (s *service) ControllerPublishVolume( req *csi.ControllerPublishVolumeRequest) ( *csi.ControllerPublishVolumeResponse, error) { + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + if len(req.VolumeId) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") } @@ -134,22 +140,49 @@ func (s *service) ControllerPublishVolume( devPathKey := path.Join(req.NodeId, "dev") // Check to see if the volume is already published. - if device := v.Attributes[devPathKey]; device != "" { + if device := v.VolumeContext[devPathKey]; device != "" { + var volRo bool + var roVal string + if ro, ok := v.VolumeContext[ReadOnlyKey]; ok { + roVal = ro + } + + if roVal == "true" { + volRo = true + } else { + volRo = false + } + + // Check if readonly flag is compatible with the publish request. + if req.GetReadonly() != volRo { + return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") + } + return &csi.ControllerPublishVolumeResponse{ - PublishInfo: map[string]string{ - "device": device, + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, }, }, nil } + var roVal string + if req.GetReadonly() { + roVal = "true" + } else { + roVal = "false" + } + // Publish the volume. 
device := "/dev/mock" - v.Attributes[devPathKey] = device + v.VolumeContext[devPathKey] = device + v.VolumeContext[ReadOnlyKey] = roVal s.vols[i] = v return &csi.ControllerPublishVolumeResponse{ - PublishInfo: map[string]string{ - "device": device, + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, }, }, nil } @@ -159,6 +192,10 @@ func (s *service) ControllerUnpublishVolume( req *csi.ControllerUnpublishVolumeRequest) ( *csi.ControllerUnpublishVolumeResponse, error) { + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerUnpublish is not supported") + } + if len(req.VolumeId) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") } @@ -186,12 +223,13 @@ func (s *service) ControllerUnpublishVolume( devPathKey := path.Join(nodeID, "dev") // Check to see if the volume is already unpublished. - if v.Attributes[devPathKey] == "" { + if v.VolumeContext[devPathKey] == "" { return &csi.ControllerUnpublishVolumeResponse{}, nil } // Unpublish the volume.
- delete(v.Attributes, devPathKey) + delete(v.VolumeContext, devPathKey) + delete(v.VolumeContext, ReadOnlyKey) s.vols[i] = v return &csi.ControllerUnpublishVolumeResponse{}, nil @@ -214,7 +252,11 @@ func (s *service) ValidateVolumeCapabilities( } return &csi.ValidateVolumeCapabilitiesResponse{ - Supported: true, + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: req.GetVolumeContext(), + VolumeCapabilities: req.GetVolumeCapabilities(), + Parameters: req.GetParameters(), + }, }, nil } @@ -308,51 +350,228 @@ func (s *service) ControllerGetCapabilities( req *csi.ControllerGetCapabilitiesRequest) ( *csi.ControllerGetCapabilitiesResponse, error) { - return &csi.ControllerGetCapabilitiesResponse{ - Capabilities: []*csi.ControllerServiceCapability{ - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, - }, + caps := []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, }, }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, }, }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, - }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, }, }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, - }, + }, + { + Type: 
&csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, }, }, }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + } + + if !s.config.DisableAttach { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: caps, }, nil } func (s *service) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { - return nil, status.Error(codes.InvalidArgument, "Not Implemented") + // Check arguments + if len(req.GetName()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") + } + if len(req.GetSourceVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") + } + + // Check to see if the snapshot already exists. + if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { + // Requested snapshot name already exists + if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) + } + return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil + } + + // Create the snapshot and add it to the service's in-mem snapshot slice. 
+ snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) + s.snapshots.Add(snapshot) + + return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil } func (s *service) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { - return nil, status.Error(codes.InvalidArgument, "Not Implemented") + + // If the snapshot is not specified, return error + if len(req.SnapshotId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") + } + + // If the snapshot does not exist then return an idempotent response. + i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) + if i < 0 { + return &csi.DeleteSnapshotResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + s.snapshots.Delete(i) + log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + return &csi.DeleteSnapshotResponse{}, nil } func (s *service) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - return nil, status.Error(codes.InvalidArgument, "Not Implemented") + + // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. + if len(req.GetSnapshotId()) != 0 { + return getSnapshotById(s, req) + } + + // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. + if len(req.GetSourceVolumeId()) != 0 { + return getSnapshotByVolumeId(s, req) + } + + // case 3: no parameter is set, so we return all the snapshots. 
+ return getAllSnapshots(s, req) +} + +func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSnapshotId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + + if len(req.GetSourceVolumeId()) != 0 { + if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { + return &csi.ListSnapshotsResponse{}, nil + } + } + + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSourceVolumeId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Copy the mock snapshots into a new slice in order to avoid + // locking the service's snapshot slice for the duration of the + // ListSnapshots RPC. + readyToUse := true + snapshots := s.snapshots.List(readyToUse) + + var ( + ulenSnapshots = int32(len(snapshots)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%s !< int32=%d", + v, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenSnapshots { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(snapshots)=%d", + startingToken, ulenSnapshots) + } + + // Discern the number of remaining entries.
+ rem := ulenSnapshots - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListSnapshotsResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListSnapshotsResponse_Entry{ + Snapshot: &snapshots[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenSnapshots { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListSnapshotsResponse{ + Entries: entries, + NextToken: nextToken, + }, nil } diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go index c83daea5f..7e8735a93 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go @@ -3,7 +3,8 @@ package service import ( "golang.org/x/net/context" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" ) func (s *service) GetPluginInfo( @@ -12,7 +13,7 @@ func (s *service) GetPluginInfo( *csi.GetPluginInfoResponse, error) { return &csi.GetPluginInfoResponse{ - Name: Name, + Name: s.config.DriverName, VendorVersion: VendorVersion, Manifest: Manifest, }, nil @@ -23,7 +24,9 @@ func (s *service) Probe( req *csi.ProbeRequest) ( *csi.ProbeResponse, error) { - return &csi.ProbeResponse{}, nil + return &csi.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, nil } func (s *service) GetPluginCapabilities( diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go index 0321c7405..886a219a7 100644 --- 
a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go @@ -8,7 +8,7 @@ import ( "golang.org/x/net/context" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" ) func (s *service) NodeStageVolume( @@ -16,11 +16,15 @@ func (s *service) NodeStageVolume( req *csi.NodeStageVolumeRequest) ( *csi.NodeStageVolumeResponse, error) { - device, ok := req.PublishInfo["device"] + device, ok := req.PublishContext["device"] if !ok { - return nil, status.Error( - codes.InvalidArgument, - "stage volume info 'device' key required") + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } } if len(req.GetVolumeId()) == 0 { @@ -48,14 +52,14 @@ func (s *service) NodeStageVolume( nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) // Check to see if the volume has already been staged. - if v.Attributes[nodeStgPathKey] != "" { + if v.VolumeContext[nodeStgPathKey] != "" { // TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS" // if the capabilities don't match. return &csi.NodeStageVolumeResponse{}, nil } // Stage the volume. - v.Attributes[nodeStgPathKey] = device + v.VolumeContext[nodeStgPathKey] = device s.vols[i] = v return &csi.NodeStageVolumeResponse{}, nil @@ -87,12 +91,12 @@ func (s *service) NodeUnstageVolume( nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) // Check to see if the volume has already been unstaged. - if v.Attributes[nodeStgPathKey] == "" { + if v.VolumeContext[nodeStgPathKey] == "" { return &csi.NodeUnstageVolumeResponse{}, nil } // Unpublish the volume. 
- delete(v.Attributes, nodeStgPathKey) + delete(v.VolumeContext, nodeStgPathKey) s.vols[i] = v return &csi.NodeUnstageVolumeResponse{}, nil @@ -103,11 +107,15 @@ func (s *service) NodePublishVolume( req *csi.NodePublishVolumeRequest) ( *csi.NodePublishVolumeResponse, error) { - device, ok := req.PublishInfo["device"] + device, ok := req.PublishContext["device"] if !ok { - return nil, status.Error( - codes.InvalidArgument, - "publish volume info 'device' key required") + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "publish volume info 'device' key required") + } } if len(req.GetVolumeId()) == 0 { @@ -135,7 +143,7 @@ func (s *service) NodePublishVolume( nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) // Check to see if the volume has already been published. - if v.Attributes[nodeMntPathKey] != "" { + if v.VolumeContext[nodeMntPathKey] != "" { // Requests marked Readonly fail due to volumes published by // the Mock driver supporting only RW mode. @@ -148,9 +156,9 @@ func (s *service) NodePublishVolume( // Publish the volume. if req.GetStagingTargetPath() != "" { - v.Attributes[nodeMntPathKey] = req.GetStagingTargetPath() + v.VolumeContext[nodeMntPathKey] = req.GetStagingTargetPath() } else { - v.Attributes[nodeMntPathKey] = device + v.VolumeContext[nodeMntPathKey] = device } s.vols[i] = v @@ -182,27 +190,17 @@ func (s *service) NodeUnpublishVolume( req *csi.NodeUnpublishVolumeRequest) ( *csi.NodeUnpublishVolumeResponse, error) { nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) // Check to see if the volume has already been unpublished. - if v.Attributes[nodeMntPathKey] == "" { + if v.VolumeContext[nodeMntPathKey] == "" { return &csi.NodeUnpublishVolumeResponse{}, nil } // Unpublish the volume.
- delete(v.Attributes, nodeMntPathKey) + delete(v.VolumeContext, nodeMntPathKey) s.vols[i] = v return &csi.NodeUnpublishVolumeResponse{}, nil } -func (s *service) NodeGetId( - ctx context.Context, - req *csi.NodeGetIdRequest) ( - *csi.NodeGetIdResponse, error) { - - return &csi.NodeGetIdResponse{ - NodeId: s.nodeID, - }, nil -} - func (s *service) NodeGetCapabilities( ctx context.Context, req *csi.NodeGetCapabilitiesRequest) ( @@ -230,7 +228,17 @@ func (s *service) NodeGetCapabilities( func (s *service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - return &csi.NodeGetInfoResponse{ + csiNodeResponse := &csi.NodeGetInfoResponse{ NodeId: s.nodeID, - }, nil + } + if s.config.AttachLimit > 0 { + csiNodeResponse.MaxVolumesPerNode = s.config.AttachLimit + } + return csiNodeResponse, nil +} + +func (s *service) NodeGetVolumeStats(ctx context.Context, + req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + return &csi.NodeGetVolumeStatsResponse{}, nil + } diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go index dccad79cd..2254ccb83 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go @@ -6,8 +6,11 @@ import ( "sync" "sync/atomic" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-test/mock/cache" "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes" ) const ( @@ -15,7 +18,7 @@ const ( Name = "io.kubernetes.storage.mock" // VendorVersion is the version returned by GetPluginInfo. - VendorVersion = "0.2.0" + VendorVersion = "0.3.0" ) // Manifest is the SP's manifest. 
@@ -23,6 +26,12 @@ var Manifest = map[string]string{ "url": "https://github.com/kubernetes-csi/csi-test/mock", } +type Config struct { + DisableAttach bool + DriverName string + AttachLimit int64 +} + // Service is the CSI Mock service provider. type Service interface { csi.ControllerServer @@ -32,10 +41,13 @@ type Service interface { type service struct { sync.Mutex - nodeID string - vols []csi.Volume - volsRWL sync.RWMutex - volsNID uint64 + nodeID string + vols []csi.Volume + volsRWL sync.RWMutex + volsNID uint64 + snapshots cache.SnapshotCache + snapshotsNID uint64 + config Config } type Volume struct { @@ -51,14 +63,23 @@ type Volume struct { var MockVolumes map[string]Volume // New returns a new Service. -func New() Service { - s := &service{nodeID: Name} +func New(config Config) Service { + s := &service{ + nodeID: config.DriverName, + config: config, + } + s.snapshots = cache.NewSnapshotCache() s.vols = []csi.Volume{ s.newVolume("Mock Volume 1", gib100), s.newVolume("Mock Volume 2", gib100), s.newVolume("Mock Volume 3", gib100), } MockVolumes = map[string]Volume{} + + s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"})) + return s } @@ -73,8 +94,8 @@ const ( func (s *service) newVolume(name string, capcity int64) csi.Volume { return csi.Volume{ - Id: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), - Attributes: map[string]string{"name": name}, + VolumeId: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), + VolumeContext: map[string]string{"name": name}, CapacityBytes: capcity, } } @@ -91,11 +112,11 @@ func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) { for i, vi := range s.vols { switch k { case "id": - if strings.EqualFold(v, vi.Id) { + if strings.EqualFold(v, 
vi.GetVolumeId()) { return i, vi } case "name": - if n, ok := vi.Attributes["name"]; ok && strings.EqualFold(v, n) { + if n, ok := vi.VolumeContext["name"]; ok && strings.EqualFold(v, n) { return i, vi } } @@ -109,3 +130,18 @@ func (s *service) findVolByName( return s.findVol("name", name) } + +func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { + + ptime := ptypes.TimestampNow() + return cache.Snapshot{ + Name: name, + Parameters: parameters, + SnapshotCSI: csi.Snapshot{ + SnapshotId: fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)), + CreationTime: ptime, + SourceVolumeId: sourceVolumeId, + ReadyToUse: true, + }, + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md index 747744ea4..fd30f1922 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md @@ -13,13 +13,50 @@ Golang `TestXXX` functions. For example: ```go func TestMyDriver(t *testing.T) { - // Setup the full driver and its environment - ... setup driver ... + // Setup the full driver and its environment + ... setup driver ... + config := &sanity.Config{ + TargetPath: ... + StagingPath: ... + Address: endpoint, + } - // Now call the test suite - sanity.Test(t, driverEndpointAddress, "/mnt") + + // Now call the test suite + sanity.Test(t, config) } ``` +Only one such test function is supported because under the hood a +Ginkgo test suite gets constructed and executed by the call. + +Alternatively, the tests can also be embedded inside a Ginkgo test +suite. In that case it is possible to define multiple tests with +different configurations: + +```go +var _ = Describe("MyCSIDriver", func () { + Context("Config A", func () { + var config &sanity.Config + + BeforeEach(func() { + //... setup driver and config... + }) + + AfterEach(func() { + //...tear down driver... 
+ }) + + Describe("CSI sanity", func() { + sanity.GinkgoTest(config) + }) + }) + + Context("Config B", func () { + // other configs + }) +}) +``` + ## Command line program Please see [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go new file mode 100644 index 000000000..65a30334f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + "context" + "log" + + "github.com/container-storage-interface/spec/lib/go/csi" + + . "github.com/onsi/ginkgo" +) + +// VolumeInfo keeps track of the information needed to delete a volume. +type VolumeInfo struct { + // Node on which the volume was published, empty if none + // or publishing is not supported. + NodeID string + + // Volume ID assigned by CreateVolume. + VolumeID string +} + +// Cleanup keeps track of resources, in particular volumes, which need +// to be freed when testing is done. +type Cleanup struct { + Context *SanityContext + ControllerClient csi.ControllerClient + NodeClient csi.NodeClient + ControllerPublishSupported bool + NodeStageSupported bool + + // Maps from volume name to the node ID for which the volume + // is published and the volume ID. 
+ volumes map[string]VolumeInfo +} + +// RegisterVolume adds or updates an entry for the volume with the +// given name. +func (cl *Cleanup) RegisterVolume(name string, info VolumeInfo) { + if cl.volumes == nil { + cl.volumes = make(map[string]VolumeInfo) + } + cl.volumes[name] = info +} + +// MaybeRegisterVolume adds or updates an entry for the volume with +// the given name if CreateVolume was successful. +func (cl *Cleanup) MaybeRegisterVolume(name string, vol *csi.CreateVolumeResponse, err error) { + if err == nil && vol.GetVolume().GetVolumeId() != "" { + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + } +} + +// UnregisterVolume removes the entry for the volume with the +// given name, thus preventing all cleanup operations for it. +func (cl *Cleanup) UnregisterVolume(name string) { + if cl.volumes != nil { + delete(cl.volumes, name) + } +} + +// DeleteVolumes stops using the registered volumes and tries to delete all of them. +func (cl *Cleanup) DeleteVolumes() { + if cl.volumes == nil { + return + } + logger := log.New(GinkgoWriter, "cleanup: ", 0) + ctx := context.Background() + + for name, info := range cl.volumes { + logger.Printf("deleting %s = %s", name, info.VolumeID) + if _, err := cl.NodeClient.NodeUnpublishVolume( + ctx, + &csi.NodeUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + TargetPath: cl.Context.Config.TargetPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnpublishVolume: %s", err) + } + + if cl.NodeStageSupported { + if _, err := cl.NodeClient.NodeUnstageVolume( + ctx, + &csi.NodeUnstageVolumeRequest{ + VolumeId: info.VolumeID, + StagingTargetPath: cl.Context.Config.StagingPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnstageVolume: %s", err) + } + } + + if cl.ControllerPublishSupported && info.NodeID != "" { + if _, err := cl.ControllerClient.ControllerUnpublishVolume( + ctx, + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + NodeId: info.NodeID, + Secrets: 
cl.Context.Secrets.ControllerUnpublishVolumeSecret, + }, + ); err != nil { + logger.Printf("warning: ControllerUnpublishVolume: %s", err) + } + } + + if _, err := cl.ControllerClient.DeleteVolume( + ctx, + &csi.DeleteVolumeRequest{ + VolumeId: info.VolumeID, + Secrets: cl.Context.Secrets.DeleteVolumeSecret, + }, + ); err != nil { + logger.Printf("error: DeleteVolume: %s", err) + } + + cl.UnregisterVolume(name) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 0fb22392c..830d6cdb4 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -17,13 +17,15 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi" + + "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -34,18 +36,27 @@ const ( // provisioned volumes. 10GB by default, can be overridden by // setting Config.TestVolumeSize. 
DefTestVolumeSize int64 = 10 * 1024 * 1024 * 1024 + + MaxNameLength int = 128 ) -func TestVolumeSize() int64 { - if config.TestVolumeSize > 0 { - return config.TestVolumeSize +func TestVolumeSize(sc *SanityContext) int64 { + if sc.Config.TestVolumeSize > 0 { + return sc.Config.TestVolumeSize } return DefTestVolumeSize } func verifyVolumeInfo(v *csi.Volume) { Expect(v).NotTo(BeNil()) - Expect(v.GetId()).NotTo(BeEmpty()) + Expect(v.GetVolumeId()).NotTo(BeEmpty()) +} + +func verifySnapshotInfo(snapshot *csi.Snapshot) { + Expect(snapshot).NotTo(BeNil()) + Expect(snapshot.GetSnapshotId()).NotTo(BeEmpty()) + Expect(snapshot.GetSourceVolumeId()).NotTo(BeEmpty()) + Expect(snapshot.GetCreationTime()).NotTo(BeZero()) } func isControllerCapabilitySupported( @@ -69,249 +80,922 @@ func isControllerCapabilitySupported( return false } -var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { +var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { var ( c csi.ControllerClient + n csi.NodeClient + + cl *Cleanup ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - }) + c = csi.NewControllerClient(sc.Conn) + n = csi.NewNodeClient(sc.Conn) - It("should return appropriate capabilities", func() { - caps, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{}) + cl = &Cleanup{ + NodeClient: n, + ControllerClient: c, + Context: sc, + } + }) - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) + AfterEach(func() { + cl.DeleteVolumes() + }) - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + Describe("ControllerGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.ControllerGetCapabilities( + context.Background(), + &csi.ControllerGetCapabilitiesRequest{}) - switch cap.GetRpc().GetType() { - case 
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: - case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: - case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: - case csi.ControllerServiceCapability_RPC_GET_CAPACITY: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: + case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: + case csi.ControllerServiceCapability_RPC_GET_CAPACITY: + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: + case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } } - } + }) }) -}) -var _ = Describe("GetCapacity [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + Describe("GetCapacity", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { + Skip("GetCapacity not supported") + } + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + It("should return capacity (no optional values added)", func() { + _, err := c.GetCapacity( + context.Background(), + &csi.GetCapacityRequest{}) + Expect(err).NotTo(HaveOccurred()) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { - Skip("GetCapacity not supported") - } + // Since capacity is int64 we will not be checking it + // The value of zero is a possible value. 
+ }) }) - It("should return capacity (no optional values added)", func() { - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{}) - Expect(err).NotTo(HaveOccurred()) + Describe("ListVolumes", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { + Skip("ListVolumes not supported") + } + }) - // Since capacity is int64 we will not be checking it - // The value of zero is a possible value. - }) -}) + It("should return appropriate values (no optional values added)", func() { + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) -var _ = Describe("ListVolumes [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + for _, vol := range vols.GetEntries() { + verifyVolumeInfo(vol.GetVolume()) + } + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + // TODO: Add test to test for tokens - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { - Skip("ListVolumes not supported") - } + // TODO: Add test which checks list of volume is there when created, + // and not there when deleted. 
}) - It("should return appropriate values (no optional values added)", func() { - vols, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(vols).NotTo(BeNil()) + Describe("CreateVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("CreateVolume not supported") + } + }) + + It("should fail when no name is provided", func() { + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume("", vol, err) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capabilities are provided", func() { + name := uniqueString("sanity-controller-create-no-volume-capabilities") + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume(name, vol, err) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-single-no-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: 
sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - for _, vol := range vols.GetEntries() { - verifyVolumeInfo(vol.GetVolume()) - } - }) + By("cleaning up deleting the volume") - // TODO: Add test to test for tokens + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-single-with-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: TestVolumeSize(sc), + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + if serverError, ok := status.FromError(err); ok && + (serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented) { + Skip("Required bytes not supported") + } + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize(sc))) - // 
TODO: Add test which checks list of volume is there when created, - // and not there when deleted. -}) + By("cleaning up deleting the volume") -var _ = Describe("CreateVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should not fail when requesting to create a volume with already existing name and same capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice") + size := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + vol2, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + 
Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol2).NotTo(BeNil()) + Expect(vol2.GetVolume()).NotTo(BeNil()) + Expect(vol2.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + Expect(vol1.GetVolume().GetVolumeId()).To(Equal(vol2.GetVolume().GetVolumeId())) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should fail when requesting to create a volume with already existing name and different capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice-different") + size1 := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + size2 := 2 * TestVolumeSize(sc) + + _, err = c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: 
&csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size2, + LimitBytes: size2, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + It("should not fail when creating volume with maximum-length name", func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("CreateVolume not supported") - } + nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' + } + name := string(nameBytes) + By("creating a volume") + size := TestVolumeSize(sc) + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, 
VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when no name is provided", func() { + Describe("DeleteVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("DeleteVolume not supported") + } + }) - req := &csi.CreateVolumeRequest{} + It("should fail when no volume id is provided", func() { - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - _, err := c.CreateVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + It("should succeed when an invalid volume id is used", func() { + + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: "reallyfakevolumeid", + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a volume") + name := uniqueString("sanity-controller-create-appropriate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + 
AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // Delete Volume + By("deleting a volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when no volume capabilities are provided", func() { + Describe("ValidateVolumeCapabilities", func() { + It("should fail when no volume id is provided", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capabilities are provided", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-validate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + 
VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ValidateVolumeCapabilities + By("validating volume capabilities") + valivolcap, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(valivolcap).NotTo(BeNil()) - req := &csi.CreateVolumeRequest{ - Name: "name", - } + // If confirmation is provided then it is REQUIRED to provide + // the volume capabilities + if valivolcap.GetConfirmed() != nil { + Expect(valivolcap.GetConfirmed().GetVolumeCapabilities()).NotTo(BeEmpty()) + } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + By("cleaning up deleting the volume") - _, err := c.CreateVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the requested volume does not exist", func() { + + _, err := 
c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "some-vol-id", + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }, + ) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) }) - It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + Describe("ControllerPublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerPublishVolume not supported") + } + }) - By("creating a volume") - name := "sanity" + It("should fail when no volume id is provided", func() { - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Secrets: sc.Secrets.ControllerPublishVolumeSecret, }, - }, - } + ) + Expect(err).To(HaveOccurred()) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - vol, err := c.CreateVolume(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - 
Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + It("should fail when no node id is provided", func() { - By("cleaning up deleting the volume") + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capability is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + NodeId: "fakenode", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-publish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + By("getting a node id") 
+ nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + By("cleaning up unpublishing the volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) - }) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) - It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { + It("should fail when the volume does not exist", 
func() { - By("creating a volume") - name := "sanity" + By("calling controller publish on a non-existent volume") - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "some-vol-id", + NodeId: "some-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should fail when the node does not exist", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-wrong-node") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: TestVolumeSize(), - }, - } + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) 
+ cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: "some-fake-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) - vol, err := c.CreateVolume(context.Background(), req) - if serverError, ok := status.FromError(err); ok { - if serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented { - Skip("Required bytes not supported") - } else { - Expect(err).NotTo(HaveOccurred()) - } - } else { + By("cleaning up deleting the volume") + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the volume is already published but is incompatible", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-published-incompatible") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: 
&csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize())) - } - By("cleaning up deleting the volume") + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } - - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } - - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) - }) - It("should not fail when requesting to create a volume with already exisiting name and same capacity.", func() { + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - By("creating a volume") - name := "sanity" - size := TestVolumeSize() + // ControllerPublishVolume + By("calling controllerpublish on that volume") - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -319,358 +1003,443 @@ var _ = Describe("CreateVolume [Controller Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, 
- } + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).NotTo(HaveOccurred()) + Expect(conpubvol).NotTo(BeNil()) - vol1, err := c.CreateVolume(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(vol1).NotTo(BeNil()) - Expect(vol1.GetVolume()).NotTo(BeNil()) - Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + // Publish again with different attributes. + pubReq.Readonly = true - req2 := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - } + conpubvol, err = c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) - if secrets != nil { - req2.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - vol2, err := c.CreateVolume(context.Background(), req2) - Expect(err).NotTo(HaveOccurred()) - Expect(vol2).NotTo(BeNil()) - Expect(vol2.GetVolume()).NotTo(BeNil()) - Expect(vol2.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) - Expect(vol1.GetVolume().GetId()).To(Equal(vol2.GetVolume().GetId())) + By("cleaning up unpublishing the volume") - By("cleaning up deleting the volume") + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: 
vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol1.GetVolume().GetId(), - } + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when requesting to create a volume with already exisiting name and different capacity.", func() { - By("creating a volume") - name := "sanity" - size1 := TestVolumeSize() + Describe("ControllerUnpublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerUnpublishVolume not supported") + } + }) - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + It("should fail when no volume id is provided", func() { + + _, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { 
+ + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-unpublish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size1, - LimitBytes: size1, - }, - } - - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - vol1, err := c.CreateVolume(context.Background(), req) - Expect(err).ToNot(HaveOccurred()) - Expect(vol1).NotTo(BeNil()) - Expect(vol1.GetVolume()).NotTo(BeNil()) - Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - size2 := 2 * TestVolumeSize() - - req2 := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: 
vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size2, - LimitBytes: size2, - }, - } - - if secrets != nil { - req2.ControllerCreateSecrets = secrets.CreateVolumeSecret - } - - _, err = c.CreateVolume(context.Background(), req2) - Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - - By("cleaning up deleting the volume") - - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol1.GetVolume().GetId(), - } + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + // ControllerUnpublishVolume + By("calling controllerunpublish on that volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + 
cl.UnregisterVolume(name) + }) }) }) -var _ = Describe("DeleteVolume [Controller Server]", func() { +var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("DeleteVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { + Skip("ListSnapshots not supported") } }) - It("should fail when no volume id is provided", func() { - - req := &csi.DeleteVolumeRequest{} + It("should return appropriate values (no optional values added)", func() { + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + for _, snapshot := range snapshots.GetEntries() { + verifySnapshotInfo(snapshot.GetSnapshot()) } + }) - _, err := c.DeleteVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + It("should return snapshots that match the specify snapshot id", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - It("should succeed when an invalid volume id is used", func() { + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) - req := &csi.DeleteVolumeRequest{ - VolumeId: "reallyfakevolumeid", - } + snapshots, err := 
c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SnapshotId: snapshot.GetSnapshot().GetSnapshotId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(BeNumerically("==", 1)) + verifySnapshotInfo(snapshots.GetEntries()[0].GetSnapshot()) + Expect(snapshots.GetEntries()[0].GetSnapshot().GetSnapshotId()).To(Equal(snapshot.GetSnapshot().GetSnapshotId())) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return empty when the specify snapshot id is not exist", func() { - _, err := c.DeleteVolume(context.Background(), req) + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SnapshotId: "none-exist-id"}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) - It("should return appropriate values (no optional values added)", func() { + It("should return snapshots that match the specify source volume id)", func() { - // Create Volume First By("creating a volume") - name := "sanity" + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - createReq := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: 
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) - if secrets != nil { - createReq.ControllerCreateSecrets = secrets.CreateVolumeSecret + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SourceVolumeId: snapshot.GetSnapshot().GetSourceVolumeId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + for _, snap := range snapshots.GetEntries() { + verifySnapshotInfo(snap.GetSnapshot()) + Expect(snap.GetSnapshot().GetSourceVolumeId()).To(Equal(snapshot.GetSnapshot().GetSourceVolumeId())) } - vol, err := c.CreateVolume(context.Background(), createReq) - + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - // Delete Volume - By("deleting a volume") - - req := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + It("should return empty when the specify source volume id is not exist", func() { - _, err = c.DeleteVolume(context.Background(), req) + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SourceVolumeId: "none-exist-volume-id"}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + 
Expect(snapshots.GetEntries()).To(BeEmpty()) }) -}) -var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + It("check the presence of new snapshots in the snapshot list", func() { + // List Snapshots before creating new snapshots. + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - BeforeEach(func() { - c = csi.NewControllerClient(conn) - }) + totalSnapshots := len(snapshots.GetEntries()) + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - It("should fail when no volume id is provided", func() { + By("creating a snapshot") + snapReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-3", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) - _, err := c.ValidateVolumeCapabilities( + snapshots, err = c.ListSnapshots( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots + 1)) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - It("should fail when no volume capabilities are provided", func() { + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, 
volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) - _, err := c.ValidateVolumeCapabilities( + // List snapshots and check if the deleted snapshot exists in the snapshot list. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots)) }) - It("should return appropriate values (no optional values added)", func() { + It("should return next token when a limited number of entries are requested", func() { + // minSnapshotCount is the minimum number of snapshots expected to exist, + // based on which paginated snapshot listing is performed. + minSnapshotCount := 5 + // maxEntried is the maximum entries in list snapshot request. + maxEntries := 2 + // currentTotalVols is the total number of volumes at a given time. It + // is used to verify that all the snapshots have been listed. + currentTotalSnapshots := 0 + + // Get the number of existing volumes. 
+ snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - // Create Volume First - By("creating a single node writer volume") - name := "sanity" + initialTotalSnapshots := len(snapshots.GetEntries()) + currentTotalSnapshots = initialTotalSnapshots - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } + createVols := make([]*csi.Volume, 0) + createSnapshots := make([]*csi.Snapshot, 0) + + // Ensure minimum minVolCount volumes exist. + if initialTotalSnapshots < minSnapshotCount { + + By("creating required new volumes") + requiredSnapshots := minSnapshotCount - initialTotalSnapshots + + for i := 1; i <= requiredSnapshots; i++ { + volReq := MakeCreateVolumeReq(sc, "volume"+strconv.Itoa(i)) + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + Expect(volume).NotTo(BeNil()) + createVols = append(createVols, volume.GetVolume()) + + snapReq := MakeCreateSnapshotReq(sc, "snapshot"+strconv.Itoa(i), volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + createSnapshots = append(createSnapshots, snapshot.GetSnapshot()) + } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + // Update the current total snapshots count. + currentTotalSnapshots += requiredSnapshots } - vol, err := c.CreateVolume(context.Background(), req) + // Request list snapshots with max entries maxEntries. 
+ snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{ + MaxEntries: int32(maxEntries), + }) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + Expect(snapshots).NotTo(BeNil()) + + nextToken := snapshots.GetNextToken() + + Expect(len(snapshots.GetEntries())).To(Equal(maxEntries)) - // ValidateVolumeCapabilities - By("validating volume capabilities") - valivolcap, err := c.ValidateVolumeCapabilities( + // Request list snapshots with starting_token and no max entries. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: vol.GetVolume().GetId(), - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, + &csi.ListSnapshotsRequest{ + StartingToken: nextToken, }) Expect(err).NotTo(HaveOccurred()) - Expect(valivolcap).NotTo(BeNil()) - Expect(valivolcap.GetSupported()).To(BeTrue()) + Expect(snapshots).NotTo(BeNil()) - By("cleaning up deleting the volume") + // Ensure that all the remaining entries are returned at once. 
+ Expect(len(snapshots.GetEntries())).To(Equal(currentTotalSnapshots - maxEntries)) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + if initialTotalSnapshots < minSnapshotCount { - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + By("cleaning up deleting the snapshots") - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) + for _, snap := range createSnapshots { + delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + } + + By("cleaning up deleting the volumes") + + for _, vol := range createVols { + delVolReq := MakeDeleteVolumeReq(sc, vol.GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + } + } }) + }) -var _ = Describe("ControllerPublishVolume [Controller Server]", func() { +var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient - n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerPublishVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("DeleteSnapshot not supported") } }) - It("should fail when no volume id is provided", func() { + It("should fail when no snapshot id is provided", func() { - req := &csi.ControllerPublishVolumeRequest{} + req := &csi.DeleteSnapshotRequest{} - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.Secrets = sc.Secrets.DeleteSnapshotSecret } - _, err := c.ControllerPublishVolume(context.Background(), req) + _, err := 
c.DeleteSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) @@ -678,212 +1447,196 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no node id is provided", func() { + It("should succeed when an invalid snapshot id is used", func() { + + req := MakeDeleteSnapshotReq(sc, "reallyfakesnapshotid") + _, err := c.DeleteSnapshot(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "DeleteSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + // Create Snapshot First + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "DeleteSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) +}) + +var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityContext) { + var ( + c csi.ControllerClient + ) + + BeforeEach(func() { + c = csi.NewControllerClient(sc.Conn) + + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("CreateSnapshot not supported") + } + }) + + It("should fail when no 
name is provided", func() { - req := &csi.ControllerPublishVolumeRequest{ - VolumeId: "id", + req := &csi.CreateSnapshotRequest{ + SourceVolumeId: "testId", } - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret } - _, err := c.ControllerPublishVolume(context.Background(), req) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no volume capability is provided", func() { + It("should fail when no source volume id is provided", func() { - req := &csi.ControllerPublishVolumeRequest{ - VolumeId: "id", - NodeId: "fakenode", + req := &csi.CreateSnapshotRequest{ + Name: "name", } - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret } - _, err := c.ControllerPublishVolume(context.Background(), req) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should return appropriate values (no optional values added)", func() { - - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } - - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + It("should not fail when requesting to create a snapshot with already existing name and 
same SourceVolumeId.", func() { - vol, err := c.CreateVolume(context.Background(), req) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - - // ControllerPublishVolume - By("calling controllerpublish on that volume") - - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - } - - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret - } + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - - By("cleaning up unpublishing the volume") - - unpubReq := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - } - - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + 
Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } - - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } - - _, err = c.DeleteVolume(context.Background(), delReq) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) -}) - -var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - n csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerUnpublishVolume not supported") - } - }) + It("should fail when requesting to create a snapshot with already existing name and different SourceVolumeId.", func() { - It("should fail when no volume id is provided", func() { + By("creating a volume") + volume, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-2")) + Expect(err).ToNot(HaveOccurred()) - req := &csi.ControllerUnpublishVolumeRequest{} + By("creating a snapshot with the created volume source id") + req1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), req1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - if secrets != nil { - 
req.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + volume2, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3")) + Expect(err).ToNot(HaveOccurred()) - _, err := c.ControllerUnpublishVolume(context.Background(), req) + By("creating a snapshot with the same name but different volume source id") + req2 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume2.GetVolume().GetVolumeId(), nil) + _, err = c.CreateSnapshot(context.Background(), req2) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) }) - It("should return appropriate values (no optional values added)", func() { + It("should not fail when creating snapshot with maximum-length name", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, + nameBytes := make([]byte, MaxNameLength) + for i := 0; i < 
MaxNameLength; i++ { + nameBytes[i] = 'a' } + name := string(nameBytes) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, name, volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - vol, err := c.CreateVolume(context.Background(), req) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + }) +}) - // ControllerPublishVolume - By("calling controllerpublish on that volume") +func MakeCreateVolumeReq(sc *SanityContext, name string) *csi.CreateVolumeRequest { + size1 := TestVolumeSize(sc) - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -891,45 +1644,55 @@ var _ = 
Describe("ControllerUnpublishVolume [Controller Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - } + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Parameters: sc.Config.TestVolumeParameters, + } - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret - } + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateVolumeSecret + } - conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) + return req +} - // ControllerUnpublishVolume - By("calling controllerunpublish on that volume") +func MakeCreateSnapshotReq(sc *SanityContext, name, sourceVolumeId string, parameters map[string]string) *csi.CreateSnapshotRequest { + req := &csi.CreateSnapshotRequest{ + Name: name, + SourceVolumeId: sourceVolumeId, + Parameters: parameters, + } - unpubReq := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - } + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + return req +} - conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) +func MakeDeleteSnapshotReq(sc *SanityContext, id string) *csi.DeleteSnapshotRequest { + delSnapReq := &csi.DeleteSnapshotRequest{ + SnapshotId: id, + } - By("cleaning up deleting the volume") + if sc.Secrets != nil { + delSnapReq.Secrets = sc.Secrets.DeleteSnapshotSecret + } - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + return delSnapReq +} - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } +func MakeDeleteVolumeReq(sc 
*SanityContext, id string) *csi.DeleteVolumeRequest { + delVolReq := &csi.DeleteVolumeRequest{ + VolumeId: id, + } - _, err = c.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) - }) -}) + if sc.Secrets != nil { + delVolReq.Secrets = sc.Secrets.DeleteVolumeSecret + } + + return delVolReq +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index cb5aad48a..c1a5eb7ef 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -17,91 +17,83 @@ limitations under the License. package sanity import ( + "context" "fmt" "regexp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = Describe("GetPluginCapabilities [Identity Service]", func() { +var _ = DescribeSanity("Identity Service", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) - It("should return appropriate capabilities", func() { - req := &csi.GetPluginCapabilitiesRequest{} - res, err := c.GetPluginCapabilities(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("checking successful response") - Expect(res.GetCapabilities()).NotTo(BeNil()) - for _, cap := range res.GetCapabilities() { - switch cap.GetService().GetType() { - case csi.PluginCapability_Service_CONTROLLER_SERVICE: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + Describe("GetPluginCapabilities", func() { + It("should return appropriate capabilities", func() { + req := &csi.GetPluginCapabilitiesRequest{} + res, err := 
c.GetPluginCapabilities(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("checking successful response") + Expect(res.GetCapabilities()).NotTo(BeNil()) + for _, cap := range res.GetCapabilities() { + switch cap.GetService().GetType() { + case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + } } - } - }) - -}) + }) -var _ = Describe("Probe [Identity Service]", func() { - var ( - c csi.IdentityClient - ) - - BeforeEach(func() { - c = csi.NewIdentityClient(conn) }) - It("should return appropriate information", func() { - req := &csi.ProbeRequest{} - res, err := c.Probe(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying return status") - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code() == codes.FailedPrecondition || - serverError.Code() == codes.OK).To(BeTrue()) - }) -}) - -var _ = Describe("GetPluginInfo [Identity Server]", func() { - var ( - c csi.IdentityClient - ) - - BeforeEach(func() { - c = csi.NewIdentityClient(conn) + Describe("Probe", func() { + It("should return appropriate information", func() { + req := &csi.ProbeRequest{} + res, err := c.Probe(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying return status") + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code() == codes.FailedPrecondition || + serverError.Code() == codes.OK).To(BeTrue()) + + if res.GetReady() != nil { + Expect(res.GetReady().GetValue() == true || + res.GetReady().GetValue() == false).To(BeTrue()) + } + }) }) - It("should return appropriate information", func() { - req := &csi.GetPluginInfoRequest{} - res, err := c.GetPluginInfo(context.Background(), req) - 
Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying name size and characters") - Expect(res.GetName()).ToNot(HaveLen(0)) - Expect(len(res.GetName())).To(BeNumerically("<=", 63)) - Expect(regexp. - MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). - MatchString(res.GetName())).To(BeTrue()) + Describe("GetPluginInfo", func() { + It("should return appropriate information", func() { + req := &csi.GetPluginInfoRequest{} + res, err := c.GetPluginInfo(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying name size and characters") + Expect(res.GetName()).ToNot(HaveLen(0)) + Expect(len(res.GetName())).To(BeNumerically("<=", 63)) + Expect(regexp. + MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). + MatchString(res.GetName())).To(BeTrue()) + }) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index d57621dec..9bd9194b0 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -17,13 +17,13 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -38,7 +38,6 @@ func isNodeCapabilitySupported(c csi.NodeClient, &csi.NodeGetCapabilitiesRequest{}) Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) for _, cap := range caps.GetCapabilities() { Expect(cap.GetRpc()).NotTo(BeNil()) @@ -49,521 +48,470 @@ func isNodeCapabilitySupported(c csi.NodeClient, return false } -var _ = Describe("NodeGetCapabilities [Node Server]", func() { - var ( - c csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should return appropriate capabilities", func() { - caps, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{}) - - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) +func isPluginCapabilitySupported(c csi.IdentityClient, + capType csi.PluginCapability_Service_Type, +) bool { - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + caps, err := c.GetPluginCapabilities( + context.Background(), + &csi.GetPluginCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) - switch cap.GetRpc().GetType() { - case csi.NodeServiceCapability_RPC_UNKNOWN: - case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) - } + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetService()).NotTo(BeNil()) + if cap.GetService().GetType() == capType { + return true } - }) -}) + } + return false +} -var _ = Describe("NodeGetId [Node Server]", func() { +var _ = DescribeSanity("Node Service", func(sc *SanityContext) { var ( - c csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should return appropriate values", func() { - nid, err := c.NodeGetId( - context.Background(), 
- &csi.NodeGetIdRequest{}) + cl *Cleanup + c csi.NodeClient + s csi.ControllerClient - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - }) -}) - -var _ = Describe("NodePublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient controllerPublishSupported bool nodeStageSupported bool ) BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) + c = csi.NewNodeClient(sc.Conn) + s = csi.NewControllerClient(sc.Conn) + controllerPublishSupported = isControllerCapabilitySupported( s, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) + err := createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } + cl = &Cleanup{ + Context: sc, + NodeClient: c, + ControllerClient: s, + ControllerPublishSupported: controllerPublishSupported, + NodeStageSupported: nodeStageSupported, + } }) - It("should fail when no volume id is provided", func() { - - req := &csi.NodePublishVolumeRequest{} - - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret - } + AfterEach(func() { + cl.DeleteVolumes() + }) - _, err := c.NodePublishVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + Describe("NodeGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { 
+ case csi.NodeServiceCapability_RPC_UNKNOWN: + case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + case csi.NodeServiceCapability_RPC_GET_VOLUME_STATS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } + } + }) }) - It("should fail when no target path is provided", func() { + Describe("NodeGetInfo", func() { + var ( + i csi.IdentityClient + accessibilityConstraintSupported bool + ) - req := &csi.NodePublishVolumeRequest{ - VolumeId: "id", - } + BeforeEach(func() { + i = csi.NewIdentityClient(sc.Conn) + accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS) + }) - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret - } + It("should return approproate values", func() { + ninfo, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) - _, err := c.NodePublishVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) + Expect(ninfo).NotTo(BeNil()) + Expect(ninfo.GetNodeId()).NotTo(BeEmpty()) + Expect(ninfo.GetMaxVolumesPerNode()).NotTo(BeNumerically("<", 0)) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + if accessibilityConstraintSupported { + Expect(ninfo.GetAccessibleTopology()).NotTo(BeNil()) + } + }) }) - It("should fail when no volume capability is provided", func() { - - req := &csi.NodePublishVolumeRequest{ - VolumeId: "id", - TargetPath: config.TargetPath, - } - - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret - } + Describe("NodePublishVolume", func() { + It("should fail when no volume id is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - _, err := c.NodePublishVolume(context.Background(), 
req) - Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + It("should fail when no target path is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) - }) -}) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) -var _ = Describe("NodeUnpublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - nodeStageSupported bool - ) + It("should fail when no volume capability is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + TargetPath: sc.Config.TargetPath, + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) - if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no volume id is provided", func() { + 
Describe("NodeUnpublishVolume", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - It("should fail when no target path is provided", func() { + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should fail when no target path is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) -}) -// TODO: Tests for NodeStageVolume/NodeUnstageVolume -func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controllerPublishSupported, nodeStageSupported bool) { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: 
&csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - } + Describe("NodeStageVolume", func() { + var ( + device string + ) - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret - } + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeStageVolume not supported") + } - vol, err := s.CreateVolume(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + device = "/dev/mock" + }) - By("getting a node id") - nid, err := c.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - var conpubvol *csi.ControllerPublishVolumeResponse - if controllerPublishSupported { - By("controller publishing volume") - - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + It("should fail when no volume id is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - } + ) + Expect(err).To(HaveOccurred()) - if secrets != nil { - pubReq.ControllerPublishSecrets = 
secrets.ControllerPublishVolumeSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - conpubvol, err = s.ControllerPublishVolume(context.Background(), pubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - } - // NodeStageVolume - if nodeStageSupported { - By("node staging volume") - nodeStageVolReq := &csi.NodeStageVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + It("should fail when no staging target path is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: "id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - StagingTargetPath: config.StagingPath, - } - if controllerPublishSupported { - nodeStageVolReq.PublishInfo = conpubvol.GetPublishInfo() - } - if secrets != nil { - nodeStageVolReq.NodeStageSecrets = secrets.NodeStageVolumeSecret - } - nodestagevol, err := c.NodeStageVolume( - context.Background(), nodeStageVolReq) - Expect(err).NotTo(HaveOccurred()) - Expect(nodestagevol).NotTo(BeNil()) - } - // NodePublishVolume - By("publishing the volume on a node") - nodepubvolRequest := &csi.NodePublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - TargetPath: config.TargetPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - 
AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - } - if nodeStageSupported { - nodepubvolRequest.StagingTargetPath = config.StagingPath - } - if controllerPublishSupported { - nodepubvolRequest.PublishInfo = conpubvol.GetPublishInfo() - } - if secrets != nil { - nodepubvolRequest.NodePublishSecrets = secrets.NodePublishVolumeSecret - } - nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) - Expect(err).NotTo(HaveOccurred()) - Expect(nodepubvol).NotTo(BeNil()) + ) + Expect(err).To(HaveOccurred()) - // NodeUnpublishVolume - By("cleaning up calling nodeunpublish") - nodeunpubvol, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - TargetPath: config.TargetPath, + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) - if nodeStageSupported { - By("cleaning up calling nodeunstage") - nodeunstagevol, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - StagingTargetPath: config.StagingPath, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunstagevol).NotTo(BeNil()) - } - - if controllerPublishSupported { - By("cleaning up calling controllerunpublishing") + It("should fail when no volume capability is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: "id", + StagingTargetPath: sc.Config.StagingPath, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - unpubReq := &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - } + serverError, ok := status.FromError(err) 
+ Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + }) - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret - } + Describe("NodeUnstageVolume", func() { + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeUnstageVolume not supported") + } + }) - controllerunpubvol, err := s.ControllerUnpublishVolume(context.Background(), unpubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(controllerunpubvol).NotTo(BeNil()) - } + It("should fail when no volume id is provided", func() { - By("cleaning up deleting the volume") + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + }) + Expect(err).To(HaveOccurred()) - delReq := &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret - } + It("should fail when no staging target path is provided", func() { - _, err = s.DeleteVolume(context.Background(), delReq) - Expect(err).NotTo(HaveOccurred()) -} + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) -var _ = Describe("NodeStageVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - nodeStageSupported bool - device string - ) - - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - device = "/dev/mock" - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) - if nodeStageSupported { - err := 
createMountTargetLocation(config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } else { - Skip("NodeStageVolume not supported") - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no volume id is provided", func() { + It("should work", func() { + name := uniqueString("sanity-node-full") - req := &csi.NodeStageVolumeRequest{ - StagingTargetPath: config.StagingPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + // Create Volume First + By("creating a single node writer volume") + vol, err := s.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, }, + Secrets: sc.Secrets.CreateVolumeSecret, }, - PublishInfo: map[string]string{ - "device": device, - }, - } - - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret - } - - _, err := c.NodeStageVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - It("should fail when no staging target path is provided", func() { + By("getting a node id") + nid, err := c.NodeGetInfo( + context.Background(), + 
&csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - req := &csi.NodeStageVolumeRequest{ - VolumeId: "id", - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, + var conpubvol *csi.ControllerPublishVolumeResponse + if controllerPublishSupported { + By("controller publishing volume") + + conpubvol, err = s.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + } + // NodeStageVolume + if nodeStageSupported { + By("node staging volume") + nodestagevol, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + StagingTargetPath: sc.Config.StagingPath, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - }, - PublishInfo: 
map[string]string{ - "device": device, - }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodestagevol).NotTo(BeNil()) } - - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret + // NodePublishVolume + By("publishing the volume on a node") + var stagingPath string + if nodeStageSupported { + stagingPath = sc.Config.StagingPath } - - _, err := c.NodeStageVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should fail when no volume capability is provided", func() { - - req := &csi.NodeStageVolumeRequest{ - VolumeId: "id", - StagingTargetPath: config.StagingPath, - PublishInfo: map[string]string{ - "device": device, + nodepubvol, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, + StagingTargetPath: stagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodePublishVolumeSecret, }, - } - - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret - } - - _, err := c.NodeStageVolume(context.Background(), req) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) - }) -}) + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodepubvol).NotTo(BeNil()) -var _ = 
Describe("NodeUnstageVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - nodeStageSupported bool - ) + // NodeUnpublishVolume + By("cleaning up calling nodeunpublish") + nodeunpubvol, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunpubvol).NotTo(BeNil()) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) + By("cleaning up calling nodeunstage") + nodeunstagevol, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + StagingTargetPath: sc.Config.StagingPath, + }, + ) Expect(err).NotTo(HaveOccurred()) - } else { - Skip("NodeUnstageVolume not supported") + Expect(nodeunstagevol).NotTo(BeNil()) } - }) - - It("should fail when no volume id is provided", func() { - - _, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - StagingTargetPath: config.StagingPath, - }) - Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + if controllerPublishSupported { + By("cleaning up calling controllerunpublishing") + + controllerunpubvol, err := s.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + 
Expect(err).NotTo(HaveOccurred()) + Expect(controllerunpubvol).NotTo(BeNil()) + } - It("should fail when no staging target path is provided", func() { + By("cleaning up deleting the volume") - _, err := c.NodeUnstageVolume( + _, err = s.DeleteVolume( context.Background(), - &csi.NodeUnstageVolumeRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index 58d63b702..e3c1684ed 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -17,10 +17,10 @@ limitations under the License. package sanity import ( + "crypto/rand" "fmt" "io/ioutil" "os" - "sync" "testing" "github.com/kubernetes-csi/csi-test/utils" @@ -40,58 +40,108 @@ type CSISecrets struct { ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` + CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` + DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` } -var ( - config *Config - conn *grpc.ClientConn - lock sync.Mutex - secrets *CSISecrets -) - -// Config provides the configuration for the sanity tests +// Config provides the configuration for the sanity tests. 
It +// needs to be initialized by the user of the sanity package. type Config struct { - TargetPath string - StagingPath string - Address string - SecretsFile string - TestVolumeSize int64 + TargetPath string + StagingPath string + Address string + SecretsFile string + + TestVolumeSize int64 + TestVolumeParametersFile string + TestVolumeParameters map[string]string +} + +// SanityContext holds the variables that each test can depend on. It +// gets initialized before each test block runs. +type SanityContext struct { + Config *Config + Conn *grpc.ClientConn + Secrets *CSISecrets + + connAddress string } -// Test will test the CSI driver at the specified address +// Test will test the CSI driver at the specified address by +// setting up a Ginkgo suite and running it. func Test(t *testing.T, reqConfig *Config) { - lock.Lock() - defer lock.Unlock() + path := reqConfig.TestVolumeParametersFile + if len(path) != 0 { + yamlFile, err := ioutil.ReadFile(path) + if err != nil { + panic(fmt.Sprintf("failed to read file %q: %v", path, err)) + } + err = yaml.Unmarshal(yamlFile, &reqConfig.TestVolumeParameters) + if err != nil { + panic(fmt.Sprintf("error unmarshaling yaml: %v", err)) + } + } - config = reqConfig + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) RegisterFailHandler(Fail) RunSpecs(t, "CSI Driver Test Suite") } -var _ = BeforeSuite(func() { +func GinkgoTest(reqConfig *Config) { + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) +} + +func (sc *SanityContext) setup() { var err error - if len(config.SecretsFile) > 0 { - secrets, err = loadSecrets(config.SecretsFile) + if len(sc.Config.SecretsFile) > 0 { + sc.Secrets, err = loadSecrets(sc.Config.SecretsFile) Expect(err).NotTo(HaveOccurred()) + } else { + sc.Secrets = &CSISecrets{} } - By("connecting to CSI driver") - conn, err = utils.Connect(config.Address) - Expect(err).NotTo(HaveOccurred()) + // It is possible that a test sets sc.Config.Address + // 
dynamically (and differently!) in a BeforeEach, so only + // reuse the connection if the address is still the same. + if sc.Conn == nil || sc.connAddress != sc.Config.Address { + By("connecting to CSI driver") + sc.Conn, err = utils.Connect(sc.Config.Address) + Expect(err).NotTo(HaveOccurred()) + sc.connAddress = sc.Config.Address + } else { + By(fmt.Sprintf("reusing connection to CSI driver at %s", sc.connAddress)) + } By("creating mount and staging directories") - err = createMountTargetLocation(config.TargetPath) + err = createMountTargetLocation(sc.Config.TargetPath) Expect(err).NotTo(HaveOccurred()) - if len(config.StagingPath) > 0 { - err = createMountTargetLocation(config.StagingPath) + if len(sc.Config.StagingPath) > 0 { + err = createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } -}) +} -var _ = AfterSuite(func() { - conn.Close() -}) +func (sc *SanityContext) teardown() { + // We intentionally do not close the connection to the CSI + // driver here because the large amount of connection attempts + // caused test failures + // (https://github.com/kubernetes-csi/csi-test/issues/101). We + // could fix this with retries + // (https://github.com/kubernetes-csi/csi-test/pull/97) but + // that requires more discussion, so instead we just connect + // once per process instead of once per test case. This was + // also said to be faster + // (https://github.com/kubernetes-csi/csi-test/pull/98). +} func createMountTargetLocation(targetPath string) error { fileInfo, err := os.Stat(targetPath) @@ -122,3 +172,23 @@ func loadSecrets(path string) (*CSISecrets, error) { return &creds, nil } + +var uniqueSuffix = "-" + pseudoUUID() + +// pseudoUUID returns a unique string generated from random +// bytes, empty string in case of error. +func pseudoUUID() string { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + // Shouldn't happen?! 
+ return "" + } + return fmt.Sprintf("%08X-%08X", b[0:4], b[4:8]) +} + +// uniqueString returns a unique string by appending a random +// number. In case of an error, just the prefix is returned, so it +// alone should already be fairly unique. +func uniqueString(prefix string) string { + return prefix + uniqueSuffix +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go new file mode 100644 index 000000000..47763b752 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + . "github.com/onsi/ginkgo" +) + +type test struct { + text string + body func(*SanityContext) +} + +var tests []test + +// DescribeSanity must be used instead of the usual Ginkgo Describe to +// register a test block. The difference is that the body function +// will be called multiple times with the right context (when +// setting up a Ginkgo suite or a testing.T test, with the right +// configuration). +func DescribeSanity(text string, body func(*SanityContext)) bool { + tests = append(tests, test{text, body}) + return true +} + +// registerTestsInGinkgo invokes the actual Gingko Describe +// for the tests registered earlier with DescribeSanity. 
+func registerTestsInGinkgo(sc *SanityContext) { + for _, test := range tests { + Describe(test.text, func() { + BeforeEach(func() { + sc.setup() + }) + + test.body(sc) + + AfterEach(func() { + sc.teardown() + }) + }) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go index d4e5dfc38..03b0f052c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go @@ -16,13 +16,16 @@ limitations under the License. package test import ( + "context" + "fmt" + "reflect" "testing" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - gomock "github.com/golang/mock/gomock" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" mock_driver "github.com/kubernetes-csi/csi-test/driver" mock_utils "github.com/kubernetes-csi/csi-test/utils" - "golang.org/x/net/context" ) func TestPluginInfoResponse(t *testing.T) { @@ -58,6 +61,24 @@ func TestPluginInfoResponse(t *testing.T) { } } +type pbMatcher struct { + x proto.Message +} + +func (p pbMatcher) Matches(x interface{}) bool { + y := x.(proto.Message) + return proto.Equal(p.x, y) +} + +func (p pbMatcher) String() string { + return fmt.Sprintf("pb equal to %v", p.x) +} + +func pbMatch(x interface{}) gomock.Matcher { + v := x.(proto.Message) + return &pbMatcher{v} +} + func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup mock @@ -79,7 +100,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup expectation // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value - driver.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, nil).Times(1) + driver.EXPECT().GetPluginInfo(gomock.Any(), pbMatch(in)).Return(out, nil).Times(1) // Create a new RPC server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ @@ -103,3 +124,65 @@ func 
TestGRPCGetPluginInfoReponse(t *testing.T) { t.Errorf("Unknown name: %s\n", name) } } + +func TestGRPCAttach(t *testing.T) { + + // Setup mock + m := gomock.NewController(&mock_utils.SafeGoroutineTester{}) + defer m.Finish() + driver := mock_driver.NewMockControllerServer(m) + + // Setup input + defaultVolumeID := "myname" + defaultNodeID := "MyNodeID" + defaultCaps := &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + } + publishVolumeInfo := map[string]string{ + "first": "foo", + "second": "bar", + "third": "baz", + } + defaultRequest := &csi.ControllerPublishVolumeRequest{ + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + VolumeCapability: defaultCaps, + Readonly: false, + } + + // Setup mock outout + out := &csi.ControllerPublishVolumeResponse{ + PublishContext: publishVolumeInfo, + } + + // Setup expectation + // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value + driver.EXPECT().ControllerPublishVolume(gomock.Any(), pbMatch(defaultRequest)).Return(out, nil).Times(1) + + // Create a new RPC + server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ + Controller: driver, + }) + conn, err := server.Nexus() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer server.Close() + + // Make call + c := csi.NewControllerClient(conn) + r, err := c.ControllerPublishVolume(context.Background(), defaultRequest) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + + info := r.GetPublishContext() + if !reflect.DeepEqual(info, publishVolumeInfo) { + t.Errorf("Invalid publish info: %v", info) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go index 4b0122b6c..ae8c33675 100644 --- 
a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go @@ -21,7 +21,7 @@ import ( "sync" "testing" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/csi-test/utils" "google.golang.org/grpc" "google.golang.org/grpc/reflection" diff --git a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go index c89a5cf1d..3baf96723 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go +++ b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go @@ -29,7 +29,7 @@ type SafeGoroutineTester struct{} // Errorf prints the error to the screen then panics func (s *SafeGoroutineTester) Errorf(format string, args ...interface{}) { - fmt.Printf(format, args) + fmt.Printf(format, args...) panic("MOCK TEST ERROR") }