diff --git a/docs/README.md b/docs/README.md
index d17906883e..15508e1c44 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -128,12 +128,27 @@ To execute integration tests, run:
make test-integration
```
-To execute e2e tests, run:
+**Note**: An EC2 instance is required to run the integration tests, since they exercise the actual flow of creating an EBS volume, attaching it, and reading/writing on the disk.
+
+To execute e2e tests:
+
+Some tests, marked with `[env]`, require specific environment variables to be set; if they are not set, these tests will be skipped.
+
+```
+export AWS_AVAILABILITY_ZONES="us-west-2a,us-west-2b"
```
+
+Replace `us-west-2a,us-west-2b` with the AZ(s) where your Kubernetes worker nodes are located.
+
+These tests also rely on proper [AWS credentials](https://docs.aws.amazon.com/amazonswf/latest/awsrbflowguide/set-up-creds.html) being set, either via environment variables or a credentials file.
+
+Finally, run:
+```
+export KUBECONFIG=~/.kube/config
make test-e2e
```
-**Note**: EC2 instance is required to run integration test, since it is exercising the actual flow of creating EBS volume, attaching it and read/write on the disk.
+**Note**: By default, `make test-e2e` runs 32 tests concurrently; set `GINKGO_NODES` to change the parallelism.

### Build and Publish Container Image
diff --git a/go.mod b/go.mod
index 719bfc22af..870de9cf1c 100644
--- a/go.mod
+++ b/go.mod
@@ -71,9 +71,12 @@ require (
	go.uber.org/multierr v1.1.0 // indirect
	go.uber.org/zap v1.9.1 // indirect
	golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 // indirect
+	golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 // indirect
+	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
	golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a // indirect
	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect
-	google.golang.org/genproto v0.0.0-20180831171423-11092d34479b // indirect
+	google.golang.org/appengine v1.3.0 // indirect
+	google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898 // indirect
	google.golang.org/grpc v1.17.0
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
diff --git a/go.sum b/go.sum
index e8538a2fbf..dee7366b46 100644
--- a/go.sum
+++ b/go.sum
@@ -89,6 +89,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg=
github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -197,15 +198,23 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953 h1:LuZIitY8waaxUfNIdtajyE/YzA/zyf0YxXG27VpLrkg= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -218,9 +227,12 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898 h1:yvw+zsSmSM02Z5H3ZdEV7B7Ql7eFrjQTnmByJvK+3J8= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff 
--git a/hack/run-e2e-test b/hack/run-e2e-test
index f4f94cd886..6155f526e3 100755
--- a/hack/run-e2e-test
+++ b/hack/run-e2e-test
@@ -16,4 +16,6 @@
set -euo pipefail

-ginkgo -p -v --focus ebs-csi-e2e tests/e2e
+NODES=${GINKGO_NODES:-32}
+
+ginkgo -p -nodes=$NODES -v --focus ebs-csi-e2e tests/e2e
diff --git a/pkg/cloud/cloud.go b/pkg/cloud/cloud.go
index a70be96637..c291fa7840 100644
--- a/pkg/cloud/cloud.go
+++ b/pkg/cloud/cloud.go
@@ -129,6 +129,7 @@ type Cloud interface {
	DeleteDisk(ctx context.Context, volumeID string) (success bool, err error)
	AttachDisk(ctx context.Context, volumeID string, nodeID string) (devicePath string, err error)
	DetachDisk(ctx context.Context, volumeID string, nodeID string) (err error)
+	WaitForAttachmentState(ctx context.Context, volumeID, state string) error
	GetDiskByName(ctx context.Context, name string, capacityBytes int64) (disk *Disk, err error)
	GetDiskByID(ctx context.Context, volumeID string) (disk *Disk, err error)
	IsExistInstance(ctx context.Context, nodeID string) (success bool)
@@ -143,16 +144,30 @@ type cloud struct {
var _ Cloud = &cloud{}

// NewCloud returns a new instance of AWS cloud
+// It uses an auto-created EC2Metadata service to discover instance metadata
// It panics if session is invalid
func NewCloud() (Cloud, error) {
-	sess := session.Must(session.NewSession(&aws.Config{}))
-	svc := ec2metadata.New(sess)
+	svc := newEC2MetadataSvc()
+
+	var err error
	metadata, err := NewMetadataService(svc)
	if err != nil {
		return nil, fmt.Errorf("could not get metadata from AWS: %v", err)
	}
+	return newEC2Cloud(metadata, svc)
+}
+
+func NewCloudWithMetadata(metadata MetadataService) (Cloud, error) {
+	return newEC2Cloud(metadata, newEC2MetadataSvc())
+}
+
+func newEC2MetadataSvc() *ec2metadata.EC2Metadata {
+	sess := session.Must(session.NewSession(&aws.Config{}))
+	return ec2metadata.New(sess)
+}
+
+func newEC2Cloud(metadata MetadataService, svc *ec2metadata.EC2Metadata) (Cloud, error) {
	provider := []credentials.Provider{
		&credentials.EnvProvider{},
		&ec2rolecreds.EC2RoleProvider{Client: svc},
@@ -297,7 +312,7 @@
	}

	// This is the only situation where we taint the device
-	if err := c.waitForAttachmentState(ctx, volumeID, "attached"); err != nil {
+	if err := c.WaitForAttachmentState(ctx, volumeID, "attached"); err != nil {
		device.Taint()
		return "", err
	}
@@ -336,13 +351,58 @@ func (c *cloud) DetachDisk(ctx context.Context, volumeID, nodeID string) error {
		return fmt.Errorf("could not detach volume %q from node %q: %v", volumeID, nodeID, err)
	}

-	if err := c.waitForAttachmentState(ctx, volumeID, "detached"); err != nil {
+	if err := c.WaitForAttachmentState(ctx, volumeID, "detached"); err != nil {
		return err
	}

	return nil
}
+
+// WaitForAttachmentState polls until the attachment status is the expected value.
+func (c *cloud) WaitForAttachmentState(ctx context.Context, volumeID, state string) error {
+	// Most attach/detach operations on AWS finish within 1-4 seconds.
+	// By using 1 second starting interval with a backoff of 1.8,
+	// we get [1, 1.8, 3.24, 5.832, 10.4976, ...].
+	// In total we wait for up to 2601 seconds.
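+	// wait.ExponentialBackoff returns wait.ErrWaitTimeout if the expected
+	// state has still not been reached after the final step.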
+ backoff := wait.Backoff{ + Duration: 1 * time.Second, + Factor: 1.8, + Steps: 13, + } + + verifyVolumeFunc := func() (bool, error) { + request := &ec2.DescribeVolumesInput{ + VolumeIds: []*string{ + aws.String(volumeID), + }, + } + + volume, err := c.getVolume(ctx, request) + if err != nil { + return false, err + } + + if len(volume.Attachments) == 0 { + if state == "detached" { + return true, nil + } + } + + for _, a := range volume.Attachments { + if a.State == nil { + klog.Warningf("Ignoring nil attachment state for volume %q: %v", volumeID, a) + continue + } + if *a.State == state { + return true, nil + } + } + return false, nil + } + + return wait.ExponentialBackoff(backoff, verifyVolumeFunc) +} + func (c *cloud) GetDiskByName(ctx context.Context, name string, capacityBytes int64) (*Disk, error) { request := &ec2.DescribeVolumesInput{ Filters: []*ec2.Filter{ @@ -456,51 +516,6 @@ func (c *cloud) getInstance(ctx context.Context, nodeID string) (*ec2.Instance, return instances[0], nil } -// waitForAttachmentStatus polls until the attachment status is the expected value. -func (c *cloud) waitForAttachmentState(ctx context.Context, volumeID, state string) error { - // Most attach/detach operations on AWS finish within 1-4 seconds. - // By using 1 second starting interval with a backoff of 1.8, - // we get [1, 1.8, 3.24, 5.832000000000001, 10.4976]. - // In total we wait for 2601 seconds. - backoff := wait.Backoff{ - Duration: 1 * time.Second, - Factor: 1.8, - Steps: 13, - } - - verifyVolumeFunc := func() (bool, error) { - request := &ec2.DescribeVolumesInput{ - VolumeIds: []*string{ - aws.String(volumeID), - }, - } - - volume, err := c.getVolume(ctx, request) - if err != nil { - return false, err - } - - if len(volume.Attachments) == 0 { - if state == "detached" { - return true, nil - } - } - - for _, a := range volume.Attachments { - if a.State == nil { - klog.Warningf("Ignoring nil attachment state for volume %q: %v", volumeID, a) - continue - } - if *a.State == state { - return true, nil - } - } - return false, nil - } - - return wait.ExponentialBackoff(backoff, verifyVolumeFunc) -} - // waitForVolume waits for volume to be in the "available" state. // On a random AWS account (shared among several developers) it took 4s on average. 
func (c *cloud) waitForVolume(ctx context.Context, volumeID string) error { diff --git a/pkg/cloud/fakes.go b/pkg/cloud/fakes.go index 57174ce8fe..8bc085fe9b 100644 --- a/pkg/cloud/fakes.go +++ b/pkg/cloud/fakes.go @@ -83,6 +83,10 @@ func (c *FakeCloudProvider) DetachDisk(ctx context.Context, volumeID, nodeID str return nil } +func (c *FakeCloudProvider) WaitForAttachmentState(ctx context.Context, volumeID, state string) error { + return nil +} + func (c *FakeCloudProvider) GetDiskByName(ctx context.Context, name string, capacityBytes int64) (*Disk, error) { var disks []*fakeDisk for _, d := range c.disks { diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 299533bb30..634879ffc5 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -308,13 +308,13 @@ func pickAvailabilityZone(requirement *csi.TopologyRequirement) string { return "" } for _, topology := range requirement.GetPreferred() { - zone, exists := topology.GetSegments()[topologyKey] + zone, exists := topology.GetSegments()[TopologyKey] if exists { return zone } } for _, topology := range requirement.GetRequisite() { - zone, exists := topology.GetSegments()[topologyKey] + zone, exists := topology.GetSegments()[TopologyKey] if exists { return zone } @@ -332,7 +332,7 @@ func newCreateVolumeResponse(disk *cloud.Disk) *csi.CreateVolumeResponse { }, AccessibleTopology: []*csi.Topology{ { - Segments: map[string]string{topologyKey: disk.AvailabilityZone}, + Segments: map[string]string{TopologyKey: disk.AvailabilityZone}, }, }, }, diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index f9f86fe26f..7317aacc2a 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -233,7 +233,7 @@ func TestCreateVolume(t *testing.T) { AccessibilityRequirements: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { - Segments: map[string]string{topologyKey: expZone}, + Segments: map[string]string{TopologyKey: expZone}, }, }, }, @@ -248,7 +248,7 @@ func TestCreateVolume(t *testing.T) { AccessibilityRequirements: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { - Segments: map[string]string{topologyKey: expZone}, + Segments: map[string]string{TopologyKey: expZone}, }, }, }, @@ -259,7 +259,7 @@ func TestCreateVolume(t *testing.T) { VolumeContext: map[string]string{"fsType": expFsType}, AccessibleTopology: []*csi.Topology{ { - Segments: map[string]string{topologyKey: expZone}, + Segments: map[string]string{TopologyKey: expZone}, }, }, }, @@ -382,12 +382,12 @@ func TestPickAvailabilityZone(t *testing.T) { requirement: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { - Segments: map[string]string{topologyKey: expZone}, + Segments: map[string]string{TopologyKey: expZone}, }, }, Preferred: []*csi.Topology{ { - Segments: map[string]string{topologyKey: expZone}, + Segments: map[string]string{TopologyKey: expZone}, }, }, }, @@ -398,7 +398,7 @@ func TestPickAvailabilityZone(t *testing.T) { requirement: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { - Segments: map[string]string{topologyKey: expZone}, + Segments: map[string]string{TopologyKey: expZone}, }, }, }, diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go index 61e49e7643..e865e614ab 100644 --- a/pkg/driver/driver.go +++ b/pkg/driver/driver.go @@ -31,7 +31,7 @@ import ( const ( DriverName = "ebs.csi.aws.com" - topologyKey = "topology." + DriverName + "/zone" + TopologyKey = "topology." 
+ DriverName + "/zone"
)

type Driver struct {
diff --git a/pkg/driver/node.go b/pkg/driver/node.go
index b1efbfd895..dbcfbbf901 100644
--- a/pkg/driver/node.go
+++ b/pkg/driver/node.go
@@ -265,7 +265,7 @@ func (d *Driver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (
	m := d.cloud.GetMetadata()
	topology := &csi.Topology{
-		Segments: map[string]string{topologyKey: m.GetAvailabilityZone()},
+		Segments: map[string]string{TopologyKey: m.GetAvailabilityZone()},
	}

	return &csi.NodeGetInfoResponse{
diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go
index 8c48245a94..9556513cc0 100644
--- a/pkg/driver/node_test.go
+++ b/pkg/driver/node_test.go
@@ -603,7 +603,7 @@ func TestNodeGetInfo(t *testing.T) {
	expResp := &csi.NodeGetInfoResponse{
		NodeId: "instanceID",
		AccessibleTopology: &csi.Topology{
-			Segments: map[string]string{topologyKey: m.GetAvailabilityZone()},
+			Segments: map[string]string{TopologyKey: m.GetAvailabilityZone()},
		},
	}
diff --git a/tests/e2e/driver/driver.go b/tests/e2e/driver/driver.go
index cf7711d5dd..aa5ea4576b 100644
--- a/tests/e2e/driver/driver.go
+++ b/tests/e2e/driver/driver.go
@@ -20,18 +20,31 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

+type PVTestDriver interface {
+	DynamicPVTestDriver
+	PreProvisionedVolumeTestDriver
+}
+
// DynamicPVTestDriver represents an interface for a CSI driver that supports DynamicPV
type DynamicPVTestDriver interface {
-	// GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume.
-	GetDynamicProvisionStorageClass(parameters map[string]string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, namespace string) *storagev1.StorageClass
+	// GetDynamicProvisionStorageClass returns a StorageClass for dynamically provisioning a Persistent Volume
+	GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass
+}
+
+// PreProvisionedVolumeTestDriver represents an interface for a CSI driver that supports pre-provisioned volumes
+type PreProvisionedVolumeTestDriver interface {
+	// GetPersistentVolume returns a PersistentVolume with a pre-provisioned volumeHandle
+	GetPersistentVolume(volumeID string, fsType string, size string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, namespace string) *v1.PersistentVolume
}

func getStorageClass(
	generateName string,
	provisioner string,
	parameters map[string]string,
+	mountOptions []string,
	reclaimPolicy *v1.PersistentVolumeReclaimPolicy,
	bindingMode *storagev1.VolumeBindingMode,
+	allowedTopologies []v1.TopologySelectorTerm,
) *storagev1.StorageClass {
	if reclaimPolicy == nil {
		defaultReclaimPolicy := v1.PersistentVolumeReclaimDelete
@@ -47,7 +60,9 @@ func getStorageClass(
	},
	Provisioner: provisioner,
	Parameters: parameters,
+	MountOptions: mountOptions,
	ReclaimPolicy: reclaimPolicy,
	VolumeBindingMode: bindingMode,
+	AllowedTopologies: allowedTopologies,
	}
}
diff --git a/tests/e2e/driver/ebs_csi_driver.go b/tests/e2e/driver/ebs_csi_driver.go
index e90e5ada77..340100889c 100644
--- a/tests/e2e/driver/ebs_csi_driver.go
+++ b/tests/e2e/driver/ebs_csi_driver.go
@@ -19,6 +19,12 @@ import (
	ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver"
	"k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	True = "true"
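+	// StorageClass parameters are passed as plain strings, so True is the
+	// value used for boolean parameters such as "encrypted"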
)

// Implement DynamicPVTestDriver interface
@@ -27,21 +33,67 @@ type ebsCSIDriver struct {
	driverName string
}

-// InitEbsCSIDriver returns ebsCSIDriver that implements DynamicPVTestDriver interface
-func InitEbsCSIDriver() DynamicPVTestDriver {
+// InitEbsCSIDriver returns ebsCSIDriver that implements PVTestDriver interface
+func InitEbsCSIDriver() PVTestDriver {
	return &ebsCSIDriver{
		driverName: ebscsidriver.DriverName,
	}
}

-func (d *ebsCSIDriver) GetDynamicProvisionStorageClass(parameters map[string]string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, namespace string) *storagev1.StorageClass {
+func (d *ebsCSIDriver) GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass {
	provisioner := d.driverName
-	generatedName := fmt.Sprintf("%s-%s-sc-", namespace, provisioner)
+	generatedName := fmt.Sprintf("%s-%s-dynamic-sc-", namespace, provisioner)
+	allowedTopologies := []v1.TopologySelectorTerm{}
+	if len(allowedTopologyValues) > 0 {
+		allowedTopologies = []v1.TopologySelectorTerm{
+			{
+				MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
+					{
+						Key: ebscsidriver.TopologyKey,
+						Values: allowedTopologyValues,
+					},
+				},
+			},
+		}
+	}
+	return getStorageClass(generatedName, provisioner, parameters, mountOptions, reclaimPolicy, bindingMode, allowedTopologies)
+}

-	return getStorageClass(generatedName, provisioner, parameters, reclaimPolicy, bindingMode)
+func (d *ebsCSIDriver) GetPersistentVolume(volumeID string, fsType string, size string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, namespace string) *v1.PersistentVolume {
+	provisioner := d.driverName
+	generateName := fmt.Sprintf("%s-%s-preprovisioned-pv-", namespace, provisioner)
+	// Default to Retain ReclaimPolicy for pre-provisioned volumes
+	pvReclaimPolicy := v1.PersistentVolumeReclaimRetain
+	if reclaimPolicy != nil {
+		pvReclaimPolicy = *reclaimPolicy
+	}
+	return &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: generateName,
+			Namespace: namespace,
+			// TODO remove if https://github.com/kubernetes-csi/external-provisioner/issues/202 is fixed
+			Annotations: map[string]string{
+				"pv.kubernetes.io/provisioned-by": provisioner,
+			},
+		},
+		Spec: v1.PersistentVolumeSpec{
+			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+			Capacity: v1.ResourceList{
+				v1.ResourceName(v1.ResourceStorage): resource.MustParse(size),
+			},
+			PersistentVolumeReclaimPolicy: pvReclaimPolicy,
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				CSI: &v1.CSIPersistentVolumeSource{
+					Driver: provisioner,
+					VolumeHandle: volumeID,
+					FSType: fsType,
+				},
+			},
+		},
+	}
}

-// GetParameters returns the parameters specific for this driver
-func GetParameters(volumeType string, fsType string) map[string]string {
+// GetParameters returns the parameters specific to this driver
+func GetParameters(volumeType string, fsType string, encrypted bool) map[string]string {
	parameters := map[string]string{
		"type": volumeType,
		"fsType": fsType,
	}
	if iops := IOPSPerGBForVolumeType(volumeType); iops != "" {
		parameters["iopsPerGB"] = iops
	}
+	if encrypted {
+		parameters["encrypted"] = True
+	}
	return parameters
}
diff --git a/tests/e2e/dynamic_provisioning.go b/tests/e2e/dynamic_provisioning.go
index 6a6fe5c531..995567361c 100644
--- a/tests/e2e/dynamic_provisioning.go
+++ b/tests/e2e/dynamic_provisioning.go
@@ -18,13 +18,17 @@ import (
	"fmt"

	. 
"github.com/onsi/ginkgo" "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "math/rand" + "os" + "strings" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" - "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" + awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" ) @@ -43,56 +47,107 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() { ebsDriver = driver.InitEbsCSIDriver() }) - for _, t := range cloud.ValidVolumeTypes { + for _, t := range awscloud.ValidVolumeTypes { for _, fs := range ebscsidriver.ValidFSTypes { volumeType := t fsType := fs - Context(fmt.Sprintf("with volumeType [%q] and fsType [%q]", volumeType, fsType), func() { - It("should create a volume on demand", func() { - pods := []testsuites.PodDetails{ - { - Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", - Volumes: []testsuites.VolumeDetails{ - { - VolumeType: volumeType, - FSType: fsType, - ClaimSize: driver.MinimumSizeForVolumeType(volumeType), - VolumeMount: testsuites.VolumeMountDetails{ - NameGenerate: "test-volume-", - MountPathGenerate: "/mnt/test-", - }, + It(fmt.Sprintf("should create a volume on demand with volumeType %q and fsType %q", volumeType, fsType), func() { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: volumeType, + FSType: fsType, + ClaimSize: driver.MinimumSizeForVolumeType(volumeType), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", }, }, }, - } - test := testsuites.PodDynamicVolumeWriterReaderTest{ - CSIDriver: ebsDriver, - Pods: pods, - } - test.Run(cs, ns) - }) + }, + } + test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) }) } } + for _, t := range awscloud.ValidVolumeTypes { + volumeType := t + It(fmt.Sprintf("should create a volume on demand with volumeType %q and encryption", volumeType), func() { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: volumeType, + FSType: ebscsidriver.FSTypeExt4, + Encrypted: true, + ClaimSize: driver.MinimumSizeForVolumeType(volumeType), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) + }) + } + + It("should create a volume on demand with provided mountOptions", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt4, + MountOptions: []string{"rw"}, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{ + CSIDriver: ebsDriver, 
+ Pods: pods, + } + test.Run(cs, ns) + }) + It("should create multiple PV objects, bind to PVCs and attach all to a single pod", func() { pods := []testsuites.PodDetails{ { Cmd: "echo 'hello world' > /mnt/test-1/data && echo 'hello world' > /mnt/test-2/data && grep 'hello world' /mnt/test-1/data && grep 'hello world' /mnt/test-2/data", Volumes: []testsuites.VolumeDetails{ { - VolumeType: cloud.VolumeTypeGP2, + VolumeType: awscloud.VolumeTypeGP2, FSType: ebscsidriver.FSTypeExt3, - ClaimSize: driver.MinimumSizeForVolumeType(cloud.VolumeTypeGP2), + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", MountPathGenerate: "/mnt/test-", }, }, { - VolumeType: cloud.VolumeTypeIO1, + VolumeType: awscloud.VolumeTypeIO1, FSType: ebscsidriver.FSTypeExt4, - ClaimSize: driver.MinimumSizeForVolumeType(cloud.VolumeTypeIO1), + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeIO1), VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", MountPathGenerate: "/mnt/test-", @@ -101,7 +156,7 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() { }, }, } - test := testsuites.PodDynamicVolumeWriterReaderTest{ + test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{ CSIDriver: ebsDriver, Pods: pods, } @@ -114,9 +169,9 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() { Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", Volumes: []testsuites.VolumeDetails{ { - VolumeType: cloud.VolumeTypeGP2, + VolumeType: awscloud.VolumeTypeGP2, FSType: ebscsidriver.FSTypeExt3, - ClaimSize: driver.MinimumSizeForVolumeType(cloud.VolumeTypeGP2), + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", MountPathGenerate: "/mnt/test-", @@ -128,9 +183,220 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() { Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", Volumes: []testsuites.VolumeDetails{ { - VolumeType: cloud.VolumeTypeIO1, + VolumeType: awscloud.VolumeTypeIO1, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeIO1), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) + }) + + It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "while true; do echo $(date -u) >> /mnt/test-1/data; sleep 1; done", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt3, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + { + Cmd: "while true; do echo $(date -u) >> /mnt/test-1/data; sleep 1; done", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeIO1, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeIO1), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := 
testsuites.DynamicallyProvisionedCollocatedPodTest{ + CSIDriver: ebsDriver, + Pods: pods, + ColocatePods: true, + } + test.Run(cs, ns) + }) + + // Track issue https://github.com/kubernetes/kubernetes/issues/70505 + It("should create a volume on demand and mount it as readOnly in a pod", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "touch /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, FSType: ebscsidriver.FSTypeExt4, - ClaimSize: driver.MinimumSizeForVolumeType(cloud.VolumeTypeIO1), + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + ReadOnly: true, + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedReadOnlyVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) + }) + + It(fmt.Sprintf("should delete PV with reclaimPolicy %q", v1.PersistentVolumeReclaimDelete), func() { + reclaimPolicy := v1.PersistentVolumeReclaimDelete + volumes := []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + ReclaimPolicy: &reclaimPolicy, + }, + } + test := testsuites.DynamicallyProvisionedReclaimPolicyTest{ + CSIDriver: ebsDriver, + Volumes: volumes, + } + test.Run(cs, ns) + }) + + It(fmt.Sprintf("[env] should retain PV with reclaimPolicy %q", v1.PersistentVolumeReclaimRetain), func() { + if os.Getenv(awsAvailabilityZonesEnv) == "" { + Skip(fmt.Sprintf("env %q not set", awsAvailabilityZonesEnv)) + } + reclaimPolicy := v1.PersistentVolumeReclaimRetain + volumes := []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + ReclaimPolicy: &reclaimPolicy, + }, + } + availabilityZones := strings.Split(os.Getenv(awsAvailabilityZonesEnv), ",") + availabilityZone := availabilityZones[rand.Intn(len(availabilityZones))] + metadata := e2eMetdataService{availabilityZone: availabilityZone} + cloud, err := awscloud.NewCloudWithMetadata(metadata) + if err != nil { + Fail(fmt.Sprintf("could not get NewCloud: %v", err)) + } + + test := testsuites.DynamicallyProvisionedReclaimPolicyTest{ + CSIDriver: ebsDriver, + Volumes: volumes, + Cloud: cloud, + } + test.Run(cs, ns) + }) + + It("should create a deployment object, write and read to it, delete the pod and write and read to it again", func() { + pod := testsuites.PodDetails{ + Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 1; done", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt3, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedDeletePodTest{ + CSIDriver: ebsDriver, + Pod: pod, + PodCheck: &testsuites.PodExecCheck{ + Cmd: []string{"cat", "/mnt/test-1/data"}, + ExpectedString: "hello world\nhello world\n", // pod will be restarted so expect to see 2 instances of string + }, + } + test.Run(cs, ns) + }) +}) + +var _ = Describe("[ebs-csi-e2e] [multi-az] Dynamic Provisioning", func() { + f := framework.NewDefaultFramework("ebs") + + var ( + cs clientset.Interface + ns *v1.Namespace + ebsDriver driver.DynamicPVTestDriver + ) + + 
BeforeEach(func() { + cs = f.ClientSet + ns = f.Namespace + ebsDriver = driver.InitEbsCSIDriver() + }) + + It("should allow for topology aware volume scheduling", func() { + volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeBindingMode: &volumeBindingMode, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedTopologyAwareVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) + }) + + // Requires env AWS_AVAILABILITY_ZONES, a comma separated list of AZs + It("[env] should allow for topology aware volume with specified zone in allowedTopologies", func() { + if os.Getenv(awsAvailabilityZonesEnv) == "" { + Skip(fmt.Sprintf("env %q not set", awsAvailabilityZonesEnv)) + } + allowedTopologyZones := strings.Split(os.Getenv(awsAvailabilityZonesEnv), ",") + volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeBindingMode: &volumeBindingMode, + AllowedTopologyValues: allowedTopologyZones, VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", MountPathGenerate: "/mnt/test-", @@ -139,7 +405,7 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() { }, }, } - test := testsuites.PodDynamicVolumeWriterReaderTest{ + test := testsuites.DynamicallyProvisionedTopologyAwareVolumeTest{ CSIDriver: ebsDriver, Pods: pods, } diff --git a/tests/e2e/pre_provsioning.go b/tests/e2e/pre_provsioning.go new file mode 100644 index 0000000000..d840757100 --- /dev/null +++ b/tests/e2e/pre_provsioning.go @@ -0,0 +1,208 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" + . 
"github.com/onsi/ginkgo" + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "math/rand" + "os" + "strings" + + ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" +) + +const ( + defaultDiskSize = 4 + defaultVoluemType = awscloud.VolumeTypeGP2 + + awsAvailabilityZonesEnv = "AWS_AVAILABILITY_ZONES" + + dummyVolumeName = "pre-provisioned" +) + +var ( + defaultDiskSizeBytes int64 = defaultDiskSize * 1024 * 1024 * 1024 +) + +type e2eMetdataService struct { + availabilityZone string +} + +// GetInstanceID will always return an empty string as the test does not need to run on an EC2 machine +func (s e2eMetdataService) GetInstanceID() string { + return "" +} + +func (s e2eMetdataService) GetAvailabilityZone() string { + return s.availabilityZone +} + +// GetRegion will try to determine the Region from the specified AZ, specifically trims the last character +func (s e2eMetdataService) GetRegion() string { + return s.availabilityZone[0 : len(s.availabilityZone)-1] +} + +// Requires env AWS_AVAILABILITY_ZONES a comma separated list of AZs to be set +var _ = Describe("[ebs-csi-e2e] [single-az] Pre-Provisioned", func() { + f := framework.NewDefaultFramework("ebs") + + var ( + cs clientset.Interface + ns *v1.Namespace + ebsDriver driver.PreProvisionedVolumeTestDriver + cloud awscloud.Cloud + volumeID string + diskSize string + // Set to true if the volume should be deleted automatically after test + skipManuallyDeletingVolume bool + ) + + BeforeEach(func() { + cs = f.ClientSet + ns = f.Namespace + ebsDriver = driver.InitEbsCSIDriver() + + // setup EBS volume + if os.Getenv(awsAvailabilityZonesEnv) == "" { + Skip(fmt.Sprintf("env %q not set", awsAvailabilityZonesEnv)) + } + availabilityZones := strings.Split(os.Getenv(awsAvailabilityZonesEnv), ",") + availabilityZone := availabilityZones[rand.Intn(len(availabilityZones))] + diskOptions := &awscloud.DiskOptions{ + CapacityBytes: defaultDiskSizeBytes, + VolumeType: defaultVoluemType, + AvailabilityZone: availabilityZone, + Tags: map[string]string{awscloud.VolumeNameTagKey: dummyVolumeName}, + } + metadata := e2eMetdataService{availabilityZone: availabilityZone} + var err error + cloud, err = awscloud.NewCloudWithMetadata(metadata) + if err != nil { + Fail(fmt.Sprintf("could not get NewCloud: %v", err)) + } + disk, err := cloud.CreateDisk(context.Background(), "", diskOptions) + if err != nil { + Fail(fmt.Sprintf("could not provision a volume: %v", err)) + } + volumeID = disk.VolumeID + diskSize = fmt.Sprintf("%dGi", defaultDiskSize) + By(fmt.Sprintf("Successfully provisioned EBS volume: %q\n", volumeID)) + }) + + AfterEach(func() { + if !skipManuallyDeletingVolume { + err := cloud.WaitForAttachmentState(context.Background(), volumeID, "detached") + if err != nil { + Fail(fmt.Sprintf("could not detach volume %q: %v", volumeID, err)) + } + ok, err := cloud.DeleteDisk(context.Background(), volumeID) + if err != nil || !ok { + Fail(fmt.Sprintf("could not delete volume %q: %v", volumeID, err)) + } + } + }) + + It("[env] should write and read to a pre-provisioned volume", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeID: volumeID, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: diskSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := 
testsuites.PreProvisionedVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) + }) + + It("[env] should use a pre-provisioned volume and mount it as readOnly in a pod", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + VolumeID: volumeID, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: diskSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + ReadOnly: true, + }, + }, + }, + }, + } + test := testsuites.PreProvisionedReadOnlyVolumeTest{ + CSIDriver: ebsDriver, + Pods: pods, + } + test.Run(cs, ns) + }) + + It(fmt.Sprintf("[env] should use a pre-provisioned volume and retain PV with reclaimPolicy %q", v1.PersistentVolumeReclaimRetain), func() { + reclaimPolicy := v1.PersistentVolumeReclaimRetain + volumes := []testsuites.VolumeDetails{ + { + VolumeID: volumeID, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: diskSize, + ReclaimPolicy: &reclaimPolicy, + }, + } + test := testsuites.PreProvisionedReclaimPolicyTest{ + CSIDriver: ebsDriver, + Volumes: volumes, + } + test.Run(cs, ns) + }) + + It(fmt.Sprintf("[env] should use a pre-provisioned volume and delete PV with reclaimPolicy %q", v1.PersistentVolumeReclaimDelete), func() { + reclaimPolicy := v1.PersistentVolumeReclaimDelete + skipManuallyDeletingVolume = true + volumes := []testsuites.VolumeDetails{ + { + VolumeID: volumeID, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: diskSize, + ReclaimPolicy: &reclaimPolicy, + }, + } + test := testsuites.PreProvisionedReclaimPolicyTest{ + CSIDriver: ebsDriver, + Volumes: volumes, + } + test.Run(cs, ns) + }) +}) diff --git a/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go b/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go new file mode 100644 index 0000000000..2e1348c8ee --- /dev/null +++ b/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + + . 
"github.com/onsi/ginkgo" +) + +// DynamicallyProvisionedCollocatedPodTest will provision required StorageClass(es), PVC(s) and Pod(s) +// Waiting for the PV provisioner to create a new PV +// Testing if multiple Pod(s) can write simultaneously +type DynamicallyProvisionedCollocatedPodTest struct { + CSIDriver driver.DynamicPVTestDriver + Pods []PodDetails + ColocatePods bool +} + +func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface, namespace *v1.Namespace) { + nodeName := "" + podCleanup := make([]func(), 0) + for _, pod := range t.Pods { + tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver) + if t.ColocatePods && nodeName != "" { + tpod.SetNodeSelector(map[string]string{"name": nodeName}) + } + // defer must be called here for resources not get removed before using them + for i := range cleanup { + defer cleanup[i]() + } + + By("deploying the pod") + tpod.Create() + podCleanup = append(podCleanup, tpod.Cleanup) + By("checking that the pod is running") + tpod.WaitForRunning() + nodeName = tpod.pod.Spec.NodeName + } + // call Pod cleanup after all pods are up + for i := range podCleanup { + podCleanup[i]() + } +} diff --git a/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go new file mode 100644 index 0000000000..c15bfff532 --- /dev/null +++ b/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + . 
"github.com/onsi/ginkgo" + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" +) + +// DynamicallyProvisionedDeletePodTest will provision required StorageClass and Deployment +// Testing if the Pod can write and read to mounted volumes +// Deleting a pod, and again testing if the Pod can write and read to mounted volumes +type DynamicallyProvisionedDeletePodTest struct { + CSIDriver driver.DynamicPVTestDriver + Pod PodDetails + PodCheck *PodExecCheck +} + +type PodExecCheck struct { + Cmd []string + ExpectedString string +} + +func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, namespace *v1.Namespace) { + tDeployment, cleanup := t.Pod.SetupDeployment(client, namespace, t.CSIDriver) + // defer must be called here for resources not get removed before using them + for i := range cleanup { + defer cleanup[i]() + } + + By("deploying the deployment") + tDeployment.Create() + + By("checking that the pod is running") + tDeployment.WaitForPodReady() + + By("deleting the pod for deployment") + tDeployment.DeletePodAndWait() + + By("checking again that the pod is running") + tDeployment.WaitForPodReady() + + if t.PodCheck != nil { + By("checking pod exec") + tDeployment.Exec(t.PodCheck.Cmd, t.PodCheck.ExpectedString) + } +} diff --git a/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go new file mode 100644 index 0000000000..68d4803096 --- /dev/null +++ b/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "fmt" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + "k8s.io/kubernetes/test/e2e/framework" + + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +const expectedReadOnlyLog = "Read-only file system" + +// DynamicallyProvisionedReadOnlyVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) +// Waiting for the PV provisioner to create a new PV +// Testing that the Pod(s) cannot write to the volume when mounted +type DynamicallyProvisionedReadOnlyVolumeTest struct { + CSIDriver driver.DynamicPVTestDriver + Pods []PodDetails +} + +func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { + for _, pod := range t.Pods { + tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver) + // defer must be called here for resources not get removed before using them + for i := range cleanup { + defer cleanup[i]() + } + + By("deploying the pod") + tpod.Create() + defer tpod.Cleanup() + By("checking that the pods command exits with an error") + tpod.WaitForFailure() + By("checking that pod logs contain expected message") + body, err := tpod.Logs() + framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err)) + Expect(string(body)).To(ContainSubstring(expectedReadOnlyLog)) + } +} diff --git a/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go b/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go new file mode 100644 index 0000000000..e475d406ae --- /dev/null +++ b/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package testsuites
+
+import (
+	"github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud"
+	"github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver"
+
+	"k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+)
+
+// DynamicallyProvisionedReclaimPolicyTest will provision required PV(s) and PVC(s)
+// Testing the correct behavior for different reclaimPolicies
+type DynamicallyProvisionedReclaimPolicyTest struct {
+	CSIDriver driver.DynamicPVTestDriver
+	Volumes []VolumeDetails
+	Cloud cloud.Cloud
+}
+
+func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	for _, volume := range t.Volumes {
+		tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver)
+
+		// will delete the PVC
+		// will also wait for PV to be deleted when reclaimPolicy=Delete
+		tpvc.Cleanup()
+		// first check that the PV still exists, then manually delete it
+		if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain {
+			tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased)
+			tpvc.DeleteBoundPersistentVolume()
+			tpvc.DeleteBackingVolume(t.Cloud)
+		}
+	}
+}
diff --git a/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go
new file mode 100644
index 0000000000..19999036b4
--- /dev/null
+++ b/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"fmt"
+	"github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver"
+
+	"k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+
+	. 
"github.com/onsi/ginkgo" +) + +// DynamicallyProvisionedTopologyAwareVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) +// Waiting for the PV provisioner to create a new PV +// Testing if the Pod(s) can write and read to mounted volumes +// Validate PVs have expected PV nodeAffinity +type DynamicallyProvisionedTopologyAwareVolumeTest struct { + CSIDriver driver.DynamicPVTestDriver + Pods []PodDetails +} + +func (t *DynamicallyProvisionedTopologyAwareVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { + for _, pod := range t.Pods { + tpod := NewTestPod(client, namespace, pod.Cmd) + tpvcs := make([]*TestPersistentVolumeClaim, len(pod.Volumes)) + for n, v := range pod.Volumes { + var cleanup []func() + tpvcs[n], cleanup = v.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver) + for i := range cleanup { + defer cleanup[i]() + } + + tpod.SetupVolume(tpvcs[n].persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly) + } + + By("deploying the pod") + tpod.Create() + defer tpod.Cleanup() + By("checking that the pods command exits with no error") + tpod.WaitForSuccess() + By("validating provisioned PVs") + for n := range tpvcs { + tpvcs[n].WaitForBound() + tpvcs[n].ValidateProvisionedPersistentVolume() + } + } +} diff --git a/tests/e2e/testsuites/dynamically_provisioned_writer_reader_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_writer_reader_volume_tester.go new file mode 100644 index 0000000000..059b09db48 --- /dev/null +++ b/tests/e2e/testsuites/dynamically_provisioned_writer_reader_volume_tester.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + + . 
"github.com/onsi/ginkgo" +) + +// DynamicallyProvisionedWriterReaderVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) +// Waiting for the PV provisioner to create a new PV +// Testing if the Pod(s) can write and read to mounted volumes +type DynamicallyProvisionedWriterReaderVolumeTest struct { + CSIDriver driver.DynamicPVTestDriver + Pods []PodDetails +} + +func (t *DynamicallyProvisionedWriterReaderVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { + for _, pod := range t.Pods { + tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver) + // defer must be called here for resources not get removed before using them + for i := range cleanup { + defer cleanup[i]() + } + + By("deploying the pod") + tpod.Create() + defer tpod.Cleanup() + By("checking that the pods command exits with no error") + tpod.WaitForSuccess() + } +} diff --git a/tests/e2e/testsuites/pod_dynamic_volume_writer_reader.go b/tests/e2e/testsuites/pod_dynamic_volume_writer_reader.go deleted file mode 100644 index 56a06059f1..0000000000 --- a/tests/e2e/testsuites/pod_dynamic_volume_writer_reader.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testsuites - -import ( - "fmt" - "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - "k8s.io/api/core/v1" - clientset "k8s.io/client-go/kubernetes" - - . 
"github.com/onsi/ginkgo" -) - -// PodDynamicVolumeWriterReaderTest will provision required StorageClass(es), PVC(s) and Pod(s) -// Waiting for the PV provisioner to create a new PV -// Testing if the Pod(s) can write and read to mounted volumes -type PodDynamicVolumeWriterReaderTest struct { - CSIDriver driver.DynamicPVTestDriver - Pods []PodDetails -} - -type PodDetails struct { - Cmd string - Volumes []VolumeDetails -} - -type VolumeDetails struct { - VolumeType string - FSType string - ClaimSize string - VolumeMount VolumeMountDetails -} - -type VolumeMountDetails struct { - NameGenerate string - MountPathGenerate string -} - -func (t *PodDynamicVolumeWriterReaderTest) Run(client clientset.Interface, namespace *v1.Namespace) { - for _, pod := range t.Pods { - tpod := NewTestPod(client, namespace, pod.Cmd) - for n, v := range pod.Volumes { - By("setting up the StorageClass") - storageClass := t.CSIDriver.GetDynamicProvisionStorageClass(driver.GetParameters(v.VolumeType, v.FSType), nil, nil, namespace.Name) - tsc := NewTestStorageClass(client, namespace, storageClass) - createdStorageClass := tsc.Create() - defer tsc.Cleanup() - - By("setting up the PVC and PV") - tpvc := NewTestPersistentVolumeClaim(client, namespace, v.ClaimSize, &createdStorageClass) - createdPVC := tpvc.Create() - defer tpvc.Cleanup() - tpvc.ValidateProvisionedPersistentVolume() - - tpod.SetupVolume(&createdPVC, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1)) - } - - By("deploying the pod and checking that it's command exits with no error") - tpod.Create() - defer tpod.Cleanup() - } -} diff --git a/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go b/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go new file mode 100644 index 0000000000..b23bb26ff9 --- /dev/null +++ b/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "fmt" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +// PreProvisionedReadOnlyVolumeTest will provision required PV(s), PVC(s) and Pod(s) +// Testing that the Pod(s) cannot write to the volume when mounted +type PreProvisionedReadOnlyVolumeTest struct { + CSIDriver driver.PreProvisionedVolumeTestDriver + Pods []PodDetails +} + +func (t *PreProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { + for _, pod := range t.Pods { + tpod, cleanup := pod.SetupWithPreProvisionedVolumes(client, namespace, t.CSIDriver) + // defer must be called here for resources not get removed before using them + for i := range cleanup { + defer cleanup[i]() + } + + By("deploying the pod") + tpod.Create() + defer tpod.Cleanup() + By("checking that the pods command exits with an error") + tpod.WaitForFailure() + By("checking that pod logs contain expected message") + body, err := tpod.Logs() + framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err)) + Expect(string(body)).To(ContainSubstring(expectedReadOnlyLog)) + } +} diff --git a/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go b/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go new file mode 100644 index 0000000000..8fd7793a19 --- /dev/null +++ b/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" +) + +// PreProvisionedReclaimPolicyTest will provision required PV(s) and PVC(s) +// Testing the correct behavior for different reclaimPolicies +type PreProvisionedReclaimPolicyTest struct { + CSIDriver driver.PreProvisionedVolumeTestDriver + Volumes []VolumeDetails +} + +func (t *PreProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) { + for _, volume := range t.Volumes { + tpvc, _ := volume.SetupPreProvisionedPersistentVolumeClaim(client, namespace, t.CSIDriver) + + // will delete the PVC + // will also wait for PV to be deleted when reclaimPolicy=Delete + tpvc.Cleanup() + // first check PV stills exists, then manually delete it + if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain { + tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased) + tpvc.DeleteBoundPersistentVolume() + } + } +} diff --git a/tests/e2e/testsuites/pre_provisioned_volume_tester.go b/tests/e2e/testsuites/pre_provisioned_volume_tester.go new file mode 100644 index 0000000000..d7e485af85 --- /dev/null +++ b/tests/e2e/testsuites/pre_provisioned_volume_tester.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver"
+	"k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+
+	. "github.com/onsi/ginkgo"
+)
+
+// PreProvisionedVolumeTest will provision required PV(s), PVC(s) and Pod(s)
+// and test that the Pod(s) can write to and read from the mounted volumes.
+type PreProvisionedVolumeTest struct {
+	CSIDriver driver.PreProvisionedVolumeTestDriver
+	Pods      []PodDetails
+}
+
+func (t *PreProvisionedVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	for _, pod := range t.Pods {
+		tpod, cleanup := pod.SetupWithPreProvisionedVolumes(client, namespace, t.CSIDriver)
+		// defer the cleanup functions here so the resources are not removed before they are used
+		for i := range cleanup {
+			defer cleanup[i]()
+		}
+
+		By("deploying the pod")
+		tpod.Create()
+		defer tpod.Cleanup()
+		By("checking that the pod's command exits with no error")
+		tpod.WaitForSuccess()
+	}
+}
diff --git a/tests/e2e/testsuites/specs.go b/tests/e2e/testsuites/specs.go
new file mode 100644
index 0000000000..8bd427d1df
--- /dev/null
+++ b/tests/e2e/testsuites/specs.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"fmt"
+	"github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver"
+
+	"k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	clientset "k8s.io/client-go/kubernetes"
+
+	. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo" +) + +type PodDetails struct { + Cmd string + Volumes []VolumeDetails +} + +type VolumeDetails struct { + VolumeType string + FSType string + Encrypted bool + MountOptions []string + ClaimSize string + ReclaimPolicy *v1.PersistentVolumeReclaimPolicy + VolumeBindingMode *storagev1.VolumeBindingMode + AllowedTopologyValues []string + VolumeMount VolumeMountDetails + // Optional, used with pre-provisioned volumes + VolumeID string +} + +type VolumeMountDetails struct { + NameGenerate string + MountPathGenerate string + ReadOnly bool +} + +func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestPod, []func()) { + tpod := NewTestPod(client, namespace, pod.Cmd) + cleanupFuncs := make([]func(), 0) + for n, v := range pod.Volumes { + var tpvc *TestPersistentVolumeClaim + tpvc, cleanupFuncs = v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver) + + tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly) + } + return tpod, cleanupFuncs +} + +func (pod *PodDetails) SetupWithPreProvisionedVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver) (*TestPod, []func()) { + tpod := NewTestPod(client, namespace, pod.Cmd) + cleanupFuncs := make([]func(), 0) + for n, v := range pod.Volumes { + var tpvc *TestPersistentVolumeClaim + tpvc, cleanupFuncs = v.SetupPreProvisionedPersistentVolumeClaim(client, namespace, csiDriver) + + tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly) + } + return tpod, cleanupFuncs +} + +func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestDeployment, []func()) { + cleanupFuncs := make([]func(), 0) + volume := pod.Volumes[0] + By("setting up the StorageClass") + storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(volume.VolumeType, volume.FSType, volume.Encrypted), volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) + tsc := NewTestStorageClass(client, namespace, storageClass) + createdStorageClass := tsc.Create() + cleanupFuncs = append(cleanupFuncs, tsc.Cleanup) + By("setting up the PVC") + tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, &createdStorageClass) + tpvc.Create() + tpvc.WaitForBound() + tpvc.ValidateProvisionedPersistentVolume() + cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup) + By("setting up the Deployment") + tDeployment := NewTestDeployment(client, namespace, pod.Cmd, tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", volume.VolumeMount.NameGenerate, 1), fmt.Sprintf("%s%d", volume.VolumeMount.MountPathGenerate, 1), volume.VolumeMount.ReadOnly) + + cleanupFuncs = append(cleanupFuncs, tDeployment.Cleanup) + return tDeployment, cleanupFuncs +} + +func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestPersistentVolumeClaim, []func()) { + cleanupFuncs := make([]func(), 0) + By("setting up the StorageClass") + storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(volume.VolumeType, volume.FSType, volume.Encrypted), 
+	tsc := NewTestStorageClass(client, namespace, storageClass)
+	createdStorageClass := tsc.Create()
+	cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
+	By("setting up the PVC and PV")
+	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, &createdStorageClass)
+	tpvc.Create()
+	cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
+	// with volumeBindingMode: WaitForFirstConsumer, the PV will not be ready until the PVC is used in a pod
+	if volume.VolumeBindingMode == nil || *volume.VolumeBindingMode == storagev1.VolumeBindingImmediate {
+		tpvc.WaitForBound()
+		tpvc.ValidateProvisionedPersistentVolume()
+	}
+
+	return tpvc, cleanupFuncs
+}
+
+func (volume *VolumeDetails) SetupPreProvisionedPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.PreProvisionedVolumeTestDriver) (*TestPersistentVolumeClaim, []func()) {
+	cleanupFuncs := make([]func(), 0)
+	By("setting up the PV")
+	pv := csiDriver.GetPersistentVolume(volume.VolumeID, volume.FSType, volume.ClaimSize, volume.ReclaimPolicy, namespace.Name)
+	tpv := NewTestPreProvisionedPersistentVolume(client, pv)
+	tpv.Create()
+	By("setting up the PVC")
+	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, nil)
+	tpvc.Create()
+	cleanupFuncs = append(cleanupFuncs, tpvc.DeleteBoundPersistentVolume)
+	cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
+	tpvc.WaitForBound()
+	tpvc.ValidateProvisionedPersistentVolume()
+
+	return tpvc, cleanupFuncs
+}
diff --git a/tests/e2e/testsuites/testsuites.go b/tests/e2e/testsuites/testsuites.go
index b4b2094c71..4236323dc3 100644
--- a/tests/e2e/testsuites/testsuites.go
+++ b/tests/e2e/testsuites/testsuites.go
@@ -15,13 +15,18 @@ limitations under the License.
 package testsuites
 
 import (
+	"context"
 	"fmt"
+	"math/rand"
 	"time"
 
+	awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -29,6 +34,14 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
+const (
+	execTimeout = 10 * time.Second
+	// Some pods can take much longer to get ready due to volume attach/detach latency.
+	slowPodStartTimeout = 15 * time.Minute
+	// Description that will be printed during tests
+	failedConditionDescription = "Error status code"
+)
+
 type TestStorageClass struct {
 	client       clientset.Interface
 	storageClass *storagev1.StorageClass
@@ -58,6 +71,27 @@ func (t *TestStorageClass) Cleanup() {
 	framework.ExpectNoError(err)
 }
 
+type TestPreProvisionedPersistentVolume struct {
+	client                    clientset.Interface
+	persistentVolume          *v1.PersistentVolume
+	requestedPersistentVolume *v1.PersistentVolume
+}
+
+func NewTestPreProvisionedPersistentVolume(c clientset.Interface, pv *v1.PersistentVolume) *TestPreProvisionedPersistentVolume {
+	return &TestPreProvisionedPersistentVolume{
+		client:                    c,
+		requestedPersistentVolume: pv,
+	}
+}
+
+func (pv *TestPreProvisionedPersistentVolume) Create() v1.PersistentVolume {
+	var err error
+	By("creating a PV")
+	pv.persistentVolume, err = pv.client.CoreV1().PersistentVolumes().Create(pv.requestedPersistentVolume)
+	framework.ExpectNoError(err)
+	return *pv.persistentVolume
+}
+
 type TestPersistentVolumeClaim struct {
 	client    clientset.Interface
 	claimSize string
@@ -77,14 +111,23 @@ func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claim
 	}
 }
 
-func (t *TestPersistentVolumeClaim) Create() v1.PersistentVolumeClaim {
+func (t *TestPersistentVolumeClaim) Create() {
 	var err error
 	By("creating a PVC")
-	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, t.storageClass.Name, t.claimSize)
+	storageClassName := ""
+	if t.storageClass != nil {
+		storageClassName = t.storageClass.Name
+	}
+	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize)
 	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(t.requestedPersistentVolumeClaim)
 	framework.ExpectNoError(err)
+}
+
+func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
+	var err error
+	By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
 	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
 	framework.ExpectNoError(err)
@@ -138,8 +181,23 @@ func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
 	Expect(t.persistentVolume.Spec.AccessModes).To(Equal(expectedAccessModes))
 	Expect(t.persistentVolume.Spec.ClaimRef.Name).To(Equal(t.persistentVolumeClaim.ObjectMeta.Name))
 	Expect(t.persistentVolume.Spec.ClaimRef.Namespace).To(Equal(t.persistentVolumeClaim.ObjectMeta.Namespace))
-	Expect(t.persistentVolume.Spec.PersistentVolumeReclaimPolicy).To(Equal(*t.storageClass.ReclaimPolicy))
-	Expect(t.persistentVolume.Spec.MountOptions).To(Equal(t.storageClass.MountOptions))
+	// If storageClass is nil, the PV was pre-provisioned with these values already set
+	if t.storageClass != nil {
+		Expect(t.persistentVolume.Spec.PersistentVolumeReclaimPolicy).To(Equal(*t.storageClass.ReclaimPolicy))
+		Expect(t.persistentVolume.Spec.MountOptions).To(Equal(t.storageClass.MountOptions))
+		if *t.storageClass.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
+			Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values).
+				To(HaveLen(1))
+		}
+		if len(t.storageClass.AllowedTopologies) > 0 {
+			Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key).
+				To(Equal(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Key))
+			for _, v := range t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values {
+				Expect(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(ContainElement(v))
+			}
+		}
+	}
 }
 
 func (t *TestPersistentVolumeClaim) Cleanup() {
@@ -153,10 +211,155 @@
 	// kubelet is slowly cleaning up the previous pod, however it should succeed
 	// in a couple of minutes.
 	if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
-		By(fmt.Sprintf("deleting the claim's PV %q", t.persistentVolume.Name))
-		err = framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+		By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
+		err := framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
 		framework.ExpectNoError(err)
 	}
+	// Wait for the PVC to be deleted
+	err = framework.WaitForPersistentVolumeClaimDeleted(t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
+	framework.ExpectNoError(err)
+}
+
+func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPolicy {
+	return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy
+}
+
+func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) {
+	err := framework.WaitForPersistentVolumePhase(phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+	framework.ExpectNoError(err)
+}
+
+func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() {
+	By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name))
+	err := framework.DeletePersistentVolume(t.client, t.persistentVolume.Name)
+	framework.ExpectNoError(err)
+	By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
+	err = framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+	framework.ExpectNoError(err)
+}
+
+func (t *TestPersistentVolumeClaim) DeleteBackingVolume(cloud awscloud.Cloud) {
+	volumeID := t.persistentVolume.Spec.CSI.VolumeHandle
+	By(fmt.Sprintf("deleting EBS volume %q", volumeID))
+	ok, err := cloud.DeleteDisk(context.Background(), volumeID)
+	if err != nil || !ok {
+		Fail(fmt.Sprintf("could not delete volume %q: %v", volumeID, err))
+	}
+}
+
+type TestDeployment struct {
+	client     clientset.Interface
+	deployment *apps.Deployment
+	namespace  *v1.Namespace
+	podName    string
+}
+
+func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string, pvc *v1.PersistentVolumeClaim, volumeName, mountPath string, readOnly bool) *TestDeployment {
+	generateName := "ebs-volume-tester-"
+	selectorValue := fmt.Sprintf("%s%d", generateName, rand.Int())
+	replicas := int32(1)
+	return &TestDeployment{
+		client:    c,
+		namespace: ns,
+		deployment: &apps.Deployment{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: generateName,
+			},
+			Spec: apps.DeploymentSpec{
+				Replicas: &replicas,
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{"app": selectorValue},
+				},
+				Template: v1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{"app": selectorValue},
+					},
+					Spec: v1.PodSpec{
+						Containers: []v1.Container{
+							{
+								Name:    "volume-tester",
+								Image:   imageutils.GetE2EImage(imageutils.BusyBox),
+								Command: []string{"/bin/sh"},
+								Args:    []string{"-c", command},
[]string{"-c", command}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: mountPath, + ReadOnly: readOnly, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyAlways, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (t *TestDeployment) Create() { + var err error + t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(t.deployment) + framework.ExpectNoError(err) + err = framework.WaitForDeploymentComplete(t.client, t.deployment) + framework.ExpectNoError(err) + pods, err := framework.GetPodsForDeployment(t.client, t.deployment) + framework.ExpectNoError(err) + // always get first pod as there should only be one + t.podName = pods.Items[0].Name +} + +func (t *TestDeployment) WaitForPodReady() { + pods, err := framework.GetPodsForDeployment(t.client, t.deployment) + framework.ExpectNoError(err) + // always get first pod as there should only be one + pod := pods.Items[0] + t.podName = pod.Name + err = framework.WaitForPodRunningInNamespace(t.client, &pod) + framework.ExpectNoError(err) +} + +func (t *TestDeployment) Exec(command []string, expectedString string) { + _, err := framework.LookForStringInPodExec(t.namespace.Name, t.podName, command, expectedString, execTimeout) + framework.ExpectNoError(err) +} + +func (t *TestDeployment) DeletePodAndWait() { + framework.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name) + err := t.client.CoreV1().Pods(t.namespace.Name).Delete(t.podName, nil) + if err != nil { + if !apierrs.IsNotFound(err) { + framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err)) + } + return + } +} + +func (t *TestDeployment) Cleanup() { + framework.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name) + body, err := t.Logs() + if err != nil { + framework.Logf("Error getting logs for pod %s: %v", t.podName, err) + } else { + framework.Logf("Pod %s has the following logs: %s", t.podName, body) + } + err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(t.deployment.Name, nil) + framework.ExpectNoError(err) +} + +func (t *TestDeployment) Logs() ([]byte, error) { + return podLogs(t.client, t.podName, t.namespace.Name) } type TestPod struct { @@ -171,7 +374,7 @@ func NewTestPod(c clientset.Interface, ns *v1.Namespace, command string) *TestPo namespace: ns, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: "pvc-volume-tester-", + GenerateName: "ebs-volume-tester-", }, Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -195,14 +398,42 @@ func (t *TestPod) Create() { t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(t.pod) framework.ExpectNoError(err) - err = framework.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name) +} + +func (t *TestPod) WaitForSuccess() { + err := framework.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name) + framework.ExpectNoError(err) +} + +func (t *TestPod) WaitForRunning() { + err := framework.WaitForPodRunningInNamespace(t.client, t.pod) + framework.ExpectNoError(err) +} + +// Ideally this would be in "k8s.io/kubernetes/test/e2e/framework" +// Similar to framework.WaitForPodSuccessInNamespaceSlow +var podFailedCondition = func(pod *v1.Pod) (bool, error) { + switch pod.Status.Phase { + case v1.PodFailed: + By("Saw pod failure") + return true, nil + case v1.PodSucceeded: + return true, fmt.Errorf("pod %q successed 
with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message) + default: + return false, nil + } +} + +func (t *TestPod) WaitForFailure() { + err := framework.WaitForPodCondition(t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition) framework.ExpectNoError(err) } -func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath string) { +func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath string, readOnly bool) { volumeMount := v1.VolumeMount{ Name: name, MountPath: mountPath, + ReadOnly: readOnly, } t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount) @@ -211,20 +442,35 @@ func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath str VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: pvc.Name, - ReadOnly: false, }, }, } t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume) } -func (p *TestPod) Cleanup() { - framework.Logf("deleting Pod %q/%q", p.namespace.Name, p.pod.Name) - body, err := p.client.CoreV1().Pods(p.namespace.Name).GetLogs(p.pod.Name, &v1.PodLogOptions{}).Do().Raw() +func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) { + t.pod.Spec.NodeSelector = nodeSelector +} + +func (t *TestPod) Cleanup() { + cleanupPodOrFail(t.client, t.pod.Name, t.namespace.Name) +} + +func (t *TestPod) Logs() ([]byte, error) { + return podLogs(t.client, t.pod.Name, t.namespace.Name) +} + +func cleanupPodOrFail(client clientset.Interface, name, namespace string) { + framework.Logf("deleting Pod %q/%q", namespace, name) + body, err := podLogs(client, name, namespace) if err != nil { - framework.Logf("Error getting logs for pod %s: %v", p.pod.Name, err) + framework.Logf("Error getting logs for pod %s: %v", name, err) } else { - framework.Logf("Pod %s has the following logs: %s", p.pod.Name, body) + framework.Logf("Pod %s has the following logs: %s", name, body) } - framework.DeletePodOrFail(p.client, p.namespace.Name, p.pod.Name) + framework.DeletePodOrFail(client, namespace, name) +} + +func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) { + return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do().Raw() }