Skip to content

Commit

Permalink
chore: remove duplicate session code and tidy up comments
Browse files Browse the repository at this point in the history
  • Loading branch information
richardcase committed Nov 12, 2020
1 parent 25d9646 commit 7ba281a
Show file tree
Hide file tree
Showing 8 changed files with 31 additions and 92 deletions.
8 changes: 0 additions & 8 deletions test/e2e/shared/aws.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,14 +52,6 @@ func NewAWSSession() client.ConfigProvider {
return sess
}

// GetSession returns an AWS client.ConfigProvider built from the shared
// AWS config (i.e. credentials/region resolved from the environment and
// the shared config files). The test fails if session creation errors.
func GetSession() client.ConfigProvider {
	opts := session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}
	sess, err := session.NewSessionWithOptions(opts)
	Expect(err).NotTo(HaveOccurred())
	return sess
}

// createCloudFormationStack ensures the cloudformation stack is up to date
func createCloudFormationStack(prov client.ConfigProvider, t *cfn_bootstrap.Template) {
Byf("Creating AWS CloudFormation stack for AWS IAM resources: stack-name=%s", t.Spec.StackName)
Expand Down
5 changes: 2 additions & 3 deletions test/e2e/shared/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@ func SetupSpecNamespace(ctx context.Context, specName string, e2eCtx *E2EContext
return namespace
}

// (ctx, "", e2eCtx.Environment.BootstrapClusterProxy, e2eCtx.Settings.ArtifactFolder, k, e2eCtx.E2EConfig.GetIntervals, e2eCtx.SkipCleanup)
func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, namespace *corev1.Namespace, e2eCtx *E2EContext) {
Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)
// Dump all Cluster API related resources to artifacts before deleting them.
Expand All @@ -75,7 +74,7 @@ func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, namespace
}

func DumpMachines(ctx context.Context, e2eCtx *E2EContext, namespace *corev1.Namespace) {
machines := machinesForSpec(ctx, e2eCtx.Environment.BootstrapClusterProxy, namespace)
machines := MachinesForSpec(ctx, e2eCtx.Environment.BootstrapClusterProxy, namespace)
instances, err := allMachines(ctx, e2eCtx)
if err != nil {
return
Expand All @@ -95,7 +94,7 @@ func DumpMachines(ctx context.Context, e2eCtx *E2EContext, namespace *corev1.Nam
}
}

func machinesForSpec(ctx context.Context, clusterProxy framework.ClusterProxy, namespace *corev1.Namespace) *infrav1.AWSMachineList {
func MachinesForSpec(ctx context.Context, clusterProxy framework.ClusterProxy, namespace *corev1.Namespace) *infrav1.AWSMachineList {
lister := clusterProxy.GetClient()
list := new(infrav1.AWSMachineList)
if err := lister.List(ctx, list, client.InNamespace(namespace.GetName())); err != nil {
Expand Down
19 changes: 0 additions & 19 deletions test/e2e/shared/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,6 @@ type E2EContext struct {
E2EConfig *clusterctl.E2EConfig
// Environment represents the runtime environment
Environment RuntimeEnvironment
// Lifecycle represents Ginkgo test lifecycle hooks
//Lifecycle TestLifecycle
// AWSSession is the AWS session for the tests
AWSSession client.ConfigProvider
}
Expand Down Expand Up @@ -119,26 +117,9 @@ type RuntimeEnvironment struct {
Scheme *runtime.Scheme
}

// TestLifecycle represents the Ginkgo test lifecycle hook functions
// type TestLifecycle struct {
// BeforeSuiteFirstNode BeforeSuiteFirstNodeFunc
// BeforeSuiteParalelNode BeforeSuiteParalelNodeFunc
// AfterSuiteFirstNode AfterSuiteFunc
// AfterSuiteParallelNode AfterSuiteFunc
// }

// InitSchemeFunc is a function that will create a scheme
type InitSchemeFunc func() *runtime.Scheme

// BeforeSuiteFirstNodeFunc is a function that will be run on the first node before the Ginkgo suite runs
// type BeforeSuiteFirstNodeFunc func(e2eCtx *E2EContext) []byte

// // BeforeSuiteFirstNodeFunc is a function that will be run on the parallel nodes before the Ginkgo suite runs
// type BeforeSuiteParalelNodeFunc func(e2eCtx *E2EContext, data []byte)

// // AfterSuiteFunc is a function that runs after the Ginkgo suite has run
// type AfterSuiteFunc func(e2eCtx *E2EContext)

// WithSchemeInit will set a different function to initialize the scheme
func WithSchemeInit(fn InitSchemeFunc) Option {
return func(ctx *E2EContext) {
Expand Down
17 changes: 17 additions & 0 deletions test/e2e/shared/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ limitations under the License.
package shared

import (
"flag"

"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"

Expand Down Expand Up @@ -52,3 +54,18 @@ func DefaultScheme() *runtime.Scheme {
_ = clientgoscheme.AddToScheme(sc)
return sc
}

// CreateDefaultFlags will create the default flags used for the tests and binds them to the e2e context.
// All flags are registered on the process-wide flag.CommandLine set; values are
// written directly into the fields of ctx.Settings when flag.Parse runs.
func CreateDefaultFlags(ctx *E2EContext) {
	settings := &ctx.Settings
	flag.StringVar(&settings.ConfigPath, "config-path", "", "path to the e2e config file")
	flag.StringVar(&settings.ArtifactFolder, "artifacts-folder", "", "folder where e2e test artifact should be stored")
	flag.BoolVar(&settings.UseCIArtifacts, "kubetest.use-ci-artifacts", false, "use the latest build from the main branch of the Kubernetes repository")
	flag.StringVar(&settings.KubetestConfigFilePath, "kubetest.config-file", "", "path to the kubetest configuration file")
	flag.IntVar(&settings.GinkgoNodes, "kubetest.ginkgo-nodes", 1, "number of ginkgo nodes to use")
	flag.IntVar(&settings.GinkgoSlowSpecThreshold, "kubetest.ginkgo-slowSpecThreshold", 120, "time in s before spec is marked as slow")
	flag.BoolVar(&settings.UseExistingCluster, "use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)")
	flag.BoolVar(&settings.SkipCleanup, "skip-cleanup", false, "if true, the resource cleanup after tests will be skipped")
	flag.BoolVar(&settings.SkipCloudFormationDeletion, "skip-cloudformation-deletion", false, "if true, an AWS CloudFormation stack will not be deleted")
	flag.BoolVar(&settings.SkipCloudFormationCreation, "skip-cloudformation-creation", false, "if true, an AWS CloudFormation stack will not be created")
	flag.StringVar(&settings.DataFolder, "data-folder", "", "path to the data folder")
}
35 changes: 0 additions & 35 deletions test/e2e/shared/flags.go

This file was deleted.

7 changes: 5 additions & 2 deletions test/e2e/shared/suite.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ type synchronizedBeforeTestSuiteConfig struct {
GinkgoSlowSpecThreshold int `json:"ginkgoSlowSpecThreshold,omitempty"`
}

// Node1BeforeSuite is the common setup done on the first ginkgo node before the test suite runs
func Node1BeforeSuite(e2eCtx *E2EContext) []byte {
flag.Parse()
Expect(e2eCtx.Settings.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. configPath should be an existing file.")
Expand Down Expand Up @@ -121,8 +122,8 @@ func Node1BeforeSuite(e2eCtx *E2EContext) []byte {
return data
}

// AllNodesBeforeSuite is the common setup done on each ginkgo parallel node before the test suite runs
func AllNodesBeforeSuite(e2eCtx *E2EContext, data []byte) {
// Before each ParallelNode.
conf := &synchronizedBeforeTestSuiteConfig{}
err := yaml.UnmarshalStrict(data, conf)
Expect(err).NotTo(HaveOccurred())
Expand All @@ -135,7 +136,7 @@ func AllNodesBeforeSuite(e2eCtx *E2EContext, data []byte) {
e2eCtx.Settings.UseCIArtifacts = conf.UseCIArtifacts
e2eCtx.Settings.GinkgoNodes = conf.GinkgoNodes
e2eCtx.Settings.GinkgoSlowSpecThreshold = conf.GinkgoSlowSpecThreshold
azs := GetAvailabilityZones(GetSession())
azs := GetAvailabilityZones(e2eCtx.AWSSession)
SetEnvVar(AwsAvailabilityZone1, *azs[0].ZoneName, false)
SetEnvVar(AwsAvailabilityZone2, *azs[1].ZoneName, false)
SetEnvVar("AWS_REGION", conf.Region, false)
Expand Down Expand Up @@ -182,6 +183,7 @@ func AllNodesBeforeSuite(e2eCtx *E2EContext, data []byte) {
}()
}

// Node1AfterSuite is cleanup that runs on the first ginkgo node after the test suite finishes
func Node1AfterSuite(e2eCtx *E2EContext) {
if e2eCtx.Environment.ResourceTickerDone != nil {
e2eCtx.Environment.ResourceTickerDone <- true
Expand All @@ -197,6 +199,7 @@ func Node1AfterSuite(e2eCtx *E2EContext) {
}
}

// AllNodesAfterSuite is cleanup that runs on all ginkgo parallel nodes after the test suite finishes
func AllNodesAfterSuite(e2eCtx *E2EContext) {
By("Tearing down the management cluster")
if !e2eCtx.Settings.SkipCleanup {
Expand Down
9 changes: 0 additions & 9 deletions test/e2e/suites/unmanaged/unmanaged_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,3 @@ var _ = SynchronizedAfterSuite(func() {
}, func() {
shared.AllNodesAfterSuite(e2eCtx)
})

// // initScheme creates a new GVK scheme
// func initScheme() *runtime.Scheme {
// sc := runtime.NewScheme()
// framework.TryAddDefaultSchemes(sc)
// _ = v1alpha3.AddToScheme(sc)
// _ = clientgoscheme.AddToScheme(sc)
// return sc
// }
23 changes: 7 additions & 16 deletions test/e2e/suites/unmanaged/unmanaged_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
appsv1 "k8s.io/api/apps/v1"
Expand Down Expand Up @@ -187,7 +186,7 @@ var _ = Describe("functional tests - unmanaged", func() {
Describe("Creating cluster after reaching vpc maximum limit", func() {
It("Cluster created after reaching vpc limit should be in provisioning", func() {
By("Create VPCs until limit is reached")
sess := getSession()
sess := e2eCtx.AWSSession
limit := getElasticIPsLimit(sess)
var vpcsCreated []string
for getCurrentVPCsCount(sess) < limit {
Expand Down Expand Up @@ -641,7 +640,7 @@ func deleteMachine(namespace *corev1.Namespace, md *clusterv1.MachineDeployment)

func deleteRetainedVolumes(awsVolIds []*string) {
By("Deleting dynamically provisioned volumes")
ec2Client := ec2.New(getSession())
ec2Client := ec2.New(e2eCtx.AWSSession)
for _, volumeId := range awsVolIds {
input := &ec2.DeleteVolumeInput{
VolumeId: aws.String(*volumeId),
Expand Down Expand Up @@ -708,19 +707,11 @@ func getEvents(namespace string) *corev1.EventList {
return eventsList
}

func getSession() client.ConfigProvider {
sess, err := session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
})
Expect(err).NotTo(HaveOccurred())
return sess
}

func getSubnetId(filterKey, filterValue string) *string {
var subnetOutput *ec2.DescribeSubnetsOutput
var err error

ec2Client := ec2.New(getSession())
ec2Client := ec2.New(e2eCtx.AWSSession)
subnetInput := &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Expand Down Expand Up @@ -899,7 +890,7 @@ func makeMachineDeployment(namespace, mdName, clusterName string, replicas int32

func assertSpotInstanceType(instanceId string) {
shared.Byf("Finding EC2 spot instance with ID: %s", instanceId)
ec2Client := ec2.New(getSession())
ec2Client := ec2.New(e2eCtx.AWSSession)
input := &ec2.DescribeInstancesInput{
InstanceIds: []*string{
aws.String(instanceId[strings.LastIndex(instanceId, "/")+1:]),
Expand All @@ -915,7 +906,7 @@ func assertSpotInstanceType(instanceId string) {

func terminateInstance(instanceId string) {
shared.Byf("Terminating EC2 instance with ID: %s", instanceId)
ec2Client := ec2.New(getSession())
ec2Client := ec2.New(e2eCtx.AWSSession)
input := &ec2.TerminateInstancesInput{
InstanceIds: []*string{
aws.String(instanceId[strings.LastIndex(instanceId, "/")+1:]),
Expand All @@ -931,7 +922,7 @@ func terminateInstance(instanceId string) {

func verifyElbExists(elbName string, exists bool) {
shared.Byf("Verifying ELB with name %s present", elbName)
elbClient := elb.New(getSession())
elbClient := elb.New(e2eCtx.AWSSession)
input := &elb.DescribeLoadBalancersInput{
LoadBalancerNames: []*string{
aws.String(elbName),
Expand All @@ -952,7 +943,7 @@ func verifyElbExists(elbName string, exists bool) {

func verifyVolumesExists(awsVolumeIds []*string) {
By("Ensuring dynamically provisioned volumes exists")
ec2Client := ec2.New(getSession())
ec2Client := ec2.New(e2eCtx.AWSSession)
input := &ec2.DescribeVolumesInput{
VolumeIds: awsVolumeIds,
}
Expand Down

0 comments on commit 7ba281a

Please sign in to comment.