test: add initial integration tests (#2047)
* add initial integration tests
tzneal authored Jul 7, 2022
1 parent db656cd commit 6f55e30
Showing 12 changed files with 428 additions and 20 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -34,7 +34,7 @@ battletest: ## Run randomized, racing, code coveraged, tests
-tags random_test_delay

e2etests: ## Run the e2e suite against your local cluster
go test -v ./test/... -environment-name=${CLUSTER_NAME}
go test -timeout 60m -v ./test/... -environment-name=${CLUSTER_NAME}

benchmark:
go test -tags=test_performance -run=NoTests -bench=. ./...
@@ -117,4 +117,4 @@ issues: ## Run GitHub issue analysis scripts
website: ## Serve the docs website locally
cd website && npm install && git submodule update --init --recursive && hugo server

.PHONY: help dev ci release test battletest verify codegen docgen apply delete toolchain release release-gen licenses issues website nightly snapshot
.PHONY: help dev ci release test battletest verify codegen docgen apply delete toolchain release release-gen licenses issues website nightly snapshot e2etests
2 changes: 2 additions & 0 deletions pkg/controllers/provisioning/scheduling/suite_test.go
@@ -3323,6 +3323,7 @@ var _ = Describe("Binpacking", func() {
v1.ResourceCPU: resource.MustParse("1"),
},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
@@ -3342,6 +3343,7 @@ var _ = Describe("Binpacking", func() {
v1.ResourceCPU: resource.MustParse("1"),
},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Ti"),
3 changes: 3 additions & 0 deletions pkg/controllers/provisioning/suite_test.go
@@ -320,6 +320,7 @@ var _ = Describe("Provisioning", func() {
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
@@ -339,6 +340,7 @@ var _ = Describe("Provisioning", func() {
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
@@ -351,6 +353,7 @@
It("should not schedule if initContainer resources are too large", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
},
5 changes: 5 additions & 0 deletions pkg/controllers/state/cluster.go
@@ -202,6 +202,11 @@ func (c *Cluster) populateProvisioner(ctx context.Context, node *v1.Node, n *Nod
if provisionerName, ok := node.Labels[v1alpha5.ProvisionerNameLabelKey]; ok {
var provisioner v1alpha5.Provisioner
if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: provisionerName}, &provisioner); err != nil {
if errors.IsNotFound(err) {
// this occurs if the provisioner was deleted; the node won't last much longer anyway, so it's
// safe to skip reporting this and continue
return nil
}
return fmt.Errorf("getting provisioner, %w", err)
}
n.Provisioner = &provisioner
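For readers skimming the hunk above: the new branch simply treats a deleted Provisioner as a non-error. Below is a self-contained sketch of the same "tolerate NotFound" pattern as a hypothetical standalone helper; the package, function name, and import aliases are illustrative and only the NotFound check mirrors this commit.

```go
package example // hypothetical package; a sketch of the pattern, not code from this commit

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)

// provisionerForNode resolves the Provisioner that launched a node. A missing
// Provisioner is treated as a non-error: it was deleted, and the node it
// launched will be cleaned up shortly anyway.
func provisionerForNode(ctx context.Context, kubeClient client.Client, node *v1.Node) (*v1alpha5.Provisioner, error) {
	name, ok := node.Labels[v1alpha5.ProvisionerNameLabelKey]
	if !ok {
		return nil, nil // node was not launched by a provisioner
	}
	provisioner := &v1alpha5.Provisioner{}
	if err := kubeClient.Get(ctx, client.ObjectKey{Name: name}, provisioner); err != nil {
		if apierrors.IsNotFound(err) {
			return nil, nil // provisioner was deleted; skip reporting
		}
		return nil, fmt.Errorf("getting provisioner, %w", err)
	}
	return provisioner, nil
}
```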
2 changes: 1 addition & 1 deletion pkg/test/daemonsets.go
@@ -39,7 +39,7 @@ func DaemonSet(overrides ...DaemonSetOptions) *appsv1.DaemonSet {
options := DaemonSetOptions{}
for _, opts := range overrides {
if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil {
panic(fmt.Sprintf("Failed to merge pod options: %s", err))
panic(fmt.Sprintf("Failed to merge daemonset options: %s", err))
}
}
if options.Name == "" {
67 changes: 67 additions & 0 deletions pkg/test/deployment.go
@@ -0,0 +1,67 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package test

import (
"fmt"

"github.com/aws/aws-sdk-go/aws"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type DeploymentOptions struct {
metav1.ObjectMeta
Labels map[string]string
Replicas int32
PodOptions PodOptions
}

func Deployment(overrides ...DeploymentOptions) *appsv1.Deployment {
options := DeploymentOptions{}
for _, opts := range overrides {
if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil {
panic(fmt.Sprintf("Failed to merge deployment options: %s", err))
}
}

objectMeta := ObjectMeta(options.ObjectMeta)

if options.PodOptions.Image == "" {
options.PodOptions.Image = "public.ecr.aws/eks-distro/kubernetes/pause:3.2"
}
if options.PodOptions.Labels == nil {
options.PodOptions.Labels = map[string]string{
"app": objectMeta.Name,
}
}
pod := Pod(options.PodOptions)
dep := &appsv1.Deployment{
ObjectMeta: objectMeta,
Spec: appsv1.DeploymentSpec{
Replicas: aws.Int32(options.Replicas),
Selector: &metav1.LabelSelector{MatchLabels: options.PodOptions.Labels},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: options.PodOptions.Labels,
},
Spec: pod.Spec,
},
},
}
return dep
}
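To make the new fixture's defaults concrete, here is a minimal sketch of calling it directly; the `package main` wrapper and the replica count are illustrative, while the pause image and the `{"app": <name>}` label default come from the code above.

```go
package main // illustrative sketch; not part of this commit

import (
	"fmt"

	"github.com/aws/karpenter/pkg/test"
)

func main() {
	// With no image or labels supplied, the helper falls back to the pause
	// image and labels the pods {"app": <generated name>}.
	deployment := test.Deployment(test.DeploymentOptions{Replicas: 3})

	fmt.Println(*deployment.Spec.Replicas)                          // 3
	fmt.Println(deployment.Spec.Template.Spec.Containers[0].Image)  // public.ecr.aws/eks-distro/kubernetes/pause:3.2
	fmt.Println(deployment.Spec.Selector.MatchLabels)               // map[app:<generated name>]
}
```

Because the selector is derived from the generated pod labels, a test can reuse `deployment.Spec.Selector.MatchLabels` to wait for the pods the deployment owns (see the suite sketch after expectations.go below).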
22 changes: 13 additions & 9 deletions pkg/test/pods.go
@@ -33,6 +33,7 @@ import (
type PodOptions struct {
metav1.ObjectMeta
Image string
InitImage string
NodeName string
PriorityClassName string
InitResourceRequirements v1.ResourceRequirements
@@ -75,27 +76,22 @@ func Pod(overrides ...PodOptions) *v1.Pod {
}
}
if options.Image == "" {
options.Image = "alpine"
options.Image = "public.ecr.aws/eks-distro/kubernetes/pause:3.2"
}
volumes := []v1.Volume{}
var volumes []v1.Volume
for _, pvc := range options.PersistentVolumeClaims {
volumes = append(volumes, v1.Volume{
Name: strings.ToLower(randomdata.SillyName()),
VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc}},
})
}
return &v1.Pod{
p := &v1.Pod{
ObjectMeta: ObjectMeta(options.ObjectMeta),
Spec: v1.PodSpec{
NodeSelector: options.NodeSelector,
Affinity: buildAffinity(options),
TopologySpreadConstraints: options.TopologySpreadConstraints,
Tolerations: options.Tolerations,
InitContainers: []v1.Container{{
Name: strings.ToLower(sequentialRandomName()),
Image: options.Image,
Resources: options.InitResourceRequirements,
}},
Containers: []v1.Container{{
Name: strings.ToLower(sequentialRandomName()),
Image: options.Image,
@@ -110,6 +106,14 @@ func Pod(overrides ...PodOptions) *v1.Pod {
Phase: options.Phase,
},
}
if options.InitImage != "" {
p.Spec.InitContainers = []v1.Container{{
Name: strings.ToLower(sequentialRandomName()),
Image: options.InitImage,
Resources: options.InitResourceRequirements,
}}
}
return p
}

func sequentialRandomName() string {
@@ -152,7 +156,7 @@ func PodDisruptionBudget(overrides ...PDBOptions) *v1beta1.PodDisruptionBudget {
options := PDBOptions{}
for _, opts := range overrides {
if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil {
panic(fmt.Sprintf("Failed to merge pod options: %s", err))
panic(fmt.Sprintf("Failed to merge pdb options: %s", err))
}
}
return &v1beta1.PodDisruptionBudget{
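This is the behavioral change that motivates the `InitImage: "pause"` additions in the suite tests earlier in this diff: an init container is now rendered only when `InitImage` is set. A minimal sketch (the `package main` wrapper and the "pause"/1Gi values are illustrative):

```go
package main // illustrative sketch; not part of this commit

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/aws/karpenter/pkg/test"
)

func main() {
	// Setting InitImage opts the fixture into a single init container that
	// carries the InitResourceRequirements.
	withInit := test.Pod(test.PodOptions{
		InitImage: "pause",
		InitResourceRequirements: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")},
		},
	})
	fmt.Println(len(withInit.Spec.InitContainers)) // 1

	// Leaving InitImage empty now produces a pod with no init containers,
	// instead of an init container that reused the main container's image.
	withoutInit := test.Pod(test.PodOptions{})
	fmt.Println(len(withoutInit.Spec.InitContainers)) // 0
}
```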
4 changes: 2 additions & 2 deletions pkg/test/provisioner.go
@@ -52,7 +52,7 @@ func Provisioner(overrides ...ProvisionerOptions) *v1alpha5.Provisioner {
options := ProvisionerOptions{}
for _, opts := range overrides {
if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil {
panic(fmt.Sprintf("Failed to merge pod options: %s", err))
panic(fmt.Sprintf("Failed to merge provisioner options: %s", err))
}
}
if options.Name == "" {
@@ -72,7 +72,7 @@ func Provisioner(overrides ...ProvisionerOptions) *v1alpha5.Provisioner {
StartupTaints: options.StartupTaints,
Labels: options.Labels,
Limits: &v1alpha5.Limits{Resources: options.Limits},
TTLSecondsAfterEmpty: ptr.Int64(10),
TTLSecondsAfterEmpty: ptr.Int64(30),
},
Status: options.Status,
}
9 changes: 7 additions & 2 deletions test/pkg/environment/environment.go
@@ -26,6 +26,7 @@ type Environment struct {
context.Context
Options *Options
Client client.Client
Monitor *Monitor
}

func NewEnvironment(t *testing.T) (*Environment, error) {
@@ -38,9 +39,13 @@ func NewEnvironment(t *testing.T) (*Environment, error) {
if err != nil {
return nil, err
}
gomega.SetDefaultEventuallyTimeout(10 * time.Minute)
gomega.SetDefaultEventuallyTimeout(5 * time.Minute)
gomega.SetDefaultEventuallyPollingInterval(1 * time.Second)
return &Environment{Context: ctx, Options: options, Client: client}, nil
return &Environment{Context: ctx,
Options: options,
Client: client,
Monitor: NewClusterMonitor(ctx, client),
}, nil
}

func NewLocalClient() (client.Client, error) {
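The Monitor implementation referenced here is not among the files rendered on this page. Inferred only from its call sites in environment.go and the expectations below, its surface looks roughly like the following sketch; the interface name and the signatures are assumptions, not the committed type.

```go
package environment // sketch of the surface inferred from call sites; not the actual implementation

import "k8s.io/apimachinery/pkg/labels"

// clusterMonitor captures only what the expectations in this commit rely on;
// the method names come from call sites, the signatures are assumptions.
type clusterMonitor interface {
	Reset()                                   // snapshot baseline cluster state at the start of a test
	NodeCount() int                           // nodes currently in the cluster
	NodeCountAtReset() int                    // node count recorded by the last Reset()
	CreatedNodes() int                        // nodes created since the last Reset()
	RunningPods(selector labels.Selector) int // running pods matching the selector
	RestartCount() map[string]int             // container restart counts, keyed by name
}
```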
67 changes: 65 additions & 2 deletions test/pkg/environment/expectations.go
@@ -1,16 +1,23 @@
package environment

import (
"fmt"
"sync"

. "github.com/onsi/gomega" //nolint:revive,stylecheck
"k8s.io/apimachinery/pkg/labels"

"github.com/aws/karpenter/pkg/utils/pod"

"github.com/samber/lo"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

. "github.com/onsi/ginkgo" //nolint:revive,stylecheck
. "github.com/onsi/gomega" //nolint:revive,stylecheck

"github.com/aws/karpenter/pkg/apis/awsnodetemplate/v1alpha1"
"github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)
@@ -30,14 +37,45 @@ var (
}
)

func (env *Environment) BeforeEach() {
var nodes v1.NodeList
Expect(env.Client.List(env.Context, &nodes)).To(Succeed())
for _, node := range nodes.Items {
if len(node.Spec.Taints) == 0 && !node.Spec.Unschedulable {
Fail(fmt.Sprintf("expected system pool node %s to be tainted", node.Name))
}
}

var pods v1.PodList
Expect(env.Client.List(env.Context, &pods)).To(Succeed())
for i := range pods.Items {
Expect(pod.IsProvisionable(&pods.Items[i])).To(BeFalse(),
fmt.Sprintf("expected to have no provisionable pods, found %s/%s", pods.Items[i].Namespace, pods.Items[i].Name))
Expect(pods.Items[i].Namespace).ToNot(Equal("default"),
fmt.Sprintf("expected no pods in the `default` namespace, found %s/%s", pods.Items[i].Namespace, pods.Items[i].Name))
}

var provisioners v1alpha5.ProvisionerList
Expect(env.Client.List(env.Context, &provisioners)).To(Succeed())
Expect(provisioners.Items).To(HaveLen(0), "expected no provisioners to exist")
env.Monitor.Reset()
}

func (env *Environment) ExpectCreated(objects ...client.Object) {
for _, object := range objects {
object.SetLabels(lo.Assign(object.GetLabels(), map[string]string{EnvironmentLabelName: env.Options.EnvironmentName}))
Expect(env.Client.Create(env, object)).To(Succeed())
}
}

func (env *Environment) ExpectCleaned() {
func (env *Environment) ExpectDeleted(objects ...client.Object) {
for _, object := range objects {
Expect(env.Client.Delete(env, object)).To(Succeed())
}
}

func (env *Environment) AfterEach() {
defer GinkgoRecover()
namespaces := &v1.NamespaceList{}
Expect(env.Client.List(env, namespaces)).To(Succeed())
wg := sync.WaitGroup{}
@@ -67,3 +105,28 @@ func (env *Environment) EventuallyExpectHealthy(pods ...*v1.Pod) {
}).Should(Succeed())
}
}

func (env *Environment) EventuallyExpectHealthyPodCount(selector labels.Selector, numPods int) {
Eventually(func(g Gomega) {
g.Expect(env.Monitor.RunningPods(selector)).To(Equal(numPods))
}).Should(Succeed())
}

func (env *Environment) EventuallyExpectScaleDown() {
Eventually(func(g Gomega) {
// expect the current node count to be what it was when the test started
g.Expect(env.Monitor.NodeCount()).To(Equal(env.Monitor.NodeCountAtReset()))
}).Should(Succeed())
}

func (env *Environment) ExpectCreatedNodeCount(comparator string, nodeCount int) {
Expect(env.Monitor.CreatedNodes()).To(BeNumerically(comparator, nodeCount),
fmt.Sprintf("expected %d created nodes, had %d", nodeCount, env.Monitor.CreatedNodes()))
}

func (env *Environment) ExpectNoCrashes() {
for name, restartCount := range env.Monitor.RestartCount() {
Expect(restartCount).To(Equal(0),
fmt.Sprintf("expected restart count of %s = 0, had %d", name, restartCount))
}
}
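A hedged sketch of how a suite might wire these helpers together end to end; the package name, import paths, suite name, and the assumption that a bare test.Provisioner() can launch nodes on the target cluster are all illustrative, not part of this commit.

```go
package integration_test // illustrative wiring; import paths and names are assumptions

import (
	"testing"

	. "github.com/onsi/ginkgo" //nolint:revive,stylecheck
	. "github.com/onsi/gomega" //nolint:revive,stylecheck
	"k8s.io/apimachinery/pkg/labels"

	"github.com/aws/karpenter/pkg/test"
	"github.com/aws/karpenter/test/pkg/environment"
)

var env *environment.Environment

func TestIntegration(t *testing.T) {
	RegisterFailHandler(Fail)
	var err error
	env, err = environment.NewEnvironment(t)
	if err != nil {
		t.Fatalf("creating environment, %s", err)
	}
	RunSpecs(t, "Integration")
}

// BeforeEach asserts the cluster starts clean and resets the monitor's
// baseline; AfterEach deletes what the test created.
var _ = BeforeEach(func() { env.BeforeEach() })
var _ = AfterEach(func() { env.AfterEach() })

var _ = Describe("Scheduling", func() {
	It("provisions nodes for pending pods and scales back down", func() {
		provisioner := test.Provisioner() // assumes defaults are usable on the target cluster
		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1})

		env.ExpectCreated(provisioner, deployment)
		env.EventuallyExpectHealthyPodCount(
			labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), 1)
		env.ExpectCreatedNodeCount(">=", 1)

		env.ExpectDeleted(deployment)
		env.EventuallyExpectScaleDown()
		env.ExpectNoCrashes()
	})
})
```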