Merge pull request #967 from k8stopologyawareschedwg/move_pod_utils
e2e: move pod utils to a separate package
k8s-ci-robot authored Nov 30, 2022
2 parents ff1565f + 0a06562 commit c22604e
Showing 4 changed files with 154 additions and 108 deletions.
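
For orientation, the helper renames implied by the call-site changes in the diffs below map roughly as follows (derived from this diff only, not an exhaustive inventory of the new packages):

    testutils.NFDMasterPod                -> testpod.NFDMaster
    testutils.NFDWorkerPod                -> testpod.NFDWorker
    testutils.BestEffortSleeperPod        -> testpod.BestEffortSleeper
    testutils.GuaranteedSleeperPod        -> testpod.GuaranteedSleeper
    testutils.DeletePodsAsync             -> testpod.DeleteAsync
    testutils.WaitForPodsReady            -> testpod.WaitForReady
    testutils.PodSpecOption               -> testpod.SpecOption
    testutils.NFDWorkerDaemonSet          -> testds.NFDWorker
    testutils.NFDTopologyUpdaterDaemonSet -> testds.NFDTopologyUpdater
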
43 changes: 23 additions & 20 deletions test/e2e/node_feature_discovery.go
@@ -37,10 +37,13 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

nfdv1alpha1 "sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1"
nfdclient "sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned"
"sigs.k8s.io/node-feature-discovery/source/custom"
testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

var (
@@ -111,8 +114,8 @@ var _ = SIGDescribe("Node Feature Discovery", func() {

// Launch nfd-master
By("Creating nfd master pod and nfd-master service")
imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
masterPod = f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
masterPod = f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))

// Create nfd-master service
nfdSvc, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -154,11 +157,11 @@ var _ = SIGDescribe("Node Feature Discovery", func() {

// Launch nfd-worker
By("Creating a nfd worker pod")
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
}
workerPod := testutils.NFDWorkerPod(podSpecOpts...)
workerPod := testpod.NFDWorker(podSpecOpts...)
workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

@@ -204,13 +207,13 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
fConf := cfg.DefaultFeatures

By("Creating nfd-worker daemonset")
podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

By("Getting node objects")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
@@ -334,18 +337,18 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
Expect(err).NotTo(HaveOccurred())

By("Creating nfd-worker daemonset with configmap mounted")
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
testutils.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
testpod.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
}
workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
workerDS := testds.NFDWorker(podSpecOpts...)

workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

By("Getting target node and checking labels")
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{})
@@ -417,16 +420,16 @@ core:
Expect(err).NotTo(HaveOccurred())

By("Creating nfd-worker daemonset")
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
}
workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

expected := map[string]string{
"feature.node.kubernetes.io/e2e-flag-test-1": "true",
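
A condensed sketch of the call pattern node_feature_discovery.go uses after this change. The function name, namespace, and image parameters are placeholders, and the helper signatures are inferred from the call sites above rather than from the new packages themselves:

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
	testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// deployWorkerDaemonSet shows the post-refactor pattern: pod spec options come
// from the testpod package, while the DaemonSet wrapper comes from testds.
func deployWorkerDaemonSet(ctx context.Context, cs kubernetes.Interface, ns, repo, tag string) error {
	podSpecOpts := []testpod.SpecOption{
		testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", repo, tag)),
		testpod.SpecWithContainerExtraArgs("-label-sources=fake"),
	}
	workerDS := testds.NFDWorker(podSpecOpts...)

	workerDS, err := cs.AppsV1().DaemonSets(ns).Create(ctx, workerDS, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	// WaitForReady replaces the old testutils.WaitForPodsReady helper.
	return testpod.WaitForReady(cs, ns, workerDS.Spec.Template.Labels["name"], 5)
}
```
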
43 changes: 28 additions & 15 deletions test/e2e/topology_updater.go
@@ -19,6 +19,7 @@ package e2e
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"time"

. "github.com/onsi/ginkgo/v2"
@@ -39,6 +40,8 @@ import (
admissionapi "k8s.io/pod-security-admission/api"

testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
@@ -71,8 +74,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {

Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())

imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))

// Create nfd-master service
masterService, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -86,7 +89,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()})
@@ -119,8 +122,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
kcfg := cfg.GetKubeletConfig()
By(fmt.Sprintf("Using config (%#v)", kcfg))

podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})

It("should fill the node resource topologies CR with the data", func() {
@@ -133,12 +136,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
sleeperPod := testutils.BestEffortSleeperPod()
sleeperPod := testpod.BestEffortSleeper()

podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
defer testutils.DeletePodsAsync(f, podMap)
defer testpod.DeleteAsync(f, podMap)

cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -173,12 +176,17 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
sleeperPod := testutils.GuaranteedSleeperPod("500m")
sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("500m"),
// any random reasonable amount is fine
corev1.ResourceMemory: resource.MustParse("100Mi"),
}))

podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
defer testutils.DeletePodsAsync(f, podMap)
defer testpod.DeleteAsync(f, podMap)

cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -219,7 +227,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming exclusive CPUs")
sleeperPod := testutils.GuaranteedSleeperPod("1000m")
sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1000m"),
// any random reasonable amount is fine
corev1.ResourceMemory: resource.MustParse("100Mi"),
}))
// in case there is more than a single node in the cluster
// we need to set the node name, so we'll have certainty about
// which node we need to examine
@@ -228,7 +241,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
defer testutils.DeletePodsAsync(f, podMap)
defer testpod.DeleteAsync(f, podMap)

By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha1.NodeResourceTopology
@@ -274,11 +287,11 @@ excludeList:
kcfg := cfg.GetKubeletConfig()
By(fmt.Sprintf("Using config (%#v)", kcfg))

podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
}
topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})

It("noderesourcetopology should not advertise the memory resource", func() {
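
A minimal sketch of the new sleeper-pod construction used above: the CPU quantity string is no longer passed directly, the caller now builds a resource list via testpod.WithLimits. The wrapper function and its return type are assumptions based on the CreateSync call sites in the diff:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// guaranteedSleeper mirrors the pattern above; GuaranteedSleeper is assumed to
// return *corev1.Pod, matching its use with f.PodClient().CreateSync.
func guaranteedSleeper(cpu string) *corev1.Pod {
	return testpod.GuaranteedSleeper(testpod.WithLimits(
		corev1.ResourceList{
			corev1.ResourceCPU: resource.MustParse(cpu),
			// The memory amount is not significant for the test;
			// any reasonable value works.
			corev1.ResourceMemory: resource.MustParse("100Mi"),
		}))
}
```
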
58 changes: 58 additions & 0 deletions test/e2e/utils/daemonset/daemonset.go
@@ -0,0 +1,58 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemonset

import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"

"sigs.k8s.io/node-feature-discovery/test/e2e/utils"
"sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// NFDWorker provides the NFD daemon set worker definition
func NFDWorker(opts ...pod.SpecOption) *appsv1.DaemonSet {
return new("nfd-worker", &pod.NFDWorker(opts...).Spec)
}

// NFDTopologyUpdater provides the NFD daemon set topology updater
func NFDTopologyUpdater(kc utils.KubeletConfig, opts ...pod.SpecOption) *appsv1.DaemonSet {
return new("nfd-topology-updater", pod.NFDTopologyUpdaterSpec(kc, opts...))
}

// new provides a new daemon set with the given name and pod spec
func new(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: name + "-" + string(uuid.NewUUID()),
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: *podSpec,
},
MinReadySeconds: 5,
},
}
}
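
A short, hypothetical usage sketch for the new daemonset package, assuming the kubelet config type utils.KubeletConfig referenced by NFDTopologyUpdater above; it only shows how the two constructors compose with testpod spec options:

```go
package example

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"

	testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
	testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
	testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// buildDaemonSets constructs both NFD daemon sets from shared pod spec options;
// each gets a uuid-suffixed name from the unexported new() helper above.
func buildDaemonSets(repo, tag string, kcfg testutils.KubeletConfig) (worker, updater *appsv1.DaemonSet) {
	opts := []testpod.SpecOption{
		testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", repo, tag)),
	}
	return testds.NFDWorker(opts...), testds.NFDTopologyUpdater(kcfg, opts...)
}
```
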