Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test: add test timestamp collector #1324

Merged
merged 7 commits into from
Jun 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
112 changes: 112 additions & 0 deletions pkg/test/pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package test

import (
"fmt"
"math/rand"

"github.com/imdario/mergo"
"github.com/samber/lo"
Expand Down Expand Up @@ -321,3 +322,114 @@ func buildNodeAffinity(nodeRequirements []v1.NodeSelectorRequirement, nodePrefer
}
return nodeAffinity
}

// MakePodAntiAffinityPodOptions returns options for a pod that carries the
// app=nginx label and requires anti-affinity, on the given topology key, to
// every other pod carrying that label — i.e. all pods built from these
// options repel each other.
func MakePodAntiAffinityPodOptions(key string) PodOptions {
	selector := map[string]string{"app": "nginx"}
	antiAffinityTerm := v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{MatchLabels: selector},
		TopologyKey:   key,
	}
	return PodOptions{
		ObjectMeta:          metav1.ObjectMeta{Labels: lo.Assign(selector, map[string]string{DiscoveryLabel: "owned"})},
		PodAntiRequirements: []v1.PodAffinityTerm{antiAffinityTerm},
		ResourceRequirements: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    RandomCPU(),
				v1.ResourceMemory: RandomMemory(),
			},
		},
	}
}
// MakePodAffinityPodOptions returns options for a pod that requires
// affinity, on the given topology key, to pods carrying a randomly chosen
// "my-affinity" label value. The pod itself carries the same label, so pods
// built with the same random value attract each other.
func MakePodAffinityPodOptions(key string) PodOptions {
	selector := RandomAffinityLabels()
	affinityTerm := v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{MatchLabels: selector},
		TopologyKey:   key,
	}
	return PodOptions{
		ObjectMeta:      metav1.ObjectMeta{Labels: lo.Assign(selector, map[string]string{DiscoveryLabel: "owned"})},
		PodRequirements: []v1.PodAffinityTerm{affinityTerm},
		ResourceRequirements: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    RandomCPU(),
				v1.ResourceMemory: RandomMemory(),
			},
		},
	}
}

// MakeTopologySpreadPodOptions returns options for a pod with a DoNotSchedule
// topology spread constraint on the given topology key.
//
// The pod's own labels and the constraint's LabelSelector share a single
// RandomLabels() result. The original code called RandomLabels() twice, so
// the selector generally did not match the pod's labels, which made the
// spread constraint vacuously satisfied (it would count no pods and the skew
// would always be zero).
func MakeTopologySpreadPodOptions(key string) PodOptions {
	labels := RandomLabels()
	return PodOptions{
		ObjectMeta: metav1.ObjectMeta{Labels: lo.Assign(labels, map[string]string{DiscoveryLabel: "owned"})},
		TopologySpreadConstraints: []v1.TopologySpreadConstraint{
			{
				MaxSkew:           1,
				TopologyKey:       key,
				WhenUnsatisfiable: v1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					// Must match the pod's own labels for the constraint to
					// actually spread these pods.
					MatchLabels: labels,
				},
			},
		},
		ResourceRequirements: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    RandomCPU(),
				v1.ResourceMemory: RandomMemory(),
			},
		}}
}

// MakeGenericPodOptions returns options for a plain pod with random labels
// and random CPU/memory requests, and no scheduling constraints.
func MakeGenericPodOptions() PodOptions {
	// Evaluate the labels before the resource requests to keep the shared
	// random generator's consumption order unchanged.
	podLabels := lo.Assign(RandomLabels(), map[string]string{DiscoveryLabel: "owned"})
	return PodOptions{
		ObjectMeta: metav1.ObjectMeta{Labels: podLabels},
		ResourceRequirements: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    RandomCPU(),
				v1.ResourceMemory: RandomMemory(),
			},
		},
	}
}

// MakeDiversePodOptions returns a mixed workload: one generic pod, zone and
// hostname topology-spread pods, hostname and zone pod-affinity pods, and a
// hostname pod-anti-affinity pod.
func MakeDiversePodOptions() []PodOptions {
	// Elements of a slice literal are evaluated in order, so the shared
	// random generator is consumed exactly as in a sequence of appends.
	return []PodOptions{
		MakeGenericPodOptions(),
		MakeTopologySpreadPodOptions(v1.LabelTopologyZone),
		MakeTopologySpreadPodOptions(v1.LabelHostname),
		MakePodAffinityPodOptions(v1.LabelHostname),
		MakePodAffinityPodOptions(v1.LabelTopologyZone),
		MakePodAntiAffinityPodOptions(v1.LabelHostname),
	}
}

// RandomAffinityLabels returns a single-entry label set keyed "my-affinity"
// with a randomly chosen value.
func RandomAffinityLabels() map[string]string {
	labels := make(map[string]string, 1)
	labels["my-affinity"] = RandomLabelValue()
	return labels
}

// RandomLabels returns a single-entry label set keyed "my-label" with a
// randomly chosen value.
func RandomLabels() map[string]string {
	labels := make(map[string]string, 1)
	labels["my-label"] = RandomLabelValue()
	return labels
}

// r is a deterministically seeded generator so fixture randomness is
// reproducible across runs; gosec's weak-RNG warning is irrelevant in tests.
//nolint:gosec
var r = rand.New(rand.NewSource(42))

// RandomLabelValue returns one of the seven values "a" through "g", chosen
// via the shared deterministic generator.
func RandomLabelValue() string {
	const labelValues = "abcdefg"
	return string(labelValues[r.Intn(len(labelValues))])
}

// RandomMemory returns one of a fixed set of memory quantities, from 100Mi
// up to 4096Mi, chosen via the shared deterministic generator.
func RandomMemory() resource.Quantity {
	sizesMi := []int{100, 256, 512, 1024, 2048, 4096}
	pick := sizesMi[r.Intn(len(sizesMi))]
	return resource.MustParse(fmt.Sprintf("%dMi", pick))
}

// RandomCPU returns one of a fixed set of CPU quantities, from 100m up to
// 1500m, chosen via the shared deterministic generator.
func RandomCPU() resource.Quantity {
	sizesMilli := []int{100, 250, 500, 1000, 1500}
	pick := sizesMilli[r.Intn(len(sizesMilli))]
	return resource.MustParse(fmt.Sprintf("%dm", pick))
}
172 changes: 172 additions & 0 deletions test/pkg/debug/collector.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package debug

import (
"encoding/csv"
"fmt"
"os"
"strings"
"time"

"github.com/samber/lo"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)

const (
StageE2E = "E2E"
StageBeforeEach = "BeforeEach"
StageAfterEach = "AfterEach"
)

// TimeIntervalCollector records start/end timestamps for named test stages
// and accumulates the completed interval lists per suite test for later
// CSV output (see WriteTimestamps).
type TimeIntervalCollector struct {
	// starts maps a stage name to the time Start was called for it.
	starts map[string]time.Time
	// ends maps a stage name to the time End (or Finalize) was called for it.
	ends map[string]time.Time
	// used for ordering on Collect
	Stages []string
	// suiteTimeIntervals holds, per sanitized suite-test name, the ordered
	// intervals recorded by Record.
	suiteTimeIntervals map[string][]TimeInterval
}

// NewTimestampCollector returns an empty TimeIntervalCollector ready to
// record stage intervals.
func NewTimestampCollector() *TimeIntervalCollector {
	collector := TimeIntervalCollector{
		starts:             make(map[string]time.Time),
		ends:               make(map[string]time.Time),
		Stages:             make([]string, 0),
		suiteTimeIntervals: make(map[string][]TimeInterval),
	}
	return &collector
}

// Reset clears the per-test starts, ends, and stage ordering. The
// accumulated per-suite intervals are intentionally kept.
func (t *TimeIntervalCollector) Reset() {
	t.starts = make(map[string]time.Time)
	t.ends = make(map[string]time.Time)
	t.Stages = make([]string, 0)
}

// Record converts the currently collected starts/ends/stages into an
// ordered interval list, stores it under a title-cased, space-stripped
// version of name, and then resets the per-test state.
func (t *TimeIntervalCollector) Record(name string) {
	intervals := t.translate()
	// Sanitize the test name into a key, e.g. "my test" -> "MyTest".
	titled := cases.Title(language.AmericanEnglish).String(name)
	key := strings.ReplaceAll(titled, " ", "")
	t.suiteTimeIntervals[key] = intervals
	t.Reset()
}

// Start records the current time as the start of the given stage, appends
// the stage to the ordered Stages list, and returns the recorded timestamp.
// If there is no End associated with a Start, the interval's inferred End
// is at the start of the AfterEach.
func (t *TimeIntervalCollector) Start(stage string) time.Time {
	// Capture a single instant: the original called time.Now() twice, so
	// the returned time was slightly later than the stored start.
	now := time.Now()
	t.starts[stage] = now
	t.Stages = append(t.Stages, stage)
	return now
}

// Finalize stamps the current time as the End of every started stage that
// does not yet have one. This is useful for when the test fails, since
// deferring time recording is tough to do. The well-known lifecycle stages
// are skipped, as their ends are added automatically.
func (t *TimeIntervalCollector) Finalize() {
	for stage := range t.starts {
		switch stage {
		case StageE2E, StageBeforeEach, StageAfterEach:
			// Enum stages get their End entries elsewhere.
			continue
		}
		if _, recorded := t.ends[stage]; !recorded {
			t.ends[stage] = time.Now()
		}
	}
}

// End will mark the interval's end time.
// If there is no End associated with a Start, the interval's inferred End
// is at the start of the AfterEach.
// Calling End again for the same stage overwrites the previous end time.
func (t *TimeIntervalCollector) End(stage string) {
	t.ends[stage] = time.Now()
}

// translate pairs each recorded stage, in Start order, with its start and
// end timestamps and returns the resulting intervals for later csv
// printing. A stage with no recorded end defaults to the current time.
func (t *TimeIntervalCollector) translate() []TimeInterval {
	intervals := make([]TimeInterval, 0, len(t.Stages))
	for _, stage := range t.Stages {
		interval := TimeInterval{
			Stage: stage,
			Start: t.starts[stage],
		}
		if end, recorded := t.ends[stage]; recorded {
			interval.End = end
		} else {
			interval.End = time.Now()
		}
		intervals = append(intervals, interval)
	}
	return intervals
}

type TimeInterval struct {
Start time.Time
End time.Time
Stage string
}

func (t TimeInterval) String() []string {
return []string{t.Stage, t.Start.UTC().Format(time.RFC3339), t.End.UTC().Format(time.RFC3339)}
}

// PrintTestTimes returns a list of tables keyed by suite-test name.
// Each table has a list of Timestamps, where each timestamp is a list of
// strings (one row per interval, as produced by TimeInterval.String).
func PrintTestTimes(times map[string][]TimeInterval) map[string][][]string {
	ret := make(map[string][][]string, len(times))
	for name, intervals := range times {
		rows := make([][]string, 0, len(intervals))
		for _, interval := range intervals {
			rows = append(rows, interval.String())
		}
		ret[name] = rows
	}
	return ret
}

// WriteTimestamps will create a temp directory and a .csv file for each
// suite test, containing that suite's stage intervals.
// NOTE(review): the path parameter is currently unused — the directory is
// always created under /tmp; confirm whether it was meant to be the base dir.
func WriteTimestamps(path string, timestamps *TimeIntervalCollector) error {
	directory, err := os.MkdirTemp("/tmp", "")
	if err != nil {
		return err
	}
	for name, table := range PrintTestTimes(timestamps.suiteTimeIntervals) {
		// Delegate to a helper so each file is closed at the end of its
		// iteration; the original deferred Close inside the loop, keeping
		// every file open until the function returned.
		if err := writeTimestampFile(directory, name, table); err != nil {
			return err
		}
	}
	return nil
}

// writeTimestampFile writes one suite test's interval table as a CSV file
// in dir and prints the resulting file name.
func writeTimestampFile(dir, name string, table [][]string) error {
	file, err := os.CreateTemp(dir, fmt.Sprintf("*-%s.csv", name))
	if err != nil {
		return err
	}
	defer file.Close()

	w := csv.NewWriter(file)

	// Write the header
	header := []string{"Stage", "Start", "End"}
	if err := w.Write(header); err != nil {
		return fmt.Errorf("failed to write header: %w", err)
	}
	if err := w.WriteAll(table); err != nil { // calls Flush internally
		return fmt.Errorf("failed to flush writer: %w", err)
	}

	fmt.Println("-------- SUCCESS ---------")
	fmt.Printf("Printed CSV TO %s\n", file.Name())
	return nil
}
30 changes: 20 additions & 10 deletions test/pkg/environment/common/environment.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import (
"testing"
"time"

"github.com/awslabs/operatorpkg/status"
"github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
Expand All @@ -39,6 +40,7 @@ import (
"sigs.k8s.io/karpenter/kwok/apis/v1alpha1"
"sigs.k8s.io/karpenter/pkg/test"
. "sigs.k8s.io/karpenter/pkg/utils/testing" //nolint:stylecheck
"sigs.k8s.io/karpenter/test/pkg/debug"

"knative.dev/pkg/system"
controllerruntime "sigs.k8s.io/controller-runtime"
Expand All @@ -60,10 +62,11 @@ type Environment struct {
context.Context
cancel context.CancelFunc

Client client.Client
Config *rest.Config
KubeClient kubernetes.Interface
Monitor *Monitor
TimeIntervalCollector *debug.TimeIntervalCollector
Client client.Client
Config *rest.Config
KubeClient kubernetes.Interface
Monitor *Monitor

StartingNodeCount int
}
Expand All @@ -82,12 +85,13 @@ func NewEnvironment(t *testing.T) *Environment {
gomega.SetDefaultEventuallyTimeout(5 * time.Minute)
gomega.SetDefaultEventuallyPollingInterval(1 * time.Second)
return &Environment{
Context: ctx,
cancel: cancel,
Config: config,
Client: client,
KubeClient: kubernetes.NewForConfigOrDie(config),
Monitor: NewMonitor(ctx, client),
Context: ctx,
cancel: cancel,
Config: config,
Client: client,
KubeClient: kubernetes.NewForConfigOrDie(config),
Monitor: NewMonitor(ctx, client),
TimeIntervalCollector: debug.NewTimestampCollector(),
}
}

Expand Down Expand Up @@ -129,6 +133,12 @@ func NewClient(ctx context.Context, config *rest.Config) client.Client {
})
return []string{t.Value}
}))
lo.Must0(cache.IndexField(ctx, &v1beta1.NodeClaim{}, "status.conditions[*].type", func(o client.Object) []string {
njtran marked this conversation as resolved.
Show resolved Hide resolved
nodeClaim := o.(*v1beta1.NodeClaim)
return lo.Map(nodeClaim.Status.Conditions, func(c status.Condition, _ int) string {
return c.Type
})
}))

c := lo.Must(client.New(config, client.Options{Scheme: scheme, Cache: &client.CacheOptions{Reader: cache}}))

Expand Down
Loading
Loading