Skip to content

Commit

Permalink
Start namespace names from 0 (kube-burner#364)
Browse files Browse the repository at this point in the history
* Namespace names should start from 0 in all cases

Signed-off-by: Raul Sevilla <[email protected]>

* Drop preLoadPeriod

Signed-off-by: Raul Sevilla <[email protected]>

---------

Signed-off-by: Raul Sevilla <[email protected]>
  • Loading branch information
rsevilla87 authored Jun 30, 2023
1 parent bc55c6a commit fddf590
Show file tree
Hide file tree
Showing 11 changed files with 20 additions and 20 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ jobs:
podWait: false
waitWhenFinished: true
preLoadImages: true
preLoadPeriod: 30s
preLoadPeriod: 10s
churn: {{.CHURN}}
churnDuration: {{.CHURN_DURATION}}
churnPercent: {{.CHURN_PERCENT}}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ jobs:
podWait: false
waitWhenFinished: true
preLoadImages: true
preLoadPeriod: 30s
preLoadPeriod: 15s
churn: {{.CHURN}}
churnDuration: {{.CHURN_DURATION}}
churnPercent: {{.CHURN_PERCENT}}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ jobs:
podWait: false
waitWhenFinished: true
preLoadImages: true
preLoadPeriod: 30s
preLoadPeriod: 15s
churn: {{.CHURN}}
churnDuration: {{.CHURN_DURATION}}
churnPercent: {{.CHURN_PERCENT}}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ jobs:
podWait: false
waitWhenFinished: true
preLoadImages: true
preLoadPeriod: 30s
preLoadPeriod: 15s
namespaceLabels:
security.openshift.io/scc.podSecurityLabelSync: false
pod-security.kubernetes.io/enforce: privileged
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ jobs:
podWait: false
waitWhenFinished: true
preLoadImages: true
preLoadPeriod: 30s
preLoadPeriod: 15s
namespaceLabels:
security.openshift.io/scc.podSecurityLabelSync: false
pod-security.kubernetes.io/enforce: privileged
Expand Down
16 changes: 8 additions & 8 deletions pkg/burner/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,25 +105,25 @@ func (ex *Executor) RunCreateJob(iterationStart, iterationEnd int) {
}
if !ex.NamespacedIterations {
ns = ex.Namespace
if err = createNamespace(ClientSet, ns, nsLabels); err != nil {
if err = createNamespace(ns, nsLabels); err != nil {
log.Fatal(err.Error())
}
}
// Iterations now start from 0, so the span is simply iterationEnd - iterationStart
iterationProgress := (iterationEnd - iterationStart + 1) / 10
iterationProgress := (iterationEnd - iterationStart) / 10
percent := 1
var namespacesCreated = make(map[string]bool)
var namespacesWaited = make(map[string]bool)
for i := iterationStart; i <= iterationEnd; i++ {
for i := iterationStart; i < iterationEnd; i++ {
if i == iterationStart+iterationProgress*percent {
log.Infof("%v/%v iterations completed", i-iterationStart, iterationEnd-iterationStart+1)
log.Infof("%v/%v iterations completed", i-iterationStart, iterationEnd-iterationStart)
percent++
}
log.Debugf("Creating object replicas from iteration %d", i)
if ex.NamespacedIterations {
ns = ex.generateNamespace(i)
if !namespacesCreated[ns] {
if err = createNamespace(ClientSet, ns, nsLabels); err != nil {
if err = createNamespace(ns, nsLabels); err != nil {
log.Error(err.Error())
continue
}
Expand Down Expand Up @@ -152,7 +152,7 @@ func (ex *Executor) RunCreateJob(iterationStart, iterationEnd int) {
log.Infof("Waiting up to %s for actions to be completed", ex.MaxWaitTimeout)
// This semaphore is used to limit the maximum number of concurrent goroutines
sem := make(chan int, int(ClientSet.RESTClient().GetRateLimiter().QPS())*2)
for i := iterationStart; i <= iterationEnd; i++ {
for i := iterationStart; i < iterationEnd; i++ {
if ex.NamespacedIterations {
ns = ex.generateNamespace(i)
if namespacesWaited[ns] {
Expand Down Expand Up @@ -289,7 +289,7 @@ func (ex *Executor) RunCreateJobWithChurn() {
// Max amount of churn is 100% of namespaces
randStart := 1
if ex.JobIterations-numToChurn+1 > 0 {
randStart = rand.Intn(ex.JobIterations-numToChurn+1) + 1
randStart = rand.Intn(ex.JobIterations - numToChurn + 1)
} else {
numToChurn = ex.JobIterations
}
Expand All @@ -314,7 +314,7 @@ func (ex *Executor) RunCreateJobWithChurn() {
CleanupNamespaces(ctx, metav1.ListOptions{LabelSelector: "churndelete=delete"}, true)
log.Info("Re-creating deleted objects")
// Re-create objects that were deleted
ex.RunCreateJob(randStart, numToChurn+randStart-1)
ex.RunCreateJob(randStart, numToChurn+randStart)
log.Infof("Sleeping for %v", ex.ChurnDelay)
time.Sleep(ex.ChurnDelay)
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/burner/job.go
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ func Run(configSpec config.Spec, prometheusClients []*prometheus.Prometheus, ale
log.Infof("Churn percent: %v", job.ChurnPercent)
log.Infof("Churn delay: %v", job.ChurnDelay)
}
job.RunCreateJob(1, job.JobIterations)
job.RunCreateJob(0, job.JobIterations)
// If object verification is enabled
if job.VerifyObjects && !job.Verify() {
errMsg := "Object verification failed"
Expand Down
7 changes: 3 additions & 4 deletions pkg/burner/namespaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,23 +23,22 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
)

func createNamespace(clientset *kubernetes.Clientset, namespaceName string, nsLabels map[string]string) error {
func createNamespace(namespaceName string, nsLabels map[string]string) error {
ns := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: namespaceName, Labels: nsLabels},
}

return RetryWithExponentialBackOff(func() (done bool, err error) {
_, err = clientset.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{})
_, err = ClientSet.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{})
if errors.IsForbidden(err) {
log.Fatalf("authorization error creating namespace %s: %s", ns.Name, err)
return false, err
}
if errors.IsAlreadyExists(err) {
log.Infof("Namespace %s already exists", ns.Name)
nsSpec, _ := clientset.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
nsSpec, _ := ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
if nsSpec.Status.Phase == corev1.NamespaceTerminating {
log.Warnf("Namespace %s is in %v state, retrying", namespaceName, corev1.NamespaceTerminating)
return false, nil
Expand Down
2 changes: 1 addition & 1 deletion pkg/burner/patch.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ func (ex *Executor) RunPatchJob() {
continue
}
log.Infof("Found %d %s with selector %s; patching them", len(itemList.Items), obj.gvr.Resource, labelSelector)
for i := 1; i <= ex.JobIterations; i++ {
for i := 1; i < ex.JobIterations; i++ {
for _, item := range itemList.Items {
wg.Add(1)
go ex.patchHandler(obj, item, i, &wg)
Expand Down
2 changes: 1 addition & 1 deletion pkg/burner/pre_load.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ func createDSs(imageList []string, namespaceLabels map[string]string) error {
for label, value := range namespaceLabels {
nsLabels[label] = value
}
if err := createNamespace(ClientSet, preLoadNs, nsLabels); err != nil {
if err := createNamespace(preLoadNs, nsLabels); err != nil {
log.Fatal(err)
}
for i, image := range imageList {
Expand Down
1 change: 1 addition & 0 deletions test/test-ocp.bats
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ teardown_file() {

@test "cluster-density" {
run kube-burner ocp cluster-density --iterations=2 --churn=false --uuid=${UUID}
[ "$status" -eq 0 ]
}

@test "cluster-density-ms for multiple endpoints case with indexing" {
Expand Down

0 comments on commit fddf590

Please sign in to comment.