diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 708145a53..8eedbd15a 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -55,9 +55,9 @@ jobs:
 
     - name: Execute kube-burner test
      working-directory: test
-      run: ../bin/kube-burner init -c kube-burner.yml --uuid my-uuid
+      run: ../bin/kube-burner init -c kube-burner.yml --uuid my-uuid --log-level=debug
 
-  lint:
+  lint:
     name: Run golangci-lint
     runs-on: ubuntu-latest
     steps:
diff --git a/pkg/burner/burner.go b/pkg/burner/burner.go
index 61062281f..04f90e538 100644
--- a/pkg/burner/burner.go
+++ b/pkg/burner/burner.go
@@ -268,37 +268,37 @@ func (ex *Executor) RunCreateJob() {
 	ex.Start = time.Now().UTC()
 	var podWG sync.WaitGroup
 	var wg sync.WaitGroup
+	var ns string
 	var err error
 	ReadConfig(ex.Config.QPS, ex.Config.Burst)
 	dynamicClient, err = dynamic.NewForConfig(RestConfig)
 	if err != nil {
 		log.Fatal(err)
 	}
-	ns := fmt.Sprintf("%s-1", ex.Config.Namespace)
-	createNamespaces(ClientSet, ex.Config, ex.uuid)
+	if !ex.Config.NamespacedIterations {
+		ns = ex.Config.Namespace
+		createNamespace(ClientSet, ns, ex.Config, ex.uuid)
+	}
 	for i := 1; i <= ex.Config.JobIterations; i++ {
 		if ex.Config.NamespacedIterations {
 			ns = fmt.Sprintf("%s-%d", ex.Config.Namespace, i)
+			createNamespace(ClientSet, fmt.Sprintf("%s-%d", ex.Config.Namespace, i), ex.Config, ex.uuid)
 		}
 		for objectIndex, obj := range ex.objects {
 			wg.Add(1)
 			go ex.replicaHandler(objectIndex, obj, ns, i, &wg)
 		}
+		// Wait for all replicaHandlers to finish before moving forward to the next iteration
+		wg.Wait()
 		if ex.Config.PodWait {
-			// If podWait is enabled, first wait for all replicaHandlers to finish
-			wg.Wait()
 			waitForObjects(ex.objects, ns, &podWG, ex.Config.MaxWaitTimeout)
 		}
 		if ex.Config.JobIterationDelay > 0 {
-			wg.Wait()
 			log.Infof("Sleeping for %d ms", ex.Config.JobIterationDelay)
 			time.Sleep(time.Millisecond * time.Duration(ex.Config.JobIterationDelay))
 		}
 	}
-	// Wait for all replicaHandlers to finish
-	wg.Wait()
 	if ex.Config.WaitWhenFinished && !ex.Config.PodWait {
-		ns = fmt.Sprintf("%s-1", ex.Config.Namespace)
 		for i := 1; i <= ex.Config.JobIterations; i++ {
 			if ex.Config.NamespacedIterations {
 				ns = fmt.Sprintf("%s-%d", ex.Config.Namespace, i)
@@ -394,13 +394,12 @@ func yamlToUnstructured(y []byte, uns *unstructured.Unstructured) (runtime.Objec
 }
 
 func (ex *Executor) replicaHandler(objectIndex int, obj object, ns string, iteration int, wg *sync.WaitGroup) {
+	defer wg.Done()
 	labels := map[string]string{
 		"kube-burner-uuid":  ex.uuid,
 		"kube-burner-job":   ex.Config.Name,
 		"kube-burner-index": strconv.Itoa(objectIndex),
 	}
-
-	defer wg.Done()
 	tData := map[string]interface{}{
 		jobName:      ex.Config.Name,
 		jobIteration: iteration,
diff --git a/pkg/burner/namespaces.go b/pkg/burner/namespaces.go
index cd0bd5f2a..74cd83484 100644
--- a/pkg/burner/namespaces.go
+++ b/pkg/burner/namespaces.go
@@ -16,7 +16,6 @@ package burner
 
 import (
 	"context"
-	"fmt"
 	"time"
 
 	"github.com/cloud-bulldozer/kube-burner/log"
@@ -29,24 +28,18 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func createNamespaces(clientset *kubernetes.Clientset, config config.Job, uuid string) {
+func createNamespace(clientset *kubernetes.Clientset, namespaceName string, config config.Job, uuid string) {
 	labels := map[string]string{
 		"kube-burner-job":  config.Name,
 		"kube-burner-uuid": uuid,
 	}
-	for i := 1; i <= config.JobIterations; i++ {
-		ns := v1.Namespace{
-			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-%d", config.Namespace, i), Labels: labels},
-		}
-		log.Infof("Creating namespace %s", ns.Name)
-		_, err := clientset.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{})
-		if errors.IsAlreadyExists(err) {
-			log.Warnf("Namespace %s already exists", ns.Name)
-		}
-		// If !ex.Config.NamespacedIterations we create only one namespace
-		if !config.NamespacedIterations {
-			break
-		}
+	ns := v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{Name: namespaceName, Labels: labels},
+	}
+	log.Infof("Creating namespace %s", ns.Name)
+	_, err := clientset.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{})
+	if errors.IsAlreadyExists(err) {
+		log.Warnf("Namespace %s already exists", ns.Name)
 	}
 }
 
@@ -75,7 +68,7 @@ func CleanupNamespaces(clientset *kubernetes.Clientset, s *util.Selector) error
 
 func waitForDeleteNamespaces(clientset *kubernetes.Clientset, s *util.Selector) {
 	log.Info("Waiting for namespaces to be definitely deleted")
-	wait.PollImmediateInfinite(1*time.Second, func() (bool, error) {
+	wait.PollImmediateInfinite(time.Second, func() (bool, error) {
 		ns, err := clientset.CoreV1().Namespaces().List(context.TODO(), s.ListOptions)
 		if err != nil {
 			return false, err
diff --git a/test/kube-burner.yml b/test/kube-burner.yml
index b9fc607e1..133dd3947 100644
--- a/test/kube-burner.yml
+++ b/test/kube-burner.yml
@@ -11,14 +11,14 @@ global:
     enabled: false
 
 jobs:
-  - name: create-job
+  - name: namespaced
     jobType: create
     jobIterations: 2
     qps: 5
     burst: 15
     namespacedIterations: true
     cleanup: true
-    namespace: my-ns
+    namespace: namespaced
     podWait: false
     waitWhenFinished: true
     verifyObjects: true
@@ -30,14 +30,14 @@ jobs:
         inputVars:
           containerImage: gcr.io/google_containers/pause-amd64:3.0
 
-  - name: create-job-2
+  - name: not-namespaced
     jobType: create
     jobIterations: 2
     qps: 5
     burst: 15
-    namespacedIterations: true
+    namespacedIterations: false
     cleanup: true
-    namespace: my-ns
+    namespace: not-namespaced
     podWait: true
     waitWhenFinished: false
     verifyObjects: true
@@ -60,5 +60,5 @@ jobs:
 
     objects:
     - kind: Deployment
-      labelSelector: {kube-burner-job: create-job-2}
+      labelSelector: {kube-burner-job: not-namespaced}
       apiVersion: apps/v1
diff --git a/test/objectTemplates/deployment.yml b/test/objectTemplates/deployment.yml
index caf6d8403..d8c9f8a78 100644
--- a/test/objectTemplates/deployment.yml
+++ b/test/objectTemplates/deployment.yml
@@ -19,3 +19,10 @@ spec:
         - infinity
         image: {{.containerImage}}
         name: ci-app
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /etc/hosts
+          initialDelaySeconds: 3
+          periodSeconds: 5
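For reference, the namespace-handling behaviour this patch gives RunCreateJob can be summarized with a small, self-contained Go sketch. The jobConfig struct and plannedNamespaces helper below are illustrative stand-ins only (they are not part of kube-burner): with namespacedIterations: false the job now runs entirely in the namespace named by its namespace field, while namespacedIterations: true still creates one "<namespace>-<iteration>" namespace per job iteration.

package main

import "fmt"

// jobConfig is a hypothetical, trimmed-down stand-in for kube-burner's
// config.Job, carrying only the fields relevant to namespace handling.
type jobConfig struct {
	Namespace            string
	JobIterations        int
	NamespacedIterations bool
}

// plannedNamespaces mirrors the patched logic: a single namespace named after
// the job's namespace field when namespacedIterations is false, or one
// "<namespace>-<iteration>" namespace per job iteration when it is true.
func plannedNamespaces(cfg jobConfig) []string {
	if !cfg.NamespacedIterations {
		return []string{cfg.Namespace}
	}
	namespaces := make([]string, 0, cfg.JobIterations)
	for i := 1; i <= cfg.JobIterations; i++ {
		namespaces = append(namespaces, fmt.Sprintf("%s-%d", cfg.Namespace, i))
	}
	return namespaces
}

func main() {
	// Matches the two jobs defined in test/kube-burner.yml above.
	fmt.Println(plannedNamespaces(jobConfig{Namespace: "namespaced", JobIterations: 2, NamespacedIterations: true}))
	// Prints: [namespaced-1 namespaced-2]
	fmt.Println(plannedNamespaces(jobConfig{Namespace: "not-namespaced", JobIterations: 2, NamespacedIterations: false}))
	// Prints: [not-namespaced]
}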