Skip to content

Commit

Permalink
Merge pull request #26618 from chaodaiG/dynamic-job-integration-test
Browse files Browse the repository at this point in the history
Integration test: job configured at runtime for horologium test
  • Loading branch information
k8s-ci-robot authored Jun 18, 2022
2 parents f3c2446 + 01c9c3e commit 82a6661
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 18 deletions.
10 changes: 0 additions & 10 deletions prow/test/integration/config/prow/jobs/periodics.yaml

This file was deleted.

44 changes: 37 additions & 7 deletions prow/test/integration/test/horologium_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,47 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// horologiumJobConfigFile is the key under which this test's periodic job
// definition is stored in the shared "job-config" ConfigMap (see
// updateJobConfig in setup.go).
const horologiumJobConfigFile = "horologium-test.yaml"

// horologiumJobConfig is a minimal periodic job config injected at runtime
// so horologium schedules "horologium-schedule-test-job" every minute.
// NOTE(review): leading indentation of this YAML literal looks mangled in
// this view — verify against the committed file before relying on it.
var horologiumJobConfig = `periodics:
- interval: 1m
name: horologium-schedule-test-job
spec:
containers:
- command:
- echo
args:
- "Hello World!"
image: localhost:5001/alpine
`

func TestLaunchProwJob(t *testing.T) {
const existJobName = "horologium-schedule-test-job"
t.Parallel()

clusterContext := getClusterContext()
t.Logf("Creating client for cluster: %s", clusterContext)
kubeClient, err := NewClients("", clusterContext)
if err != nil {
t.Fatalf("Failed creating clients for cluster %q: %v", clusterContext, err)
}

if err := updateJobConfig(context.Background(), kubeClient, horologiumJobConfigFile, []byte(horologiumJobConfig)); err != nil {
t.Fatalf("Failed update job config: %v", err)
}

t.Cleanup(func() {
if err := updateJobConfig(context.Background(), kubeClient, horologiumJobConfigFile, []byte{}); err != nil {
t.Logf("ERROR CLEANUP: %v", err)
}
labels, _ := labels.Parse("prow.k8s.io/job = horologium-schedule-test-job")
if err := kubeClient.DeleteAllOf(context.Background(), &prowjobv1.ProwJob{}, &ctrlruntimeclient.DeleteAllOfOptions{
ListOptions: ctrlruntimeclient.ListOptions{LabelSelector: labels},
}); err != nil {
t.Logf("ERROR CLEANUP: %v", err)
}
})

tests := []struct {
name string
}{
Expand All @@ -49,13 +86,6 @@ func TestLaunchProwJob(t *testing.T) {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()

clusterContext := getClusterContext()
t.Logf("Creating client for cluster: %s", clusterContext)
kubeClient, err := NewClients("", clusterContext)
if err != nil {
t.Fatalf("Failed creating clients for cluster %q: %v", clusterContext, err)
}
ctx := context.Background()

// getNextRunOrFail is a helper function getting the latest run
Expand Down
27 changes: 26 additions & 1 deletion prow/test/integration/test/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,11 @@ import (
"flag"
"fmt"
"io"
"sync"
"testing"

coreapi "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"k8s.io/client-go/rest"
Expand All @@ -39,7 +41,11 @@ const (
testpodNamespace = "test-pods"
)

var clusterContext = flag.String("cluster", "kind-kind-prow-integration", "The context of cluster to use for test")
var (
clusterContext = flag.String("cluster", "kind-kind-prow-integration", "The context of cluster to use for test")

jobConfigMux sync.Mutex
)

func getClusterContext() string {
return *clusterContext
Expand Down Expand Up @@ -103,3 +109,22 @@ func RandomString(t *testing.T) string {
}
return fmt.Sprintf("%x", sha256.Sum256(b[:]))[:32]
}

// updateJobConfig stores rawConfig under filename in the "job-config"
// ConfigMap in the default namespace, creating the BinaryData map if the
// ConfigMap has none yet. Passing an empty rawConfig effectively blanks
// out a previously injected job file (used by test cleanup).
//
// A package-level mutex serializes the read-modify-write so parallel
// tests updating different files do not clobber each other's changes.
//
// Errors from the API server are wrapped with the failing operation so
// callers can log actionable messages; the underlying error remains
// inspectable via errors.Is/errors.As.
func updateJobConfig(ctx context.Context, kubeClient ctrlruntimeclient.Client, filename string, rawConfig []byte) error {
	jobConfigMux.Lock()
	defer jobConfigMux.Unlock()

	var existingMap v1.ConfigMap
	if err := kubeClient.Get(ctx, ctrlruntimeclient.ObjectKey{
		Namespace: defaultNamespace,
		Name:      "job-config",
	}, &existingMap); err != nil {
		return fmt.Errorf("getting job-config configmap: %w", err)
	}

	if existingMap.BinaryData == nil {
		existingMap.BinaryData = make(map[string][]byte)
	}
	existingMap.BinaryData[filename] = rawConfig
	if err := kubeClient.Update(ctx, &existingMap); err != nil {
		return fmt.Errorf("updating job-config configmap: %w", err)
	}
	return nil
}

0 comments on commit 82a6661

Please sign in to comment.