Custom Task E2E Test with a Controller Installed
This cl introduces a `wait-task` controller into the test/ folder to
better support Custom Task e2e testing.

Previously we updated the Run object from the test itself to mimic the
behavior of a custom task controller (sketched below). This cl introduces
a real controller to reconcile the custom task Run.
XinruZhang committed Aug 18, 2022
1 parent 8413f85 commit f57a7ce
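
For context, the old approach looked roughly like the sketch below: the e2e test itself marked the Run as done, playing the controller's role. This is a minimal, hypothetical sketch rather than the previous test's actual code; the `clients` struct, `MarkRunSucceeded`, and `UpdateStatus` usages are assumptions about the test suite and the Run API.

// markRunSucceeded is a hypothetical sketch of the old "mimic" approach:
// the test updates the Run's status itself instead of relying on a controller.
func markRunSucceeded(ctx context.Context, t *testing.T, c *clients, runName string) {
	t.Helper()
	run, err := c.RunClient.Get(ctx, runName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get Run %q: %v", runName, err)
	}
	// Pretend the custom task has finished successfully.
	run.Status.MarkRunSucceeded("DurationElapsed", "The wait duration has elapsed")
	if _, err := c.RunClient.UpdateStatus(ctx, run, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("Failed to update status of Run %q: %v", runName, err)
	}
}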
Showing 8 changed files with 2,648 additions and 1 deletion.
72 changes: 71 additions & 1 deletion test/custom_task_test.go
@@ -22,6 +22,7 @@ package test
import (
	"context"
	"fmt"
	"os/exec"
	"strings"
	"sync"
	"testing"
@@ -46,7 +47,7 @@ import (

const (
	apiVersion = "example.dev/v0"
-	kind = "Example"
+	kind = "Wait"
)

var supportedFeatureGates = map[string]string{
@@ -401,3 +402,72 @@ spec:
t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err)
}
}

func applyController(t *testing.T) {
	t.Log("Creating Wait Custom Task Controller...")
	cmd := exec.Command("ko", "apply", "-f", "./config/controller.yaml")
	cmd.Dir = "./wait-task"
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("Failed to create Wait Custom Task Controller: %s, Output: %s", err, out)
	}
}

func cleanUpController(t *testing.T) {
	t.Log("Tearing down Wait Custom Task Controller...")
	cmd := exec.Command("ko", "delete", "-f", "./config/controller.yaml")
	cmd.Dir = "./wait-task"
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("Failed to tear down Wait Custom Task Controller: %s, Output: %s", err, out)
	}
}

func TestCustomTask_NoCRD(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	c, namespace := setup(ctx, t, requireAnyGate(supportedFeatureGates))
	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
	defer tearDown(ctx, t, c, namespace)

	// Create a custom task controller.
	applyController(t)
	defer cleanUpController(t)

	// metadataLabel := map[string]string{"test-label": "test"}
	runName := helpers.ObjectNameForTest(t)
	run := parse.MustParseRun(t, fmt.Sprintf(`
metadata:
  name: %s
spec:
  ref:
    apiVersion: %s
    kind: %s
  params:
  - name: duration
    value: 1s
`, runName, apiVersion, kind))
	if _, err := c.RunClient.Create(ctx, run, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create Run %q: %v", runName, err)
	}

	// Wait for the Run to finish.
	if err := WaitForRunState(ctx, c, runName, time.Minute, Succeed(runName), "RunFinished"); err != nil {
		t.Fatalf("Waiting for Run to finish running: %v", err)
	}

	// TODO: compose the expected Run and compare it against the Run fetched below.

	// Get the actual Run.
	gotRun, err := c.RunClient.Get(ctx, runName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get Run %q: %v", runName, err)
	}
	if d := cmp.Diff(run, gotRun); d != "" {
		t.Errorf("diff: %v", d)
	}

	t.Logf("%v\n%v", run, gotRun)
}
10 changes: 10 additions & 0 deletions test/wait-task/README.md
@@ -0,0 +1,10 @@
# Wait Custom Task for Tekton

This folder is a copy of [experimental/wait-task](https://github.com/tektoncd/experimental/tree/main/wait-task),
included here for testing purposes, with the resources used to build and
release the wait custom task removed.

It provides a [Tekton Custom
Task](https://tekton.dev/docs/pipelines/runs/) that, when run, simply waits
for the amount of time specified by an input parameter named `duration`
before succeeding.
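
The reconciler package referenced by the controller lives under test/wait-task/pkg/reconciler and is not shown in this view. The sketch below illustrates the idea only; the helpers used here (`IsDone`, `GetParam`, `MarkRunFailed`, `MarkRunSucceeded`) and the blocking wait are assumptions for illustration, not a copy of the actual file.

package reconciler

import (
	"context"
	"time"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
	"knative.dev/pkg/reconciler"
)

// Reconciler waits for the requested duration, then marks the Run done.
type Reconciler struct{}

// ReconcileKind reads the "duration" param, waits that long, and then marks
// the Run succeeded; a missing or invalid param marks the Run failed.
func (r *Reconciler) ReconcileKind(ctx context.Context, run *v1alpha1.Run) reconciler.Event {
	if run.IsDone() {
		return nil
	}
	p := run.Spec.GetParam("duration")
	if p == nil || p.Value.StringVal == "" {
		run.Status.MarkRunFailed("MissingDuration", "The duration param was not supplied")
		return nil
	}
	d, err := time.ParseDuration(p.Value.StringVal)
	if err != nil {
		run.Status.MarkRunFailed("InvalidDuration", "Invalid duration %q: %v", p.Value.StringVal, err)
		return nil
	}
	// A naive blocking wait keeps the sketch short; a production reconciler
	// would requeue instead of sleeping.
	time.Sleep(d)
	run.Status.MarkRunSucceeded("DurationElapsed", "The wait duration has elapsed")
	return nil
}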
52 changes: 52 additions & 0 deletions test/wait-task/cmd/controller/main.go
@@ -0,0 +1,52 @@
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main // import "github.com/tektoncd/experimental/wait-task/cmd/controller"

import (
	"context"

	"github.com/tektoncd/experimental/wait-task/pkg/reconciler"
	runinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/run"
	runreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1alpha1/run"
	tkncontroller "github.com/tektoncd/pipeline/pkg/controller"
	"k8s.io/client-go/tools/cache"
	"knative.dev/pkg/configmap"
	"knative.dev/pkg/controller"
	"knative.dev/pkg/injection/sharedmain"
)

const controllerName = "wait-task-controller"

func main() {
	sharedmain.Main(controllerName, newController)
}

func newController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
	c := &reconciler.Reconciler{}
	impl := runreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
		return controller.Options{
			AgentName: controllerName,
		}
	})

	runinformer.Get(ctx).Informer().AddEventHandler(cache.FilteringResourceEventHandler{
		FilterFunc: tkncontroller.FilterRunRef("example.dev/v0", "Wait"),
		Handler:    controller.HandleAll(impl.Enqueue),
	})

	return impl
}
239 changes: 239 additions & 0 deletions test/wait-task/config/controller.yaml
@@ -0,0 +1,239 @@
apiVersion: v1
kind: Namespace
metadata:
  name: wait-task
  labels:
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: wait-task-controller
  namespace: wait-task
  labels:
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: wait-task-controller-cluster-access
  labels:
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task
rules:
  # Controller needs cluster access to all Run CRs.
  - apiGroups: ["tekton.dev"]
    resources: ["runs"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  - apiGroups: ["tekton.dev"]
    resources: ["runs/finalizers"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  - apiGroups: ["tekton.dev"]
    resources: ["runs/status"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]

  # Controller needs permission to configure master-election.
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]

  # Controller needs permission to emit events associated with Run CRs.
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: wait-task-controller
  namespace: wait-task
  labels:
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["list", "watch"]
  # The controller needs access to these configmaps for logging information and runtime configuration.
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
    resourceNames: ["config-logging", "config-observability", "config-leader-election"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: wait-task-controller
  namespace: wait-task
  labels:
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task
subjects:
  - kind: ServiceAccount
    name: wait-task-controller
    namespace: wait-task
roleRef:
  kind: Role
  name: wait-task-controller
  apiGroup: rbac.authorization.k8s.io

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: wait-task-controller-cluster-access
  labels:
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task
subjects:
  - kind: ServiceAccount
    name: wait-task-controller
    namespace: wait-task
roleRef:
  kind: ClusterRole
  name: wait-task-controller-cluster-access
  apiGroup: rbac.authorization.k8s.io

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: config-logging
  namespace: wait-task
  labels:
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task
data:
  # Common configuration for all knative codebase
  zap-logger-config: |
    {
      "level": "info",
      "development": false,
      "sampling": {
        "initial": 100,
        "thereafter": 100
      },
      "outputPaths": ["stdout"],
      "errorOutputPaths": ["stderr"],
      "encoding": "json",
      "encoderConfig": {
        "timeKey": "",
        "levelKey": "level",
        "nameKey": "logger",
        "callerKey": "caller",
        "messageKey": "msg",
        "stacktraceKey": "stacktrace",
        "lineEnding": "",
        "levelEncoder": "",
        "timeEncoder": "",
        "durationEncoder": "",
        "callerEncoder": ""
      }
    }
  # Log level overrides
  loglevel.controller: "info"
  loglevel.webhook: "info"

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: wait-task-controller
  namespace: wait-task
  labels:
    app.kubernetes.io/name: wait-task-controller
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/version: devel
    app.kubernetes.io/part-of: wait-task
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: wait-task-controller
      app.kubernetes.io/component: wait-task-controller
      app.kubernetes.io/instance: default
      app.kubernetes.io/part-of: wait-task
  template:
    metadata:
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
      labels:
        app.kubernetes.io/name: wait-task-controller
        app.kubernetes.io/component: wait-task-controller
        app.kubernetes.io/instance: default
        app.kubernetes.io/version: devel
        app.kubernetes.io/part-of: wait-task
        app: wait-task-controller
    spec:
      serviceAccountName: wait-task-controller
      containers:
      - name: wait-task-controller
        image: ko://github.com/tektoncd/experimental/wait-task/cmd/controller
        volumeMounts:
        - name: config-logging
          mountPath: /etc/config-logging
        env:
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        # If you are changing these names, you will also need to update
        # the controller's Role in 200-role.yaml to include the new
        # values in the "configmaps" "get" rule.
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: METRICS_DOMAIN
          value: experimental.tekton.dev/wait-task
      volumes:
      - name: config-logging
        configMap:
          name: config-logging

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: wait-task-controller
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/version: devel
    app.kubernetes.io/part-of: wait-task
    # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml
    pipeline.tekton.dev/release: "devel"
    # labels below are related to istio and should not be used for resource lookup
    app: wait-task-controller
    version: "devel"
  name: wait-task-controller
  namespace: wait-task
spec:
  ports:
    - name: http-metrics
      port: 9090
      protocol: TCP
      targetPort: 9090
  selector:
    app.kubernetes.io/name: wait-task-controller
    app.kubernetes.io/component: wait-task-controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: wait-task