Skip to content

Commit

Permalink
add log validation to integration tests
Browse files Browse the repository at this point in the history
What is the problem being solved?
This PR addresses #119, which explains that the integration tests should support viewing the logs for a Build containers invocation.

Why is this the best approach?
The solution to get the logs for Builds in this PR relies on using PersistentVolumes.  A volume is mounted into the test Build, the BuildSpec container writes to the mounted volume, and then a verification pod is run that also mounts this PersistentVolume, at which point the Build logs for the init container that the Build spawned are verifiable.

What other approaches did you consider?
- Use Stackdriver logs
Issues:
- Stackdriver can take >30 minutes to propagate logs in some cases, so it is not feasible for an integration test
- Stackdriver use would limit integration tests to only work on clusters with Stackdriver integration

What side effects will this approach have?
This test will require that all test Tasks that are verifiable only through stdout/logging have to write to disk, as that is how we can read out the values for verification.

What future work remains to be done
The approach of storing test information in a PersistentVolume is required currently, as Kubernetes pod logs are not queryable in some cases once a pod succeeds.  If in the future Kubernetes better supports these logs natively, we can change this to use the native logging again.  Also, this should be refactored into an easier-to-use library for future tests.
  • Loading branch information
aaron-prindle authored and knative-prow-robot committed Oct 11, 2018
1 parent f26a80f commit 319e1b4
Show file tree
Hide file tree
Showing 4 changed files with 153 additions and 11 deletions.
124 changes: 121 additions & 3 deletions test/crd.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,25 +19,94 @@ limitations under the License.
package test

import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
"testing"

buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
)

const (
// Name shared by the PersistentVolumeClaim, the Volume, and the VolumeMount
// used to pass Build output between the hello world Task and the
// validation Pod.
hwVolumeName = "scratch"
// Names of the test resources created by the helpers in this file.
hwTaskName = "helloworld"
hwTaskRunName = "helloworld-run"
hwValidationPodName = "helloworld-validation-busybox"
hwPipelineName = "helloworld-pipeline"
hwPipelineRunName = "helloworld-pipelinerun"
hwPipelineParamsName = "helloworld-pipelineparams"

// Location on the mounted volume where the Task's container writes its
// output and from which the validation Pod reads it back.
logPath = "/workspace"
logFile = "out.txt"

hwContainerName = "helloworld-busybox"
// taskOutput is the string the hello world Task writes; buildOutput is the
// string expected in the logs of a successful Build.
taskOutput = "do you want to build a snowman"
buildOutput = "Build successful"
)

func getHelloWorldTask(namespace string) *v1alpha1.Task {
// getHelloWorldValidationPod returns a busybox Pod that mounts the shared
// PersistentVolumeClaim and cats the log file the hello world Task wrote to
// the volume, so a test can verify the Build's output by reading this Pod's
// logs.
func getHelloWorldValidationPod(namespace string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      hwValidationPodName,
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				corev1.Container{
					Name:  hwValidationPodName,
					Image: "busybox",
					Args: []string{
						"cat", fmt.Sprintf("%s/%s", logPath, logFile),
					},
					VolumeMounts: []corev1.VolumeMount{
						corev1.VolumeMount{
							// Use the shared constant instead of a literal so
							// the mount always matches the volume below and
							// the claim created by getHelloWorldVolumeClaim.
							Name:      hwVolumeName,
							MountPath: logPath,
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				corev1.Volume{
					Name: hwVolumeName,
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: hwVolumeName,
						},
					},
				},
			},
		},
	}
}

// getHelloWorldVolumeClaim returns a 5Gi ReadWriteOnce PersistentVolumeClaim
// in the given namespace, named hwVolumeName. The hello world Task and the
// validation Pod both mount the volume bound to this claim.
func getHelloWorldVolumeClaim(namespace string) *corev1.PersistentVolumeClaim {
	// 5Gi of scratch space for the Build logs.
	storage := *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI)
	claim := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      hwVolumeName,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: map[corev1.ResourceName]resource.Quantity{
					corev1.ResourceStorage: storage,
				},
			},
		},
	}
	return &claim
}

func getHelloWorldTask(namespace string, args []string) *v1alpha1.Task {
return &v1alpha1.Task{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Expand All @@ -49,8 +118,22 @@ func getHelloWorldTask(namespace string) *v1alpha1.Task {
corev1.Container{
Name: hwContainerName,
Image: "busybox",
Args: []string{
"echo", taskOutput,
Args: args,
VolumeMounts: []corev1.VolumeMount{
corev1.VolumeMount{
Name: "scratch",
MountPath: logPath,
},
},
},
},
Volumes: []corev1.Volume{
corev1.Volume{
Name: "scratch",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "scratch",
},
},
},
},
Expand Down Expand Up @@ -137,3 +220,38 @@ func getHelloWorldPipelineRun(namespace string) *v1alpha1.PipelineRun {
},
}
}

// VerifyBuildOutput creates the helloworld validation Pod (which mounts the
// shared PersistentVolumeClaim and cats the Build's log file), waits for it
// to reach the Running phase, and then asserts that the Pod's logs contain
// testStr. Failures are reported on t.
func VerifyBuildOutput(t *testing.T, c *clients, namespace string, testStr string) {
	// Create Validation Pod
	pods := c.KubeClient.Kube.CoreV1().Pods(namespace)

	if _, err := pods.Create(getHelloWorldValidationPod(namespace)); err != nil {
		t.Fatalf("Failed to create Pod `%s`: %s", hwValidationPodName, err)
	}

	// Verify status of Pod (wait for it)
	if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
		// the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
		// there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
		return p.Status.Phase == corev1.PodRunning, nil
	}, "ValidationPodCompleted"); err != nil {
		t.Errorf("Error waiting for Pod %s to finish: %s", hwValidationPodName, err)
	}

	// Get validation pod logs and verify that the build executed a container w/ desired output
	req := pods.GetLogs(hwValidationPodName, &corev1.PodLogOptions{})
	readCloser, err := req.Stream()
	if err != nil {
		t.Fatalf("Failed to open stream to read: %v", err)
	}
	defer readCloser.Close()
	var buf bytes.Buffer
	out := bufio.NewWriter(&buf)
	if _, err := io.Copy(out, readCloser); err != nil {
		t.Fatalf("Failed to read logs from Pod %s: %v", hwValidationPodName, err)
	}
	// Flush the buffered writer so everything copied is visible in buf;
	// without this, logs shorter than the bufio buffer size never reach buf
	// and the check below would compare against an empty string.
	if err := out.Flush(); err != nil {
		t.Fatalf("Failed to flush log buffer for Pod %s: %v", hwValidationPodName, err)
	}
	if !strings.Contains(buf.String(), testStr) {
		t.Fatalf("Expected output %s from pod %s but got %s", testStr, hwValidationPodName, buf.String())
	}
}
18 changes: 18 additions & 0 deletions test/crd_checks.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (

"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
"go.opencensus.io/trace"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
Expand Down Expand Up @@ -54,6 +55,23 @@ func WaitForTaskRunState(c *clients, name string, inState func(r *v1alpha1.TaskR
})
}

// WaitForPodState polls the status of the Pod called name from client every
// interval until inState returns `true` indicating it is done, returns an
// error or timeout. desc will be used to name the metric that is emitted to
// track how long it took for name to get into the state checked by inState.
func WaitForPodState(c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error {
	_, span := trace.StartSpan(context.Background(), fmt.Sprintf("WaitForPodState/%s/%s", name, desc))
	defer span.End()

	podClient := c.KubeClient.Kube.CoreV1().Pods(namespace)
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		// A lookup error stops the poll immediately: done=true plus the error.
		pod, err := podClient.Get(name, metav1.GetOptions{})
		if err != nil {
			return true, err
		}
		return inState(pod)
	})
}

// WaitForPipelineRunState polls the status of the PipelineRun called name from client every
// interval until inState returns `true` indicating it is done, returns an
Expand Down
5 changes: 4 additions & 1 deletion test/pipelinerun_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ func TestPipelineRun(t *testing.T) {
defer tearDown(logger, c.KubeClient, namespace)

logger.Infof("Creating Pipeline Resources in namespace %s", namespace)
if _, err := c.TaskClient.Create(getHelloWorldTask(namespace)); err != nil {
if _, err := c.TaskClient.Create(getHelloWorldTask(namespace, []string{"echo", taskOutput})); err != nil {
t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
}
if _, err := c.PipelineClient.Create(getHelloWorldPipeline(namespace)); err != nil {
Expand All @@ -63,4 +63,7 @@ func TestPipelineRun(t *testing.T) {
}

// TODO check that TaskRuns created

// Verify that the init containers Build ran had 'taskOutput' written
// VerifyBuildOutput(t, c, namespace, taskOutput)
}
17 changes: 10 additions & 7 deletions test/taskrun_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,6 @@ import (
"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
)

const (
buildOutput = "Build successful"
)

// TestTaskRun is an integration test that will verify a very simple "hello world" TaskRun can be
// executed.
func TestTaskRun(t *testing.T) {
Expand All @@ -44,8 +40,13 @@ func TestTaskRun(t *testing.T) {
knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger)
defer tearDown(logger, c.KubeClient, namespace)

logger.Infof("Creating Tasks and TaskRun in namespace %s", namespace)
if _, err := c.TaskClient.Create(getHelloWorldTask(namespace)); err != nil {
// Create Volume
if _, err := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Create(getHelloWorldVolumeClaim(namespace)); err != nil {
t.Fatalf("Failed to create Volume `%s`: %s", hwTaskName, err)
}

// Create Task
if _, err := c.TaskClient.Create(getHelloWorldTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s > %s/%s", taskOutput, logPath, logFile)})); err != nil {
t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
}

Expand Down Expand Up @@ -74,7 +75,6 @@ func TestTaskRun(t *testing.T) {
}
podName := cluster.PodName
pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
fmt.Printf("Retrieved pods for podname %s: %s\n", podName, pods)

req := pods.GetLogs(podName, &corev1.PodLogOptions{})
readCloser, err := req.Stream()
Expand All @@ -88,4 +88,7 @@ func TestTaskRun(t *testing.T) {
if !strings.Contains(buf.String(), buildOutput) {
t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, podName, buf.String())
}

// Verify that the init containers Build ran had 'taskOutput' written
VerifyBuildOutput(t, c, namespace, taskOutput)
}

0 comments on commit 319e1b4

Please sign in to comment.