diff --git a/pkg/client/retrieve.go b/pkg/client/retrieve.go
index e0a50f092..837058138 100644
--- a/pkg/client/retrieve.go
+++ b/pkg/client/retrieve.go
@@ -60,7 +60,7 @@ func (c *SonobuoyClient) RetrieveResults(cfg *RetrieveConfig) (io.Reader, <-chan
 	}
 
 	// Determine sonobuoy pod name
-	podName, err := pluginaggregation.GetStatusPodName(client, cfg.Namespace)
+	podName, err := pluginaggregation.GetAggregatorPodName(client, cfg.Namespace)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "failed to get the name of the aggregator pod to fetch results from")
 	}
diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go
index 361fe9b50..b94f2a076 100644
--- a/pkg/discovery/discovery.go
+++ b/pkg/discovery/discovery.go
@@ -358,7 +358,7 @@ func setStatus(client kubernetes.Interface, namespace string, status *pluginaggr
 	}
 
 	// Determine sonobuoy pod name
-	podName, err := pluginaggregation.GetStatusPodName(client, namespace)
+	podName, err := pluginaggregation.GetAggregatorPodName(client, namespace)
 	if err != nil {
 		return errors.Wrap(err, "failed to get the name of the aggregator pod to set the status on")
 	}
diff --git a/pkg/plugin/aggregation/run.go b/pkg/plugin/aggregation/run.go
index ab62ba961..1710148c8 100644
--- a/pkg/plugin/aggregation/run.go
+++ b/pkg/plugin/aggregation/run.go
@@ -165,9 +165,15 @@ func Run(client kubernetes.Interface, plugins []plugin.Interface, cfg plugin.Agg
 		certs[p.GetName()] = cert
 	}
 
+	// Get a reference to the aggregator pod to set up owner references correctly for each started plugin
+	aggregatorPod, err := GetAggregatorPod(client, namespace)
+	if err != nil {
+		return errors.Wrapf(err, "couldn't get aggregator pod")
+	}
+
 	for _, p := range plugins {
 		logrus.WithField("plugin", p.GetName()).Info("Running plugin")
-		go aggr.RunAndMonitorPlugin(ctx, p, client, nodes.Items, cfg.AdvertiseAddress, certs[p.GetName()])
+		go aggr.RunAndMonitorPlugin(ctx, p, client, nodes.Items, cfg.AdvertiseAddress, certs[p.GetName()], aggregatorPod)
 	}
 
 	// Give the plugins a chance to cleanup before a hard timeout occurs
@@ -209,11 +215,11 @@ func Cleanup(client kubernetes.Interface, plugins []plugin.Interface) {
 
 // RunAndMonitorPlugin will start a plugin then monitor it for errors starting/running.
 // Errors detected will be handled by saving an error result in the aggregator.Results.
-func (a *Aggregator) RunAndMonitorPlugin(ctx context.Context, p plugin.Interface, client kubernetes.Interface, nodes []corev1.Node, address string, cert *tls.Certificate) {
+func (a *Aggregator) RunAndMonitorPlugin(ctx context.Context, p plugin.Interface, client kubernetes.Interface, nodes []corev1.Node, address string, cert *tls.Certificate, aggregatorPod *corev1.Pod) {
 	monitorCh := make(chan *plugin.Result, 1)
 	pCtx, cancel := context.WithCancel(ctx)
 
-	if err := p.Run(client, address, cert); err != nil {
+	if err := p.Run(client, address, cert, aggregatorPod); err != nil {
 		err := errors.Wrapf(err, "error running plugin %v", p.GetName())
 		logrus.Error(err)
 		monitorCh <- utils.MakeErrorResult(p.GetResultType(), map[string]interface{}{"error": err.Error()}, "")
diff --git a/pkg/plugin/aggregation/run_test.go b/pkg/plugin/aggregation/run_test.go
index 2a0833e6d..93a94a376 100644
--- a/pkg/plugin/aggregation/run_test.go
+++ b/pkg/plugin/aggregation/run_test.go
@@ -153,7 +153,7 @@ func TestRunAndMonitorPlugin(t *testing.T) {
 			}
 
 			go func() {
-				a.RunAndMonitorPlugin(ctx, tc.plugin, fclient, nil, "testname", testCert)
+				a.RunAndMonitorPlugin(ctx, tc.plugin, fclient, nil, "testname", testCert, &corev1.Pod{})
 				doneCh <- struct{}{}
 			}()
 
@@ -207,7 +207,7 @@ type MockCleanupPlugin struct {
 	cleanedUp bool
 }
 
-func (cp *MockCleanupPlugin) Run(_ kubernetes.Interface, _ string, _ *tls.Certificate) error {
+func (cp *MockCleanupPlugin) Run(_ kubernetes.Interface, _ string, _ *tls.Certificate, _ *corev1.Pod) error {
 	return nil
 }
 
diff --git a/pkg/plugin/aggregation/status.go b/pkg/plugin/aggregation/status.go
index 13f7bc729..8b3379084 100644
--- a/pkg/plugin/aggregation/status.go
+++ b/pkg/plugin/aggregation/status.go
@@ -97,7 +97,7 @@ func GetStatus(client kubernetes.Interface, namespace string) (*Status, error) {
 	}
 
 	// Determine sonobuoy pod name
-	podName, err := GetStatusPodName(client, namespace)
+	podName, err := GetAggregatorPodName(client, namespace)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get the name of the aggregator pod to get the status from")
 	}
diff --git a/pkg/plugin/aggregation/update.go b/pkg/plugin/aggregation/update.go
index cc48f8d0f..26917912a 100644
--- a/pkg/plugin/aggregation/update.go
+++ b/pkg/plugin/aggregation/update.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
@@ -36,6 +37,13 @@ const (
 	DefaultStatusPodName = "sonobuoy"
 )
 
+// NoPodWithLabelError represents an error encountered when a pod with a given label can't be found
+type NoPodWithLabelError string
+
+func (n NoPodWithLabelError) Error() string {
+	return string(n)
+}
+
 // node and name uniquely identify a single plugin result
 type key struct {
 	node, name string
@@ -118,7 +126,7 @@ func (u *updater) Annotate(results map[string]*plugin.Result) error {
 	}
 
 	// Determine sonobuoy pod name
-	podName, err := GetStatusPodName(u.client, u.namespace)
+	podName, err := GetAggregatorPodName(u.client, u.namespace)
 	if err != nil {
 		return errors.Wrap(err, "failed to get name of the aggregator pod to annotate")
 	}
@@ -166,25 +174,45 @@ func GetPatch(annotation string) map[string]interface{} {
 	}
 }
 
-// GetStatusPodName gets the sonobuoy master pod name based on its label.
-func GetStatusPodName(client kubernetes.Interface, namespace string) (string, error) {
+// GetAggregatorPod gets the sonobuoy aggregator pod based on its label.
+// It returns NoPodWithLabelError in the case where a pod with the sonobuoy aggregator label could not be found.
+func GetAggregatorPod(client kubernetes.Interface, namespace string) (*v1.Pod, error) {
 	listOptions := metav1.ListOptions{
 		LabelSelector: StatusPodLabel,
 	}
 
 	podList, err := client.CoreV1().Pods(namespace).List(listOptions)
 	if err != nil {
-		return "", errors.Wrap(err, "unable to list pods with label %q")
+		return nil, errors.Wrapf(err, "unable to list pods with label %q", StatusPodLabel)
 	}
 
 	switch {
 	case len(podList.Items) == 0:
-		logrus.Warningf("No pods found with label %q in namespace %s, using default pod name %q", StatusPodLabel, namespace, DefaultStatusPodName)
-		return DefaultStatusPodName, nil
+		logrus.Warningf("no pods found with label %q in namespace %s", StatusPodLabel, namespace)
+		return nil, NoPodWithLabelError(fmt.Sprintf("no pods found with label %q in namespace %s", StatusPodLabel, namespace))
+
 	case len(podList.Items) > 1:
-		logrus.Warningf("Found more than one pod with label %q. Using %q", StatusPodLabel, podList.Items[0].GetName())
-		return podList.Items[0].GetName(), nil
+		logrus.Warningf("Found more than one pod with label %q. Using pod with name %q", StatusPodLabel, podList.Items[0].GetName())
+		return &podList.Items[0], nil
 	default:
-		return podList.Items[0].GetName(), nil
+		return &podList.Items[0], nil
 	}
 }
+
+// GetAggregatorPodName gets the sonobuoy aggregator pod name. It returns the default pod name
+// if the pod cannot be found.
+func GetAggregatorPodName(client kubernetes.Interface, namespace string) (string, error) {
+	ap, err := GetAggregatorPod(client, namespace)
+
+	if err != nil {
+		switch err.(type) {
+		case NoPodWithLabelError:
+			logrus.Warningf("Aggregator pod not found, using default pod name %q: %v", DefaultStatusPodName, err)
+			return DefaultStatusPodName, nil
+		default:
+			return "", errors.Wrap(err, "failed to get aggregator pod")
+		}
+	}
+
+	return ap.GetName(), nil
+}
diff --git a/pkg/plugin/aggregation/update_test.go b/pkg/plugin/aggregation/update_test.go
index f1fdd3f85..545f52078 100644
--- a/pkg/plugin/aggregation/update_test.go
+++ b/pkg/plugin/aggregation/update_test.go
@@ -55,7 +55,102 @@ func TestCreateUpdater(t *testing.T) {
 	}
 }
 
-func TestGetStatusPodName(t *testing.T) {
+func TestGetAggregatorPod(t *testing.T) {
+	createPodWithRunLabel := func(name string) corev1.Pod {
+		return corev1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:   name,
+				Labels: map[string]string{"run": "sonobuoy-master"},
+			},
+		}
+	}
+
+	testPods := []corev1.Pod{
+		createPodWithRunLabel("sonobuoy-run-pod-1"),
+		createPodWithRunLabel("sonobuoy-run-pod-2"),
+	}
+
+	checkNoError := func(err error) error {
+		if err != nil {
+			return errors.Wrap(err, "expected no error")
+		}
+		return nil
+	}
+
+	checkErrorFromServer := func(err error) error {
+		if err == nil {
+			return errors.New("expected error but got nil")
+		}
+		return nil
+	}
+
+	checkNoPodWithLabelError := func(err error) error {
+		if _, ok := err.(NoPodWithLabelError); !ok {
+			return errors.Wrap(err, "expected error to have type NoPodWithLabelError")
+		}
+		return nil
+	}
+
+	testCases := []struct {
+		desc          string
+		podsOnServer  corev1.PodList
+		errFromServer error
+		checkError    func(err error) error
+		expectedPod   *corev1.Pod
+	}{
+		{
+			desc:          "Error retrieving pods from server results in no pod and an error being returned",
+			podsOnServer:  corev1.PodList{},
+			errFromServer: errors.New("could not retrieve pods"),
+			checkError:    checkErrorFromServer,
+			expectedPod:   nil,
+		},
+		{
+			desc:         "No pods results in no pod and a NoPodWithLabelError being returned",
+			podsOnServer: corev1.PodList{},
+			checkError:   checkNoPodWithLabelError,
+			expectedPod:  nil,
+		},
+		{
+			desc:         "Only one pod results in that pod being returned",
+			podsOnServer: corev1.PodList{Items: []corev1.Pod{testPods[0]}},
+			checkError:   checkNoError,
+			expectedPod:  &testPods[0],
+		},
+		{
+			desc:         "More than one pod results in the first pod being returned",
+			podsOnServer: corev1.PodList{Items: testPods},
+			checkError:   checkNoError,
+			expectedPod:  &testPods[0],
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			fclient := fake.NewSimpleClientset()
+			fclient.PrependReactor("*", "*", func(action k8stesting.Action) (handled bool, ret kuberuntime.Object, err error) {
+				return true, &tc.podsOnServer, tc.errFromServer
+			})
+
+			pod, err := GetAggregatorPod(fclient, "sonobuoy")
+			if checkErr := tc.checkError(err); checkErr != nil {
+				t.Errorf("error check failed: %v", checkErr)
+			}
+			if tc.expectedPod == nil {
+				if pod != nil {
+					t.Errorf("Expected no pod to be found but found pod %q", pod.GetName())
+				}
+			} else {
+				if pod.GetName() != tc.expectedPod.GetName() {
+					t.Errorf("Incorrect pod returned, expected %q but got %q", tc.expectedPod.GetName(), pod.GetName())
+				}
+			}
+
+		})
+	}
+}
+
+func TestGetAggregatorPodName(t *testing.T) {
 	createPodWithRunLabel := func(name string) corev1.Pod {
 		return corev1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
@@ -84,7 +179,7 @@ func TestGetStatusPodName(t *testing.T) {
 			expectedPodName: "sonobuoy",
 		},
 		{
-			desc: "Only one pod results in that pod name being used",
+			desc: "A returned pod results in that pod name being used",
 			podsOnServer: corev1.PodList{
 				Items: []corev1.Pod{
 					createPodWithRunLabel("sonobuoy-run-pod"),
@@ -93,17 +188,6 @@ func TestGetStatusPodName(t *testing.T) {
 			errFromServer:   nil,
 			expectedPodName: "sonobuoy-run-pod",
 		},
-		{
-			desc: "More that one pod results in the first pod name being used",
-			podsOnServer: corev1.PodList{
-				Items: []corev1.Pod{
-					createPodWithRunLabel("sonobuoy-run-pod-1"),
-					createPodWithRunLabel("sonobuoy-run-pod-2"),
-				},
-			},
-			errFromServer:   nil,
-			expectedPodName: "sonobuoy-run-pod-1",
-		},
 	}
 
 	for _, tc := range testCases {
@@ -113,7 +197,7 @@ func TestGetStatusPodName(t *testing.T) {
 				return true, &tc.podsOnServer, tc.errFromServer
 			})
 
-			podName, err := GetStatusPodName(fclient, "sonobuoy")
+			podName, err := GetAggregatorPodName(fclient, "sonobuoy")
 			if tc.errFromServer == nil && err != nil {
 				t.Errorf("Unexpected error returned, expected nil but got %q", err)
 			}
diff --git a/pkg/plugin/driver/daemonset/daemonset.go b/pkg/plugin/driver/daemonset/daemonset.go
index 406b7a2ce..d6d2d2659 100644
--- a/pkg/plugin/driver/daemonset/daemonset.go
+++ b/pkg/plugin/driver/daemonset/daemonset.go
@@ -85,7 +85,7 @@ func getMasterAddress(hostname string) string {
 	return fmt.Sprintf("https://%s/api/v1/results/by-node", hostname)
 }
 
-func (p *Plugin) createDaemonSetDefinition(hostname string, cert *tls.Certificate) appsv1.DaemonSet {
+func (p *Plugin) createDaemonSetDefinition(hostname string, cert *tls.Certificate, ownerPod *v1.Pod) appsv1.DaemonSet {
 	ds := appsv1.DaemonSet{}
 	annotations := map[string]string{
 		"sonobuoy-driver": p.GetDriver(),
@@ -106,6 +106,14 @@ func (p *Plugin) createDaemonSetDefinition(hostname string, cert *tls.Certificat
 		Namespace:   p.Namespace,
 		Labels:      labels,
 		Annotations: annotations,
+		OwnerReferences: []metav1.OwnerReference{
+			metav1.OwnerReference{
+				APIVersion: "v1",
+				Kind:       "Pod",
+				Name:       ownerPod.GetName(),
+				UID:        ownerPod.GetUID(),
+			},
+		},
 	}
 
 	ds.Spec.Selector = &metav1.LabelSelector{
@@ -151,8 +159,8 @@ func (p *Plugin) createDaemonSetDefinition(hostname string, cert *tls.Certificat
 }
 
 // Run dispatches worker pods according to the DaemonSet's configuration.
-func (p *Plugin) Run(kubeclient kubernetes.Interface, hostname string, cert *tls.Certificate) error {
-	daemonSet := p.createDaemonSetDefinition(getMasterAddress(hostname), cert)
+func (p *Plugin) Run(kubeclient kubernetes.Interface, hostname string, cert *tls.Certificate, ownerPod *v1.Pod) error {
+	daemonSet := p.createDaemonSetDefinition(getMasterAddress(hostname), cert, ownerPod)
 
 	secret, err := p.MakeTLSSecret(cert)
 	if err != nil {
diff --git a/pkg/plugin/driver/daemonset/daemonset_test.go b/pkg/plugin/driver/daemonset/daemonset_test.go
index 518644ffc..c5a2b1cf1 100644
--- a/pkg/plugin/driver/daemonset/daemonset_test.go
+++ b/pkg/plugin/driver/daemonset/daemonset_test.go
@@ -41,6 +41,8 @@ const (
 	expectedNamespace = "test-namespace"
 )
 
+var aggregatorPod corev1.Pod
+
 func createClientCertificate(name string) (*tls.Certificate, error) {
 	auth, err := ca.NewAuthority()
 	if err != nil {
@@ -100,7 +102,7 @@ func TestCreateDaemonSetDefintion(t *testing.T) {
 		t.Fatalf("couldn't make client certificate %v", err)
 	}
 
-	daemonSet := testDaemonSet.createDaemonSetDefinition("", clientCert)
+	daemonSet := testDaemonSet.createDaemonSetDefinition("", clientCert, &corev1.Pod{})
 
 	expectedName := fmt.Sprintf("sonobuoy-test-plugin-daemon-set-%v", testDaemonSet.SessionID)
 	if daemonSet.Name != expectedName {
@@ -198,7 +200,7 @@ func TestCreateDaemonSetDefintionUsesDefaultPodSpec(t *testing.T) {
 		t.Fatalf("couldn't create client certificate: %v", err)
 	}
 
-	daemonSet := testDaemonSet.createDaemonSetDefinition("", clientCert)
+	daemonSet := testDaemonSet.createDaemonSetDefinition("", clientCert, &corev1.Pod{})
 	podSpec := daemonSet.Spec.Template.Spec
 
 	expectedServiceAccount := "sonobuoy-serviceaccount"
@@ -240,7 +242,7 @@ func TestCreateDaemonSetDefintionUsesProvidedPodSpec(t *testing.T) {
 		t.Fatalf("couldn't create client certificate: %v", err)
 	}
 
-	daemonSet := testDaemonSet.createDaemonSetDefinition("", clientCert)
+	daemonSet := testDaemonSet.createDaemonSetDefinition("", clientCert, &corev1.Pod{})
 	podSpec := daemonSet.Spec.Template.Spec
 
 	if podSpec.ServiceAccountName != expectedServiceAccountName {
@@ -269,14 +271,14 @@ func TestCreateDaemonSetDefinitionAddsToExistingResourcesInPodSpec(t *testing.T)
 			},
 		},
 	}
-	testJob := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
+	testPlugin := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
 
 	clientCert, err := createClientCertificate("test-job")
 	if err != nil {
 		t.Fatalf("couldn't create client certificate: %v", err)
 	}
 
-	daemonSet := testJob.createDaemonSetDefinition("", clientCert)
+	daemonSet := testPlugin.createDaemonSetDefinition("", clientCert, &corev1.Pod{})
 	podSpec := daemonSet.Spec.Template.Spec
 
 	// Existing container in pod spec, plus 2 added by Sonobuoy
@@ -301,6 +303,70 @@
 	}
 }
 
+func TestCreateDaemonSetDefinitionSetsOwnerReference(t *testing.T) {
+	m := manifest.Manifest{
+		SonobuoyConfig: manifest.SonobuoyConfig{
+			Driver:     "DaemonSet",
+			PluginName: "test-job",
+			ResultType: "test-job-result",
+		},
+		Spec: manifest.Container{Container: corev1.Container{}},
+	}
+	testPlugin := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
+
+	clientCert, err := createClientCertificate("test-job")
+	if err != nil {
+		t.Fatalf("couldn't create client certificate: %v", err)
+	}
+
+	aggregatorPod := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "sonobuoy-aggregator",
+			UID:  "123456-abcdef",
+		},
+	}
+
+	daemonSet := testPlugin.createDaemonSetDefinition("", clientCert, &aggregatorPod)
+	ownerReferences := daemonSet.ObjectMeta.OwnerReferences
+
+	if len(ownerReferences) != 1 {
+		t.Fatalf("Expected 1 owner reference, got %v", len(ownerReferences))
+	}
+
+	testCases := []struct {
+		field string
+		want  string
+		got   string
+	}{
+		{
+			field: "APIVersion",
+			want:  "v1",
+			got:   ownerReferences[0].APIVersion,
+		},
+		{
+			field: "Kind",
+			want:  "Pod",
+			got:   ownerReferences[0].Kind,
+		},
+		{
+			field: "Name",
+			want:  aggregatorPod.ObjectMeta.Name,
+			got:   ownerReferences[0].Name,
+		},
+		{
+			field: "UID",
+			want:  string(aggregatorPod.ObjectMeta.UID),
+			got:   string(ownerReferences[0].UID),
+		},
+	}
+
+	for _, tc := range testCases {
+		if tc.got != tc.want {
+			t.Errorf("Expected ownerReference %v to be %q, got %q", tc.field, tc.want, tc.got)
+		}
+	}
+}
+
 func TestMonitorOnce(t *testing.T) {
 	// Note: the pods/ds must be marked with the label "sonobuoy-run" or else our labelSelector
 	// logic will filter them out even though the fake server returns them.
diff --git a/pkg/plugin/driver/job/job.go b/pkg/plugin/driver/job/job.go
index 9a6c38b87..810b9c978 100644
--- a/pkg/plugin/driver/job/job.go
+++ b/pkg/plugin/driver/job/job.go
@@ -78,7 +78,7 @@ func getMasterAddress(hostname string) string {
 	return fmt.Sprintf("https://%s/api/v1/results/%v", hostname, plugin.GlobalResult)
 }
 
-func (p *Plugin) createPodDefinition(hostname string, cert *tls.Certificate) v1.Pod {
+func (p *Plugin) createPodDefinition(hostname string, cert *tls.Certificate, ownerPod *v1.Pod) v1.Pod {
 	pod := v1.Pod{}
 	annotations := map[string]string{
 		"sonobuoy-driver": p.GetDriver(),
@@ -99,6 +99,14 @@ func (p *Plugin) createPodDefinition(hostname string, cert *tls.Certificate) v1.
 		Namespace:   p.Namespace,
 		Labels:      labels,
 		Annotations: annotations,
+		OwnerReferences: []metav1.OwnerReference{
+			metav1.OwnerReference{
+				APIVersion: "v1",
+				Kind:       "Pod",
+				Name:       ownerPod.GetName(),
+				UID:        ownerPod.GetUID(),
+			},
+		},
 	}
 
 	var podSpec v1.PodSpec
@@ -135,8 +143,8 @@ func (p *Plugin) createPodDefinition(hostname string, cert *tls.Certificate) v1.
 }
 
 // Run dispatches worker pods according to the Job's configuration.
-func (p *Plugin) Run(kubeclient kubernetes.Interface, hostname string, cert *tls.Certificate) error {
-	job := p.createPodDefinition(getMasterAddress(hostname), cert)
+func (p *Plugin) Run(kubeclient kubernetes.Interface, hostname string, cert *tls.Certificate, ownerPod *v1.Pod) error {
+	job := p.createPodDefinition(getMasterAddress(hostname), cert, ownerPod)
 
 	secret, err := p.MakeTLSSecret(cert)
 	if err != nil {
diff --git a/pkg/plugin/driver/job/job_test.go b/pkg/plugin/driver/job/job_test.go
index 7341db4c0..6d06ce007 100644
--- a/pkg/plugin/driver/job/job_test.go
+++ b/pkg/plugin/driver/job/job_test.go
@@ -58,7 +58,7 @@ func createClientCertificate(name string) (*tls.Certificate, error) {
 }
 
 func TestCreatePodDefinition(t *testing.T) {
-	testJob := NewPlugin(
+	testPlugin := NewPlugin(
 		manifest.Manifest{
 			SonobuoyConfig: manifest.SonobuoyConfig{
 				PluginName: "test-job",
@@ -103,9 +103,9 @@ func TestCreatePodDefinition(t *testing.T) {
 		t.Fatalf("couldn't make client certificate %v", err)
 	}
 
-	pod := testJob.createPodDefinition("", clientCert)
+	pod := testPlugin.createPodDefinition("", clientCert, &corev1.Pod{})
 
-	expectedName := fmt.Sprintf("sonobuoy-test-job-job-%v", testJob.SessionID)
+	expectedName := fmt.Sprintf("sonobuoy-test-job-job-%v", testPlugin.SessionID)
 	if pod.Name != expectedName {
 		t.Errorf("Expected pod name %v, got %v", expectedName, pod.Name)
 	}
@@ -187,14 +187,14 @@ func TestCreatePodDefinitionUsesDefaultPodSpec(t *testing.T) {
 			},
 		},
 	}
-	testJob := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
+	testPlugin := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
 
 	clientCert, err := createClientCertificate("test-job")
 	if err != nil {
 		t.Fatalf("couldn't create client certificate: %v", err)
 	}
 
-	pod := testJob.createPodDefinition("", clientCert)
+	pod := testPlugin.createPodDefinition("", clientCert, &corev1.Pod{})
 
 	expectedServiceAccount := "sonobuoy-serviceaccount"
 	if pod.Spec.ServiceAccountName != expectedServiceAccount {
@@ -226,14 +226,14 @@ func TestCreatePodDefinitionUsesProvidedPodSpec(t *testing.T) {
 			PodSpec: corev1.PodSpec{ServiceAccountName: expectedServiceAccountName},
 		},
 	}
-	testJob := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
+	testPlugin := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
 
 	clientCert, err := createClientCertificate("test-job")
 	if err != nil {
 		t.Fatalf("couldn't create client certificate: %v", err)
 	}
 
-	pod := testJob.createPodDefinition("", clientCert)
+	pod := testPlugin.createPodDefinition("", clientCert, &corev1.Pod{})
 
 	if pod.Spec.ServiceAccountName != expectedServiceAccountName {
 		t.Errorf("expected pod spec to have provided service account name %q, got %q", expectedServiceAccountName, pod.Spec.ServiceAccountName)
@@ -261,14 +261,14 @@ func TestCreatePodDefinitionAddsToExistingResourcesInPodSpec(t *testing.T) {
 			},
 		},
 	}
-	testJob := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
+	testPlugin := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
 
 	clientCert, err := createClientCertificate("test-job")
 	if err != nil {
 		t.Fatalf("couldn't create client certificate: %v", err)
 	}
 
-	pod := testJob.createPodDefinition("", clientCert)
+	pod := testPlugin.createPodDefinition("", clientCert, &corev1.Pod{})
 
 	// Existing container in pod spec, plus 2 added by Sonobuoy
 	expectedNumContainers := 3
@@ -292,6 +292,69 @@ func TestCreatePodDefinitionAddsToExistingResourcesInPodSpec(t *testing.T) {
 	}
 }
 
+func TestCreatePodDefinitionSetsOwnerReference(t *testing.T) {
+	m := manifest.Manifest{
+		SonobuoyConfig: manifest.SonobuoyConfig{
+			PluginName: "test-job",
+			ResultType: "test-job-result",
+		},
+		Spec: manifest.Container{Container: corev1.Container{}},
+	}
+	testPlugin := NewPlugin(m, expectedNamespace, expectedImageName, "Always", "image-pull-secret", map[string]string{})
+
+	clientCert, err := createClientCertificate("test-job")
+	if err != nil {
+		t.Fatalf("couldn't create client certificate: %v", err)
+	}
+
+	aggregatorPod := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "sonobuoy-aggregator",
+			UID:  "123456-abcdef",
+		},
+	}
+
+	pod := testPlugin.createPodDefinition("", clientCert, &aggregatorPod)
+	ownerReferences := pod.ObjectMeta.OwnerReferences
+
+	if len(ownerReferences) != 1 {
+		t.Fatalf("Expected 1 owner reference, got %v", len(ownerReferences))
+	}
+
+	testCases := []struct {
+		field string
+		want  string
+		got   string
+	}{
+		{
+			field: "APIVersion",
+			want:  "v1",
+			got:   ownerReferences[0].APIVersion,
+		},
+		{
+			field: "Kind",
+			want:  "Pod",
+			got:   ownerReferences[0].Kind,
+		},
+		{
+			field: "Name",
+			want:  aggregatorPod.ObjectMeta.Name,
+			got:   ownerReferences[0].Name,
+		},
+		{
+			field: "UID",
+			want:  string(aggregatorPod.ObjectMeta.UID),
+			got:   string(ownerReferences[0].UID),
+		},
+	}
+
+	for _, tc := range testCases {
+		if tc.got != tc.want {
+			t.Errorf("Expected ownerReference %v to be %q, got %q", tc.field, tc.want, tc.got)
+		}
+	}
+}
+
 func TestMonitorOnce(t *testing.T) {
 	// Note: the pods must be marked with the label "sonobuoy-run" or else our labelSelector
 	// logic will filter them out even though the fake server returns them.
diff --git a/pkg/plugin/interface.go b/pkg/plugin/interface.go
index 544ec3532..57b3e1150 100644
--- a/pkg/plugin/interface.go
+++ b/pkg/plugin/interface.go
@@ -38,7 +38,7 @@ const (
 type Interface interface {
 	// Run runs a plugin, declaring all resources it needs, and then
 	// returns. It does not block and wait until the plugin has finished.
-	Run(kubeClient kubernetes.Interface, hostname string, cert *tls.Certificate) error
+	Run(kubeClient kubernetes.Interface, hostname string, cert *tls.Certificate, ownerPod *v1.Pod) error
 	// Cleanup cleans up all resources created by the plugin
 	Cleanup(kubeClient kubernetes.Interface)
 	// Monitor continually checks for problems in the resources created by a