From c7cff248f974b269b4d8dbbaeafbc2bc920bd480 Mon Sep 17 00:00:00 2001 From: Daniel Pacak Date: Thu, 24 Sep 2020 12:35:10 +0200 Subject: [PATCH] feat: Use deterministic names for VulnerabilityReports Signed-off-by: Daniel Pacak --- CONTRIBUTING.md | 67 +++++++++++++++------ Makefile | 4 +- cmd/operator/main.go | 34 ++++++----- go.mod | 5 +- go.sum | 11 ++-- pkg/aqua/scanner.go | 6 +- pkg/controllers/job_controller.go | 47 ++------------- pkg/controllers/pod_controller.go | 96 ++++++++++++++++++++----------- pkg/etc/config.go | 32 +++++++---- pkg/etc/config_test.go | 50 +++++++++------- pkg/reports/store.go | 32 +++++++++-- pkg/resources/resources.go | 32 +++++++++++ pkg/scanner/scanner.go | 5 +- pkg/trivy/scanner.go | 54 +---------------- 14 files changed, 267 insertions(+), 208 deletions(-) create mode 100644 pkg/resources/resources.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c61d816..1c2b123 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -182,22 +182,57 @@ the operator in the same namespace as supervised workloads. You'll need an OperatorGroup to denote which namespaces the operator should watch. It must exist in the namespace where you want to deploy the operator. -3. Create the Subscription resource: - - ``` - cat << EOF | kubectl apply -f - - apiVersion: operators.coreos.com/v1alpha1 - kind: Subscription - metadata: - name: starboard-operator - namespace: marketplace - spec: - channel: alpha - name: starboard-operator - source: $QUAY_NAMESPACE-operators - sourceNamespace: marketplace - EOF - ``` +3. Create the Subscription resource + 1. with Trivy scanner, which is enabled by default: + + ``` + $ cat << EOF | kubectl apply -f - + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: starboard-operator + namespace: marketplace + spec: + channel: alpha + name: starboard-operator + source: $QUAY_NAMESPACE-operators + sourceNamespace: marketplace + EOF + ``` + 2. 
with Aqua CSP scanner: + + ``` + $ kubectl create secret generic starboard-operator \ + --namespace marketplace \ + --from-literal OPERATOR_SCANNER_AQUA_CSP_USERNAME=$AQUA_CONSOLE_USERNAME \ + --from-literal OPERATOR_SCANNER_AQUA_CSP_PASSWORD=$AQUA_CONSOLE_PASSWORD \ + --from-literal OPERATOR_SCANNER_AQUA_CSP_VERSION=$AQUA_VERSION \ + --from-literal OPERATOR_SCANNER_AQUA_CSP_HOST=http://csp-console-svc.aqua:8080 + ``` + + ``` + $ cat << EOF | kubectl apply -f - + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: starboard-operator + namespace: marketplace + spec: + channel: alpha + name: starboard-operator + source: $QUAY_NAMESPACE-operators + sourceNamespace: marketplace + config: + env: + - name: OPERATOR_SCANNER_TRIVY_ENABLED + value: "false" + - name: OPERATOR_SCANNER_AQUA_CSP_ENABLED + value: "true" + envFrom: + - secretRef: + name: starboard-operator + EOF + ``` A Subscription links the previous steps together by selecting an operator and one of its channels. OLM uses this information to start the corresponding operator Pod. 
The example above creates a new Subscription to the `alpha` diff --git a/Makefile b/Makefile index 62cc848..0fe3e82 100644 --- a/Makefile +++ b/Makefile @@ -19,10 +19,10 @@ modules: build: operator scanner scanner: $(SOURCES) - go build -o bin/scanner cmd/scanner/main.go + GOOS=linux go build -o bin/scanner cmd/scanner/main.go operator: $(SOURCES) - go build -o bin/operator cmd/operator/main.go + GOOS=linux go build -o bin/operator cmd/operator/main.go .PHONY: test test: diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 5dc95b1..32114a1 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -81,36 +81,44 @@ func run() error { targetNamespaces := config.Operator.GetTargetNamespaces() - setupLog.Info("Resolving multitenancy support", - "operatorNamespace", operatorNamespace, - "targetNamespaces", targetNamespaces) - - installMode, err := etc.ResolveInstallMode(operatorNamespace, targetNamespaces) + installMode, err := config.Operator.GetInstallMode() if err != nil { - return fmt.Errorf("resolving install mode: %w", err) + return fmt.Errorf("getting install mode: %w", err) } - setupLog.Info("Resolving install mode", "mode", installMode) + setupLog.Info("Resolving install mode", "installMode", installMode, + "operatorNamespace", operatorNamespace, + "targetNamespaces", targetNamespaces) // Set the default manager options. options := manager.Options{ Scheme: scheme, } - if len(targetNamespaces) == 1 && targetNamespaces[0] == operatorNamespace { - // Add support for OwnNamespace set in STARBOARD_TARGET_NAMESPACES (e.g. ns1). + switch installMode { + case etc.InstallModeOwnNamespace: + // Add support for OwnNamespace set in STARBOARD_NAMESPACE (e.g. marketplace) and STARBOARD_TARGET_NAMESPACES (e.g. 
marketplace) setupLog.Info("Constructing single-namespaced cache", "namespace", targetNamespaces[0]) options.Namespace = targetNamespaces[0] - } else if len(targetNamespaces) > 0 { - // Add support for SingleNamespace and MultiNamespace set in STARBOARD_TARGET_NAMESPACES (e.g. ns1,ns2). + case etc.InstallModeSingleNamespace: + // Add support for SingleNamespace set in STARBOARD_NAMESPACE (e.g. marketplace) and STARBOARD_TARGET_NAMESPACES (e.g. foo) + cachedNamespaces := append(targetNamespaces, operatorNamespace) + setupLog.Info("Constructing multi-namespaced cache", "namespaces", cachedNamespaces) + options.Namespace = targetNamespaces[0] + options.NewCache = cache.MultiNamespacedCacheBuilder(cachedNamespaces) + case etc.InstallModeMultiNamespace: + // Add support for MultiNamespace set in STARBOARD_NAMESPACE (e.g. marketplace) and STARBOARD_TARGET_NAMESPACES (e.g. foo,bar). // Note that we may face performance issues when using this with a high number of namespaces. // More: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder cachedNamespaces := append(targetNamespaces, operatorNamespace) setupLog.Info("Constructing multi-namespaced cache", "namespaces", cachedNamespaces) options.Namespace = "" options.NewCache = cache.MultiNamespacedCacheBuilder(cachedNamespaces) - } else if len(targetNamespaces) == 0 { - setupLog.Info("Disabling cache and watching all namespaces") + case etc.InstallModeAllNamespaces: + // Add support for AllNamespaces set in STARBOARD_NAMESPACE (e.g. marketplace) and STARBOARD_TARGET_NAMESPACES left blank. 
+ setupLog.Info("Watching all namespaces") options.Namespace = "" + default: + return fmt.Errorf("unrecognized install mode: %v", installMode) } kubernetesConfig, err := ctrl.GetConfig() diff --git a/go.mod b/go.mod index 6dca328..60e767f 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,8 @@ module github.com/aquasecurity/starboard-operator go 1.14 require ( - github.com/aquasecurity/starboard v0.3.1-0.20200915085831-05e4ce57da65 + github.com/aquasecurity/starboard v0.4.1-0.20200923101908-ca60574a118f + github.com/beorn7/perks v1.0.1 // indirect github.com/caarlos0/env/v6 v6.2.2 github.com/go-logr/logr v0.1.0 github.com/google/go-containerregistry v0.1.1 @@ -16,7 +17,7 @@ require ( k8s.io/apimachinery v0.19.0-alpha.3 k8s.io/client-go v0.19.0-alpha.3 k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 - sigs.k8s.io/controller-runtime v0.6.2 + sigs.k8s.io/controller-runtime v0.6.3 ) replace ( diff --git a/go.sum b/go.sum index 32ffa78..71357d7 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= -github.com/aquasecurity/starboard v0.3.1-0.20200915085831-05e4ce57da65 h1:uRoIOcMb/uTZwlnFZtEKL373eojYTW24UpFSnwW1w5g= -github.com/aquasecurity/starboard v0.3.1-0.20200915085831-05e4ce57da65/go.mod h1:Xdodbl8+u6Na3ah5DoeBONtVavbCrwMA7CcpKYDBizo= +github.com/aquasecurity/starboard v0.4.1-0.20200923101908-ca60574a118f h1:rrdkeHqSnE6z/og/Pse54kKnMF1l/bD6SJVlRIPlIKI= +github.com/aquasecurity/starboard v0.4.1-0.20200923101908-ca60574a118f/go.mod h1:Xdodbl8+u6Na3ah5DoeBONtVavbCrwMA7CcpKYDBizo= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -192,8 +192,9 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -1123,8 +1124,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= -sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= +sigs.k8s.io/controller-runtime v0.6.3 h1:SBbr+inLPEKhvlJtrvDcwIpm+uhDvp63Bl72xYJtoOE= +sigs.k8s.io/controller-runtime 
v0.6.3/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= diff --git a/pkg/aqua/scanner.go b/pkg/aqua/scanner.go index efa939f..1bc0212 100644 --- a/pkg/aqua/scanner.go +++ b/pkg/aqua/scanner.go @@ -37,7 +37,7 @@ func NewScanner(version etc.VersionInfo, config etc.ScannerAquaCSP) scanner.Vuln } } -func (s *aquaScanner) NewScanJob(resource kube.Object, spec corev1.PodSpec, options scanner.Options) (*batchv1.Job, *corev1.Secret, error) { +func (s *aquaScanner) NewScanJob(resource kube.Object, spec corev1.PodSpec, options scanner.Options) (*batchv1.Job, error) { jobName := uuid.New().String() initContainerName := jobName @@ -50,7 +50,7 @@ func (s *aquaScanner) NewScanJob(resource kube.Object, spec corev1.PodSpec, opti containerImagesAsJSON, err := containerImages.AsJSON() if err != nil { - return nil, nil, err + return nil, err } return &batchv1.Job{ @@ -122,7 +122,7 @@ func (s *aquaScanner) NewScanJob(resource kube.Object, spec corev1.PodSpec, opti }, }, }, - }, nil, nil + }, nil } func (s *aquaScanner) newScanJobContainer(podContainer corev1.Container) corev1.Container { diff --git a/pkg/controllers/job_controller.go b/pkg/controllers/job_controller.go index 10733bc..1bb8b04 100644 --- a/pkg/controllers/job_controller.go +++ b/pkg/controllers/job_controller.go @@ -3,8 +3,8 @@ package controllers import ( "context" "fmt" - "reflect" + "github.com/aquasecurity/starboard-operator/pkg/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aquasecurity/starboard-operator/pkg/etc" @@ -55,7 +55,7 @@ func (r *JobReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } if len(job.Status.Conditions) == 0 { - 
log.Info("Ignoring Job with unknown status condition") + log.Info("Ignoring Job without status conditions") return ctrl.Result{}, nil } @@ -85,12 +85,12 @@ func (r *JobReconciler) processCompleteScanJob(ctx context.Context, scanJob *bat return fmt.Errorf("getting workload from scan job labels set: %w", err) } - containerImages, err := r.getContainerImagesFrom(scanJob) + containerImages, err := resources.GetContainerImagesFromJob(scanJob) if err != nil { - return err + return fmt.Errorf("getting container images: %w", err) } - hasVulnerabilityReports, err := r.hasVulnerabilityReports(ctx, workload, containerImages) + hasVulnerabilityReports, err := r.Store.HasVulnerabilityReports(ctx, workload, containerImages) if err != nil { return err } @@ -146,41 +146,6 @@ func (r *JobReconciler) GetPodControlledBy(ctx context.Context, job *batchv1.Job return podList.Items[0].DeepCopy(), nil } -func (r *JobReconciler) hasVulnerabilityReports(ctx context.Context, owner kube.Object, containerImages kube.ContainerImages) (bool, error) { - vulnerabilityReports, err := r.Store.Read(ctx, owner) - if err != nil { - return false, err - } - - actual := map[string]bool{} - for containerName, _ := range vulnerabilityReports { - actual[containerName] = true - } - - expected := map[string]bool{} - for containerName, _ := range containerImages { - expected[containerName] = true - } - - return reflect.DeepEqual(actual, expected), nil -} - -// TODO We have similar code in other places -func (r *JobReconciler) getContainerImagesFrom(job *batchv1.Job) (kube.ContainerImages, error) { - var containerImagesAsJSON string - var ok bool - - if containerImagesAsJSON, ok = job.Annotations[kube.AnnotationContainerImages]; !ok { - return nil, fmt.Errorf("scan job does not have required annotation: %s", kube.AnnotationContainerImages) - } - containerImages := kube.ContainerImages{} - err := containerImages.FromJSON(containerImagesAsJSON) - if err != nil { - return nil, fmt.Errorf("reading scan job 
annotation: %s: %w", kube.AnnotationContainerImages, err) - } - return containerImages, nil -} - func (r *JobReconciler) processFailedScanJob(ctx context.Context, scanJob *batchv1.Job) error { pod, err := r.GetPodControlledBy(ctx, scanJob) if err != nil { @@ -194,7 +159,7 @@ func (r *JobReconciler) processFailedScanJob(ctx context.Context, scanJob *batch r.Log.Error(nil, "Scan job container", "container", container, "status.reason", status.Reason, "status.message", status.Message) } r.Log.Info("Deleting failed scan job") - return r.Client.Delete(ctx, scanJob) + return r.Client.Delete(ctx, scanJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) } func (r *JobReconciler) SetupWithManager(mgr ctrl.Manager) error { diff --git a/pkg/controllers/pod_controller.go b/pkg/controllers/pod_controller.go index bfa90ed..2312503 100644 --- a/pkg/controllers/pod_controller.go +++ b/pkg/controllers/pod_controller.go @@ -3,13 +3,13 @@ package controllers import ( "context" "fmt" - "reflect" + "k8s.io/apimachinery/pkg/types" + + "github.com/aquasecurity/starboard-operator/pkg/resources" "github.com/aquasecurity/starboard-operator/pkg/etc" "github.com/aquasecurity/starboard-operator/pkg/reports" "github.com/aquasecurity/starboard-operator/pkg/scanner" - "github.com/aquasecurity/starboard/pkg/docker" - batchv1 "k8s.io/api/batch/v1" "github.com/aquasecurity/starboard/pkg/kube" @@ -50,8 +50,18 @@ func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("pod", fmt.Sprintf("%s/%s", req.Namespace, req.Name)) + installMode, err := r.Config.GetInstallMode() + if err != nil { + return ctrl.Result{}, fmt.Errorf("getting install mode: %w", err) + } + + if r.IgnorePodInOperatorNamespace(installMode, req.NamespacedName) { + log.Info("Ignoring Pod run in the operator namespace") + return ctrl.Result{}, nil + } + // Retrieve the Pod from cache. 
- err := r.Client.Get(ctx, req.NamespacedName, pod) + err = r.Client.Get(ctx, req.NamespacedName, pod) if err != nil && errors.IsNotFound(err) { log.Info("Ignoring Pod that must have been deleted") return ctrl.Result{}, nil @@ -81,7 +91,7 @@ func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log.Info("Resolving immediate Pod owner", "owner", owner) // Check if containers of the Pod have corresponding VulnerabilityReports. - hasVulnerabilityReports, err := r.hasVulnerabilityReports(ctx, owner, pod) + hasVulnerabilityReports, err := r.Store.HasVulnerabilityReports(ctx, owner, resources.GetContainerImagesFromPodSpec(pod.Spec)) if err != nil { return ctrl.Result{}, fmt.Errorf("getting vulnerability reports: %w", err) } @@ -100,26 +110,6 @@ func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { return ctrl.Result{}, nil } -// hasVulnerabilityReports checks if the vulnerability reports exist for the specified workload. -func (r *PodReconciler) hasVulnerabilityReports(ctx context.Context, owner kube.Object, p *corev1.Pod) (bool, error) { - vulnerabilityReports, err := r.Store.Read(ctx, owner) - if err != nil { - return false, err - } - - actual := map[string]bool{} - for containerName, _ := range vulnerabilityReports { - actual[containerName] = true - } - - expected := map[string]bool{} - for _, container := range p.Spec.Containers { - expected[container.Name] = true - } - - return reflect.DeepEqual(actual, expected), nil -} - func (r *PodReconciler) ensureScanJob(ctx context.Context, owner kube.Object, p *corev1.Pod) error { log := r.Log.WithValues("pod", fmt.Sprintf("%s/%s", p.Namespace, p.Name)) @@ -141,27 +131,48 @@ func (r *PodReconciler) ensureScanJob(ctx context.Context, owner kube.Object, p return nil } - scanJob, secret, err := r.Scanner.NewScanJob(owner, p.Spec, scanner.Options{ + scanJob, err := r.Scanner.NewScanJob(owner, p.Spec, scanner.Options{ Namespace: r.Config.Namespace, ServiceAccountName: 
r.Config.ServiceAccount, - ImageCredentials: make(map[string]docker.Auth), ScanJobTimeout: r.Config.ScanJobTimeout, }) if err != nil { - return err - } - if secret != nil { - log.Info("Creating secret", "secret", fmt.Sprintf("%s/%s", secret.Namespace, secret.Name)) - err = r.Client.Create(ctx, secret) - if err != nil { - return err - } + return fmt.Errorf("constructing scan job: %w", err) } log.Info("Creating scan job", "job", fmt.Sprintf("%s/%s", scanJob.Namespace, scanJob.Name)) return r.Client.Create(ctx, scanJob) } +// IgnorePodInOperatorNamespace determines whether to reconcile the specified Pod +// based on the give InstallMode or not. Returns true if the Pod should be ignored, +// false otherwise. +// +// In the SingleNamespace install mode we're configuring Client cache +// to watch the operator namespace, in which the operator runs scan Jobs. +// However, we do not want to scan the workloads that might run in the +// operator namespace. +// +// In the MultiNamespace install mode we're configuring Client cache +// to watch the operator namespace, in which the operator runs scan Jobs. +// However, we do not want to scan the workloads that might run in the +// operator namespace unless the operator namespace is added to the list +// of target namespaces. +func (r *PodReconciler) IgnorePodInOperatorNamespace(installMode etc.InstallMode, pod types.NamespacedName) bool { + if installMode == etc.InstallModeSingleNamespace && + pod.Namespace == r.Config.Namespace { + return true + } + + if installMode == etc.InstallModeMultiNamespace && + pod.Namespace == r.Config.Namespace && + !SliceContainsString(r.Config.GetTargetNamespaces(), r.Config.Namespace) { + return true + } + + return false +} + // IsPodManagedByStarboardOperator returns true if the specified Pod // is managed by the Starboard Operator, false otherwise. 
// @@ -183,6 +194,9 @@ func HasContainersReadyCondition(pod *corev1.Pod) bool { return false } +// GetImmediateOwnerReference returns the immediate owner of the specified Pod. +// For example, for a Pod controlled by a Deployment it will return the active ReplicaSet object, +// whereas for an unmanaged Pod the immediate owner is the Pod itself. func GetImmediateOwnerReference(pod *corev1.Pod) kube.Object { ownerRef := metav1.GetControllerOf(pod) if ownerRef != nil { @@ -204,3 +218,15 @@ func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&corev1.Pod{}). Complete(r) } + +// SliceContainsString returns true if the specified slice of strings +// contains the give value, false otherwise. +func SliceContainsString(slice []string, value string) bool { + exists := false + for _, targetNamespace := range slice { + if targetNamespace == value { + exists = true + } + } + return exists +} diff --git a/pkg/etc/config.go b/pkg/etc/config.go index 7b58575..6aa18f0 100644 --- a/pkg/etc/config.go +++ b/pkg/etc/config.go @@ -1,7 +1,6 @@ package etc import ( - "errors" "fmt" "strings" "time" @@ -65,19 +64,32 @@ func (c Operator) GetTargetNamespaces() []string { return []string{} } -// ResolveInstallMode resolves install mode as defined by the OLM (Operator Lifecycle Manager). -func ResolveInstallMode(operatorNamespace string, targetNamespaces []string) (string, error) { +// InstallMode represents multitenancy support defined by the Operator Lifecycle Manager spec. +type InstallMode string + +const ( + InstallModeOwnNamespace InstallMode = "OwnNamespace" + InstallModeSingleNamespace InstallMode = "SingleNamespace" + InstallModeMultiNamespace InstallMode = "MultiNamespace" + InstallModeAllNamespaces InstallMode = "AllNamespaces" +) + +// GetInstallMode resolves InstallMode based on configured operator and target namespaces. 
+func (c Operator) GetInstallMode() (InstallMode, error) {
+	operatorNamespace, err := c.GetOperatorNamespace()
+	if err != nil {
+		return "", err
+	}
+	targetNamespaces := c.GetTargetNamespaces()
+
 	if len(targetNamespaces) == 1 && operatorNamespace == targetNamespaces[0] {
-		return "OwnNamespace", nil
+		return InstallModeOwnNamespace, nil
 	}
 	if len(targetNamespaces) == 1 && operatorNamespace != targetNamespaces[0] {
-		return "SingleNamespace", nil
+		return InstallModeSingleNamespace, nil
 	}
 	if len(targetNamespaces) > 1 {
-		return "MultiNamespace", nil
-	}
-	if len(targetNamespaces) == 0 {
-		return "AllNamespaces", nil
+		return InstallModeMultiNamespace, nil
 	}
-	return "", errors.New("unrecognized install mode")
+	return InstallModeAllNamespaces, nil
 }
diff --git a/pkg/etc/config_test.go b/pkg/etc/config_test.go
index 07549fb..14b7e79 100644
--- a/pkg/etc/config_test.go
+++ b/pkg/etc/config_test.go
@@ -44,49 +44,55 @@ func TestOperator_GetTargetNamespaces(t *testing.T) {
 	}
 }
 
-func TestResolveInstallMode(t *testing.T) {
+func TestOperator_GetInstallMode(t *testing.T) {
 	testCases := []struct {
 		name                string
-		operatorNamespace   string
-		targetNamespaces    []string
-
-		expectedInstallMode string
+		operator            etc.Operator
+		expectedInstallMode etc.InstallMode
 		expectedError       string
 	}{
 		{
-			name:                "Should resolve OwnNamespace",
-			operatorNamespace:   "operators",
-			targetNamespaces:    []string{"operators"},
-			expectedInstallMode: "OwnNamespace",
+			name: "Should resolve OwnNamespace",
+			operator: etc.Operator{
+				Namespace:        "operators",
+				TargetNamespaces: "operators",
+			},
+			expectedInstallMode: etc.InstallModeOwnNamespace,
 			expectedError:       "",
 		},
 		{
-			name:                "Should resolve SingleNamespace",
-			operatorNamespace:   "operators",
-			targetNamespaces:    []string{"foo"},
-			expectedInstallMode: "SingleNamespace",
+			name: "Should resolve SingleNamespace",
+			operator: etc.Operator{
+				Namespace:        "operators",
+				TargetNamespaces: "foo",
+			},
+			expectedInstallMode: etc.InstallModeSingleNamespace,
expectedError: "", }, { - name: "Should resolve MultiNamespace", - operatorNamespace: "operators", - targetNamespaces: []string{"foo", "bar", "baz"}, - expectedInstallMode: "MultiNamespace", + name: "Should resolve MultiNamespace", + operator: etc.Operator{ + Namespace: "operators", + TargetNamespaces: "foo,bar,baz", + }, + expectedInstallMode: etc.InstallModeMultiNamespace, expectedError: "", }, { - name: "Should resolve AllNamespaces", - operatorNamespace: "operators", - targetNamespaces: []string{}, - expectedInstallMode: "AllNamespaces", + name: "Should resolve AllNamespaces", + operator: etc.Operator{ + Namespace: "operators", + TargetNamespaces: "", + }, + expectedInstallMode: etc.InstallModeAllNamespaces, expectedError: "", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - installMode, err := etc.ResolveInstallMode(tc.operatorNamespace, tc.targetNamespaces) + installMode, err := tc.operator.GetInstallMode() switch tc.expectedError { case "": require.NoError(t, err) diff --git a/pkg/reports/store.go b/pkg/reports/store.go index 1682310..7d94774 100644 --- a/pkg/reports/store.go +++ b/pkg/reports/store.go @@ -3,6 +3,8 @@ package reports import ( "context" "fmt" + "reflect" + "strings" batchv1 "k8s.io/api/batch/v1" "k8s.io/api/batch/v1beta1" @@ -17,7 +19,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/aquasecurity/starboard/pkg/kube" - "github.com/google/uuid" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,6 +27,7 @@ import ( type StoreInterface interface { Write(ctx context.Context, workload kube.Object, reports vulnerabilities.WorkloadVulnerabilities) error Read(ctx context.Context, workload kube.Object) (vulnerabilities.WorkloadVulnerabilities, error) + HasVulnerabilityReports(ctx context.Context, owner kube.Object, containerImages kube.ContainerImages) (bool, error) } type Store struct { @@ -46,16 +48,19 @@ func 
(s *Store) Write(ctx context.Context, workload kube.Object, reports vulnera return err } - for container, report := range reports { + for containerName, report := range reports { + reportName := fmt.Sprintf("%s-%s-%s", strings.ToLower(string(workload.Kind)), + workload.Name, containerName) + vulnerabilityReport := &starboardv1alpha1.VulnerabilityReport{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(uuid.New().String()), + Name: reportName, Namespace: workload.Namespace, Labels: labels.Set{ kube.LabelResourceKind: string(workload.Kind), kube.LabelResourceName: workload.Name, kube.LabelResourceNamespace: workload.Namespace, - kube.LabelContainerName: container, + kube.LabelContainerName: containerName, }, }, Report: report, @@ -122,3 +127,22 @@ func (s *Store) getRuntimeObjectFor(ctx context.Context, workload kube.Object) ( } return obj.(metav1.Object), nil } + +func (s *Store) HasVulnerabilityReports(ctx context.Context, owner kube.Object, containerImages kube.ContainerImages) (bool, error) { + vulnerabilityReports, err := s.Read(ctx, owner) + if err != nil { + return false, err + } + + actual := map[string]bool{} + for containerName, _ := range vulnerabilityReports { + actual[containerName] = true + } + + expected := map[string]bool{} + for containerName, _ := range containerImages { + expected[containerName] = true + } + + return reflect.DeepEqual(actual, expected), nil +} diff --git a/pkg/resources/resources.go b/pkg/resources/resources.go new file mode 100644 index 0000000..7c7a5e5 --- /dev/null +++ b/pkg/resources/resources.go @@ -0,0 +1,32 @@ +package resources + +import ( + "fmt" + + "github.com/aquasecurity/starboard/pkg/kube" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" +) + +func GetContainerImagesFromPodSpec(spec corev1.PodSpec) kube.ContainerImages { + images := kube.ContainerImages{} + for _, container := range spec.Containers { + images[container.Name] = container.Image + } + return images +} + +func GetContainerImagesFromJob(job 
*batchv1.Job) (kube.ContainerImages, error) { + var containerImagesAsJSON string + var ok bool + + if containerImagesAsJSON, ok = job.Annotations[kube.AnnotationContainerImages]; !ok { + return nil, fmt.Errorf("job does not have required annotation: %s", kube.AnnotationContainerImages) + } + containerImages := kube.ContainerImages{} + err := containerImages.FromJSON(containerImagesAsJSON) + if err != nil { + return nil, fmt.Errorf("parsing job annotation: %s: %w", kube.AnnotationContainerImages, err) + } + return containerImages, nil +} diff --git a/pkg/scanner/scanner.go b/pkg/scanner/scanner.go index 9953b7c..8470ea4 100644 --- a/pkg/scanner/scanner.go +++ b/pkg/scanner/scanner.go @@ -6,7 +6,6 @@ import ( "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" - "github.com/aquasecurity/starboard/pkg/docker" "github.com/aquasecurity/starboard/pkg/kube" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -18,8 +17,6 @@ type Options struct { Namespace string // ServiceAccountName the name of the Service Account to run the Pod controlled by the scan Job. ServiceAccountName string - // ImageCredentials maps container images to Docker authentications credentials in case the images are pulled from private registries. - ImageCredentials map[string]docker.Auth // ScanJobTimeout scan job timeout. ScanJobTimeout time.Duration } @@ -30,6 +27,6 @@ type Options struct { // the specified Kubernetes workload with the given Pod descriptor and Options. 
// type VulnerabilityScanner interface { - NewScanJob(workload kube.Object, spec corev1.PodSpec, options Options) (*batchv1.Job, *corev1.Secret, error) + NewScanJob(workload kube.Object, spec corev1.PodSpec, options Options) (*batchv1.Job, error) ParseVulnerabilityReport(imageRef string, logsReader io.ReadCloser) (v1alpha1.VulnerabilityScanResult, error) } diff --git a/pkg/trivy/scanner.go b/pkg/trivy/scanner.go index e754d90..3b12224 100644 --- a/pkg/trivy/scanner.go +++ b/pkg/trivy/scanner.go @@ -30,13 +30,10 @@ func NewScanner() scanner.VulnerabilityScanner { type trivyScanner struct { } -func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, options scanner.Options) (*batchv1.Job, *corev1.Secret, error) { +func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, options scanner.Options) (*batchv1.Job, error) { jobName := fmt.Sprintf(uuid.New().String()) initContainerName := jobName - imagePullSecretName := jobName - imagePullSecretData := make(map[string][]byte) - var imagePullSecret *corev1.Secret initContainers := []corev1.Container{ { @@ -70,36 +67,6 @@ func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, opt var envs []corev1.EnvVar - if dockerConfig, ok := options.ImageCredentials[c.Image]; ok { - registryUsernameKey := fmt.Sprintf("%s.username", c.Name) - registryPasswordKey := fmt.Sprintf("%s.password", c.Name) - - imagePullSecretData[registryUsernameKey] = []byte(dockerConfig.Username) - imagePullSecretData[registryPasswordKey] = []byte(dockerConfig.Password) - - envs = append(envs, corev1.EnvVar{ - Name: "TRIVY_USERNAME", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: imagePullSecretName, - }, - Key: registryUsernameKey, - }, - }, - }, corev1.EnvVar{ - Name: "TRIVY_PASSWORD", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: 
corev1.LocalObjectReference{ - Name: imagePullSecretName, - }, - Key: registryPasswordKey, - }, - }, - }) - } - scanJobContainers[i] = corev1.Container{ Name: c.Name, Image: trivyImageRef, @@ -140,22 +107,7 @@ func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, opt containerImagesAsJSON, err := containerImages.AsJSON() if err != nil { - return nil, nil, err - } - - if len(imagePullSecretData) > 0 { - imagePullSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: imagePullSecretName, - Namespace: options.Namespace, - Labels: map[string]string{ - kube.LabelResourceKind: string(workload.Kind), - kube.LabelResourceName: workload.Name, - kube.LabelResourceNamespace: workload.Namespace, - }, - }, - Data: imagePullSecretData, - } + return nil, err } return &batchv1.Job{ @@ -204,7 +156,7 @@ func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, opt }, }, }, - }, imagePullSecret, nil + }, nil } func (s *trivyScanner) ParseVulnerabilityReport(imageRef string, logsReader io.ReadCloser) (v1alpha1.VulnerabilityScanResult, error) {