diff --git a/.github/workflows/check-actions.yaml b/.github/workflows/check-actions.yaml index e0ce4947e..730ebc85d 100644 --- a/.github/workflows/check-actions.yaml +++ b/.github/workflows/check-actions.yaml @@ -16,9 +16,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Ensure SHA pinned actions - uses: zgosalvez/github-actions-ensure-sha-pinned-actions@76d1d8e0b075d7190b5d59b86da91c7bdbcc99b2 # v3.0.7 + uses: zgosalvez/github-actions-ensure-sha-pinned-actions@b88cd0aad2c36a63e42c71f81cb1958fed95ac87 # v3.0.10 with: allowlist: | kyverno/chainsaw diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8d6c4f603..f895d97ee 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: options: --user root steps: - name: Checkout code - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: policies - name: Run ah lint @@ -33,14 +33,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: policies - name: Validate all policies run: ./.hack/verify-files-structure.sh working-directory: policies - name: Clone Kyverno - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: repository: kyverno/kyverno path: kyverno @@ -58,11 +58,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: policies - name: Checkout Kyverno - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: repository: kyverno/kyverno path: kyverno diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0f7cc7266..f3a14835a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,9 +44,12 @@ jobs: - ^flux$ - ^flux-cel$ - ^istio$ + - ^istio-cel$ - ^karpenter$ - ^kasten$ + - ^kasten-cel$ - ^kubecost$ + - ^kubecost-cel$ - ^kubeops$ - ^kubevirt$ - ^linkerd$ @@ -68,7 +71,7 @@ jobs: - ^other$/^res - ^other-cel$/^res - ^other$/^[s-z] - - ^other-cel$/^res + - ^other-cel$/^[s-z] - ^pod-security$ - ^pod-security-cel$ - ^psa$ @@ -82,7 +85,7 @@ jobs: name: ${{ matrix.k8s-version.name }} - ${{ matrix.tests }} steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Setup Go uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/chainsaw-test.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..c160abb64 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,41 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: 
chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: enforce-sidecar-injection-namespace +spec: + steps: + - name: step-01 + try: + - apply: + file: ../enforce-sidecar-injection-namespace.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: enforce-sidecar-injection-namespace + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ns-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-disabled.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-nolabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-somelabel.yaml diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-disabled.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-disabled.yaml new file mode 100644 index 000000000..0eec7ea44 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-disabled.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio-injection: disabled + name: bad-istio-sinj01 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-nolabel.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-nolabel.yaml new file mode 100644 index 000000000..4caa0efdb --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-nolabel.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: bad-istio-sinj03 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-somelabel.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-somelabel.yaml new file mode 100644 index 000000000..d25585d2a --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-somelabel.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: enabled + name: bad-istio-sinj02 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-good.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-good.yaml new file mode 100644 index 000000000..a5f30d2ac --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-good.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio-injection: enabled + name: good-istio-sinj01 +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: disabled + istio-injection: enabled + bar: enabled + name: good-istio-sinj02 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/policy-ready.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..acc3f29fb --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-sidecar-injection-namespace +status: + ready: true diff --git a/istio-cel/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml b/istio-cel/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e457fa2b8 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: 
cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-sidecar-injection-namespace +policies: +- ../enforce-sidecar-injection-namespace.yaml +resources: +- ../.chainsaw-test/ns-bad-disabled.yaml +- ../.chainsaw-test/ns-bad-nolabel.yaml +- ../.chainsaw-test/ns-bad-somelabel.yaml +- ../.chainsaw-test/ns-good.yaml +results: +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - bad-istio-sinj01 + - bad-istio-sinj02 + - bad-istio-sinj03 + result: fail +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - good-istio-sinj01 + - good-istio-sinj02 + result: pass + diff --git a/istio-cel/enforce-sidecar-injection-namespace/artifacthub-pkg.yml b/istio-cel/enforce-sidecar-injection-namespace/artifacthub-pkg.yml new file mode 100644 index 000000000..177e3d150 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: enforce-sidecar-injection-namespace-cel +version: 1.0.0 +displayName: Enforce Istio Sidecar Injection in CEL expressions +description: >- + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces set `istio-injection` to `enabled`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml + ``` +keywords: + - kyverno + - Istio + - CEL Expressions +readme: | + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces set `istio-injection` to `enabled`. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace" +digest: 123feb2a8d1b2743e33b1f91ddf7291c47eedcf2c24ae537a1d3afe6c503338d +createdAt: "2024-05-12T04:38:32Z" + diff --git a/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml b/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml new file mode 100644 index 000000000..5a2c91d80 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-sidecar-injection-namespace + annotations: + policies.kyverno.io/title: Enforce Istio Sidecar Injection in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label + `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces + set `istio-injection` to `enabled`.
+spec: + validationFailureAction: Audit + background: true + rules: + - name: check-istio-injection-enabled + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && 'istio-injection' in object.metadata.labels && object.metadata.labels['istio-injection'] == 'enabled'" + message: "All new Namespaces must have Istio sidecar injection enabled." + diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/chainsaw-test.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..e547cafa5 --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: enforce-strict-mtls +spec: + steps: + - name: step-01 + try: + - apply: + file: ../enforce-strict-mtls.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: enforce-strict-mtls + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: pa-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pa-bad.yaml diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/crd-assert.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..56561a629 --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: peerauthentications.security.istio.io +spec: {} +status: + acceptedNames: + kind: PeerAuthentication + listKind: PeerAuthenticationList + plural: peerauthentications + singular: peerauthentication + storedVersions: + - v1beta1 diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-bad.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-bad.yaml new file mode 100644 index 000000000..771d21f3d --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-bad.yaml @@ -0,0 +1,26 @@ +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: pa-bad01 +spec: + mtls: + mode: PERMISSIVE +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: pa-bad02 +spec: + mtls: + mode: DISABLE +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: pa-bad03 +spec: + selector: + matchLabels: + app: finance + mtls: + mode: DISABLE \ No newline at end of file diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-good.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-good.yaml new file mode 100644 index 000000000..0d2d9d383 --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-good.yaml @@ -0,0 +1,39 @@ +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa01 +spec: + mtls: + mode: STRICT +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa02 +spec: + mtls: + mode: UNSET +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa03 +spec: {} +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa04 +spec: + selector: + matchLabels: + app: finance + 
mtls: + mode: STRICT +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa05 +spec: + mtls: {} \ No newline at end of file diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/policy-ready.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..c5f7637cb --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-strict-mtls +status: + ready: true diff --git a/istio-cel/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml b/istio-cel/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..f4018437a --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,29 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-strict-mtls +policies: +- ../enforce-strict-mtls.yaml +resources: +- ../.chainsaw-test/pa-bad.yaml +- ../.chainsaw-test/pa-good.yaml +results: +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - pa-bad01 + - pa-bad02 + - pa-bad03 + result: fail +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - good-pa01 + - good-pa02 + - good-pa03 + - good-pa04 + - good-pa05 + result: pass + diff --git a/istio-cel/enforce-strict-mtls/artifacthub-pkg.yml b/istio-cel/enforce-strict-mtls/artifacthub-pkg.yml new file mode 100644 index 000000000..e760f1c9e --- /dev/null +++ b/istio-cel/enforce-strict-mtls/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: enforce-strict-mtls-cel +version: 1.0.0 +displayName: Enforce Istio Strict mTLS in CEL expressions +description: >- + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on the `istio-system` Namespace, could disable it across the entire mesh. Disabling mTLS can reduce the security for traffic within that portion of the mesh and should be controlled. This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring the `mode` be set to either `UNSET` or `STRICT`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml + ``` +keywords: + - kyverno + - Istio + - CEL Expressions +readme: | + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on the `istio-system` Namespace, could disable it across the entire mesh. Disabling mTLS can reduce the security for traffic within that portion of the mesh and should be controlled. This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring the `mode` be set to either `UNSET` or `STRICT`.
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "PeerAuthentication" +digest: 6bf5af52d9187ac5b1b90879ab3975ea618b38d04928ceecd4779fc2b2e4b26a +createdAt: "2024-05-12T04:41:47Z" + diff --git a/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml b/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml new file mode 100644 index 000000000..33747bbfd --- /dev/null +++ b/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-strict-mtls + annotations: + policies.kyverno.io/title: Enforce Istio Strict mTLS in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: PeerAuthentication + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which + can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on + the `istio-system` Namespace, could disable it across the entire mesh. Disabling mTLS + can reduce the security for traffic within that portion of the mesh and should be controlled. + This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring + the `mode` be set to either `UNSET` or `STRICT`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-mtls + match: + any: + - resources: + kinds: + - PeerAuthentication + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec) || !has(object.spec.mtls) || !has(object.spec.mtls.mode) || + object.spec.mtls.mode in ['UNSET', 'STRICT'] + message: "PeerAuthentication resources may only set UNSET or STRICT for the mode."
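For reviewers: a minimal sketch (resource names are illustrative, not part of this changeset) of how the rule's CEL expression short-circuits. Each `!has(...)` disjunct admits a PeerAuthentication that omits that field, so only an explicit mode outside `UNSET`/`STRICT` is rejected once the policy runs with `validationFailureAction: Enforce`:

```yaml
# Admitted: spec.mtls.mode is absent, so the disjunct
# !has(object.spec.mtls.mode) evaluates to true.
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: example-unset        # illustrative name
spec:
  mtls: {}
---
# Rejected: every has() check passes and 'PERMISSIVE' is not
# in ['UNSET', 'STRICT'], so the whole expression is false.
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: example-permissive   # illustrative name
spec:
  mtls:
    mode: PERMISSIVE
```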
+ diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/chainsaw-test.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..a1695faa6 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prevent-disabling-injection-pods +spec: + steps: + - name: step-01 + try: + - apply: + file: ../prevent-disabling-injection-pods.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: prevent-disabling-injection-pods + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..3f2e6b492 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + app.k8s.io/name: badpod01 + sidecar.istio.io/inject: "false" + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + sidecar.istio.io/inject: "false" + app.k8s.io/name: badpod02 + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-good.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..de1e03c32 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-good.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + app.k8s.io/name: goodpod01 + sidecar.istio.io/inject: "true" + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + app.k8s.io/name: goodpod02 + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..90c90ffc0 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + app.k8s.io/name: busybox + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + 
template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: "false" + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: "false" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - sleep + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + app.k8s.io/name: busybox + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - sleep + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..59d1afa1b --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + app.k8s.io/name: busybox + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: "true" + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + app.k8s.io/name: busybox + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: "true" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/policy-ready.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..4c6866bd0 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-disabling-injection-pods +status: + ready: true diff --git a/istio-cel/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml b/istio-cel/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..5aa5be9ad --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,55 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-disabling-injection-pods +policies: +- ../prevent-disabling-injection-pods.yaml 
+resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - badpod01 + - badpod02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - baddeploy01 + - baddeploy02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - badcronjob01 + - badcronjob02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass + diff --git a/istio-cel/prevent-disabling-injection-pods/artifacthub-pkg.yml b/istio-cel/prevent-disabling-injection-pods/artifacthub-pkg.yml new file mode 100644 index 000000000..36ec09a25 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: prevent-disabling-injection-pods-cel +version: 1.0.0 +displayName: Prevent Disabling Istio Sidecar Injection in CEL expressions +description: >- + One way sidecar injection in an Istio service mesh may be accomplished by defining an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh, thereby reducing visibility. This policy ensures that Pods cannot set the annotation `sidecar.istio.io/inject` to a value of `false`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml + ``` +keywords: + - kyverno + - Istio + - CEL Expressions +readme: | + One way sidecar injection in an Istio service mesh may be accomplished by defining an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh, thereby reducing visibility. This policy ensures that Pods cannot set the annotation `sidecar.istio.io/inject` to a value of `false`.
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 5de03c078273ce913a6ebf9064a85be4255b82e36f74bda822984e261363fe8b +createdAt: "2024-05-12T04:48:58Z" + diff --git a/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml b/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml new file mode 100644 index 000000000..6662e5151 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml @@ -0,0 +1,38 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-disabling-injection-pods + annotations: + policies.kyverno.io/title: Prevent Disabling Istio Sidecar Injection in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + One way sidecar injection in an Istio service mesh may be accomplished by defining + an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh, + thereby reducing visibility. This policy ensures that Pods cannot set the annotation + `sidecar.istio.io/inject` to a value of `false`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: prohibit-inject-annotation + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.metadata.annotations) || !('sidecar.istio.io/inject' in object.metadata.annotations) || + object.metadata.annotations['sidecar.istio.io/inject'] != 'false' + message: "Pods may not disable sidecar injection by setting the annotation sidecar.istio.io/inject to a value of false."
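The CLI test above expects Deployment and CronJob results from this same `prohibit-inject-annotation` rule; that works because Kyverno's rule autogeneration applies a Pod-level validate rule to pod controller templates as well. A minimal sketch (names are illustrative, not from this changeset) of the two boundary cases the expression distinguishes:

```yaml
# Admitted: the Pod has no annotations, so !has(object.metadata.annotations)
# short-circuits the whole CEL expression to true.
apiVersion: v1
kind: Pod
metadata:
  name: example-default      # illustrative name
spec:
  containers:
  - name: app
    image: busybox:1.35
---
# Rejected: the annotation is present and equals 'false', so the final
# disjunct (annotations['sidecar.istio.io/inject'] != 'false') fails.
apiVersion: v1
kind: Pod
metadata:
  name: example-opt-out      # illustrative name
  annotations:
    sidecar.istio.io/inject: "false"
spec:
  containers:
  - name: app
    image: busybox:1.35
```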
+ diff --git a/istio/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml b/istio/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e457fa2b8 --- /dev/null +++ b/istio/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-sidecar-injection-namespace +policies: +- ../enforce-sidecar-injection-namespace.yaml +resources: +- ../.chainsaw-test/ns-bad-disabled.yaml +- ../.chainsaw-test/ns-bad-nolabel.yaml +- ../.chainsaw-test/ns-bad-somelabel.yaml +- ../.chainsaw-test/ns-good.yaml +results: +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - bad-istio-sinj01 + - bad-istio-sinj02 + - bad-istio-sinj03 + result: fail +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - good-istio-sinj01 + - good-istio-sinj02 + result: pass + diff --git a/istio/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml b/istio/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..f4018437a --- /dev/null +++ b/istio/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,29 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-strict-mtls +policies: +- ../enforce-strict-mtls.yaml +resources: +- ../.chainsaw-test/pa-bad.yaml +- ../.chainsaw-test/pa-good.yaml +results: +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - pa-bad01 + - pa-bad02 + - pa-bad03 + result: fail +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - good-pa01 + - good-pa02 + - good-pa03 + - good-pa04 + - good-pa05 + result: pass + diff --git a/istio/enforce-tls-hosts-host-subnets/.kyverno-test/kyverno-test.yaml b/istio/enforce-tls-hosts-host-subnets/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..8b2e1cf9b --- /dev/null +++ b/istio/enforce-tls-hosts-host-subnets/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-tls-hosts-host-subnets +policies: +- ../enforce-tls-hosts-host-subnets.yaml +resources: +- ../.chainsaw-test/dr-bad.yaml +- ../.chainsaw-test/dr-good.yaml +results: +- policy: enforce-tls-hosts-host-subnets + rule: destrule + kind: DestinationRule + resources: + - bad-dr01 + - bad-dr02 + result: fail +- policy: enforce-tls-hosts-host-subnets + rule: destrule + kind: DestinationRule + resources: + - good-dr01 + - good-dr02 + - good-dr03 + - good-dr04 + - good-dr05 + result: pass + diff --git a/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml b/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml index d68283137..3f2e6b492 100644 --- a/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml +++ b/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml @@ -19,8 +19,8 @@ metadata: foo: bar annotations: sidecar.istio.io/inject: "false" - app.k8s.io/name: badpod01 - name: badpod01 + app.k8s.io/name: badpod02 + name: badpod02 spec: containers: - name: busybox diff --git a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml index ad858a2d7..90c90ffc0 100644 --- a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml +++ 
b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml @@ -53,16 +53,17 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: app.k8s.io/name: busybox sidecar.istio.io/inject: "false" + spec: containers: - name: hello image: busybox:1.35 command: - sleep - - 3600 + - "3600" restartPolicy: OnFailure --- apiVersion: batch/v1 @@ -74,14 +75,15 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: sidecar.istio.io/inject: "false" app.k8s.io/name: busybox + spec: containers: - name: hello image: busybox:1.35 - command: + command: - sleep - - 3600 + - "3600" restartPolicy: OnFailure \ No newline at end of file diff --git a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml index 387a650ab..59d1afa1b 100644 --- a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml +++ b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml @@ -52,9 +52,10 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: app.k8s.io/name: busybox + spec: containers: - name: hello image: busybox:1.35 @@ -72,10 +73,11 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: app.k8s.io/name: busybox sidecar.istio.io/inject: "true" + spec: containers: - name: hello image: busybox:1.35 diff --git a/istio/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml b/istio/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..5aa5be9ad --- /dev/null +++ b/istio/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,55 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-disabling-injection-pods +policies: +- ../prevent-disabling-injection-pods.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - badpod01 + - badpod02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - baddeploy01 + - baddeploy02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - badcronjob01 + - badcronjob02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass + diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/chainsaw-test.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..81d089924 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,60 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: 
k10-data-protection-by-label +spec: + steps: + - name: step-01 + try: + - apply: + file: ../k10-data-protection-by-label.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: k10-data-protection-by-label + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ns.yaml + - apply: + file: deployment-good.yaml + - apply: + file: ss-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: deployment-bad-badlabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: deployment-bad-nolabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: ss-bad-badlabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: ss-bad-nolabel.yaml + - name: step-98 + try: + - script: + content: kubectl delete deployments --all --force --grace-period=0 -n k10-dplabel-ns + - script: + content: kubectl delete statefulsets --all --force --grace-period=0 -n k10-dplabel-ns + - script: + content: kubectl delete pods --all --force --grace-period=0 -n k10-dplabel-ns diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/crd-assert.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..d660e00cb --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: policies.config.kio.kasten.io +spec: {} +status: + acceptedNames: + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + storedVersions: + - v1alpha1 diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-badlabel.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-badlabel.yaml new file mode 100644 index 000000000..040ccdb48 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-badlabel.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy02 + labels: + app: busybox + purpose: production + dataprotection: foo-bar +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-nolabel.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-nolabel.yaml new file mode 100644 index 000000000..c34fd4785 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-nolabel.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy01 + labels: + app: busybox + purpose: production +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-good.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-good.yaml new file mode 100644 index 000000000..dcf3c489e --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-good.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: gooddeploy01 + namespace: k10-dplabel-ns + labels: + app: busybox + purpose: production + dataprotection: k10-goldpolicy +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy02 + namespace: k10-dplabel-ns + labels: + app: busybox + purpose: development + dataprotection: foo-bar +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy03 + namespace: k10-dplabel-ns + labels: + app: busybox +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/nginx-deployment-invalid.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/nginx-deployment-invalid.yaml new file mode 100644 index 000000000..566318b81 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/nginx-deployment-invalid.yaml @@ -0,0 +1,31 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: nginx + labels: + name: nginx +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: nginx + labels: + app: nginx + purpose: production + dataprotection: none # invalid named K10 Policy!! +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/kasten/k10-minimum-retention/.chainsaw-test/ns.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ns.yaml similarity index 58% rename from kasten/k10-minimum-retention/.chainsaw-test/ns.yaml rename to kasten-cel/k10-data-protection-by-label/.chainsaw-test/ns.yaml index 6ff7e7310..00e9c20e7 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/ns.yaml +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ns.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: k10-minimum-retention \ No newline at end of file + name: k10-dplabel-ns \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/policy-ready.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..4e8dfe8c2 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-data-protection-by-label +status: + ready: true diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-badlabel.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-badlabel.yaml new file mode 100644 index 000000000..cf1a15841 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-badlabel.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: bad-ss02 + labels: + purpose: production + dataprotection: foo-bar +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + minReadySeconds: 10 + template: + 
metadata: + labels: + app: busybox + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-nolabel.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-nolabel.yaml new file mode 100644 index 000000000..397a81231 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-nolabel.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: bad-ss01 + labels: + purpose: production +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: busybox + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-good.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-good.yaml new file mode 100644 index 000000000..bc6216c38 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-good.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: good-ss01 + namespace: k10-dplabel-ns + labels: + purpose: production + dataprotection: k10-silverpolicy +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: good-ss02 + namespace: k10-dplabel-ns +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: good-ss03 + namespace: k10-dplabel-ns + labels: + purpose: development + dataprotection: foo-bar +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.kyverno-test/kyverno-test.yaml b/kasten-cel/k10-data-protection-by-label/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..58f925a36 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: kyverno_data_protection_tests +policies: +- ../k10-data-protection-by-label.yaml +resources: +- nginx-deployment.yaml +results: +- kind: Deployment + policy: k10-data-protection-by-label + resources: + - nginx-deployment-invalid + result: fail + rule: k10-data-protection-by-label +- kind: Deployment + policy: k10-data-protection-by-label + resources: + - nginx-deployment + result: pass + rule: k10-data-protection-by-label diff --git a/kasten-cel/k10-data-protection-by-label/.kyverno-test/nginx-deployment.yaml b/kasten-cel/k10-data-protection-by-label/.kyverno-test/nginx-deployment.yaml new file mode 100644 index 000000000..9ceb00a66 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.kyverno-test/nginx-deployment.yaml @@ -0,0 +1,60 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: nginx + labels: + name: nginx + purpose: production + dataprotection: k10-goldpolicy + immutable: enabled +--- +apiVersion: apps/v1
+kind: Deployment +metadata: + name: nginx-deployment + namespace: nginx + labels: + app: nginx + purpose: production + dataprotection: k10-goldpolicy # set a policy to use our 'gold' standard data protection policy (generate-gold-backup-policy) + immutable: enabled +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment-invalid + namespace: nginx + labels: + app: nginx + purpose: production + dataprotection: none # invalid named K10 Policy!! +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/kasten-cel/k10-data-protection-by-label/artifacthub-pkg.yml b/kasten-cel/k10-data-protection-by-label/artifacthub-pkg.yml new file mode 100644 index 000000000..2ca7fa978 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: k10-data-protection-by-label-cel +version: 1.0.0 +displayName: Check Data Protection By Label in CEL expressions +description: >- + Check the 'dataprotection' label to ensure that production Deployments and StatefulSets have a named K10 Policy. Use in combination with a 'generate' ClusterPolicy to 'generate' a specific K10 Policy by name. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml + ``` +keywords: + - kyverno + - Kasten K10 by Veeam + - CEL Expressions +readme: | + Check the 'dataprotection' label to ensure that production Deployments and StatefulSets have a named K10 Policy. Use in combination with a 'generate' ClusterPolicy to 'generate' a specific K10 Policy by name. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kasten K10 by Veeam in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Deployment, StatefulSet" +digest: e3a088a52aac74e16f9b2776df78891344edd6dc03ee6456dc71d71c34519325 +createdAt: "2024-05-12T07:05:48Z" + diff --git a/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml b/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml new file mode 100644 index 000000000..58270490c --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml @@ -0,0 +1,36 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-data-protection-by-label + annotations: + policies.kyverno.io/title: Check Data Protection By Label in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Deployment, StatefulSet + policies.kyverno.io/description: >- + Check the 'dataprotection' label to ensure that production Deployments and StatefulSets have a named K10 Policy. + Use in combination with a 'generate' ClusterPolicy to 'generate' a specific K10 Policy by name.
+spec: + validationFailureAction: Audit + rules: + - name: k10-data-protection-by-label + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + selector: + matchLabels: + purpose: production + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && has(object.metadata.labels.dataprotection) && object.metadata.labels.dataprotection.startsWith('k10-')" + message: "Deployments and StatefulSets that specify 'dataprotection' label must have a valid k10-?* name (use labels: dataprotection: k10-)" + diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/chainsaw-test.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..e02442dae --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: k10-hourly-rpo +spec: + steps: + - name: step-01 + try: + - apply: + file: ../k10-hourly-rpo.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: k10-policy-hourly-rpo + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: k10-good-policy.yaml + - apply: + expect: + - check: + ($error != null): true + file: k10-bad-policy.yaml diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/crd-assert.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..d660e00cb --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: policies.config.kio.kasten.io +spec: {} +status: + acceptedNames: + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + storedVersions: + - v1alpha1 diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-bad-policy.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-bad-policy.yaml new file mode 100644 index 000000000..1bc2dd2d6 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-bad-policy.yaml @@ -0,0 +1,34 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: k10-hourlyrpo-badpolicy01 + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@daily' + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp \ No newline at end of file diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-good-policy.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-good-policy.yaml new file mode 100644 index 000000000..8acf13284 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-good-policy.yaml @@ -0,0 +1,34 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: k10-hourlyrpo-goodpolicy01 + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@hourly' + subFrequency: + 
minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp \ No newline at end of file diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/policy-ready.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..3fa1c7221 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-policy-hourly-rpo +status: + ready: true diff --git a/kasten-cel/k10-hourly-rpo/.kyverno-test/backup-export-policy.yaml b/kasten-cel/k10-hourly-rpo/.kyverno-test/backup-export-policy.yaml new file mode 100644 index 000000000..96430f81a --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.kyverno-test/backup-export-policy.yaml @@ -0,0 +1,73 @@ +# An example compliant K10 Policy +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: hourly-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@hourly' # change this to @daily to test the 'audit_mission_critical_RPO' policy + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export # comment this line out to test 'enforce_3-2-1' policy + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp +--- +# An example compliant K10 Policy +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: daily-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@daily' # change this to @daily to test the 'audit_mission_critical_RPO' policy + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export # comment this line out to test 'enforce_3-2-1' policy + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp diff --git a/kasten/k10-minimum-retention/.kyverno-test/kyverno-test.yaml b/kasten-cel/k10-hourly-rpo/.kyverno-test/kyverno-test.yaml similarity index 50% rename from kasten/k10-minimum-retention/.kyverno-test/kyverno-test.yaml rename to kasten-cel/k10-hourly-rpo/.kyverno-test/kyverno-test.yaml index c6b85cb42..965e355f5 100644 --- a/kasten/k10-minimum-retention/.kyverno-test/kyverno-test.yaml +++ b/kasten-cel/k10-hourly-rpo/.kyverno-test/kyverno-test.yaml @@ -3,14 +3,19 @@ kind: Test metadata: name: kyverno_data_protection_tests policies: -- ../k10-minimum-retention.yaml +- ../k10-hourly-rpo.yaml resources: - backup-export-policy.yaml results: - kind: Policy - patchedResource: patched.yaml - policy: k10-minimum-retention + policy: k10-policy-hourly-rpo + resources: + - daily-policy + result: fail + rule: k10-policy-hourly-rpo +- kind: 
Policy + policy: k10-policy-hourly-rpo + resources: + - hourly-policy + result: pass - rule: k10-minimum-retention + rule: k10-policy-hourly-rpo diff --git a/kasten-cel/k10-hourly-rpo/artifacthub-pkg.yml b/kasten-cel/k10-hourly-rpo/artifacthub-pkg.yml new file mode 100644 index 000000000..bffb28b92 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: k10-hourly-rpo-cel +version: 1.0.0 +displayName: Check Hourly RPO in CEL expressions +description: >- + K10 Policy resources can be validated to adhere to common Recovery Point Objective (RPO) best practices. This policy advises using an RPO frequency with hourly granularity for resources that carry the appPriority: Mission-Critical label +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml + ``` +keywords: + - kyverno + - Kasten K10 by Veeam + - CEL Expressions +readme: | + K10 Policy resources can be validated to adhere to common Recovery Point Objective (RPO) best practices. This policy advises using an RPO frequency with hourly granularity for resources that carry the appPriority: Mission-Critical label + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kasten K10 by Veeam in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Policy" +digest: 4c95862e422636b4900103e9620ed8e41d4cafd86984a1d22e81f35767bb0eef +createdAt: "2024-05-12T07:07:31Z" + diff --git a/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml b/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml new file mode 100644 index 000000000..d5f62904f --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-policy-hourly-rpo + annotations: + policies.kyverno.io/title: Check Hourly RPO in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Policy + policies.kyverno.io/description: >- + K10 Policy resources can be validated to adhere to common Recovery Point Objective (RPO) best practices. 
+ This policy advises using an RPO frequency with hourly granularity for resources that carry the appPriority: Mission-Critical label +spec: + validationFailureAction: Audit + rules: + - name: k10-policy-hourly-rpo + match: + any: + - resources: + kinds: + - config.kio.kasten.io/v1alpha1/Policy + operations: + - CREATE + - UPDATE + selector: + matchLabels: + appPriority: Mission-Critical + validate: + cel: + expressions: + - expression: "has(object.spec.frequency) && object.spec.frequency == '@hourly'" + message: "Mission Critical policies must use an @hourly RPO frequency" + diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/chainsaw-test.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..8893c6007 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: k10-validate-ns-by-preset-label +spec: + steps: + - name: step-01 + try: + - apply: + file: ../k10-validate-ns-by-preset-label.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: k10-validate-ns-by-preset-label + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ns-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad.yaml diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/crd-assert.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..d660e00cb --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: policies.config.kio.kasten.io +spec: {} +status: + acceptedNames: + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + storedVersions: + - v1alpha1 diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-bad.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-bad.yaml new file mode 100644 index 000000000..baf81215b --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-bad.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-badns01 + labels: + dataprotection: foo-bar +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-badns02 \ No newline at end of file diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-good.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-good.yaml new file mode 100644 index 000000000..f9fcdc245 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-good.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns01 + labels: + dataprotection: gold +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns02 + labels: + dataprotection: silver +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns03 + labels: + dataprotection: bronze +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns04 + labels: + dataprotection: none \ No newline at 
end of file diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/policy-ready.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..27e86f51b --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-validate-ns-by-preset-label +status: + ready: true diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/kyverno-test.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..440c95426 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,24 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: k10-validate-ns-by-preset-label-test +policies: +- ../k10-validate-ns-by-preset-label.yaml +resources: +- test-resource.yaml +results: +- kind: Namespace + policy: k10-validate-ns-by-preset-label + resources: + - namespace-invalid + result: fail + rule: k10-validate-ns-by-preset-label +- kind: Namespace + policy: k10-validate-ns-by-preset-label + resources: + - namespace-gold + - namespace-silver + - namespace-bronze + - namespace-none + result: pass + rule: k10-validate-ns-by-preset-label diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/test-resource.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/test-resource.yaml new file mode 100644 index 000000000..f5dc36f2b --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/test-resource.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-gold + labels: + dataprotection: gold +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-silver + labels: + dataprotection: silver +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-bronze + labels: + dataprotection: bronze +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-none + labels: + dataprotection: none +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-invalid \ No newline at end of file diff --git a/kasten-cel/k10-validate-ns-by-preset-label/artifacthub-pkg.yml b/kasten-cel/k10-validate-ns-by-preset-label/artifacthub-pkg.yml new file mode 100644 index 000000000..c1ec63ef0 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: k10-validate-ns-by-preset-label-cel +version: 1.0.0 +displayName: Validate Data Protection by Preset Label in CEL expressions +description: >- + Kubernetes applications are typically deployed into a single, logical namespace. Kasten K10 policies will discover and protect all resources within the selected namespace(s). This policy ensures all new namespaces include a label referencing a valid K10 SLA (Policy Preset) for data protection. This policy can be used in combination with a generate ClusterPolicy to automatically create a K10 policy based on the specified SLA. The combination ensures that new applications are not inadvertently left unprotected. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml + ``` +keywords: + - kyverno + - Kasten K10 by Veeam + - CEL Expressions +readme: | + Kubernetes applications are typically deployed into a single, logical namespace. 
Kasten K10 policies will discover and protect all resources within the selected namespace(s). This policy ensures all new namespaces include a label referencing a valid K10 SLA (Policy Preset) for data protection. This policy can be used in combination with a generate ClusterPolicy to automatically create a K10 policy based on the specified SLA. The combination ensures that new applications are not inadvertently left unprotected. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kasten K10 by Veeam in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace" +digest: e58ab4c2018542a6acd5e97446b09cf04cec26425b9a29f0207c518310c449f3 +createdAt: "2024-05-12T07:09:08Z" + diff --git a/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml b/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml new file mode 100644 index 000000000..4668e742a --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml @@ -0,0 +1,42 @@ +#NOTE: This example assumes that K10 policy presets named "gold", "silver", and "bronze" have been pre-created and K10 was deployed into the `kasten-io` namespace. +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-validate-ns-by-preset-label + annotations: + policies.kyverno.io/title: Validate Data Protection by Preset Label in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + policies.kyverno.io/subject: Namespace + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Kubernetes applications are typically deployed into a single, logical namespace. + Kasten K10 policies will discover and protect all resources within the selected namespace(s). + This policy ensures all new namespaces include a label referencing a valid K10 SLA + (Policy Preset) for data protection. This policy can be used in combination with a generate + ClusterPolicy to automatically create a K10 policy based on the specified SLA. + The combination ensures that new applications are not inadvertently left unprotected. +spec: + validationFailureAction: Audit + rules: + - name: k10-validate-ns-by-preset-label + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && has(object.metadata.labels.dataprotection) && object.metadata.labels.dataprotection in ['gold', 'silver', 'bronze', 'none']" + message: >- + Namespaces must specify a "dataprotection" label with a value corresponding to a Kasten K10 SLA: + + "gold" - + "silver" - + "bronze" - + "none" - No local snapshots or backups diff --git a/kasten/k10-minimum-retention/artifacthub-pkg.yml b/kasten/k10-minimum-retention/artifacthub-pkg.yml deleted file mode 100644 index de205e415..000000000 --- a/kasten/k10-minimum-retention/artifacthub-pkg.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: k10-minimum-retention -version: 1.0.0 -displayName: Minimum Backup Retention -createdAt: "2023-04-10T20:12:53.000Z" -description: >- - K10 Policy resources can be validated to adhere to common compliance retention standards. Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. 
This policy deletes the retention value in the backup operation and replaces it with the specified retention. Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. To use different This policy can also be used go reduce retentions lengths to enforce cost optimization. -install: |- - ```shell - kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/k10-minimum-retention/k10-minimum-retention.yaml - ``` -keywords: - - kyverno - - Kasten K10 by Veeam -readme: | - K10 Policy resources can be validated to adhere to common compliance retention standards. Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. This policy deletes the retention value in the backup operation and replaces it with the specified retention. Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. To use different This policy can also be used go reduce retentions lengths to enforce cost optimization. - - Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ -annotations: - kyverno/category: "Kasten K10 by Veeam" - kyverno/kubernetesVersion: "1.21-1.22" - kyverno/subject: "Policy" -digest: f7d09195f6c8982f0075c866b0480626a3fbf4fd352130ae0a1be86abb79c2b7 diff --git a/kasten/k10-minimum-retention/.chainsaw-test/README.md b/kasten/kasten-minimum-retention/.chainsaw-test/README.md similarity index 100% rename from kasten/k10-minimum-retention/.chainsaw-test/README.md rename to kasten/kasten-minimum-retention/.chainsaw-test/README.md diff --git a/kasten/k10-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml similarity index 100% rename from kasten/k10-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml diff --git a/kasten/k10-minimum-retention/.chainsaw-test/chainsaw-test.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-test.yaml similarity index 87% rename from kasten/k10-minimum-retention/.chainsaw-test/chainsaw-test.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-test.yaml index a65611901..fbed4e96a 100755 --- a/kasten/k10-minimum-retention/.chainsaw-test/chainsaw-test.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-test.yaml @@ -3,7 +3,7 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: creationTimestamp: null - name: k10-minimum-retention + name: kasten-minimum-retention spec: steps: - name: step-01 @@ -15,7 +15,7 @@ spec: - apply: file: ns.yaml - apply: - file: ../k10-minimum-retention.yaml + file: ../kasten-minimum-retention.yaml - assert: file: policy-ready.yaml - name: step-03 diff --git a/kasten/k10-minimum-retention/.chainsaw-test/kuttlresource.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/kuttlresource.yaml similarity index 95% rename from kasten/k10-minimum-retention/.chainsaw-test/kuttlresource.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/kuttlresource.yaml index 9f139aaba..7bb8254fc 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/kuttlresource.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/kuttlresource.yaml @@ -2,7 +2,7 @@ apiVersion: config.kio.kasten.io/v1alpha1 kind: Policy metadata: name: hourly-policy - namespace: k10-minimum-retention 
+ namespace: kasten-minimum-retention labels: appPriority: Mission-Critical spec: diff --git a/kasten/kasten-minimum-retention/.chainsaw-test/ns.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/ns.yaml new file mode 100644 index 000000000..9fdbec7b2 --- /dev/null +++ b/kasten/kasten-minimum-retention/.chainsaw-test/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kasten-minimum-retention \ No newline at end of file diff --git a/kasten/kasten-minimum-retention/.chainsaw-test/policy-ready.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..927882ff9 --- /dev/null +++ b/kasten/kasten-minimum-retention/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,9 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: kasten-minimum-retention +status: + conditions: + - reason: Succeeded + status: "True" + type: Ready \ No newline at end of file diff --git a/kasten/k10-minimum-retention/.chainsaw-test/resource-mutated.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/resource-mutated.yaml similarity index 95% rename from kasten/k10-minimum-retention/.chainsaw-test/resource-mutated.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/resource-mutated.yaml index 171754953..569de9cf8 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/resource-mutated.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/resource-mutated.yaml @@ -5,7 +5,7 @@ metadata: labels: appPriority: Mission-Critical name: hourly-policy - namespace: k10-minimum-retention + namespace: kasten-minimum-retention spec: actions: - action: backup diff --git a/kasten/k10-minimum-retention/.kyverno-test/backup-export-policy.yaml b/kasten/kasten-minimum-retention/.kyverno-test/kasten-hourly-policy.yaml similarity index 76% rename from kasten/k10-minimum-retention/.kyverno-test/backup-export-policy.yaml rename to kasten/kasten-minimum-retention/.kyverno-test/kasten-hourly-policy.yaml index 6abb9a37f..94db9932e 100644 --- a/kasten/k10-minimum-retention/.kyverno-test/backup-export-policy.yaml +++ b/kasten/kasten-minimum-retention/.kyverno-test/kasten-hourly-policy.yaml @@ -1,4 +1,3 @@ -# An example compliant K10 Policy apiVersion: config.kio.kasten.io/v1alpha1 kind: Policy metadata: @@ -8,7 +7,7 @@ metadata: appPriority: Mission-Critical spec: comment: My sample custom backup policy - frequency: '@hourly' # change this to @daily to test the 'audit_mission_critical_RPO' policy + frequency: '@hourly' subFrequency: minutes: [30] hours: [22,7] @@ -20,7 +19,7 @@ spec: monthly: 6 actions: - action: backup - - action: export # comment this line out to test 'enforce_3-2-1' policy + - action: export exportParameters: frequency: '@monthly' profile: diff --git a/kasten/kasten-minimum-retention/.kyverno-test/kasten-skipped-policies.yaml b/kasten/kasten-minimum-retention/.kyverno-test/kasten-skipped-policies.yaml new file mode 100644 index 000000000..73f735abc --- /dev/null +++ b/kasten/kasten-minimum-retention/.kyverno-test/kasten-skipped-policies.yaml @@ -0,0 +1,33 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: preset-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + presetRef: + name: mypreset + namespace: kasten-io + actions: + - action: backup + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp +--- +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: ondemand-policy + namespace: kasten-io + 
labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@onDemand' + actions: + - action: backup + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp \ No newline at end of file diff --git a/kasten/kasten-minimum-retention/.kyverno-test/kyverno-test.yaml b/kasten/kasten-minimum-retention/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..606bdee55 --- /dev/null +++ b/kasten/kasten-minimum-retention/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,29 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: kyverno_data_protection_tests +policies: +- ../kasten-minimum-retention.yaml +resources: +- kasten-skipped-policies.yaml +- kasten-hourly-policy.yaml +results: +- kind: Policy + patchedResource: patched.yaml + policy: kasten-minimum-retention + resources: + - hourly-policy + result: pass + rule: kasten-minimum-retention +- kind: Policy + policy: kasten-minimum-retention + resources: + - ondemand-policy + result: skip + rule: kasten-minimum-retention +- kind: Policy + policy: kasten-minimum-retention + resources: + - preset-policy + result: skip + rule: kasten-minimum-retention diff --git a/kasten/k10-minimum-retention/.kyverno-test/patched.yaml b/kasten/kasten-minimum-retention/.kyverno-test/patched.yaml similarity index 100% rename from kasten/k10-minimum-retention/.kyverno-test/patched.yaml rename to kasten/kasten-minimum-retention/.kyverno-test/patched.yaml diff --git a/kasten/kasten-minimum-retention/artifacthub-pkg.yml b/kasten/kasten-minimum-retention/artifacthub-pkg.yml new file mode 100644 index 000000000..51b74f6d0 --- /dev/null +++ b/kasten/kasten-minimum-retention/artifacthub-pkg.yml @@ -0,0 +1,20 @@ +name: kasten-minimum-retention +version: 1.0.1 +displayName: Set Kasten Policy Minimum Backup Retention +createdAt: "2023-05-07T00:00:00.000Z" +description: >- + Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment one of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml + ``` +keywords: + - kyverno + - Veeam Kasten +readme: | + Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment one of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. 
Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type +annotations: + kyverno/category: "Veeam Kasten" + kyverno/kubernetesVersion: "1.24-1.30" + kyverno/subject: "Policy" +digest: e394e005816521b6157a1ef4a0c9757bca956dd706f6a82746fe661c7938d61f diff --git a/kasten/k10-minimum-retention/k10-minimum-retention.yaml b/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml similarity index 55% rename from kasten/k10-minimum-retention/k10-minimum-retention.yaml rename to kasten/kasten-minimum-retention/kasten-minimum-retention.yaml index a7535c298..1a21d7c81 100644 --- a/kasten/k10-minimum-retention/k10-minimum-retention.yaml +++ b/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml @@ -1,30 +1,38 @@ apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: - name: k10-minimum-retention + name: kasten-minimum-retention annotations: - policies.kyverno.io/title: Minimum Backup Retention - policies.kyverno.io/category: Kasten K10 by Veeam - kyverno.io/kyverno-version: 1.6.2 + policies.kyverno.io/title: Set Kasten Policy Minimum Backup Retention + policies.kyverno.io/category: Veeam Kasten + kyverno.io/kyverno-version: 1.12.1 policies.kyverno.io/minversion: 1.6.2 - kyverno.io/kubernetes-version: "1.21-1.22" + kyverno.io/kubernetes-version: "1.24-1.30" policies.kyverno.io/subject: Policy policies.kyverno.io/description: >- - K10 Policy resources can be validated to adhere to common compliance retention standards. - Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. - This policy deletes the retention value in the backup operation and replaces it with the specified retention. - Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. - To use different - This policy can also be used go reduce retentions lengths to enforce cost optimization. + Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment one of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type spec: schemaValidation: false rules: - - name: k10-minimum-retention + - name: kasten-minimum-retention match: any: - resources: kinds: - config.kio.kasten.io/v1alpha1/Policy + preconditions: + all: + # Match only @hourly policies that do not use policy presets, as the + # number of retained artifacts can only be specified for frequencies + # of the same or lower granularity than the policy frequency. For example, + # if the policy frequency is '@daily', then retention can have values for + # 'daily', 'weekly', 'monthly' and 'yearly', but not for 'hourly'. + # If the policy frequency is '@hourly', then all retention values are + # allowed. If the policy frequency is '@onDemand' or a policy preset is used, + # then retention values are not allowed. 
+ - key: "{{ request.object.spec.frequency || ''}}" + operator: Equals + value: '@hourly' mutate: # Federal Information Security Management Act (FISMA): 3 Years #patchesJson6902: |- diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/chainsaw-test.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..f948a1a98 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-kubecost-labels +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-kubecost-labels.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-kubecost-labels + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-bad.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..68e1fe0a1 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + env: foo + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + owner: foo + team: bar + department: foo + app: bar + name: badpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-good.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..50ec73c0a --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-good.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + owner: foo + team: bar + department: foo + app: bar + env: foo + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..44277f3ba --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + owner: "foo" + template: + metadata: + labels: + owner: "foo" + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob 
+metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + owner: "foo" + team: "foo" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-good.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..4e85726df --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,99 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + owner: "foo" + team: "foo" + department: "foo" + env: "foo" + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + owner: "foo" + team: "foo" + department: "foo" + env: "foo" + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + owner: "foo" + team: "foo" + department: "foo" + app: "foo" + env: "foo" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + owner: "foo" + team: "foo" + department: "foo" + app: "foo" + env: "foo" + foo: bar + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/policy-ready.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..ed44c7cf3 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-kubecost-labels +status: + ready: true diff --git a/kubecost-cel/require-kubecost-labels/.kyverno-test/kyverno-test.yaml b/kubecost-cel/require-kubecost-labels/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..41e2fbf5f --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,25 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-kubecost-labels +policies: +- ../require-kubecost-labels.yaml +resources: +- resource.yaml +results: +- kind: Pod + policy: require-kubecost-labels + resources: + - badpod01 + - badpod02 + - badpod03 + - badpod04 + - badpod05 + result: fail + rule: require-labels +- kind: Pod + policy: require-kubecost-labels + resources: + - goodpod + result: pass + rule: require-labels diff --git 
a/kubecost-cel/require-kubecost-labels/.kyverno-test/resource.yaml b/kubecost-cel/require-kubecost-labels/.kyverno-test/resource.yaml new file mode 100644 index 000000000..17ab732f0 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.kyverno-test/resource.yaml @@ -0,0 +1,73 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod + labels: + owner: John Doe + team: falcon + department: eng + app: redis + env: prod2 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + labels: + owner: John Doe +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + labels: + owner: John Doe + team: falcon +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 + labels: + owner: John Doe + team: falcon + department: eng +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 + labels: + owner: John Doe + team: falcon + department: eng + app: redis +spec: + containers: + - image: busybox:1.35 + name: busybox diff --git a/kubecost-cel/require-kubecost-labels/artifacthub-pkg.yml b/kubecost-cel/require-kubecost-labels/artifacthub-pkg.yml new file mode 100644 index 000000000..bfae83d11 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-kubecost-labels-cel +version: 1.0.0 +displayName: Require Kubecost Labels in CEL expressions +description: >- + Kubecost can use labels assigned to Pods in order to track and display cost allocation in a granular way. These labels, which can be customized, can be used to organize and group workloads in different ways. This policy requires that the labels `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), these labels will also be required for all Pod controllers. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml + ``` +keywords: + - kyverno + - Kubecost + - CEL Expressions +readme: | + Kubecost can use labels assigned to Pods in order to track and display cost allocation in a granular way. These labels, which can be customized, can be used to organize and group workloads in different ways. This policy requires that the labels `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), these labels will also be required for all Pod controllers. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kubecost in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod,Label" +digest: e7dc12ab8d4fa467c23bc117db5c9e33e5e0d804c597ee0d88fb9f55f11ab535 +createdAt: "2024-05-12T06:59:59Z" + diff --git a/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml b/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml new file mode 100644 index 000000000..32ca0dccb --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml @@ -0,0 +1,43 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-kubecost-labels + annotations: + policies.kyverno.io/title: Require Kubecost Labels in CEL expressions + policies.kyverno.io/category: Kubecost in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod, Label + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Kubecost can use labels assigned to Pods in order to track and display + cost allocation in a granular way. These labels, which can be customized, can be used + to organize and group workloads in different ways. This policy requires that the labels + `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno + autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), + these labels will also be required for all Pod controllers. +spec: + validationFailureAction: Audit + background: true + rules: + - name: require-labels + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.metadata.labels) && + has(object.metadata.labels.owner) && object.metadata.labels.owner != '' && + has(object.metadata.labels.team) && object.metadata.labels.team != '' && + has(object.metadata.labels.department) && object.metadata.labels.department != '' && + has(object.metadata.labels.app) && object.metadata.labels.app != '' && + has(object.metadata.labels.env) && object.metadata.labels.env != '' + message: "The Kubecost labels `owner`, `team`, `department`, `app`, and `env` are all required for Pods." 
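For reference, here is a minimal sketch of a Pod that would satisfy the CEL expression above (the Pod name and every label value below are illustrative placeholders, not values the policy prescribes):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: cost-tracked-pod        # hypothetical name
  labels:
    owner: jane-doe             # all five labels must be present AND non-empty,
    team: platform              # since each is checked with has() and != ''
    department: engineering
    app: inventory-api
    env: staging
spec:
  containers:
  - name: app
    image: busybox:1.35
```

Note that an empty value such as `env: ""` fails the expression just like a missing label, because each label is tested both for presence and for a non-empty string.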
+ diff --git a/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml b/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml index d8db785f5..44277f3ba 100644 --- a/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml +++ b/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml @@ -8,7 +8,7 @@ spec: replicas: 1 selector: matchLabels: - app: busybox + foo: bar template: metadata: labels: @@ -28,7 +28,7 @@ spec: replicas: 1 selector: matchLabels: - app: busybox + owner: "foo" template: metadata: labels: diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/chainsaw-test.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..19705bdc2 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,48 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: advanced-restrict-image-registries +spec: + # disable templating because it can cause issues with CEL expressions + template: false + steps: + - name: step-01 + try: + - apply: + file: ../advanced-restrict-image-registries.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: advanced-restrict-image-registries + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ns-01.yaml + - apply: + file: ns-02.yaml + - apply: + file: cm.yaml + - name: step-03 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/cm.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/cm.yaml new file mode 100755 index 000000000..fdad1c734 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/cm.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + registries: ghcr.io/ +kind: ConfigMap +metadata: + name: clusterregistries + namespace: default diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-01.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-01.yaml new file mode 100755 index 000000000..30c99ca14 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-01.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + corp.com/allowed-registries: img.corp.com/ + name: imageregistries-ns01 diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-02.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-02.yaml new file mode 100755 index 000000000..3a301353b --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-02.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + corp.com/allowed-registries: docker.io/ + name: imageregistries-ns02 diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-bad.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..647879b83 --- /dev/null +++ 
b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + namespace: imageregistries-ns01 +spec: + initContainers: + - name: busybox01-init + image: busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: corp.img.io/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + namespace: imageregistries-ns02 +spec: + initContainers: + - name: busybox01-init + image: corp.img.io/busybox:1.35 + containers: + - name: busybox01 + image: img.corp.com/busybox:1.35 + - name: busybox02 + image: docker.io/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + initContainers: + - name: busybox01-init + image: corp.img.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: ghcr.io/busybox:1.35 +--- \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-good.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..7d9b3714b --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-good.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 + namespace: imageregistries-ns01 +spec: + initContainers: + - name: busybox01-init + image: img.corp.com/busybox:1.35 + # - name: busybox02-init + # image: ghcr.io/busybox:1.35 + containers: + # - name: busybox01 + # image: ghcr.io/busybox:1.35 + - name: busybox02 + image: img.corp.com/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 + namespace: imageregistries-ns02 +spec: + initContainers: + - name: busybox01-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: docker.io/busybox:1.35 + - name: busybox02 + image: docker.io/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + initContainers: + - name: busybox01-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: ghcr.io/busybox:1.35 \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-bad.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..f2b36e075 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 + namespace: imageregistries-ns01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - name: busybox01-init + image: docker.io/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: corp.img.io/busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 + namespace: imageregistries-ns02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - name: busybox01-init + image: docker.io/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + 
containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: corp.img.io/busybox:1.35 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-good.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..5d4e2d168 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 + namespace: imageregistries-ns01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - name: busybox01-init + image: img.corp.com/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: img.corp.com/busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 + namespace: imageregistries-ns02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - name: busybox01-init + image: docker.io/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: docker.io/busybox:1.35 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/policy-ready.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..817091e5a --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: advanced-restrict-image-registries +status: + ready: true diff --git a/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml b/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml new file mode 100644 index 000000000..d0471c492 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml @@ -0,0 +1,54 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: advanced-restrict-image-registries + annotations: + policies.kyverno.io/title: Advanced Restrict Image Registries in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + In instances where a ClusterPolicy defining all the approved image registries + is insufficient, more granular control may be needed to set permitted registries, + especially in multi-tenant use cases where some registries may be based on + the Namespace. This policy shows an advanced version of the Restrict Image Registries + policy which gets a global approved registry from a ConfigMap and, based upon an + annotation at the Namespace level, gets the registry approved for that Namespace. 
+spec: + validationFailureAction: Audit + background: false + rules: + - name: validate-corp-registries + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: clusterregistries + namespace: default + parameterNotFoundAction: Deny + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + - name: nsregistries + expression: >- + (has(namespaceObject.metadata.annotations) && 'corp.com/allowed-registries' in namespaceObject.metadata.annotations) ? + namespaceObject.metadata.annotations['corp.com/allowed-registries'] : ' ' + - name: clusterregistries + expression: "'registries' in params.data ? params.data['registries'] : ' '" + expressions: + - expression: "variables.allContainers.all(container, container.image.startsWith(variables.nsregistries) || container.image.startsWith(variables.clusterregistries))" + message: This Pod names an image that is not from an approved registry. + diff --git a/other-cel/advanced-restrict-image-registries/artifacthub-pkg.yml b/other-cel/advanced-restrict-image-registries/artifacthub-pkg.yml new file mode 100644 index 000000000..edb0ed41a --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: advanced-restrict-image-registries-cel +version: 1.0.0 +displayName: Advanced Restrict Image Registries in CEL expressions +description: >- + In instances where a ClusterPolicy defining all the approved image registries is insufficient, more granular control may be needed to set permitted registries, especially in multi-tenant use cases where some registries may be based on the Namespace. This policy shows an advanced version of the Restrict Image Registries policy which gets a global approved registry from a ConfigMap and, based upon an annotation at the Namespace level, gets the registry approved for that Namespace. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + In instances where a ClusterPolicy defining all the approved image registries is insufficient, more granular control may be needed to set permitted registries, especially in multi-tenant use cases where some registries may be based on the Namespace. This policy shows an advanced version of the Restrict Image Registries policy which gets a global approved registry from a ConfigMap and, based upon an annotation at the Namespace level, gets the registry approved for that Namespace. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: ada2e1e0dd2db1d27d973c07375812e415fb1592c9d1ea26a89850c090520ce4 +createdAt: "2024-04-21T11:03:06Z" + diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/chainsaw-test.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..74dd0c4e8 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: deny-commands-in-exec-probe +spec: + steps: + - name: step-01 + try: + - apply: + file: ../deny-commands-in-exec-probe.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: deny-commands-in-exec-probe + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pods-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pods-bad.yaml + - apply: + file: podcontrollers-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontrollers-bad.yaml diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-bad.yaml new file mode 100644 index 000000000..1526c45a0 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-bad.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - ls + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - foo + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - ps + - aux + periodSeconds: 10 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-good.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-good.yaml new file mode 100644 index 000000000..e6ee813e1 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-good.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - meow + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: 
batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - meow + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-bad.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-bad.yaml new file mode 100644 index 000000000..1b58ef909 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-bad.yaml @@ -0,0 +1,106 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - ls + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - ps + - aux + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - jcmd + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - echo + - jcmd + - echo + - hello + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - echo + - $(jcmd) + - echo + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - "echo bar" + - "echo ls foo" + - "echo bar" + periodSeconds: 10 \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-good.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-good.yaml new file mode 100644 index 000000000..dc0c71226 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-good.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - meow + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + grpc: + port: 8888 + periodSeconds: 10 \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/policy-ready.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/policy-ready.yaml 
new file mode 100755
index 000000000..a95d54494
--- /dev/null
+++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/policy-ready.yaml
@@ -0,0 +1,6 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: deny-commands-in-exec-probe
+status:
+  ready: true
diff --git a/other-cel/deny-commands-in-exec-probe/.kyverno-test/kyverno-test.yaml b/other-cel/deny-commands-in-exec-probe/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..0e9825553
--- /dev/null
+++ b/other-cel/deny-commands-in-exec-probe/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,30 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: deny-commands-in-exec-probe
+policies:
+- ../deny-commands-in-exec-probe.yaml
+resources:
+- resource.yaml
+results:
+- kind: Pod
+  policy: deny-commands-in-exec-probe
+  resources:
+  - badpod01
+  - badpod02
+  - badpod03
+  result: fail
+  rule: check-commands
+- kind: Pod
+  policy: deny-commands-in-exec-probe
+  resources:
+  - goodpod02
+  - goodpod03
+  result: pass
+  rule: check-commands
+- kind: Pod
+  policy: deny-commands-in-exec-probe
+  resources:
+  - goodpod01
+  result: skip
+  rule: check-commands
diff --git a/other-cel/deny-commands-in-exec-probe/.kyverno-test/resource.yaml b/other-cel/deny-commands-in-exec-probe/.kyverno-test/resource.yaml
new file mode 100644
index 000000000..2e3810eb6
--- /dev/null
+++ b/other-cel/deny-commands-in-exec-probe/.kyverno-test/resource.yaml
@@ -0,0 +1,90 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod01
+spec:
+  containers:
+  - name: container01
+    image: czjunkfoo
+    livenessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - jcmd | grep Main
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod02
+spec:
+  containers:
+  - name: container01
+    image: czjunkfoo
+    livenessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - cat | ls -l
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod03
+spec:
+  containers:
+  - name: container01
+    image: czjunkfoo
+    livenessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - echo ps -aux | grep cala
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod01
+spec:
+  containers:
+  - name: container02
+    image: czjunkfoo
+  - name: container03
+    image: czjunkfoo
+    livenessProbe:
+      httpGet:
+        port: 8080
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod02
+spec:
+  containers:
+  - name: container04
+    image: czjunkfoo
+    livenessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - echo foo
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod03
+spec:
+  containers:
+  - name: container04
+    image: czjunkfoo
+    livenessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - env
+  - name: container05
+    image: czjunkfoo
diff --git a/other-cel/deny-commands-in-exec-probe/artifacthub-pkg.yml b/other-cel/deny-commands-in-exec-probe/artifacthub-pkg.yml
new file mode 100644
index 000000000..2edc8226f
--- /dev/null
+++ b/other-cel/deny-commands-in-exec-probe/artifacthub-pkg.yml
@@ -0,0 +1,25 @@
+name: deny-commands-in-exec-probe-cel
+version: 1.0.0
+displayName: Deny Commands in Exec Probe in CEL expressions
+description: >-
+  Developers may feel compelled to use simple shell commands as a workaround for creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged by detecting those commands. This policy prevents the use of the commands `jcmd`, `ps`, and `ls` if found in a Pod's liveness exec probe.
+
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml
+  ```
+keywords:
+  - kyverno
+  - Other
+  - CEL Expressions
+readme: |
+  Developers may feel compelled to use simple shell commands as a workaround for creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged by detecting those commands. This policy prevents the use of the commands `jcmd`, `ps`, and `ls` if found in a Pod's liveness exec probe.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Other in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: af3cef475e79cc67105ba3a2be80f0692ea3744f14a9ccd3917d8de8d251e5d0
+createdAt: "2024-04-25T18:27:10Z"
+
diff --git a/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml b/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml
new file mode 100644
index 000000000..a9381ee1f
--- /dev/null
+++ b/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml
@@ -0,0 +1,45 @@
+apiVersion: kyverno.io/v2beta1
+kind: ClusterPolicy
+metadata:
+  name: deny-commands-in-exec-probe
+  annotations:
+    policies.kyverno.io/title: Deny Commands in Exec Probe in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/subject: Pod
+    kyverno.io/kyverno-version: 1.11.0
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/description: >-
+      Developers may feel compelled to use simple shell commands as a workaround for
+      creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged
+      by detecting those commands. This policy prevents the use of the commands
+      `jcmd`, `ps`, and `ls` if found in a Pod's liveness exec probe.
+spec:
+  validationFailureAction: Audit
+  background: false
+  rules:
+  - name: check-commands
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    celPreconditions:
+    - name: "check-liveness-probes-commands-exist"
+      expression: >-
+        object.spec.containers.exists(container,
+        has(container.livenessProbe) && has(container.livenessProbe.exec) &&
+        size(container.livenessProbe.exec.command) > 0)
+    validate:
+      cel:
+        expressions:
+        - expression: >-
+            object.spec.containers.all(container,
+            !has(container.livenessProbe) || !has(container.livenessProbe.exec) ||
+            !container.livenessProbe.exec.command.exists(command,
+            command.matches('\\bjcmd\\b') || command.matches('\\bps\\b') || command.matches('\\bls\\b')))
+          message: Cannot use commands `jcmd`, `ps`, or `ls` in liveness probes.
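Note on the rule above: `matches()` uses RE2 syntax, so the `\b` word boundaries catch the denied words anywhere within a command element (`ps aux` and `echo ps` are both denied), while a string such as `lshw` passes because `\bls\b` cannot match inside it. As a minimal sketch, not part of this changeset (the Pod name and image are hypothetical), a probe the enforced policy would reject:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo            # hypothetical name
spec:
  containers:
  - name: app
    image: busybox:1.35
    livenessProbe:
      exec:
        command:
        - /bin/sh
        - -c
        - ps aux | grep app   # denied: matches '\bps\b'
      periodSeconds: 10
```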
+ diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/chainsaw-test.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..a388d214e --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,42 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exclude-namespaces-dynamically +spec: + steps: + - name: step-01 + try: + - apply: + file: cm.yaml + - apply: + file: ns.yaml + - apply: + file: ../exclude-namespaces-dynamically.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: exclude-namespaces-example + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cm.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cm.yaml new file mode 100644 index 000000000..0c2e3c57a --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cm.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + exclude: "exclude-ns, exclude-ns-2" +kind: ConfigMap +metadata: + name: namespace-filters + namespace: default \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cmap.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cmap.yaml new file mode 100644 index 000000000..891cfb061 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cmap.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: namespace-filters + namespace: default +data: + exclude: "[\"default\", \"test\"]" + \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/ns.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/ns.yaml new file mode 100644 index 000000000..4c909ba8b --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/ns.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: exclude-ns +--- +apiVersion: v1 +kind: Namespace +metadata: + name: exclude-ns-2 \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-bad.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..78823c79e --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + namespace: default +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + labels: + bar: foo +spec: + containers: + - name: pod01 + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-good.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..9f2d32670 --- /dev/null +++ 
b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-good.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 + namespace: exclude-ns +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 + namespace: exclude-ns-2 +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 + labels: + bar: foo + foo: bar +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 + namespace: exclude-ns-2 + labels: + foo: bar +spec: + containers: + - name: pod01 + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-bad.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..931d85e11 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: bb-01 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: bb-01 + image: kyverno + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-good.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..1360dedfa --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 + namespace: exclude-ns +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: bb-01 + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + foo: bar + spec: + containers: + - name: bb-01 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 + namespace: exclude-ns-2 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: bb-01 + image: kyverno + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: bb-01 + image: kyverno + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/policy-ready.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..451f8163f --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: exclude-namespaces-example +status: + ready: true diff --git 
a/other-cel/exclude-namespaces-dynamically/artifacthub-pkg.yml b/other-cel/exclude-namespaces-dynamically/artifacthub-pkg.yml
new file mode 100644
index 000000000..817299960
--- /dev/null
+++ b/other-cel/exclude-namespaces-dynamically/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: exclude-namespaces-dynamically-cel
+version: 1.0.0
+displayName: Exclude Namespaces Dynamically in CEL expressions
+description: >-
+  It's common for policy lookups to consider a mapping to many possible values rather than a static mapping. This sample demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap, where the ConfigMap stores an array of strings. This policy validates that any Pods created outside of the list of Namespaces have the label `foo` applied.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml
+  ```
+keywords:
+  - kyverno
+  - Sample
+  - CEL Expressions
+readme: |
+  It's common for policy lookups to consider a mapping to many possible values rather than a static mapping. This sample demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap, where the ConfigMap stores an array of strings. This policy validates that any Pods created outside of the list of Namespaces have the label `foo` applied.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Sample in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Namespace, Pod"
+digest: 5ddbe0a585b27d938e5ae070444d0d8f346785f8566b28bcbfef1dc0d90cd3f4
+createdAt: "2024-04-24T18:58:33Z"
+
diff --git a/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml b/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml
new file mode 100644
index 000000000..dc5a65852
--- /dev/null
+++ b/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml
@@ -0,0 +1,109 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: exclude-namespaces-example
+  annotations:
+    policies.kyverno.io/title: Exclude Namespaces Dynamically in CEL expressions
+    policies.kyverno.io/category: Sample in CEL
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Namespace, Pod
+    policies.kyverno.io/minversion: 1.11.0
+    pod-policies.kyverno.io/autogen-controllers: none
+    kyverno.io/kyverno-version: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/description: >-
+      It's common for policy lookups to consider a mapping to many possible values rather than a
+      static mapping. This sample demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap,
+      where the ConfigMap stores an array of strings. This policy validates that any Pods created
+      outside of the list of Namespaces have the label `foo` applied.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+  - name: exclude-namespaces-dynamically
+    match:
+      any:
+      - resources:
+          kinds:
+          - Deployment
+          - DaemonSet
+          - StatefulSet
+          - Job
+          operations:
+          - CREATE
+          - UPDATE
+    celPreconditions:
+    - name: "filter-namespaces"
+      expression: "!(request.namespace in params.data['exclude'].split(', '))"
+    validate:
+      cel:
+        paramKind:
+          apiVersion: v1
+          kind: ConfigMap
+        paramRef:
+          name: namespace-filters
+          namespace: default
+          parameterNotFoundAction: Deny
+        expressions:
+        - expression: "has(object.spec.template.metadata) && has(object.spec.template.metadata.labels) && 'foo' in object.spec.template.metadata.labels"
+          messageExpression: >
+            'Creating Pods in the ' + request.namespace + ' namespace,' +
+            ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' +
+            ' is forbidden unless it carries the label `foo`.'
+  - name: exclude-namespaces-dynamically-pods
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    celPreconditions:
+    - name: "filter-namespaces"
+      expression: "!(request.namespace in params.data['exclude'].split(', '))"
+    validate:
+      cel:
+        paramKind:
+          apiVersion: v1
+          kind: ConfigMap
+        paramRef:
+          name: namespace-filters
+          namespace: default
+          parameterNotFoundAction: Deny
+        expressions:
+        - expression: "has(object.metadata.labels) && 'foo' in object.metadata.labels"
+          messageExpression: >
+            'Creating Pods in the ' + request.namespace + ' namespace,' +
+            ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' +
+            ' is forbidden unless it carries the label `foo`.'
+  - name: exclude-namespaces-dynamically-cronjobs
+    match:
+      any:
+      - resources:
+          kinds:
+          - CronJob
+          operations:
+          - CREATE
+          - UPDATE
+    celPreconditions:
+    - name: "filter-namespaces"
+      expression: "!(request.namespace in params.data['exclude'].split(', '))"
+    validate:
+      cel:
+        paramKind:
+          apiVersion: v1
+          kind: ConfigMap
+        paramRef:
+          name: namespace-filters
+          namespace: default
+          parameterNotFoundAction: Deny
+        expressions:
+        - expression: >-
+            has(object.spec.jobTemplate.spec.template.metadata) &&
+            has(object.spec.jobTemplate.spec.template.metadata.labels) && 'foo' in object.spec.jobTemplate.spec.template.metadata.labels
+          messageExpression: >
+            'Creating Pods in the ' + request.namespace + ' namespace,' +
+            ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' +
+            ' is forbidden unless it carries the label `foo`.'
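All three rules share the same lookup mechanics: `paramRef` binds the `namespace-filters` ConfigMap from the `default` Namespace, and the precondition splits its `exclude` value on the exact separator `', '` before testing `request.namespace`. A minimal sketch of a request that satisfies the Pod rule (the Pod name and Namespace below are hypothetical, not from the test fixtures):

```yaml
# Given the test ConfigMap value exclude: "exclude-ns, exclude-ns-2",
# split(', ') yields ["exclude-ns", "exclude-ns-2"]; any other
# Namespace is subject to the label check.
apiVersion: v1
kind: Pod
metadata:
  name: labeled-pod     # hypothetical name
  namespace: team-a     # not in the excluded list
  labels:
    foo: bar            # satisfies 'foo' in object.metadata.labels
spec:
  containers:
  - name: app
    image: busybox:1.35
```

Because the split is on a comma followed by a space, a ConfigMap value written as `exclude-ns,exclude-ns-2` would not exclude either Namespace.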
+ diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/chainsaw-test.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..30ace9483 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: limit-hostpath-vols +spec: + steps: + - name: step-01 + try: + - apply: + file: ../limit-hostpath-vols.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: limit-hostpath-vols + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/pod-bad.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..2dfc11c6c --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /etc/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /home/junk \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/pod-good.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..92ed9c3a4 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-good.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /data/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /data/junk + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-bad.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..0379980ec --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /etc/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /etc/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-good.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..acb202523 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /data/junk + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /data/junk + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/policy-ready.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..b5a967f65 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-hostpath-vols +status: + ready: true diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/badpod.yaml b/other-cel/limit-hostpath-vols/.kyverno-test/badpod.yaml new file mode 100644 index 000000000..52a2b193f --- /dev/null +++ b/other-cel/limit-hostpath-vols/.kyverno-test/badpod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bad-pods-all +spec: + initContainers: + - name: inittest-webserver + image: asdfeasdfasada:latest + # volumeMounts: + # - mountPath: /some/dir + # name: bar + # readOnly: true + containers: + - name: test-webserver + image: asdfeasdfasada:latest + volumeMounts: + - mountPath: /some/dir + name: foo + readOnly: true + - name: test-webserver02 + image: sjbonmqopcta:latest + # volumeMounts: + # - mountPath: /some/dir + # name: bar + # readOnly: true + volumes: + - name: foo + hostPath: + path: /etc + # - name: bar + # hostPath: + # path: /data/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/goodpod.yaml 
b/other-cel/limit-hostpath-vols/.kyverno-test/goodpod.yaml
new file mode 100644
index 000000000..efa0d501a
--- /dev/null
+++ b/other-cel/limit-hostpath-vols/.kyverno-test/goodpod.yaml
@@ -0,0 +1,36 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good-pods-all
+spec:
+  initContainers:
+  - name: inittest-webserver
+    image: asdfeasdfasada:latest
+    # volumeMounts:
+    # - mountPath: /some/dir
+    #   name: bar
+    #   readOnly: true
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  - name: test-webserver02
+    image: sjbonmqopcta:latest
+    # volumeMounts:
+    # - mountPath: /some/dir
+    #   name: bar
+    #   readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /data
+  # - name: bar
+  #   hostPath:
+  #     path: /data/junk
+  - name: empty
+    emptyDir:
+      medium: memory
+      sizeLimit: 20Mi
\ No newline at end of file
diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/kyverno-test.yaml b/other-cel/limit-hostpath-vols/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..904dc13b1
--- /dev/null
+++ b/other-cel/limit-hostpath-vols/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,23 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: limit-hostpath-vols
+policies:
+- ../limit-hostpath-vols.yaml
+resources:
+- goodpod.yaml
+- badpod.yaml
+results:
+- kind: Pod
+  policy: limit-hostpath-vols
+  resources:
+  - bad-pods-all
+  result: fail
+  rule: limit-hostpath-to-slash-data
+- kind: Pod
+  policy: limit-hostpath-vols
+  resources:
+  - good-pods-all
+  result: pass
+  rule: limit-hostpath-to-slash-data
+variables: values.yaml
diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/values.yaml b/other-cel/limit-hostpath-vols/.kyverno-test/values.yaml
new file mode 100644
index 000000000..f0bdd4ef1
--- /dev/null
+++ b/other-cel/limit-hostpath-vols/.kyverno-test/values.yaml
@@ -0,0 +1,8 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Values
+policies:
+- name: limit-hostpath-vols
+  resources:
+  - name: bad-pods-all
+    values:
+      request.operation: UPDATE
diff --git a/other-cel/limit-hostpath-vols/artifacthub-pkg.yml b/other-cel/limit-hostpath-vols/artifacthub-pkg.yml
new file mode 100644
index 000000000..4386e87d4
--- /dev/null
+++ b/other-cel/limit-hostpath-vols/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: limit-hostpath-vols-cel
+version: 1.0.0
+displayName: Limit hostPath Volumes to Specific Directories in CEL expressions
+description: >-
+  hostPath volumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. It is strongly recommended to pair this policy with a second one ensuring that readOnly access is enforced, preventing directory escape.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml
+  ```
+keywords:
+  - kyverno
+  - Other
+  - CEL Expressions
+readme: |
+  hostPath volumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. It is strongly recommended to pair this policy with a second one ensuring that readOnly access is enforced, preventing directory escape.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Other in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: 8ae23309c8e49ca3e3abe72f406e9ae186564ab24468ea4e772b6f3097793892
+createdAt: "2024-04-26T15:52:10Z"
+
diff --git a/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml b/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml
new file mode 100644
index 000000000..65d038c77
--- /dev/null
+++ b/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml
@@ -0,0 +1,41 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: limit-hostpath-vols
+  annotations:
+    policies.kyverno.io/title: Limit hostPath Volumes to Specific Directories in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kyverno-version: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      hostPath volumes consume the underlying node's file system. If hostPath volumes
+      are not to be universally disabled, they should be restricted to only certain
+      host paths so as not to allow access to sensitive information. This policy ensures
+      the only directory that can be mounted as a hostPath volume is /data. It is strongly
+      recommended to pair this policy with a second one ensuring that readOnly
+      access is enforced, preventing directory escape.
+spec:
+  background: false
+  validationFailureAction: Audit
+  rules:
+  - name: limit-hostpath-to-slash-data
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    celPreconditions:
+    - name: "has-host-path-volume"
+      expression: "has(object.spec.volumes) && object.spec.volumes.exists(volume, has(volume.hostPath))"
+    validate:
+      cel:
+        expressions:
+        - expression: "object.spec.volumes.all(volume, !has(volume.hostPath) || volume.hostPath.path.split('/')[1] == 'data')"
+          message: hostPath volumes are confined to /data.
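The expression above relies on how CEL's `split` treats absolute paths: the leading slash produces an empty first element, so index `[1]` is always the top-level directory. A sketch of the evaluation under a few sample paths (the Pod below is illustrative only, not part of the changeset):

```yaml
# '/data'.split('/')      -> ['', 'data']          [1] == 'data' -> allowed
# '/data/junk'.split('/') -> ['', 'data', 'junk']  [1] == 'data' -> allowed
# '/etc/junk'.split('/')  -> ['', 'etc', 'junk']   [1] == 'etc'  -> denied
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo        # hypothetical name
spec:
  containers:
  - name: app
    image: busybox:1.35
  volumes:
  - name: data
    hostPath:
      path: /data/cache      # allowed: top-level directory is /data
```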
+ diff --git a/other-cel/require-container-port-names/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-container-port-names/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..57c31e79a --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-container-port-names +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-container-port-names.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-container-port-names + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-container-port-names/.chainsaw-test/pod-bad.yaml b/other-cel/require-container-port-names/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..f55f602a1 --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + - containerPort: 443 + name: https-port +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + name: http-port + - containerPort: 443 + name: https-port \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/pod-good.yaml b/other-cel/require-container-port-names/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..0825ba43a --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/pod-good.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - containerPort: 443 + name: https-port \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..dd9ac3feb --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + 
spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + - containerPort: 443 + name: https-port +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + - containerPort: 443 + name: https-port + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..f491d3aab --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - containerPort: 443 + name: https-port +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - containerPort: 443 + name: https-port + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/policy-ready.yaml b/other-cel/require-container-port-names/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..f8c44c430 --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-container-port-names +status: + ready: true diff --git a/other-cel/require-container-port-names/.kyverno-test/kyverno-test.yaml b/other-cel/require-container-port-names/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c1054a710 --- /dev/null +++ b/other-cel/require-container-port-names/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,52 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-container-port-names +policies: +- ../require-container-port-names.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - goodpod01 + - goodpod02 + result: pass +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - baddeployment01 + result: fail +- kind: CronJob + policy: require-container-port-names + rule: port-name + resources: + - badcronjob01 + result: fail +- kind: Deployment + policy: require-container-port-names 
+  rule: port-name
+  resources:
+  - gooddeployment01
+  result: pass
+- kind: CronJob
+  policy: require-container-port-names
+  rule: port-name
+  resources:
+  - goodcronjob01
+  result: pass
+
diff --git a/other-cel/require-container-port-names/artifacthub-pkg.yml b/other-cel/require-container-port-names/artifacthub-pkg.yml
new file mode 100644
index 000000000..1583eaf92
--- /dev/null
+++ b/other-cel/require-container-port-names/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: require-container-port-names-cel
+version: 1.0.0
+displayName: Require Container Port Names in CEL expressions
+description: >-
+  Containers may define ports on which they listen. In addition to a port number, a name field may optionally be used. Including a name makes it easier to write Service and other resource definitions, since the name may be referenced, allowing the underlying port number to change. This policy requires that for every containerPort defined there is also a name specified.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-container-port-names/require-container-port-names.yaml
+  ```
+keywords:
+  - kyverno
+  - Other
+  - CEL Expressions
+readme: |
+  Containers may define ports on which they listen. In addition to a port number, a name field may optionally be used. Including a name makes it easier to write Service and other resource definitions, since the name may be referenced, allowing the underlying port number to change. This policy requires that for every containerPort defined there is also a name specified.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Other in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: 62488bb402289ddbffe291c61acb14a50347e476e99d4f79ba035b4d3297403e
+createdAt: "2024-04-27T16:37:39Z"
+
diff --git a/other-cel/require-container-port-names/require-container-port-names.yaml b/other-cel/require-container-port-names/require-container-port-names.yaml
new file mode 100644
index 000000000..b2756b98e
--- /dev/null
+++ b/other-cel/require-container-port-names/require-container-port-names.yaml
@@ -0,0 +1,36 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: require-container-port-names
+  annotations:
+    policies.kyverno.io/title: Require Container Port Names in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/severity: medium
+    kyverno.io/kyverno-version: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Containers may define ports on which they listen. In addition to a port number,
+      a name field may optionally be used. Including a name makes it easier to write
+      Service and other resource definitions, since the name may be referenced, allowing
+      the underlying port number to change. This policy requires that for every containerPort
+      defined there is also a name specified.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+  - name: port-name
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    validate:
+      cel:
+        expressions:
+        - expression: "object.spec.containers.all(container, !has(container.ports) || container.ports.all(port, has(port.name)))"
+          message: Name is required for every containerPort.
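For context on why named ports matter, a Service's `targetPort` may reference a port name instead of a number, so the containerPort can change without touching the Service. An illustrative Service, not part of this changeset (name and selector are hypothetical):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: web                  # hypothetical name
spec:
  selector:
    app: web
  ports:
  - port: 80
    targetPort: http-port    # resolves to whichever containerPort is named http-port
```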
+ diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/bad-pod.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/bad-pod.yaml new file mode 100644 index 000000000..da2b7cd1a --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/bad-pod.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: busybox + name: badpod01 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..0f3822143 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-emptydir-requests-limits +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-emptydir-requests-limits.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-emptydir-requests-and-limits + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-bad.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..b515750a6 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,159 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumes: + - name: foo + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/vol + name: vol + - image: busybox:1.35 + name: busybox02 + volumeMounts: + - mountPath: /mnt/foo + name: foo + volumes: + - name: vol + emptyDir: + sizeLimit: 200Mi + - name: foo + emptyDir: {} 
+--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/vol + name: vol + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/foo + name: foo + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + requests: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + requests: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod07 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + limits: + memory: "2Gi" + requests: + memory: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-good.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..2af525da3 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-good.yaml @@ -0,0 +1,207 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + containers: + 
- image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02 + volumes: + - name: foo + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod07 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + volumes: + - name: foo + hostPath: + path: /var/foo +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod08 +spec: + initContainers: + - name: certificates + image: busybox + volumeMounts: + - name: etc-ssl-certs + mountPath: /etc/ssl/certs + resources: + limits: + ephemeral-storage: 256Mi + requests: + ephemeral-storage: 256Mi + - name: configure + image: busybox + volumeMounts: + - name: etc-ssl-certs + mountPath: /etc/ssl/certs/ + - name: my-app-secrets + mountPath: /init-secrets + resources: + limits: + ephemeral-storage: 256Mi + requests: + ephemeral-storage: 256Mi + containers: + - name: my-app + image: busybox + resources: + limits: + cpu: "2" + ephemeral-storage: 1Gi + memory: 500Mi + requests: + ephemeral-storage: 500Mi + volumeMounts: + - name: etc-ssl-certs + mountPath: /etc/ssl/certs/ + - name: my-app-secrets + mountPath: /etc/secrets + volumes: + - name: my-app-secrets + emptyDir: + medium: Memory + - name: etc-ssl-certs + emptyDir: + medium: "Memory" \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..d32c2c22d --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: 
/mnt/foo + name: foo-host + - image: busybox:1.35 + name: busybox02 + resources: + requests: + memory: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..70b656486 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/policy-ready.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..9aa39646d --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-emptydir-requests-and-limits +status: + ready: true diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/kyverno-test.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b97a5b0ec --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,30 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-emptydir-requests-and-limits +policies: +- ../require-emptydir-requests-limits.yaml +resources: +- 
resource-fail.yaml
+- resource-pass.yaml
+- resource-skip.yaml
+results:
+- kind: Pod
+  policy: require-emptydir-requests-and-limits
+  resources:
+  - fail-pod
+  result: fail
+  rule: check-emptydir-requests-limits
+- kind: Pod
+  policy: require-emptydir-requests-and-limits
+  resources:
+  - pass-pod-01
+  - pass-pod-02
+  result: pass
+  rule: check-emptydir-requests-limits
+- kind: Pod
+  policy: require-emptydir-requests-and-limits
+  resources:
+  - skip-pod
+  result: skip
+  rule: check-emptydir-requests-limits
diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-fail.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-fail.yaml
new file mode 100644
index 000000000..29b724bbc
--- /dev/null
+++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-fail.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: fail-pod
+spec:
+  containers:
+  - image: gcr.io/hello-world:1.0
+    name: test
+    resources: {}
+    volumeMounts:
+    - mountPath: /cache/data
+      name: vol
+  volumes:
+  - name: vol
+    emptyDir: {}
diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-pass.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-pass.yaml
new file mode 100644
index 000000000..b614d4688
--- /dev/null
+++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-pass.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pass-pod-01
+spec:
+  containers:
+  - image: gcr.io/hello-world:1.0
+    name: test
+    resources:
+      requests:
+        ephemeral-storage: "2Gi"
+      limits:
+        ephemeral-storage: "2Gi"
+    volumeMounts:
+    - mountPath: /cache/data
+      name: vol
+  volumes:
+  - name: vol
+    emptyDir: {}
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pass-pod-02
+spec:
+  containers:
+  - image: gcr.io/hello-world:1.0
+    name: test
+    volumeMounts:
+    - mountPath: /cache/data
+      name: vol
+    - mountPath: /cache/data2
+      name: vo2
+  volumes:
+  - name: vol
+    emptyDir:
+      sizeLimit: 1Gi
+
diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-skip.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-skip.yaml
new file mode 100644
index 000000000..8c2736f3f
--- /dev/null
+++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-skip.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: skip-pod
+spec:
+  containers:
+  - image: gcr.io/hello-world:1.0
+    name: test
+    volumeMounts:
+    - mountPath: /cache/data
+      name: vol
+    - mountPath: /cache/data2
+      name: vol2
+  volumes:
+  - name: vol
+    hostPath:
+      path: /mnt/data
+  - name: vol2
+    hostPath:
+      path: /mnt/data2
diff --git a/other-cel/require-emptydir-requests-limits/artifacthub-pkg.yml b/other-cel/require-emptydir-requests-limits/artifacthub-pkg.yml
new file mode 100644
index 000000000..859a6e8b3
--- /dev/null
+++ b/other-cel/require-emptydir-requests-limits/artifacthub-pkg.yml
@@ -0,0 +1,23 @@
+name: require-emptydir-requests-limits-cel
+version: 1.0.0
+displayName: Require Requests and Limits for emptyDir in CEL expressions
+description: >-
+  Pods which mount emptyDir volumes may potentially overrun the medium backing the emptyDir volume. This sample ensures that any initContainers or containers mounting an emptyDir volume have ephemeral-storage requests and limits set. The policy is skipped if the volume already has a sizeLimit set.
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Pods which mount emptyDir volumes may potentially overrun the medium backing the emptyDir volume. This sample ensures that any initContainers or containers mounting an emptyDir volume have ephemeral-storage requests and limits set. The policy will be skipped if the volume already has a sizeLimit set. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 8915013155bfb12e6414848c9dec66a9e95ab7318f5da7d0c64bc621143e5383 +createdAt: "2024-05-19T10:11:00Z" diff --git a/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml b/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml new file mode 100644 index 000000000..bc3cc0b67 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml @@ -0,0 +1,51 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-emptydir-requests-and-limits + annotations: + policies.kyverno.io/title: Require Requests and Limits for emptyDir in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Pods which mount emptyDir volumes may potentially overrun + the medium backing the emptyDir volume. This sample ensures that any + initContainers or containers mounting an emptyDir volume have + ephemeral-storage requests and limits set. The policy will be skipped + if the volume already has a sizeLimit set. +spec: + background: false + validationFailureAction: Audit + rules: + - name: check-emptydir-requests-limits + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "has-emptydir-volume" + expression: "has(object.spec.volumes) && object.spec.volumes.exists(volume, has(volume.emptyDir))" + validate: + cel: + variables: + - name: containers + expression: "object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : [])" + - name: emptydirnames + expression: >- + has(object.spec.volumes) ? + object.spec.volumes.filter(volume, has(volume.emptyDir) && !has(volume.emptyDir.sizeLimit)).map(volume, volume.name) : [] + expressions: + - expression: >- + variables.containers.all(container, + !has(container.volumeMounts) || + !container.volumeMounts.exists(mount, mount.name in variables.emptydirnames) || + container.resources.?requests[?'ephemeral-storage'].hasValue() && + container.resources.?limits[?'ephemeral-storage'].hasValue()) + message: Containers mounting emptyDir volumes must specify requests and limits for ephemeral-storage. 
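The validation expression above leans on cel-go's optional-types syntax: `container.resources.?requests[?'ephemeral-storage']` selects the `requests` field and then the `ephemeral-storage` key only when each is present, yielding an optional value, and `hasValue()` collapses the chain to a boolean. A rough plain-CEL expansion of the requests-and-limits check, shown only as an illustrative sketch (the policy ships the optional-types form above):

```
// Sketch: explicit has()/in guards approximating the optional chain.
// Each `.?` or `[?...]` hop in the policy returns an empty optional when
// the field or map key it names is missing, so those hops never error.
has(container.resources) &&
has(container.resources.requests) &&
'ephemeral-storage' in container.resources.requests &&
has(container.resources.limits) &&
'ephemeral-storage' in container.resources.limits
```

The optional form reads left to right without nested guards, which is presumably why the rule uses it; containers that mount no emptyDir volume lacking a sizeLimit never reach this check because the earlier `||` branches short-circuit first.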
+ diff --git a/other-cel/require-non-root-groups/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-non-root-groups/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..776c5d107 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,40 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-non-root-groups +spec: + # disable templating because it can cause issues with CEL expressions + template: false + steps: + - name: step-01 + try: + - apply: + file: ../require-non-root-groups.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-non-root-groups + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-non-root-groups/.chainsaw-test/pod-bad.yaml b/other-cel/require-non-root-groups/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..af72489a1 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,246 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod07 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod08 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod09 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod10 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod11 +spec: + 
initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod12 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod13 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod14 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod15 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [0] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [14,0] +--- +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-badpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + fsGroup: 0 +--- diff --git a/other-cel/require-non-root-groups/.chainsaw-test/pod-good.yaml b/other-cel/require-non-root-groups/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..712cd7520 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/pod-good.yaml @@ -0,0 +1,182 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod07 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + 
image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod08 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod09 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod10 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod03 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32,94] +--- +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-goodpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + fsGroup: 32 \ No newline at end of file diff --git a/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..5f56fe9c0 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,761 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: 
busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment11 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment12 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment13 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment14 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment15 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: 
busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob11 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob 
+metadata: + name: badcronjob12 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob13 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob14 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob15 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [0] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [14,0] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [0] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [14,0] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + fsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + fsGroup: 0 \ No newline at end of file diff --git a/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..221eae1a4 --- /dev/null +++ 
b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,561 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + 
image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +#### CRONJOBS +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + 
- name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32,94] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32,94] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + fsGroup: 32 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + fsGroup: 32 diff --git a/other-cel/require-non-root-groups/.chainsaw-test/policy-ready.yaml b/other-cel/require-non-root-groups/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..b51263787 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-non-root-groups +status: + ready: true diff --git a/other-cel/require-non-root-groups/.kyverno-test/kyverno-test.yaml b/other-cel/require-non-root-groups/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b477f9589 --- /dev/null +++ b/other-cel/require-non-root-groups/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,198 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-non-root-groups +policies: +- ../require-non-root-groups.yaml +resources: +- resource.yaml +results: +- kind: CronJob + policy: require-non-root-groups + resources: + - fsgrp-badcronjob01 + result: fail + rule: check-fsgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - fsgrp-baddeployment01 + result: fail + rule: check-fsgroup +- kind: Pod + policy: require-non-root-groups + resources: + - fsgrp-badpod01 + result: fail + rule: check-fsgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - fsgrp-goodcronjob01 + - fsgrp-goodcronjob02 + result: pass + rule: check-fsgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - fsgrp-gooddeployment01 + - fsgrp-gooddeployment02 + result: pass + rule: check-fsgroup +- kind: Pod + policy: require-non-root-groups + resources: 
+ - fsgrp-goodpod01 + - fsgrp-goodpod02 + result: pass + rule: check-fsgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - badcronjob01 + - badcronjob02 + - badcronjob03 + - badcronjob04 + - badcronjob05 + - badcronjob06 + - badcronjob07 + - badcronjob08 + - badcronjob09 + - badcronjob10 + - badcronjob11 + - badcronjob12 + - badcronjob13 + - badcronjob14 + - badcronjob15 + result: fail + rule: check-runasgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - baddeployment01 + - baddeployment02 + - baddeployment03 + - baddeployment04 + - baddeployment05 + - baddeployment06 + - baddeployment07 + - baddeployment08 + - baddeployment09 + - baddeployment10 + - baddeployment11 + - baddeployment12 + - baddeployment13 + - baddeployment14 + - baddeployment15 + result: fail + rule: check-runasgroup +- kind: Pod + policy: require-non-root-groups + resources: + - badpod01 + - badpod02 + - badpod03 + - badpod04 + - badpod05 + - badpod06 + - badpod07 + - badpod08 + - badpod09 + - badpod10 + - badpod11 + - badpod12 + - badpod13 + - badpod14 + - badpod15 + result: fail + rule: check-runasgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - goodcronjob01 + - goodcronjob02 + - goodcronjob03 + - goodcronjob04 + - goodcronjob05 + - goodcronjob06 + - goodcronjob07 + - goodcronjob08 + - goodcronjob09 + - goodcronjob10 + result: pass + rule: check-runasgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - gooddeployment01 + - gooddeployment02 + - gooddeployment03 + - gooddeployment04 + - gooddeployment05 + - gooddeployment06 + - gooddeployment07 + - gooddeployment08 + - gooddeployment09 + - gooddeployment10 + result: pass + rule: check-runasgroup +- kind: Pod + policy: require-non-root-groups + resources: + - goodpod01 + - goodpod02 + - goodpod03 + - goodpod04 + - goodpod05 + - goodpod06 + - goodpod07 + - goodpod08 + - goodpod09 + - goodpod10 + result: pass + rule: check-runasgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - supgrp-badcronjob01 + - supgrp-badcronjob02 + result: fail + rule: check-supplementalgroups +- kind: Deployment + policy: require-non-root-groups + resources: + - supgrp-baddeployment01 + - supgrp-baddeployment02 + result: fail + rule: check-supplementalgroups +- kind: Pod + policy: require-non-root-groups + resources: + - supgrp-badpod01 + - supgrp-badpod02 + result: fail + rule: check-supplementalgroups +- kind: CronJob + policy: require-non-root-groups + resources: + - supgrp-goodcronjob01 + - supgrp-goodcronjob02 + - supgrp-goodcronjob03 + result: pass + rule: check-supplementalgroups +- kind: Deployment + policy: require-non-root-groups + resources: + - supgrp-gooddeployment01 + - supgrp-gooddeployment02 + - supgrp-gooddeployment03 + result: pass + rule: check-supplementalgroups +- kind: Pod + policy: require-non-root-groups + resources: + - supgrp-goodpod01 + - supgrp-goodpod02 + - supgrp-goodpod03 + result: pass + rule: check-supplementalgroups diff --git a/other-cel/require-non-root-groups/.kyverno-test/resource.yaml b/other-cel/require-non-root-groups/.kyverno-test/resource.yaml new file mode 100644 index 000000000..97269bf53 --- /dev/null +++ b/other-cel/require-non-root-groups/.kyverno-test/resource.yaml @@ -0,0 +1,1854 @@ +############################ +## Rule: check-runasgroup ## +############################ +###### Pods - Bad +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: 
v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod07 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod08 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod09 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod10 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod11 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod12 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod13 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod14 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod15 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +###### Pods - Good +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: 
goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod07 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod08 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod09 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod10 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +###### Deployments - Bad +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: 
+ containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment11 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment12 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment13 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment14 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + 
securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment15 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +###### Deployments - Good +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + containers: 
+ - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +###### CronJobs - Bad +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + 
image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob11 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob12 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob13 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob14 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob15 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +###### CronJobs - Good +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + 
image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +#################################### +## Rule: check-supplementalgroups ## +#################################### +###### Pods - Bad +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [0] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [14,0] +--- +###### Pods - Good +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod03 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32,94] +--- +###### Deployments - Bad +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [0] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment02 +spec: 
+ replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [14,0] +--- +###### Deployments - Good +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32,94] +--- +###### CronJobs - Bad +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [0] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [14,0] +--- +###### CronJobs - Good +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32,94] +--- +######################### +## Rule: check-fsgroup ## +######################### +###### Pods - Bad +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-badpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 0 +--- +###### Pods - Good +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 32 +--- +###### Deployments - Bad +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 0 +--- +###### Deployments - Good +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: fsgrp-gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 32 +--- +###### CronJobs - Bad +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 0 +--- +###### CronJobs - Good +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 32 diff --git a/other-cel/require-non-root-groups/artifacthub-pkg.yml b/other-cel/require-non-root-groups/artifacthub-pkg.yml new file mode 100644 index 000000000..6c9a54a0e --- /dev/null +++ b/other-cel/require-non-root-groups/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-non-root-groups-cel +version: 1.0.0 +displayName: Require Non-Root Groups in CEL expressions +description: >- + Containers should be forbidden from running with a root primary or supplementary GID. This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number greater than zero (i.e., non root). A known issue prevents a policy such as this using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-non-root-groups/require-non-root-groups.yaml + ``` +keywords: + - kyverno + - Sample + - EKS Best Practices + - CEL Expressions +readme: | + Containers should be forbidden from running with a root primary or supplementary GID. This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number greater than zero (i.e., non root). A known issue prevents a policy such as this using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample, EKS Best Practices in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: b2f00c69719c2f91584551c65a0809e4d2d2e691030b41aa3bf80cdcb6e45320 +createdAt: "2024-05-19T10:49:49Z" diff --git a/other-cel/require-non-root-groups/require-non-root-groups.yaml b/other-cel/require-non-root-groups/require-non-root-groups.yaml new file mode 100644 index 000000000..4f0f77b7d --- /dev/null +++ b/other-cel/require-non-root-groups/require-non-root-groups.yaml @@ -0,0 +1,88 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-non-root-groups + annotations: + policies.kyverno.io/title: Require Non-Root Groups in CEL expressions + policies.kyverno.io/category: Sample, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Containers should be forbidden from running with a root primary or supplementary GID. + This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number + greater than zero (i.e., non root). A known issue prevents a policy such as this + using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-runasgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + ( + has(object.spec.securityContext) && has(object.spec.securityContext.runAsGroup) && (object.spec.securityContext.runAsGroup > 0) && + variables.allContainers.all(container, !has(container.securityContext) || !has(container.securityContext.runAsGroup) || container.securityContext.runAsGroup > 0) + ) || + ( + variables.allContainers.all(container, has(container.securityContext) && has(container.securityContext.runAsGroup) && container.securityContext.runAsGroup > 0) + ) + message: >- + Running with root group IDs is disallowed. The fields + spec.securityContext.runAsGroup, spec.containers[*].securityContext.runAsGroup, + spec.initContainers[*].securityContext.runAsGroup, and + spec.ephemeralContainers[*].securityContext.runAsGroup must be + set to a value greater than zero. + - name: check-supplementalgroups + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.supplementalGroups) || + object.spec.securityContext.supplementalGroups.all(group, group > 0) + message: >- + Containers cannot run with a root primary or supplementary GID. The field + spec.securityContext.supplementalGroups must be unset or + set to a value greater than zero. 
+ - name: check-fsgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.fsGroup) || + object.spec.securityContext.fsGroup > 0 + message: >- + Containers cannot run with a root primary or supplementary GID. The field + spec.securityContext.fsGroup must be unset or set to a value greater than zero. + diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..3a4e08816 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-qos-guaranteed +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-qos-guaranteed.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-qos-guaranteed + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/pod-bad.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..9cd0cbc6a --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + cpu: "2" + - name: busybox-again + image: busybox:1.35 + resources: + limits: + memory: "100Mi" + cpu: "1" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + - name: busybox-again + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + - name: busybox-again + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/pod-good.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..f9ae97536 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-good.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + cpu: "2" + limits: + 
memory: "50Mi" + cpu: "2" + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..7a363df17 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + cpu: "1" + - name: busybox-again + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: busybox + image: busybox:1.35 + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + limits: + cpu: "2" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..b9d919b36 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "200Mi" + cpu: "2" + limits: + memory: "200Mi" + cpu: "2" + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + cpu: "0.5" + limits: + memory: "50Mi" + cpu: "0.5" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/policy-ready.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..7d014a1b1 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-qos-guaranteed +status: + ready: true diff --git a/other-cel/require-qos-guaranteed/.kyverno-test/kyverno-test.yaml b/other-cel/require-qos-guaranteed/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b11a7e2e1 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,24 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-qos-guaranteed +policies: +- ../require-qos-guaranteed.yaml +resources: +- resource.yaml +results: +- kind: Pod + policy: require-qos-guaranteed + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail + rule: guaranteed +- kind: Pod + policy: 
require-qos-guaranteed + resources: + - goodpod01 + - goodpod02 + result: pass + rule: guaranteed diff --git a/other-cel/require-qos-guaranteed/.kyverno-test/resource.yaml b/other-cel/require-qos-guaranteed/.kyverno-test/resource.yaml new file mode 100644 index 000000000..105a8d2a5 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.kyverno-test/resource.yaml @@ -0,0 +1,97 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" + - name: seconddemo + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "300Mi" + cpu: "500m" + requests: + memory: "300Mi" + cpu: "500m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "400Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "900m" + requests: + memory: "200Mi" + cpu: "700m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" + - name: secondname + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "800m" + requests: + memory: "200Mi" + cpu: "700m" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/artifacthub-pkg.yml b/other-cel/require-qos-guaranteed/artifacthub-pkg.yml new file mode 100644 index 000000000..047d7ef7e --- /dev/null +++ b/other-cel/require-qos-guaranteed/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-qos-guaranteed-cel +version: 1.0.0 +displayName: Require QoS Guaranteed in CEL expressions +description: >- + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain priority guarantees based upon the resources they define. When Pods define both requests and limits for both memory and CPU, and the requests and limits are equal to each other, Kubernetes grants the QoS class as guaranteed which allows them to run at a higher priority than others. This policy requires that all containers within a Pod run with this definition resulting in a guaranteed QoS. This policy is provided with the intention that users will need to control its scope by using exclusions, preconditions, and other policy language mechanisms. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml + ``` +keywords: + - kyverno + - Other + - Multi-Tenancy + - CEL Expressions +readme: | + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain priority guarantees based upon the resources they define. When Pods define both requests and limits for both memory and CPU, and the requests and limits are equal to each other, Kubernetes grants the QoS class as guaranteed which allows them to run at a higher priority than others. 
This policy requires that all containers within a Pod run with this definition resulting in a guaranteed QoS. This policy is provided with the intention that users will need to control its scope by using exclusions, preconditions, and other policy language mechanisms. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other, Multi-Tenancy in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 5f7ee8a0d9f33c875ac26940425f5cc12ac8f528ea6cf233df2b4c79ed5ae43d +createdAt: "2024-05-19T11:06:21Z" diff --git a/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml b/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml new file mode 100644 index 000000000..485ddfbcc --- /dev/null +++ b/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml @@ -0,0 +1,47 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-qos-guaranteed + annotations: + policies.kyverno.io/title: Require QoS Guaranteed in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain + priority guarantees based upon the resources they define. When Pods define both + requests and limits for both memory and CPU, and the requests and limits are equal + to each other, Kubernetes grants the QoS class as guaranteed which allows them to run + at a higher priority than others. This policy requires that all containers within a Pod + run with this definition resulting in a guaranteed QoS. This policy is provided with the + intention that users will need to control its scope by using + exclusions, preconditions, and other policy language mechanisms. +spec: + validationFailureAction: Audit + background: true + rules: + - name: guaranteed + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + has(container.resources) && + has(container.resources.requests) && + has(container.resources.requests.cpu) && has(container.resources.requests.memory) && + has(container.resources.limits) && + has(container.resources.limits.cpu) && has(container.resources.limits.memory) && + container.resources.requests.cpu == container.resources.limits.cpu && + container.resources.requests.memory == container.resources.limits.memory) + message: "All containers must define memory and CPU requests and limits where they are equal." + diff --git a/other-cel/restrict-edit-for-endpoints/artifacthub-pkg.yml b/other-cel/restrict-edit-for-endpoints/artifacthub-pkg.yml new file mode 100644 index 000000000..fb9ab8d60 --- /dev/null +++ b/other-cel/restrict-edit-for-endpoints/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-edit-for-endpoints-cel +version: 1.0.0 +displayName: Restrict Edit for Endpoints CVE-2021-25740 in CEL expressions +description: >- + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue defined in CVE-2021-25740 which could enable users to send network traffic to locations they would otherwise not have access to via a confused deputy attack. This was due to the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. 
This policy, intended to run in background mode, checks if your cluster is vulnerable to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have the edit permission of Endpoints. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml + ``` +keywords: + - kyverno + - Security + - CEL Expressions +readme: | + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue defined in CVE-2021-25740 which could enable users to send network traffic to locations they would otherwise not have access to via a confused deputy attack. This was due to the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. This policy, intended to run in background mode, checks if your cluster is vulnerable to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have the edit permission of Endpoints. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Security in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "ClusterRole" +digest: 1744f09b521d94f2d72bd0d7f50986ccd07cc90a9f66dbbdbaa985ca8c8e5b7e +createdAt: "2024-05-19T14:59:05Z" diff --git a/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml b/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml new file mode 100644 index 000000000..f1539a014 --- /dev/null +++ b/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml @@ -0,0 +1,39 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-edit-for-endpoints + annotations: + policies.kyverno.io/title: Restrict Edit for Endpoints CVE-2021-25740 in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: low + policies.kyverno.io/subject: ClusterRole + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue + defined in CVE-2021-25740 which could enable users to send network traffic to locations + they would otherwise not have access to via a confused deputy attack. This was due to + the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. + This policy, intended to run in background mode, checks if your cluster is vulnerable + to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have + the edit permission of Endpoints. +spec: + validationFailureAction: Audit + background: true + rules: + - name: system-aggregate-to-edit-check + match: + any: + - resources: + kinds: + - ClusterRole + names: + - system:aggregate-to-edit + validate: + cel: + expressions: + - expression: "!object.rules.exists(rule, 'endpoints' in rule.resources && 'edit' in rule.verbs)" + message: >- + This cluster may still be vulnerable to CVE-2021-25740. The system:aggregate-to-edit ClusterRole + should not have edit permission over Endpoints. 
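As background for the rule above: the CEL expression iterates over the ClusterRole's `rules` list and fails validation as soon as any entry combines `endpoints` in `resources` with `edit` in `verbs`. A minimal sketch of a ClusterRole shape the expression would flag (hypothetical resource, not part of this change):

```yaml
# Hypothetical vulnerable shape: one rule carries both 'endpoints' in
# resources and 'edit' in verbs, so the exists() check returns true,
# the expression evaluates to false, and the message above is reported.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:aggregate-to-edit
rules:
- apiGroups: [""]
  resources: ["endpoints"]             # matched by 'endpoints' in rule.resources
  verbs: ["create", "edit", "patch"]   # matched by 'edit' in rule.verbs
```

Because the policy runs in background mode, the check is evaluated against the ClusterRole already present in the cluster rather than only on admission.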
+ diff --git a/other-cel/restrict-jobs/.chainsaw-test/chainsaw-test.yaml b/other-cel/restrict-jobs/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..0537560b5 --- /dev/null +++ b/other-cel/restrict-jobs/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-jobs +spec: + steps: + - name: step-01 + try: + - apply: + file: ../restrict-jobs.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + expect: + - check: + ($error != null): true + file: jobs-bad.yaml + - apply: + file: cronjobs-good.yaml diff --git a/other-cel/restrict-jobs/.chainsaw-test/cronjobs-good.yaml b/other-cel/restrict-jobs/.chainsaw-test/cronjobs-good.yaml new file mode 100644 index 000000000..cc0f70f29 --- /dev/null +++ b/other-cel/restrict-jobs/.chainsaw-test/cronjobs-good.yaml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/restrict-jobs/.chainsaw-test/jobs-bad.yaml b/other-cel/restrict-jobs/.chainsaw-test/jobs-bad.yaml new file mode 100644 index 000000000..700346ed5 --- /dev/null +++ b/other-cel/restrict-jobs/.chainsaw-test/jobs-bad.yaml @@ -0,0 +1,12 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: badjob +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never \ No newline at end of file diff --git a/kasten/k10-minimum-retention/.chainsaw-test/policy-ready.yaml b/other-cel/restrict-jobs/.chainsaw-test/policy-ready.yaml similarity index 81% rename from kasten/k10-minimum-retention/.chainsaw-test/policy-ready.yaml rename to other-cel/restrict-jobs/.chainsaw-test/policy-ready.yaml index 99fd5a77e..118a84bec 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/policy-ready.yaml +++ b/other-cel/restrict-jobs/.chainsaw-test/policy-ready.yaml @@ -1,7 +1,7 @@ apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: - name: k10-minimum-retention + name: restrict-jobs status: conditions: - reason: Succeeded diff --git a/other-cel/restrict-jobs/.kyverno-test/kyverno-test.yaml b/other-cel/restrict-jobs/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c2e9af736 --- /dev/null +++ b/other-cel/restrict-jobs/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: restrict-jobs +policies: +- ../restrict-jobs.yaml +resources: +- resource.yaml +results: +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - badjob + result: fail +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - goodjob + result: skip diff --git a/other-cel/restrict-jobs/.kyverno-test/resource.yaml b/other-cel/restrict-jobs/.kyverno-test/resource.yaml new file mode 100644 index 000000000..6e48e4443 --- /dev/null +++ b/other-cel/restrict-jobs/.kyverno-test/resource.yaml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: badjob +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never +--- +apiVersion: 
batch/v1 +kind: Job +metadata: + name: goodjob + ownerReferences: + - apiVersion: batch/v1 + blockOwnerDeletion: true + controller: true + kind: CronJob + name: goodcronjob01 + uid: a554d6b8-8b0a-44da-a9d9-d76a1f85b320 +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never \ No newline at end of file diff --git a/other-cel/restrict-jobs/artifacthub-pkg.yml b/other-cel/restrict-jobs/artifacthub-pkg.yml new file mode 100644 index 000000000..1f1b05fb6 --- /dev/null +++ b/other-cel/restrict-jobs/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-jobs-cel +version: 1.0.0 +displayName: Restrict Jobs in CEL expressions +description: >- + Jobs can be created directly and indirectly via a CronJob controller. In some cases, users may want to only allow Jobs if they are created via a CronJob. This policy restricts Jobs so they may only be created by a CronJob. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-jobs/restrict-jobs.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Jobs can be created directly and indirectly via a CronJob controller. In some cases, users may want to only allow Jobs if they are created via a CronJob. This policy restricts Jobs so they may only be created by a CronJob. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Job" +digest: d8806389e8dd3e8ed5a2fe1a38fe4e4dec080af79d2cca7e684ddb46f244c6be +createdAt: "2024-05-19T16:05:23Z" diff --git a/other-cel/restrict-jobs/restrict-jobs.yaml b/other-cel/restrict-jobs/restrict-jobs.yaml new file mode 100644 index 000000000..13b836675 --- /dev/null +++ b/other-cel/restrict-jobs/restrict-jobs.yaml @@ -0,0 +1,33 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-jobs + annotations: + policies.kyverno.io/title: Restrict Jobs in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Job + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Jobs can be created directly and indirectly via a CronJob controller. + In some cases, users may want to only allow Jobs if they are created via a CronJob. + This policy restricts Jobs so they may only be created by a CronJob. +spec: + validationFailureAction: Enforce + rules: + - name: restrict-job-from-cronjob + match: + any: + - resources: + kinds: + - Job + celPreconditions: + - name: "not-created-by-cronjob" + expression: "!has(object.metadata.ownerReferences) || object.metadata.ownerReferences[0].kind != 'CronJob'" + validate: + cel: + expressions: + - expression: "false" + message: Jobs are only allowed if spawned from CronJobs. 
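One subtlety worth noting in the celPrecondition above: `object.metadata.ownerReferences[0]` inspects only the first entry. In practice a CronJob-spawned Job carries a single controller ownerReference (as the `goodjob` fixture shows), but as a sketch (hypothetical resource, not from this change), a Job whose first owner is some other kind would still be denied even if a CronJob appears later in the list:

```yaml
# Hypothetical edge case: only ownerReferences[0] is evaluated, so the
# leading non-CronJob owner makes the precondition match and the request
# is denied, despite the CronJob reference that follows.
apiVersion: batch/v1
kind: Job
metadata:
  name: edgecase-job
  ownerReferences:
  - apiVersion: example.com/v1   # assumed custom kind, first in the list
    kind: Workflow
    name: some-workflow
    uid: 00000000-0000-0000-0000-000000000000
  - apiVersion: batch/v1
    kind: CronJob                # ignored: not at index 0
    name: goodcronjob01
    uid: 11111111-1111-1111-1111-111111111111
spec:
  template:
    spec:
      containers:
      - name: busybox
        image: busybox:1.35
        command: ["sleep", "3600"]
      restartPolicy: Never
```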
+ diff --git a/other-cel/restrict-node-label-creation/.chainsaw-test/chainsaw-test.yaml b/other-cel/restrict-node-label-creation/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..7174c36d8 --- /dev/null +++ b/other-cel/restrict-node-label-creation/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-node-label-creation +spec: + steps: + - name: step-01 + try: + - script: + content: | + kubectl get configmap kyverno -n kyverno -o yaml | sed 's/\[Node\/\*,\*,\*\]//g' - | sed 's/\[Node,\*,\*\]//g' - | kubectl apply -f - + - sleep: + duration: 5s + - name: step-02 + try: + - apply: + file: ../restrict-node-label-creation.yaml + - assert: + file: policy-ready.yaml + - name: step-03 + try: + - script: + content: | + node=$(kubectl get nodes --no-headers | awk 'NR==1{print $1}') + if kubectl label --overwrite nodes $node foo=bar; then echo "Failure: successfully set label foo"; exit 1; else echo "Success: failed to set label foo"; fi + if kubectl label --overwrite nodes $node bar=bar; then echo "Success: set label bar"; else echo "Failed to set label bar"; exit 1; fi + if kubectl label --overwrite nodes $node bar=foo; then echo "Success: modified label bar"; else echo "Failed to modify label bar"; exit 1; fi + if kubectl label nodes $node bar-; then echo "Success: removed label bar"; else echo "Failed to remove label bar"; exit 1; fi + - name: step-04 + try: + - script: + content: | + kubectl get configmap -n kyverno kyverno -o yaml | sed 's/\[APIService,\*,\*\]/\[Node,\*,\*\] \[Node\/\*,\*,\*\] \[APIService,\*,\*\]/g' - | kubectl apply -f - diff --git a/other-cel/restrict-node-label-creation/.chainsaw-test/policy-ready.yaml b/other-cel/restrict-node-label-creation/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..dd8579329 --- /dev/null +++ b/other-cel/restrict-node-label-creation/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,9 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-node-label-creation +status: + conditions: + - reason: Succeeded + status: "True" + type: Ready \ No newline at end of file diff --git a/other-cel/restrict-node-label-creation/artifacthub-pkg.yml b/other-cel/restrict-node-label-creation/artifacthub-pkg.yml new file mode 100644 index 000000000..98c6f363b --- /dev/null +++ b/other-cel/restrict-node-label-creation/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-node-label-creation-cel +version: 1.0.0 +displayName: Restrict node label creation in CEL expressions +description: >- + Node labels are critical pieces of metadata upon which many other applications and logic may depend and should not be altered or removed by regular users. Many cloud providers also use Node labels to signal specific functions to applications. This policy prevents setting of a new label called `foo` on cluster Nodes. Use of this policy requires removal of the Node resource filter in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy requires, at minimum, one of the following versions of Kubernetes: v1.18.18, v1.19.10, v1.20.6, or v1.21.0.
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + Node labels are critical pieces of metadata upon which many other applications and logic may depend and should not be altered or removed by regular users. Many cloud providers also use Node labels to signal specific functions to applications. This policy prevents setting of a new label called `foo` on cluster Nodes. Use of this policy requires removal of the Node resource filter in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy requires, at minimum, one of the following versions of Kubernetes: v1.18.18, v1.19.10, v1.20.6, or v1.21.0. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Node, Label" +digest: 688f42a4211a49dd6f743e2e302654447b9e27d8da63cb5547201be85cbb783b +createdAt: "2024-05-20T03:52:11Z" diff --git a/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml b/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml new file mode 100644 index 000000000..5a51e0975 --- /dev/null +++ b/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-node-label-creation + annotations: + policies.kyverno.io/title: Restrict node label creation in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/subject: Node, Label + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Node labels are critical pieces of metadata upon which many other applications and + logic may depend and should not be altered or removed by regular users. Many cloud + providers also use Node labels to signal specific functions to applications. + This policy prevents setting of a new label called `foo` on + cluster Nodes. Use of this policy requires removal of the Node resource filter + in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy + requires, at minimum, one of the following versions of Kubernetes: + v1.18.18, v1.19.10, v1.20.6, or v1.21.0. +spec: + validationFailureAction: Enforce + background: false + rules: + - name: prevent-label-set + match: + any: + - resources: + kinds: + - Node + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + - name: "has-foo-label" + expression: "has(object.metadata.labels) && 'foo' in object.metadata.labels" + validate: + cel: + expressions: + - expression: "false" + message: "Setting the `foo` label on a Node is not allowed." 
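To make the rule's behavior concrete: the two celPreconditions restrict the match to UPDATE requests whose incoming Node object carries a `foo` label, and the constant-false expression then denies exactly those. A sketch of a Node update that would be rejected (hypothetical node name, not part of this change):

```yaml
# Hypothetical update: submitting this over an existing Node is denied
# because the UPDATE carries the 'foo' label; updates touching only other
# labels (e.g. 'bar') pass through, matching the chainsaw script above.
apiVersion: v1
kind: Node
metadata:
  name: worker-01        # assumed existing node name
  labels:
    foo: bar             # 'foo' in object.metadata.labels -> request denied
```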
+ diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/chainsaw-test.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..d8398690b --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,50 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-pod-controller-serviceaccount-updates +spec: + steps: + - name: step-01 + try: + - apply: + file: ../restrict-pod-controller-serviceaccount-updates.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: restrict-pod-controller-serviceaccount-updates + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ns.yaml + - apply: + file: sa-01.yaml + - apply: + file: sa-02.yaml + - apply: + file: deployment.yaml + - apply: + file: cronjob.yaml + - name: step-03 + try: + - apply: + expect: + - check: + ($error != null): true + file: cronjob-bad-update.yaml + - apply: + expect: + - check: + ($error != null): true + file: deploy-bad-update.yaml + - apply: + file: cronjob-good-update.yaml + - apply: + file: deploy-good-update.yaml diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-bad-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-bad-update.yaml new file mode 100644 index 000000000..035cb6a3c --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-bad-update.yaml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: cronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + serviceAccountName: serviceaccount02 + containers: + - name: busybox + image: busybox:1.35 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-good-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-good-update.yaml new file mode 100644 index 000000000..004731f65 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-good-update.yaml @@ -0,0 +1,11 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: cronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: Never \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob.yaml new file mode 100755 index 000000000..5e04c53ee --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob.yaml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: cronjob01 +spec: + jobTemplate: + spec: + template: + spec: + containers: + - image: busybox:1.35 + name: busybox + restartPolicy: OnFailure + serviceAccountName: serviceaccount01 + schedule: '* * * * *' diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-bad-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-bad-update.yaml new file mode 100644 
index 000000000..8788cebd3 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-bad-update.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: deployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + serviceAccountName: serviceaccount02 + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-good-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-good-update.yaml new file mode 100644 index 000000000..f100a5052 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-good-update.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: deployment01 +spec: + template: + spec: + restartPolicy: Always \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deployment.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deployment.yaml new file mode 100755 index 000000000..b8f88a8ad --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: deployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - image: busybox:1.35 + name: busybox + serviceAccountName: serviceaccount01 diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/ns.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/ns.yaml new file mode 100755 index 000000000..e3688b96e --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: restrict-sa-ns diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/policy-ready.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..92b7018c8 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-pod-controller-serviceaccount-updates +status: + ready: true diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-01.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-01.yaml new file mode 100755 index 000000000..71e72fad5 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-01.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: serviceaccount01 + namespace: restrict-sa-ns diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-02.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-02.yaml new file mode 100755 index 000000000..042c339a8 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-02.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: serviceaccount02 + 
namespace: restrict-sa-ns diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/artifacthub-pkg.yml b/other-cel/restrict-pod-controller-serviceaccount-updates/artifacthub-pkg.yml new file mode 100644 index 000000000..94461cb0c --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-pod-controller-serviceaccount-updates-cel +version: 1.0.0 +displayName: Restrict Pod Controller ServiceAccount Updates in CEL expressions +description: >- + ServiceAccounts which have the ability to edit/patch workloads which they created may potentially use that privilege to update to a different ServiceAccount with higher privileges. This policy, intended to be run in `enforce` mode, blocks updates to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods directly for this field are not possible as it is immutable once set. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + ServiceAccounts which have the ability to edit/patch workloads which they created may potentially use that privilege to update to a different ServiceAccount with higher privileges. This policy, intended to be run in `enforce` mode, blocks updates to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods directly for this field are not possible as it is immutable once set. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: a3447fb207a7640b2744570dbe01cc0816128a7b7e0776ac2febf6c5a4db0e77 +createdAt: "2024-05-20T04:20:28Z" diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml new file mode 100644 index 000000000..81da09843 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml @@ -0,0 +1,59 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-pod-controller-serviceaccount-updates + annotations: + policies.kyverno.io/title: Restrict Pod Controller ServiceAccount Updates in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + ServiceAccounts which have the ability to edit/patch workloads which they created + may potentially use that privilege to update to a different ServiceAccount with higher + privileges. This policy, intended to be run in `enforce` mode, blocks updates + to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods + directly for this field are not possible as it is immutable once set.
+spec: + validationFailureAction: Audit + background: true + rules: + - name: block-serviceaccount-updates + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: >- + object.spec.template.spec.?serviceAccountName.orValue('empty') == oldObject.spec.template.spec.?serviceAccountName.orValue('empty') + message: >- + The serviceAccountName field may not be changed once created. + - name: block-serviceaccount-updates-cronjob + match: + any: + - resources: + kinds: + - CronJob + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: >- + object.spec.jobTemplate.spec.template.spec.?serviceAccountName.orValue('empty') == oldObject.spec.jobTemplate.spec.template.spec.?serviceAccountName.orValue('empty') + message: >- + The serviceAccountName field may not be changed once created. + diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/chainsaw-test.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..d0c7d8295 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: topologyspreadconstraints-policy +spec: + steps: + - name: step-01 + try: + - apply: + file: ../topologyspreadconstraints-policy.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: topologyspreadconstraints-policy + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: podcontrollers-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontrollers-bad.yaml diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-bad.yaml new file mode 100644 index 000000000..c704109f2 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-bad.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy01 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy02 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: 
kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy03 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-good.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-good.yaml new file mode 100644 index 000000000..9f9c9ad53 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-good.yaml @@ -0,0 +1,91 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy01 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy02 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy03 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/policy-ready.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..080e44efe --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: topologyspreadconstraints-policy +status: + ready: true diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..d1e1c210e --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,33 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: topologyspreadconstraints +policies: +- ../topologyspreadconstraints-policy.yaml +resources: +- resource-fail1.yaml +- resource-fail2.yaml +- resource-fail3.yaml +- resource-pass.yaml +- resource-skip.yaml +results: +- kind: StatefulSet + policy: topologyspreadconstraints-policy + resources: + - monitoring/badss01 + - monitoring/badss02 + - monitoring/badss03 + result: fail + 
rule: spread-pods +- kind: StatefulSet + policy: topologyspreadconstraints-policy + resources: + - monitoring/goodss01 + result: pass + rule: spread-pods +- kind: StatefulSet + policy: topologyspreadconstraints-policy + resources: + - monitoring/skipss01 + result: skip + rule: spread-pods diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml new file mode 100644 index 000000000..065a47ff5 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: badss01 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml new file mode 100644 index 000000000..0031995fc --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: badss02 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml new file mode 100644 index 000000000..d88b17bf3 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: badss03 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml 
b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml new file mode 100644 index 000000000..0310e6b00 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: goodss01 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml new file mode 100644 index 000000000..6761e7076 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: skipss01 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 1 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/artifacthub-pkg.yml b/other-cel/topologyspreadconstraints-policy/artifacthub-pkg.yml new file mode 100644 index 000000000..3d251a745 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: topologyspreadconstraints-policy-cel +version: 1.0.0 +displayName: Spread Pods Across Nodes & Zones in CEL expressions +description: >- + Deployments to a Kubernetes cluster with multiple availability zones often need to distribute their replicas across those zones to ensure site-level failures do not impact availability. This policy ensures topologySpreadConstraints are defined to spread pods over nodes and zones. Deployments or StatefulSets with fewer than 3 replicas are skipped.
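+# Note: the validation counts topologySpreadConstraints whose topologyKey is +# kubernetes.io/hostname or topology.kubernetes.io/zone and requires exactly +# two matches (in practice one per key); constraints with other keys, such as +# foo.bar/test in the test resources, are ignored.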
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + Deployments to a Kubernetes cluster with multiple availability zones often need to distribute their replicas across those zones to ensure site-level failures do not impact availability. This policy ensures topologySpreadConstraints are defined to spread pods over nodes and zones. Deployments or StatefulSets with fewer than 3 replicas are skipped. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Deployment, StatefulSet" +digest: bd9dae9c99706fe3d16d26f59bd1bb8ecdaf09ffb038d79e8906fb8c72ec3b0f +createdAt: "2024-04-29T15:49:11Z" + diff --git a/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml b/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml new file mode 100644 index 000000000..858bfb197 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml @@ -0,0 +1,44 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: topologyspreadconstraints-policy + annotations: + policies.kyverno.io/title: Spread Pods Across Nodes & Zones in CEL expressions + kyverno.io/kubernetes-version: "1.26-1.27" + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/description: >- + Deployments to a Kubernetes cluster with multiple availability zones often need to + distribute their replicas across those zones to ensure site-level failures + do not impact availability. This policy ensures topologySpreadConstraints are defined + to spread pods over nodes and zones. Deployments or StatefulSets with fewer than 3 + replicas are skipped.
+ policies.kyverno.io/minversion: 1.11.0 + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Deployment, StatefulSet +spec: + background: true + failurePolicy: Ignore + validationFailureAction: Audit + rules: + - name: spread-pods + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "replicas-must-be-3-or-more" + expression: "object.spec.replicas >= 3" + validate: + cel: + expressions: + - expression: >- + has(object.spec.template.spec.topologySpreadConstraints) && + size(object.spec.template.spec.topologySpreadConstraints.filter(t, t.topologyKey == 'kubernetes.io/hostname' || t.topologyKey == 'topology.kubernetes.io/zone')) == 2 + message: "topologySpreadConstraints for kubernetes.io/hostname & topology.kubernetes.io/zone are required" + diff --git a/other/require-container-port-names/.kyverno-test/kyverno-test.yaml b/other/require-container-port-names/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c1054a710 --- /dev/null +++ b/other/require-container-port-names/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,52 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-container-port-names +policies: +- ../require-container-port-names.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - goodpod01 + - goodpod02 + result: pass +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - baddeployment01 + result: fail +- kind: CronJob + policy: require-container-port-names + rule: port-name + resources: + - badcronjob01 + result: fail +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - gooddeployment01 + result: pass +- kind: CronJob + policy: require-container-port-names + rule: port-name + resources: + - goodcronjob01 + result: pass + diff --git a/other/restrict-jobs/.chainsaw-test/cronjob.yaml b/other/restrict-jobs/.chainsaw-test/cronjob.yaml deleted file mode 100644 index 52ae70aa5..000000000 --- a/other/restrict-jobs/.chainsaw-test/cronjob.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: hello -spec: - schedule: "*/1 * * * *" - jobTemplate: - spec: - template: - spec: - containers: - - name: hello - image: busybox - args: - - /bin/sh - - -c - - date; echo Hello from the Kubernetes cluster - restartPolicy: OnFailure \ No newline at end of file diff --git a/other/restrict-jobs/.chainsaw-test/job.yaml b/other/restrict-jobs/.chainsaw-test/job.yaml deleted file mode 100644 index 4edf767e2..000000000 --- a/other/restrict-jobs/.chainsaw-test/job.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: pi -spec: - template: - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - restartPolicy: Never - backoffLimit: 4 \ No newline at end of file diff --git a/other/restrict-jobs/.kyverno-test/kyverno-test.yaml b/other/restrict-jobs/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c2e9af736 --- /dev/null +++ b/other/restrict-jobs/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@
+apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: restrict-jobs +policies: +- ../restrict-jobs.yaml +resources: +- resource.yaml +results: +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - badjob + result: fail +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - goodjob + result: skip diff --git a/other/restrict-jobs/.kyverno-test/resource.yaml b/other/restrict-jobs/.kyverno-test/resource.yaml new file mode 100644 index 000000000..6e48e4443 --- /dev/null +++ b/other/restrict-jobs/.kyverno-test/resource.yaml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: badjob +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: goodjob + ownerReferences: + - apiVersion: batch/v1 + blockOwnerDeletion: true + controller: true + kind: CronJob + name: goodcronjob01 + uid: a554d6b8-8b0a-44da-a9d9-d76a1f85b320 +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never \ No newline at end of file diff --git a/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml b/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml index 29c1bb319..dbaf54c33 100644 --- a/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml +++ b/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml @@ -4,8 +4,7 @@ metadata: name: badpod01 spec: nodeSelector: - matchLabels: - foo: bar + foo: bar containers: - name: busybox image: busybox:1.35 @@ -18,4 +17,4 @@ spec: nodeName: kind-control-plane containers: - name: busybox - image: busybox:1.35 \ No newline at end of file + image: busybox:1.35 diff --git a/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml b/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml index 67ef8da4b..985c6f597 100644 --- a/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml +++ b/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml @@ -16,8 +16,7 @@ spec: app: busybox spec: nodeSelector: - matchLabels: - foo: bar + foo: bar containers: - name: busybox image: busybox:1.35 @@ -36,4 +35,4 @@ spec: containers: - name: busybox image: busybox:1.35 - restartPolicy: OnFailure \ No newline at end of file + restartPolicy: OnFailure diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml index e5d9efa7e..d1e1c210e 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml @@ -14,20 +14,20 @@ results: - kind: StatefulSet policy: topologyspreadconstraints-policy resources: - - monitoring/fail1 - - monitoring/fail2 - - monitoring/fail3 + - monitoring/badss01 + - monitoring/badss02 + - monitoring/badss03 result: fail rule: spread-pods - kind: StatefulSet policy: topologyspreadconstraints-policy resources: - - monitoring/pass + - monitoring/goodss01 result: pass rule: spread-pods - kind: StatefulSet policy: topologyspreadconstraints-policy resources: - - monitoring/skip + - monitoring/skipss01 result: skip rule: spread-pods diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml index 143b295f1..065a47ff5 100644 --- 
a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: fail1 + name: badss01 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml index f61c1fab8..0031995fc 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: fail2 + name: badss02 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml index fc763ab04..d88b17bf3 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: fail3 + name: badss03 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml index 4e5f55339..0310e6b00 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: pass + name: goodss01 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml index 746608c08..6761e7076 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: skip + name: skipss01 namespace: monitoring labels: app: thanos-memcached
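The renamed test fixtures above can be exercised locally before pushing. A minimal sketch, assuming the Kyverno CLI is installed and the commands are run from the repository root (the paths are the directories touched by this change):

```shell
# Discover and run the kyverno-test.yaml suites for both variants of the
# topologyspreadconstraints policy; each suite loads the policy, applies it to
# the renamed resource files, and checks the declared pass/fail/skip results.
kyverno test other/topologyspreadconstraints-policy/.kyverno-test
kyverno test other-cel/topologyspreadconstraints-policy/.kyverno-test
```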