diff --git a/.dockerignore b/.dockerignore index 122da4923..a4078faf6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -39,18 +39,26 @@ !target/x86_64-unknown-linux-gnu/release/agent !target/x86_64-unknown-linux-gnu/debug/udev-video-broker !target/x86_64-unknown-linux-gnu/release/udev-video-broker +!target/x86_64-unknown-linux-gnu/debug/webhook-configuration +!target/x86_64-unknown-linux-gnu/release/webhook-configuration + !target/aarch64-unknown-linux-gnu/debug/controller !target/aarch64-unknown-linux-gnu/release/controller !target/aarch64-unknown-linux-gnu/debug/agent !target/aarch64-unknown-linux-gnu/release/agent !target/aarch64-unknown-linux-gnu/debug/udev-video-broker !target/aarch64-unknown-linux-gnu/release/udev-video-broker +!target/aarch64-unknown-linux-gnu/debug/webhook-configuration +!target/aarch64-unknown-linux-gnu/release/webhook-configuration + !target/armv7-unknown-linux-gnueabihf/debug/controller !target/armv7-unknown-linux-gnueabihf/release/controller !target/armv7-unknown-linux-gnueabihf/debug/agent !target/armv7-unknown-linux-gnueabihf/release/agent !target/armv7-unknown-linux-gnueabihf/debug/udev-video-broker !target/armv7-unknown-linux-gnueabihf/release/udev-video-broker +!target/armv7-unknown-linux-gnueabihf/debug/webhook-configuration +!target/armv7-unknown-linux-gnueabihf/release/webhook-configuration # Cross toml file needs to be available for making the cross build containers !Cross.toml diff --git a/.github/workflows/build-webhook-configuration-container.yml b/.github/workflows/build-webhook-configuration-container.yml new file mode 100644 index 000000000..7013e9588 --- /dev/null +++ b/.github/workflows/build-webhook-configuration-container.yml @@ -0,0 +1,106 @@ +name: Build Webhook Configuration + +on: + workflow_dispatch: + inputs: + push: + branches: [main] + paths: + - .github/actions/build-component-per-arch/** + - .github/actions/build-component-multi-arch/** + - .github/workflows/build-webhook-configuration-container.yml + - 
build/containers/Dockerfile.webhook-configuration + - webhooks/validating/configuration + - version.txt + - build/akri-containers.mk + - Makefile + pull_request: + branches: [main] + paths: + - .github/actions/build-component-per-arch/** + - .github/actions/build-component-multi-arch/** + - .github/workflows/build-webhook-configuration-container.yml + - build/containers/Dockerfile.webhook-configuration + - webhooks/validating/configuration + - version.txt + - build/akri-containers.mk + - Makefile + release: + types: + - published + +env: + AKRI_COMPONENT: webhook-configuration + MAKEFILE_COMPONENT: webhook-configuration + +jobs: + per-arch: + runs-on: ubuntu-latest + timeout-minutes: 60 + strategy: + matrix: + arch: + - arm64v8 + - arm32v7 + - amd64 + + steps: + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Prepare To Install + uses: actions/setup-node@v1 + with: + node-version: 12 + - name: Install Deps + run: | + yarn install + yarn add @actions/core @actions/github @actions/exec fs + + - name: Run Per-Arch component build for ${{ env.AKRI_COMPONENT }} + uses: ./.github/actions/build-component-per-arch + with: + github_event_name: ${{ github.event_name }} + github_ref: ${{ github.ref }} + github_event_action: ${{ github.event.action }} + github_merged: ${{ github.event.pull_request.merged }} + container_name: ${{ env.AKRI_COMPONENT }} + container_prefix: ghcr.io/deislabs/akri + container_registry_base_url: ghcr.io + container_registry_username: ${{ secrets.crUsername }} + container_registry_password: ${{ secrets.crPassword }} + makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} + platform: ${{ matrix.arch }} + build_rust: "1" + + multi-arch: + if: (github.event_name == 'release') || (github.event_name == 'push' && github.ref == 'refs/heads/main') || (startsWith(github.event_name, 'pull_request') && github.event.action == 'closed' && github.event.pull_request.merged == true && 
github.ref != 'refs/heads/main') + needs: per-arch + runs-on: ubuntu-latest + timeout-minutes: 60 + + steps: + - name: Checkout repo + uses: actions/checkout@v2 + + - name: Prepare To Install + uses: actions/setup-node@v1 + with: + node-version: 12 + - name: Install Deps + run: | + yarn install + yarn add @actions/core @actions/github @actions/exec fs + + - name: Run Multi-Arch component build for ${{ env.AKRI_COMPONENT }} + uses: ./.github/actions/build-component-multi-arch + with: + github_event_name: ${{ github.event_name }} + container_name: ${{ env.AKRI_COMPONENT }} + container_prefix: ghcr.io/deislabs/akri + container_registry_base_url: ghcr.io + container_registry_username: ${{ secrets.crUsername }} + container_registry_password: ${{ secrets.crPassword }} + makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/run-test-cases.yml b/.github/workflows/run-test-cases.yml index b61cd2b08..c71afcc00 100644 --- a/.github/workflows/run-test-cases.yml +++ b/.github/workflows/run-test-cases.yml @@ -9,6 +9,7 @@ on: - test/run-end-to-end.py - test/run-conservation-of-broker-pod.py - test/run-helm-install-delete.py + - test/run-webhook.py - test/shared_test_code.py - .github/workflows/run-test-cases.yml - build/containers/Dockerfile.agent @@ -26,6 +27,7 @@ on: - test/run-end-to-end.py - test/run-conservation-of-broker-pod.py - test/run-helm-install-delete.py + - test/run-webhook.py - test/shared_test_code.py - .github/workflows/run-test-cases.yml - build/containers/Dockerfile.agent @@ -64,8 +66,10 @@ jobs: make akri-build make controller-build-amd64 make agent-build-amd64 + make webhook-configuration-build-amd64 docker save ${PREFIX}/agent:${LABEL_PREFIX}-amd64 > agent.tar docker save ${PREFIX}/controller:${LABEL_PREFIX}-amd64 > controller.tar + docker save ${PREFIX}/webhook-configuration:${LABEL_PREFIX}-amd64 > webhook-configuration.tar - name: Upload Agent container as artifact if: startsWith(github.event_name, 'pull_request') @@ -79,6 
+83,12 @@ jobs: with: name: controller.tar path: controller.tar + - name: Upload Webhook-Configuration container as artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/upload-artifact@v2 + with: + name: webhook-configuration.tar + path: webhook-configuration.tar test-cases: needs: build-containers @@ -137,6 +147,8 @@ jobs: test: - case: end-to-end file: test/run-end-to-end.py + - case: webhook + file: test/run-webhook.py steps: - name: Checkout the head commit of the branch @@ -163,6 +175,11 @@ jobs: uses: actions/download-artifact@v2 with: name: controller.tar + - name: Download Webhook-Configuration container artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/download-artifact@v2 + with: + name: webhook-configuration.tar - if: startsWith(matrix.kube.runtime, 'K3s') name: Install K3s @@ -197,6 +214,7 @@ jobs: run: | sudo k3s ctr image import agent.tar sudo k3s ctr image import controller.tar + sudo k3s ctr image import webhook-configuration.tar - if: startsWith(matrix.kube.runtime, 'Kubernetes') name: Install Kubernetes @@ -222,6 +240,7 @@ jobs: run: | sudo docker load --input agent.tar sudo docker load --input controller.tar + sudo docker load --input webhook-configuration.tar - if: startsWith(matrix.kube.runtime, 'MicroK8s') name: Install MicroK8s @@ -234,7 +253,6 @@ jobs: sudo chown -f -R $USER $HOME/.kube --verbose sudo sh -c "microk8s.kubectl config view --raw >~/.kube/config" sudo cat ~/.kube/config - # sudo microk8s.enable helm3 sudo microk8s.enable rbac sudo microk8s.enable dns until sudo microk8s.status --wait-ready; do sleep 5s; echo "Try again"; done @@ -253,6 +271,7 @@ jobs: sudo microk8s ctr images ls sudo microk8s ctr --debug --timeout 10s images import agent.tar sudo microk8s ctr --debug --timeout 10s images import controller.tar + sudo microk8s ctr --debug --timeout 10s images import webhook-configuration.tar sudo microk8s ctr images ls - name: Add Akri Helm Chart @@ -270,7 +289,7 @@ jobs: run: | 
git fetch origin main git show origin/main:version.txt > /tmp/version_to_test.txt - echo '--set agent.image.pullPolicy=Never,agent.image.tag=pr-amd64,controller.image.pullPolicy=Never,controller.image.tag=pr-amd64' > /tmp/extra_helm_args.txt + echo '--set agent.image.pullPolicy=Never,agent.image.tag=pr-amd64,controller.image.pullPolicy=Never,controller.image.tag=pr-amd64,webhookConfiguration.image.pullPolicy=Never,webhookConfiguration.image.tag=pr-amd64' > /tmp/extra_helm_args.txt # For non-PR (i.e. push, release, manual), version.txt is corresponds # to an existing Helm chart. - if: (!(startsWith(github.event_name, 'pull_request'))) @@ -311,3 +330,9 @@ jobs: with: name: ${{ matrix.kube.runtime }}-${{ matrix.test.case }}-controller-log path: /tmp/controller_log.txt + - name: Upload webhook log as artifact + if: always() + uses: actions/upload-artifact@v2 + with: + name: ${{ matrix.kube.runtime }}-${{ matrix.test.case }}-webhook-log + path: /tmp/webhook_log.txt diff --git a/Cargo.lock b/Cargo.lock index 6acca3460..e4ff9776b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "agent" -version = "0.1.18" +version = "0.2.0" dependencies = [ "akri-shared", "anyhow", @@ -74,7 +74,7 @@ dependencies = [ [[package]] name = "akri-shared" -version = "0.1.18" +version = "0.2.0" dependencies = [ "anyhow", "async-trait", @@ -525,7 +525,7 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "controller" -version = "0.1.18" +version = "0.2.0" dependencies = [ "akri-shared", "anyhow", @@ -3401,7 +3401,7 @@ dependencies = [ [[package]] name = "udev-video-broker" -version = "0.1.18" +version = "0.2.0" dependencies = [ "akri-shared", "env_logger", diff --git a/Cargo.toml b/Cargo.toml index 6fc6d756b..ffae9f85d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,9 +4,4 @@ h2 = { git = "https://github.com/kate-goldenring/h2", branch = "master" } 
[workspace] -members = [ - "shared", - "controller", - "agent", - "samples/brokers/udev-video-broker" -] +members = ["shared", "controller", "agent", "samples/brokers/udev-video-broker", "webhooks/validating/configuration"] diff --git a/agent/Cargo.toml b/agent/Cargo.toml index 3dbae9170..b065f0a33 100644 --- a/agent/Cargo.toml +++ b/agent/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "agent" -version = "0.1.18" +version = "0.2.0" authors = ["Kate Goldenring ", ""] edition = "2018" diff --git a/build/akri-containers.mk b/build/akri-containers.mk index 25fb36369..e96bd4fb2 100644 --- a/build/akri-containers.mk +++ b/build/akri-containers.mk @@ -18,8 +18,8 @@ install-cross: # # To make all platforms: `make akri` # To make specific platforms: `BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=1 make akri` -# To make single component: `make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection]` -# To make specific platforms: `BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=1 make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection]` +# To make single component: `make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection|webhook-configuration]` +# To make specific platforms: `BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=1 make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection|webhook-configuration]` # # .PHONY: akri @@ -31,6 +31,7 @@ akri-onvif: akri-build akri-docker-onvif akri-streaming: akri-build akri-docker-streaming akri-opcua-monitoring: akri-docker-opcua-monitoring akri-anomaly-detection: akri-docker-anomaly-detection +akri-webhook-configuration: akri-build akri-docker-webhook-configuration akri-build: install-cross akri-cross-build akri-docker: akri-docker-build akri-docker-push-per-arch akri-docker-push-multi-arch-create akri-docker-push-multi-arch-push @@ -41,6 +42,7 @@ akri-docker-onvif: onvif-build onvif-docker-per-arch onvif-docker-multi-arch-cre akri-docker-streaming: 
streaming-build streaming-docker-per-arch streaming-docker-multi-arch-create streaming-docker-multi-arch-push akri-docker-opcua-monitoring: opcua-monitoring-build opcua-monitoring-docker-per-arch opcua-monitoring-docker-multi-arch-create opcua-monitoring-docker-multi-arch-push akri-docker-anomaly-detection: anomaly-detection-build anomaly-detection-docker-per-arch anomaly-detection-docker-multi-arch-create anomaly-detection-docker-multi-arch-push +akri-docker-webhook-configuration: webhook-configuration-build webhook-configuration-docker-per-arch webhook-configuration-docker-multi-arch-create webhook-configuration-docker-multi-arch-push akri-cross-build: akri-cross-build-amd64 akri-cross-build-arm32 akri-cross-build-arm64 akri-cross-build-amd64: @@ -56,7 +58,7 @@ ifeq (1, ${BUILD_ARM64}) PKG_CONFIG_ALLOW_CROSS=1 cross build --release --target=$(ARM64V8_TARGET) endif -akri-docker-build: controller-build agent-build udev-build onvif-build streaming-build opcua-monitoring-build anomaly-detection-build +akri-docker-build: controller-build agent-build udev-build onvif-build streaming-build opcua-monitoring-build anomaly-detection-build webhook-configuration-build controller-build: controller-build-amd64 controller-build-arm32 controller-build-arm64 controller-build-amd64: ifeq (1, ${BUILD_AMD64}) @@ -141,6 +143,20 @@ ifeq (1, ${BUILD_ARM64}) docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.anomaly-detection-app . -t $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) endif +webhook-configuration-build: webhook-configuration-build-amd64 webhook-configuration-build-arm32 webhook-configuration-build-arm64 +webhook-configuration-build-amd64: +ifeq (1, ${BUILD_AMD64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.webhook-configuration . 
-t $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(AMD64_TARGET) +endif +webhook-configuration-build-arm32: +ifeq (1, ${BUILD_ARM32}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.webhook-configuration . -t $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM32V7_TARGET) +endif +webhook-configuration-build-arm64: +ifeq (1, ${BUILD_ARM64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.webhook-configuration . -t $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM64V8_TARGET) +endif + streaming-build: streaming-build-amd64 streaming-build-arm32 streaming-build-arm64 streaming-build-amd64: ifeq (1, ${BUILD_AMD64}) @@ -155,7 +171,7 @@ ifeq (1, ${BUILD_ARM64}) docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.video-streaming-app . 
-t $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) endif -akri-docker-push-per-arch: controller-docker-per-arch agent-docker-per-arch udev-docker-per-arch onvif-docker-per-arch streaming-docker-per-arch opcua-monitoring-docker-per-arch anomaly-detection-docker-per-arch +akri-docker-push-per-arch: controller-docker-per-arch agent-docker-per-arch udev-docker-per-arch onvif-docker-per-arch streaming-docker-per-arch opcua-monitoring-docker-per-arch anomaly-detection-docker-per-arch webhook-configuration-docker-per-arch controller-docker-per-arch: controller-docker-per-arch-amd64 controller-docker-per-arch-arm32 controller-docker-per-arch-arm64 controller-docker-per-arch-amd64: @@ -241,6 +257,20 @@ ifeq (1, ${BUILD_ARM64}) docker push $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) endif +webhook-configuration-docker-per-arch: webhook-configuration-docker-per-arch-amd64 webhook-configuration-docker-per-arch-arm32 webhook-configuration-docker-per-arch-arm64 +webhook-configuration-docker-per-arch-amd64: +ifeq (1, ${BUILD_AMD64}) + docker push $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(AMD64_SUFFIX) +endif +webhook-configuration-docker-per-arch-arm32: +ifeq (1, ${BUILD_ARM32}) + docker push $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) +endif +webhook-configuration-docker-per-arch-arm64: +ifeq (1, ${BUILD_ARM64}) + docker push $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) +endif + streaming-docker-per-arch: streaming-docker-per-arch-amd64 streaming-docker-per-arch-arm32 streaming-docker-per-arch-arm64 streaming-docker-per-arch-amd64: ifeq (1, ${BUILD_AMD64}) @@ -323,6 +353,17 @@ ifeq (1, ${BUILD_ARM64}) $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX) $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) endif +webhook-configuration-docker-multi-arch-create: +ifeq (1, ${BUILD_AMD64}) + 
$(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/webhook-configuration:$(LABEL_PREFIX) $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(AMD64_SUFFIX) +endif +ifeq (1, ${BUILD_ARM32}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/webhook-configuration:$(LABEL_PREFIX) $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) +endif +ifeq (1, ${BUILD_ARM64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/webhook-configuration:$(LABEL_PREFIX) $(PREFIX)/webhook-configuration:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) +endif + streaming-docker-multi-arch-create: ifeq (1, ${BUILD_AMD64}) $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) @@ -334,7 +375,7 @@ ifeq (1, ${BUILD_ARM64}) $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) endif -akri-docker-push-multi-arch-push: controller-docker-multi-arch-push agent-docker-multi-arch-push udev-docker-multi-arch-push onvif-docker-multi-arch-push streaming-docker-multi-arch-push opcua-monitoring-docker-multi-arch-push anomaly-detection-docker-multi-arch-push +akri-docker-push-multi-arch-push: controller-docker-multi-arch-push agent-docker-multi-arch-push udev-docker-multi-arch-push onvif-docker-multi-arch-push streaming-docker-multi-arch-push opcua-monitoring-docker-multi-arch-push anomaly-detection-docker-multi-arch-push webhook-configuration-docker-multi-arch-push controller-docker-multi-arch-push: $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/controller:$(LABEL_PREFIX) @@ -348,6 +389,8 @@ opcua-monitoring-docker-multi-arch-push: $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX) anomaly-detection-docker-multi-arch-push: $(ENABLE_DOCKER_MANIFEST) docker manifest push 
$(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX) +webhook-configuration-docker-multi-arch-push: + $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/webhook-configuration:$(LABEL_PREFIX) streaming-docker-multi-arch-push: $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) diff --git a/build/containers/Dockerfile.webhook-configuration b/build/containers/Dockerfile.webhook-configuration new file mode 100644 index 000000000..05b68d0f8 --- /dev/null +++ b/build/containers/Dockerfile.webhook-configuration @@ -0,0 +1,24 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu + +FROM ${PLATFORM}/debian:buster-slim + +ARG CROSS_BUILD_TARGET + +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . + +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean + +COPY ./target/${CROSS_BUILD_TARGET}/release/webhook-configuration /server + +ENV RUST_LOG agent,akri_shared + +CMD ["/server"] +# CMD ["/server", "--tls-crt-file=/path/to/crt", "--tls-key-file=/path/to/key", "--port=8443"] diff --git a/controller/Cargo.toml b/controller/Cargo.toml index cfa5624b3..f8b28ab8e 100644 --- a/controller/Cargo.toml +++ b/controller/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "controller" -version = "0.1.18" +version = "0.2.0" authors = [""] edition = "2018" diff --git a/deployment/helm/Chart.yaml b/deployment/helm/Chart.yaml index 1899140d0..ba46d739e 100644 --- a/deployment/helm/Chart.yaml +++ b/deployment/helm/Chart.yaml @@ -15,9 +15,9 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.18 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. -appVersion: 0.1.18 +appVersion: 0.2.0 diff --git a/deployment/helm/templates/webhook-configuration.yaml b/deployment/helm/templates/webhook-configuration.yaml new file mode 100644 index 000000000..a23db98c2 --- /dev/null +++ b/deployment/helm/templates/webhook-configuration.yaml @@ -0,0 +1,142 @@ +{{- if .Values.webhookConfiguration.enabled }} +apiVersion: v1 +kind: List +metadata: + name: {{ .Values.webhookConfiguration.name }} +items: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: {{ .Values.webhookConfiguration.name }} + namespace: {{ .Release.Namespace }} + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: {{ .Values.webhookConfiguration.name }} + namespace: {{ .Release.Namespace }} + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: {{ .Values.webhookConfiguration.name }} + namespace: {{ .Release.Namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Values.webhookConfiguration.name }} + subjects: + - kind: ServiceAccount + name: {{ .Values.webhookConfiguration.name }} + namespace: {{ .Release.Namespace }} + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ .Values.webhookConfiguration.name }} + spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Values.webhookConfiguration.name }} + template: + metadata: + labels: + app: {{ .Values.webhookConfiguration.name }} + 
spec: + {{- if .Values.rbac.enabled }} + serviceAccountName: {{ .Values.webhookConfiguration.name }} + {{- end }} + containers: + - name: webhook + {{- if .Values.useDevelopmentContainers }} + {{- if .Values.useLatestContainers }} + image: {{ printf "%s:latest-dev" .Values.webhookConfiguration.image.repository | quote }} + {{- else }} + image: {{ printf "%s:%s" .Values.webhookConfiguration.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.webhookConfiguration.image.tag) | quote }} + {{- end }} + {{- else }} + {{- if .Values.useLatestContainers }} + image: {{ printf "%s:latest" .Values.webhookConfiguration.image.repository | quote }} + {{- else }} + image: {{ printf "%s:%s" .Values.webhookConfiguration.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.webhookConfiguration.image.tag) | quote }} + {{- end }} + {{- end }} + imagePullPolicy: {{ .Values.webhookConfiguration.image.pullPolicy }} + command: + - /server + args: + - --tls-crt-file=/secrets/tls.crt + - --tls-key-file=/secrets/tls.key + - --port=8443 + volumeMounts: + - name: secrets + mountPath: /secrets + readOnly: true + volumes: + - name: secrets + secret: + secretName: {{ .Values.webhookConfiguration.name }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.webhookConfiguration.allowOnControlPlane }} + tolerations: + {{- /* Allow this pod to run on the master. 
*/}} + - key: node-role.kubernetes.io/master + effect: NoSchedule + {{- end }} + {{- if or .Values.webhookConfiguration.linuxOnly .Values.webhookConfiguration.onlyOnControlPlane .Values.webhookConfiguration.nodeSelectors }} + nodeSelector: + {{- if .Values.webhookConfiguration.nodeSelectors }} + {{- toYaml .Values.webhookConfiguration.nodeSelectors | nindent 8 }} + {{- end }} + {{- if .Values.webhookConfiguration.linuxOnly }} + "kubernetes.io/os": linux + {{- end }} + {{- if .Values.webhookConfiguration.onlyOnControlPlane }} + node-role.kubernetes.io/master: "" + {{- end }} + {{- end }} + - apiVersion: v1 + kind: Service + metadata: + name: {{ .Values.webhookConfiguration.name }} + spec: + selector: + app: {{ .Values.webhookConfiguration.name }} + ports: + - name: http + port: 443 + targetPort: 8443 + - apiVersion: admissionregistration.k8s.io/v1 + kind: ValidatingWebhookConfiguration + metadata: + name: {{ .Values.webhookConfiguration.name }} + webhooks: + - name: {{ .Values.webhookConfiguration.name }}.{{ .Release.Namespace }}.svc + clientConfig: + service: + name: {{ .Values.webhookConfiguration.name }} + namespace: {{ .Release.Namespace }} + port: 443 + path: "/validate" + caBundle: {{ required "please rerun helm install" .Values.webhookConfiguration.caBundle }} + rules: + - operations: + - "CREATE" + - "UPDATE" + apiGroups: + - {{ .Values.crds.group }} + apiVersions: + - {{ .Values.crds.version }} + resources: + - "configurations" + scope: "*" + admissionReviewVersions: + - v1 + - v1beta1 + sideEffects: None +{{- end }} \ No newline at end of file diff --git a/deployment/helm/values.yaml b/deployment/helm/values.yaml index 1b9eb2a9a..cac130625 100644 --- a/deployment/helm/values.yaml +++ b/deployment/helm/values.yaml @@ -303,4 +303,30 @@ udev: # targetPort is the service targetPort of the instance service targetPort: 8083 # protocol is the service protocol of the instance service - protocol: TCP \ No newline at end of file + protocol: TCP + +# Admission 
Controllers (Webhooks) +webhookConfiguration: + # enabled defines whether to apply the Akri Admission Controller (Webhook) for Akri Configurations + enabled: false + # name of the webhook + name: akri-webhook-configuration + # base64-encoded CA certificate (PEM) used by Kubernetes to validate the Webhook's certificate + caBundle: null + image: + # repository is the Akri Webhook for Configurations image reference + repository: ghcr.io/deislabs/akri/webhook-configuration + tag: + # pullPolicy is the Akri Webhook pull policy + pullPolicy: Always + # onlyOnControlPlane dictates whether the Akri Webhook will only run on nodes with + # the label with (key, value) of ("node-role.kubernetes.io/master", "") + onlyOnControlPlane: false + # allowOnControlPlane dictates whether a toleration will be added to allow the Akri Webhook + # to run on the control plane node + allowOnControlPlane: true + # linuxOnly dictates whether the Akri Webhook will only run on a linux node + linuxOnly: true + # nodeSelectors is the array of nodeSelectors used to target nodes for the Akri Webhook to run on + # This can be set from the helm command line using `--set webhookConfiguration.nodeSelectors.label="value"` + nodeSelectors: {} diff --git a/samples/brokers/udev-video-broker/Cargo.toml b/samples/brokers/udev-video-broker/Cargo.toml index 27d8c51f2..7ae78c6fa 100644 --- a/samples/brokers/udev-video-broker/Cargo.toml +++ b/samples/brokers/udev-video-broker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "udev-video-broker" -version = "0.1.18" +version = "0.2.0" authors = ["Kate Goldenring ", ""] edition = "2018" diff --git a/shared/Cargo.toml b/shared/Cargo.toml index e6c4754ea..66b85db22 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-shared" -version = "0.1.18" +version = "0.2.0" authors = [""] edition = "2018" diff --git a/test/run-webhook.md b/test/run-webhook.md new file mode 100644 index 000000000..24d8d329a --- /dev/null +++ 
b/test/run-webhook.md @@ -0,0 +1,266 @@ +# Run Webhook End-to-End Tests + +File: `/test/run-webhook.py` + +Complements existing Python-based end-to-end test [script](/test/run-end-to-end.py) with a script to test Akri configured to use the Configuration Admission Controller Webhook ([README](/webhooks/validating/configuration/README.md)). + +The Webhook validates Akri Configurations, permitting (semantically) valid Configurations to be applied to a cluster and prohibiting (semantically) invalid Configurations. + +In order to create an end-to-end test including the Webhook: + +1. Akri (including the Webhook) is deployed to a cluster +1. A valid Configuration is applied and confirmed to have been applied by retrieval +1. An invalid Configuration is applied and confirmed to have been trapped by the Webhook by catching an (API) exception +1. The cluster is deleted. + +## ImagePullSecrets + +When running the script outside of the GitHub Actions workflow, you may need to configure the Kubernetes cluster to access a private registry, for example GitHub Container Registry (aka GHCR). The simplest way to authenticate to a private registry is to create a Secret (e.g. `${SECRET}`) containing the credentials in the Namespace(s) and configure Helm to reference the Secret when deploying Akri: `--set=imagePullSecrets[0].name=${SECRET}` + +## Configuration + +The Webhook requires a certificate and key. The certificate must correctly reference the Webhook's Kubernetes Service name through its Subject Alternate Name (SAN) configuration. + +The test includes 2 certificates (and their associated keys). Both require that the Webhook's name (`WEBHOOK_NAME`) be `akri-webhook-configuration`. + +The script is configured to use the first cert|key pair in the `default` namespace with a Service name: `akri-webhook-configuration.default.svc.cluster.local`. 
The second cert|key pair is for the `deleteme` namespace (see below) for Service name: `akri-webhook-configuration.deleteme.svc.cluster.local`. + +If you wish to use a different Webhook name or namespace, you will need to generate a new cert|key pair, then reconfigure the script using these and the CA. See [Generate Certificate|Key](#Generate-CertKey). + +The GitHub Actions workflow applies end-to-end tests to the test cluster's `default` namespace. This script permits non-`default` namespaces to be used, for example when testing the script locally. To further simplify this process and avoid having to create a certificate and key for the Webhook, a certificate is provided that works with a namespace named `deleteme`. If you would like to use `deleteme` instead of the `default` namespace: + ++ Ensure the namespace exists: `kubectl create namespace deleteme` ++ Update the script `namespace="deleteme"` ++ Replace the value of `CRT` and `KEY` in the script with those below + +```Python +# CRT|KEY defined (DNS) for `akri-webhook-configuration.deleteme.svc` +CRT = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURUVENDQWpXZ0F3SUJBZ0lRUW4zSVpvZStLby9DQnllSDBuaXJJVEFOQmdrcWhraUc5dzBCQVFzRkFEQU4KTVFzd0NRWURWUVFEREFKRFFUQWVGdzB5TVRBeU1URXhOek0zTVRGYUZ3MHlNakF5TVRFeE56TTNNVEZhTUFBdwpnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFERmxOZ3kxNWJjOG1EeURINk5QQVdQCnFmUGVUY0VCN2NQYjliaGNTYzVaK0F0V2FHWk8rM2RKb1pFdGkwN01lNW9qa3p4WkNLMk41NXcxL0k2SWR3K00KQzJKQlFtYitiR1lMMjJOdFhwdXQxMXpyVWpNM0t5emlZUkhxVE5iSWdITEREV2l4QWt0UG56TGVPZnp0UXlOSwpUTGNZTXpLT3hybkpyai9YWjhiU2RYRUNwakREM3BIVEdjcWVkQjdpWTB5ZVJ0MmJYMFI3MU9sMlJIaFkrUFdPCjhwb3N4STNQeUV4VW1LZU4vMDhpMSs4dWRLV0R0Mm4velNsRExKS2ZFTFJJZTI1T0kvOURldjlUWnZWeTVtYWcKR0RyZ0d4VlFlVG9XVFNMTXNZK3l6ODFudWhlTTRkUldKbGl0azRPbnFZdlpHcFVDQ3BFeGhPZkR6a1RKcElGbApBZ01CQUFHamdiVXdnYkl3RXdZRFZSMGxCQXd3Q2dZSUt3WUJCUVVIQXdFd0RBWURWUjBUQVFIL0JBSXdBREFmCkJnTlZIU01FR0RBV2dCUlVKd3FRQ3dHdUlQV0wrSVhDSjgrNlZjdk8yakJzQmdOVkhSRUJBZjhFWWpCZ2dpZGgKYTNKcExYZGxZbWh2YjJzdFkyOXVabWxuZFhKaGRHbHZiaTVrWld4bGRHVnRaUzV6ZG1PQ05XRnJjbWt0ZDJWaQphRzl2YXkxamIyNW1hV2QxY21GMGFXOXVMbVJsYkdWMFpXMWxMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFDTklGUnVHSHdjVnRWTXlhTEZqTW5BSktBQlNVL2hEOTlhTnJsRUU1aTQKRGkyeDExYUVFNVFkWS9RdnE3bXYzUk1RL2Y1NEZpYjVETURpSG50Z0F1ZHlTajZtT1pBUG1TMVFXTVo4QlhlOQphTzJMWVczYnBmQUIwSytFUkJ4NWRwdXBoYWZYR2hNR09VeGtMelNucUptS0lhSmF2V3JyYTV1cFd0dExDVDRpCjhFenNnb25ESzA5Si9WanBnYWhFUW1jMjBmcytHZ3QvNThEdmZuMSttMG4zNGVpakc1MWx4eVM3aWkwQi9WdkMKVE55WUYweWtSTWJrRWM5YzRkdHc5bnNiZHI5WFNIZFpFSFIxaDZUcnpldlRFQzlteU91UGw3V0tUaG1SVE5qWApkWGNTVkZtb1VpbDJDbGNxd001Q2c2TGd6Y0k4Zm10VlNVeVVGYmZwUkNYeQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" +KEY = 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBeFpUWU10ZVczUEpnOGd4K2pUd0ZqNm56M2szQkFlM0QyL1c0WEVuT1dmZ0xWbWhtClR2dDNTYUdSTFl0T3pIdWFJNU04V1FpdGplZWNOZnlPaUhjUGpBdGlRVUptL214bUM5dGpiVjZicmRkYzYxSXoKTnlzczRtRVI2a3pXeUlCeXd3MW9zUUpMVDU4eTNqbjg3VU1qU2t5M0dETXlqc2E1eWE0LzEyZkcwblZ4QXFZdwp3OTZSMHhuS25uUWU0bU5NbmtiZG0xOUVlOVRwZGtSNFdQajFqdkthTE1TTno4aE1WSmluamY5UEl0ZnZMblNsCmc3ZHAvODBwUXl5U254QzBTSHR1VGlQL1Ezci9VMmIxY3VabW9CZzY0QnNWVUhrNkZrMGl6TEdQc3MvTlo3b1gKak9IVVZpWllyWk9EcDZtTDJScVZBZ3FSTVlUbnc4NUV5YVNCWlFJREFRQUJBb0lCQVFEQ3NqMnBQQkNKZ0w1UApSa2llVy9zTzZtWkpOVTF2M1NBWGJEZFRtZGNoaU8rRElqVk90eldBOVJqZVRGeEYyN2EwUDY1RC9lMG4zSWR1Ckc0VklyQ3BCMGlYc01NYlZCM1EzVXVUVExWc3pIdm1OV2Q3bUNrR2NnaExwVXZhRGRTK2hUV0ZRcS9ZU2E4bncKZWl2bWtUWUJUVDlQTllRb2RXTTJmZUtqSEx3clBaaE9aTFlOdWQ5TDcxV3FQdEdXU2xRR0JUU2dwZnYrd2UrLwprWVNrRnd1MnZaYXdCa2c0ZHFCSWE1YUxVYmUvVlRmZW9EOFFlb1p3MlNKMkszNE04OElrakFsV1RYSDlYaU15CnZrYjgzYmxjRUVIVHE1L1JBWExMK1kxVkEzR0plYVVHVzB1WGJsWHVqRGFDWW0rQ2RZZ3gyNllWeUwvUnlmUDYKZ2hKSU9VS0JBb0dCQVArdC9TaU5sY1FhcCtkZ2t2Sy84cGl4NGxSZWU4QnBJZzNzaWg4a0NkNnozenlsaVIzQgpFcGNlMTFTTm8raVN2YjZGcDVLUTB1MnI2YlVJU25STWhyVkRLa1ljd0lqdE5acWx0S1ZFUjZ3OXFrVkVHeEdwClozZHprSElUclR4Kzh2MVM5dnFBVzc1U3FTNlNacDArUWZjOS9uVkZnM3ZWV05QTGlQb1VWNFk1QW9HQkFNWFUKT0Y4TUgyNHg2Tm5sWmtWWGRzeGZXSlgyamtFSGwzS3NuMlJVM3FtMXNucEpBaG14Y3BYTmE5VEdFWGRsNjdQYQo1QUxxU3NkbzVsZTFFRzhQbWppRHR2NmJ3ZWEzSHZDREFXblhTK3JwM2g2UkJaRGw1eU9ycmJ1TzZsT1FTc3hPCnF3a3ZuenFMQlNUWThKdGNQV0JZajhPaktZWmZxMTJLallPZmpEU05Bb0dCQUtTRWxoTlVGM3hhRXBRbFppamgKTGY3bTUxV1dmbGF1ejRUYUlYNHNPRldldEJSWUI4U25pWWpJQlpLWW1WRjdxckEvWERaSkRoQjB3Q3NHckxIcwowL2txd0xiZ3BWcjJGN25zeWpKVm56RExkUmFnM2pJZEtVQ0prZloxaHRFWWRzNWVaaUdHR29KNnVmWUhxaE9nCkRkNURlOHFGOGpicWJ2L0pSZGgwNG1TeEFvR0JBTU1DZHVzaXpSellNQndUS1NSem1wVE43RW92dUh6Y0dldWQKeEtXbmo3S2xmS0ZVdExCVkhvb1M3QmZiZzc0NkJ3WE5ZWFNLTmxxcHlsNXRDeDBmdVR1Nmd6b3FtaEp2TXgyTgpWbWhhSmVrVXpyTTg2OHF4Qm85QUhjdEVqekwraXUwcEl5cXorZmRBc1Rwb2E0NEtlQ293UXM5c1dIT3dmUUdCCm9ndzh5MzNGQW9HQkFNcFhXQkdJWkZpazk1amhkQ1A5aUdoZVA
zMTVLRlZ6NnVycGFtMWcxMmNlT28rNjJ6MUQKVHNscWlWZW1EOU92dVJ3aHFSVldzMkc3Sk95Nys4RFhWREhZd0xTOGRpZmNnVGtaakxnTHlDeHpzQ0xBbVpyTgp0Qkx1dWtoTVlCY2hOMHRLK05Pa05zdGVCZkIzR0RKdDhlUUV2WkRkT0plUlNweTMxNHNPQWppUgotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=" +``` + +## Explanation + +`/test/run-webhook.py` comprises 2 functions: `main` and `do_test`. + +The script uses functions (shared with `/test/run-end-to-end.py`) defined in `/test/shared_test_code.py` + +The script depends on [Python Client for Kubernetes](https://pypi.org/project/kubernetes/). This SDK is used by the script(s) to access Kubernetes cluster resources during the test. However, the shared functions use `sudo kubectl logs` through Python's `os.system` function to obtain logs. + +No SDK is used for Helm. Helm commands are effected through Python's `os.system` function. + +### `main` + +`main` determines the location of the Helm Chart and then assembles the correct `helm install` command to install Akri. In addition to Akri's `agent` and `controller`, the script configures Helm to include the Webhook. Like the `agent` and `controller`, the Webhook is configurable using Helm's `--set` flag to override Akri's Chart's `values.yaml` settings. Specifically, the Webhook is enabled (by default it is disabled), the name, defined by the constant `WEBHOOK_NAME` is used and the CA certificate used to sign the Webhook's certificate is given to the cluster so that it may validate the Webhook's certificate. + +> **NOTE** `WEBHOOK_NAME=akri-webhook-configuration` which is the default value defined in `values.yaml`. Although redundant, it is provided here to be more intentional. 
+ +```python +def get_webhook_helm_config() -> str: + webhook = "\ + --set=webhookConfiguration.enabled=true \ + --set=webhookConfiguration.name={name} \ + --set=webhookConfiguration.caBundle={cabundle} \ + ".format( + name=WEBHOOK_NAME, + cabundle=CA_BUNDLE, + ) + print("Webhook configuration:\n{}".format(webhook)) + return webhook +``` + +Once the Helm Chart is installed, the function calls `do_test`. Regardless of whether `do_test` succeeds, the Helm Chart is uninstalled|deleted and the script outputs any exception thrown by `do_test`. + +### `do_test` + +`do_test` shares some tests with `/test/run-end-to-end.py`, namely by checking whether Akri's CRDs (`Configuration`, `Instance`) were successfully created by the Helm Chart installation, and whether the deployment is in the correct state, namely whether there is an Akri Agent and an Akri Controller running. If both tests pass, the function proceeds. + +The Webhook is manifested as a Deployment that produces a ReplicaSet that manages a single Pod. `do_test` effects `kubectl describe` commands for each of these resources and outputs the results to stdout. + +Then, `do_test` applies a valid Configuration to the cluster. It does this using the Kubernetes SDK. First to apply (create) the Configuration and then to get the resource. It outputs the result to stdout before deleting the Configuration. + +Then, `do_test` applies an invalid Configuration to the cluster. The Configuration is syntactically correct but semantically incorrect; it is valid YAML but an invalid Configuration. Without the Webhook, the cluster will accept this Configuration. With the Webhook, the Configuration should be rejected. The test is similar to the test for a valid Configuration, except this time the function expects an API exception to be thrown by the Kubernetes API. + +The Webhook's logs are retrieved and persisted to `WEBHOOK_LOG_PATH = "/tmp/webhook_log.txt"`. When run under GitHub Actions, the workflow persists this log file. 
+ +## `subprocess` vs. `os` + +Python 3 recommends the `subprocess` module in preference to older functions such as `os.system`. The Webhook script uses `subprocess` rather than `os` because `subprocess` appears to work more cleanly with GitHub Actions and correctly placing stdout and stderr after the commands as they are run. Using `os` with GitHub Actions (as is done by `/test/shared_test_code.py`) causes the stdout (and stderr) to be displayed at the beginning of the workflow output. + +The Webhook Python script wraps `subprocess.run` in a function called `run`: + +```python +def run(command): + print("Executing: {}".format(command)) + result = subprocess.run(command, + shell=True, + capture_output=True, + text=True) + print("returncode: {}".format(result.returncode)) + if result.stdout: + print("stdout:") + print(result.stdout) + if result.stderr: + print("stderr:") + print(result.stderr) +``` + +## Generate Certificate|Key + +```bash +NAMESPACE="deleteme" +kubectl create namespace ${NAMESPACE} + +WEBHOOK="akri-webhook-configuration" # Default name if not provided + +# Generate CA +openssl req \ +-nodes \ +-new \ +-x509 \ +-keyout ./secrets/ca.key \ +-out ./secrets/ca.crt \ +-subj "/CN=CA" + +# Create Secret +kubectl create secret tls ca \ +--namespace=${NAMESPACE} \ +--cert=./secrets/ca.crt \ +--key=./secrets/ca.key + +# Create Issuer using this Secret +echo " +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: ca + namespace: ${NAMESPACE} +spec: + ca: + secretName: ca +" | kubectl apply --filename=- + +# Create Certificate using this CA +echo " +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: ${WEBHOOK} + namespace: ${NAMESPACE} +spec: + secretName: ${WEBHOOK} + duration: 8760h + renewBefore: 720h + isCA: false + privateKey: + algorithm: RSA + encoding: PKCS1 + size: 2048 + usages: + - server auth + dnsNames: + - ${WEBHOOK}.${NAMESPACE}.svc + - ${WEBHOOK}.${NAMESPACE}.svc.cluster.local + issuerRef: + name: ca + kind: Issuer + group: cert-manager.io +" | 
kubectl apply --filename=- + +# Check +kubectl get certificate/${WEBHOOK} --namespace=${NAMESPACE} + +# Delete Certificate (to stop Secret being recreated) +kubectl delete certificate/${WEBHOOK} --namespace=${NAMESPACE} + +# Retrieve cert-manager generated certificates and key +CRT=$(\ + kubectl get secret/${WEBHOOK} \ + --namespace=${NAMESPACE} \ + --output=jsonpath="{.data.tls\.crt}") && echo ${CRT} + +KEY=$(\ + kubectl get secret/${WEBHOOK} \ + --namespace=${NAMESPACE} \ + --output=jsonpath="{.data.tls\.key}") && echo ${KEY} + +CABUNDLE=$(\ + kubectl get secret/${WEBHOOK} \ + --namespace=${NAMESPACE} \ + --output=jsonpath="{.data.ca\.crt}") && echo ${CABUNDLE} +``` + +## Validate + +> **NOTE** Certificate is bound to `akri-webhook-configuration.default.svc` + +```bash +echo ${CRT} \ +| base64 --decode \ +| openssl x509 -in - -noout -text +``` + +Yields: + +```console +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + b4:6e:54:8d:2a:ad:ea:77:3f:30:8a:3b:00:da:7b:2b + Signature Algorithm: sha256WithRSAEncryption + Issuer: CN = CA + Validity + Not Before: Feb 9 19:06:58 2021 GMT + Not After : Feb 9 19:06:58 2022 GMT + Subject: + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:b8:bf:13:b7:44:db:c9:f6:22:d1:c9:06:4b:43: + db:56:8d:b0:e5:2f:e0:95:52:6a:47:ee:1a:04:64: + 03:66:30:54:c8:7f:1d:5a:24:b2:a7:3f:c8:4e:be: + 8b:7f:89:58:e8:d5:8f:5b:c8:c6:3c:80:b2:b6:dc: + c8:81:34:c1:66:78:55:40:17:e2:2d:6c:50:73:9e: + c3:ce:f9:aa:14:ff:b2:06:50:20:29:17:c0:e8:7e: + cd:93:c2:67:34:b5:26:96:88:2c:71:30:87:d9:47: + f7:e3:fa:36:a8:c8:9f:f4:1e:aa:e6:01:d6:ec:77: + 97:e3:e7:be:d1:dc:a2:c1:91:2a:12:86:ab:cd:6b: + 88:08:2e:bb:d9:ec:09:42:16:5e:28:82:1d:fc:9e: + 9d:cf:f9:38:e8:96:25:6e:63:ed:3b:cd:8b:51:64: + 75:f4:d7:04:cc:37:f6:24:31:eb:b6:31:e5:00:1a: + e5:b2:54:88:23:fd:a6:43:d9:ba:2c:30:ff:8f:cf: + e1:a3:24:28:f0:2a:4c:f2:08:9f:70:83:41:f7:ec: + 6d:01:d2:9c:4c:d3:15:ef:6c:b8:a9:55:75:95:47: + 
cd:34:5c:48:ff:8a:4f:49:3e:03:97:5d:e4:84:8f: + 30:a7:99:aa:99:92:19:a3:41:a3:88:59:96:ec:0c: + 7a:01 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Authority Key Identifier: + keyid:54:27:0A:90:0B:01:AE:20:F5:8B:F8:85:C2:27:CF:BA:55:CB:CE:DA + + X509v3 Subject Alternative Name: critical + DNS:akri-webhook-configuration.default.svc, DNS:akri-webhook-configuration.default.svc.cluster.local + Signature Algorithm: sha256WithRSAEncryption + 3a:b3:c6:0c:db:da:70:96:ef:08:f2:7f:80:fa:3f:ff:7d:ab: + 78:9c:0c:df:86:bf:ee:b8:08:9c:2f:79:41:a8:a5:8e:99:62: + 10:15:55:2c:b3:79:1c:1c:89:11:7f:6a:67:ca:bc:ad:88:9b: + 33:b5:4c:32:b2:09:79:98:f3:f9:c4:6f:bc:b1:62:83:6b:16: + 70:e1:f5:df:75:84:cc:18:91:e8:f1:78:36:58:59:62:00:c7: + 63:38:46:45:fb:c8:92:8a:33:e2:ea:9c:34:07:16:b7:69:da: + 88:14:2f:53:85:13:d9:80:e5:8a:29:d5:dd:76:e0:08:87:d3: + fd:d3:8c:3c:66:0b:75:cf:ab:35:05:f9:07:52:4f:b3:2d:25: + 65:23:43:9a:21:f9:6d:ce:3a:fd:0a:44:0d:f6:9c:7f:5f:82: + df:ee:95:76:e4:6f:ff:b7:07:b8:51:a7:a1:3e:ce:ca:b8:7f: + b8:75:e9:0d:23:dd:1e:8f:42:09:ef:4f:f0:cc:f4:0e:5c:0f: + 85:32:51:cf:81:ff:4e:b1:0b:3a:5b:ed:7a:75:7b:c2:0a:54: + f9:0a:f6:d3:2c:15:0e:a7:30:b1:52:b8:85:8b:1f:4f:8a:51: + f9:6e:90:03:87:04:3c:d9:df:46:02:da:4c:2f:23:06:6f:b1: + 9c:5e:cd:80 +``` diff --git a/test/run-webhook.py b/test/run-webhook.py new file mode 100755 index 000000000..732f8ae8b --- /dev/null +++ b/test/run-webhook.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 + +import shared_test_code +import os, subprocess + +from kubernetes import client, config +from kubernetes.client.rest import ApiException + +HELM_CHART_NAME = "akri" +NAMESPACE = "default" +WEBHOOK_NAME = "akri-webhook-configuration" +WEBHOOK_LOG_PATH = "/tmp/webhook_log.txt" + +# Required by Webhook +# DNS: `akri-webhook-configuration.default.svc` +# Expires: 09-Feb-2022 +CRT = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURURENDQWpTZ0F3SUJBZ0lSQUxSdVZJMHFyZXAzUHpDS093RGFleXN3RFFZSktvWklodmNOQVFFTEJRQXcKRFRFTE1Ba0dBMVVFQXd3Q1EwRXdIaGNOTWpFd01qQTVNVGt3TmpVNFdoY05Nakl3TWpBNU1Ua3dOalU0V2pBQQpNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVMOFR0MFRieWZZaTBja0dTMFBiClZvMnc1Uy9nbFZKcVIrNGFCR1FEWmpCVXlIOGRXaVN5cHovSVRyNkxmNGxZNk5XUFc4akdQSUN5dHR6SWdUVEIKWm5oVlFCZmlMV3hRYzU3RHp2bXFGUCt5QmxBZ0tSZkE2SDdOazhKbk5MVW1sb2dzY1RDSDJVZjM0L28ycU1pZgo5QjZxNWdIVzdIZVg0K2UrMGR5aXdaRXFFb2Fyeld1SUNDNjcyZXdKUWhaZUtJSWQvSjZkei9rNDZKWWxibVB0Ck84MkxVV1IxOU5jRXpEZjJKREhydGpIbEFCcmxzbFNJSS8ybVE5bTZMREQvajgvaG95UW84Q3BNOGdpZmNJTkIKOSt4dEFkS2NUTk1WNzJ5NHFWVjFsVWZOTkZ4SS80cFBTVDREbDEza2hJOHdwNW1xbVpJWm8wR2ppRm1XN0F4NgpBUUlEQVFBQm80R3pNSUd3TUJNR0ExVWRKUVFNTUFvR0NDc0dBUVVGQndNQk1Bd0dBMVVkRXdFQi93UUNNQUF3Ckh3WURWUjBqQkJnd0ZvQVVWQ2NLa0FzQnJpRDFpL2lGd2lmUHVsWEx6dG93YWdZRFZSMFJBUUgvQkdBd1hvSW0KWVd0eWFTMTNaV0pvYjI5ckxXTnZibVpwWjNWeVlYUnBiMjR1WkdWbVlYVnNkQzV6ZG1PQ05HRnJjbWt0ZDJWaQphRzl2YXkxamIyNW1hV2QxY21GMGFXOXVMbVJsWm1GMWJIUXVjM1pqTG1Oc2RYTjBaWEl1Ykc5allXd3dEUVlKCktvWklodmNOQVFFTEJRQURnZ0VCQURxenhnemIybkNXN3dqeWY0RDZQLzk5cTNpY0ROK0d2KzY0Q0p3dmVVR28KcFk2WlloQVZWU3l6ZVJ3Y2lSRi9hbWZLdksySW16TzFUREt5Q1htWTgvbkViN3l4WW9OckZuRGg5ZDkxaE13WQprZWp4ZURaWVdXSUF4Mk00UmtYN3lKS0tNK0xxbkRRSEZyZHAyb2dVTDFPRkU5bUE1WW9wMWQxMjRBaUgwLzNUCmpEeG1DM1hQcXpVRitRZFNUN010SldValE1b2grVzNPT3YwS1JBMzJuSDlmZ3QvdWxYYmtiLyszQjdoUnA2RSsKenNxNGY3aDE2UTBqM1I2UFFnbnZUL0RNOUE1Y0Q0VXlVYytCLzA2eEN6cGI3WHAxZThJS1ZQa0s5dE1zRlE2bgpNTEZTdUlXTEgwK0tVZmx1a0FPSEJEelozMFlDMmt3dkl3WnZzWnhlellBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" +KEY = 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdUw4VHQwVGJ5ZllpMGNrR1MwUGJWbzJ3NVMvZ2xWSnFSKzRhQkdRRFpqQlV5SDhkCldpU3lwei9JVHI2TGY0bFk2TldQVzhqR1BJQ3l0dHpJZ1RUQlpuaFZRQmZpTFd4UWM1N0R6dm1xRlAreUJsQWcKS1JmQTZIN05rOEpuTkxVbWxvZ3NjVENIMlVmMzQvbzJxTWlmOUI2cTVnSFc3SGVYNCtlKzBkeWl3WkVxRW9hcgp6V3VJQ0M2NzJld0pRaFplS0lJZC9KNmR6L2s0NkpZbGJtUHRPODJMVVdSMTlOY0V6RGYySkRIcnRqSGxBQnJsCnNsU0lJLzJtUTltNkxERC9qOC9ob3lRbzhDcE04Z2lmY0lOQjkreHRBZEtjVE5NVjcyeTRxVlYxbFVmTk5GeEkKLzRwUFNUNERsMTNraEk4d3A1bXFtWklabzBHamlGbVc3QXg2QVFJREFRQUJBb0lCQVFDZlJNTkhoUXFDTXpyVApacDJSZDE5NVg4KzMxYTJrclpkSWlhRk9WYmFFZTNnc0hVSDl1NU4xRWt5cWJpU3UvNFp4dStMS091MkRyV1BrCnQ3UDNoN2FQazMvVE1JUGhxdlkwcHhPaHRLVUhVMlJ6Z3RJbSt2NW9zU0NqbUw0R3Q0RWIxeXVSTFVpQWJrWHIKK1lMendYbjhLQkFuR0VEa1BUbnAxWmt4TFNmMi9LakZXcDV1eklqME1BbGFGdWczcy9nVm5tVVYxMTQrZEx0RgoyWGYrTGVnY1ZQUTlkbm5iQ0hsdXAvSEU5VlJaS25vS1RQR0wvNTdUUnJSaFFSRU93dVZ0NjhWRm1wOWdidzlaClQ1MndUc1N0UkRKblFYVVBQY2NyN0ppbng3TDVYSVYySFZ6UlpWcVRreVloRTJORkE2UGl1dHBJV2ZhenJZOUQKUHlpbXR2SzlBb0dCQVBDc051djNNTkladjh4bEE4SjJCdmFUU0llb0wyYUdOZy9tNUthTW5tOVJhK0NNYmx5bAoxbi9uZDFwQ045RTlQRERHdkp3LzF4dk9aN2ZQeGlNQmd6ZW9kWEk5Z3c2d3VJeGx2bW9HUDJxcW9mUmxNbSt6CitNMVYzbmZIVVl2Y0JFMlpGbWh0NVRqRU4yMmFnOEtPQ0FYZ2lmc1dsV3p4V1U4V1hKOXJIRHNMQW9HQkFNU0QKRXFoN1hSMjNaMXRsRlc1Ly9mT3ovUmNHTm5wTWRvVEZlemt2MVRqc3RjNWZYT0FVUHhGMXVNT0VLNHlsOVY0bQpEOEtKRGNkZ2sxclhTQ3dGakF2YlZOdEdjQ0dJay82S1NyWXJmRFVZTmM1MjFBa0VIZlpLUVJVYlRlZjg3c1hwCmVhZGtoUE1IT09acHN3RmxnTUs1aTl2cmF1a00yb21naXZZZ2RlYWpBb0dBS3MwRnEyczNoSFhOMVVTMXFYU2kKQW1IcENTOFExdlBSVTN5bGR6VVV6QWszM1NROFVEK3g2T2M2STVRWkp4M3p3VnptbUFjR2MweCt4NEtzNHZiVwo1aVFRVnZPM2hmcEpwN1pFYWNpWXFKaVYyc2ZRYzJzWE9UVW5MamdGT1pFME5yU2Q5bzVzc0c2OHlNSXM0b0d0CnpaWEVGQ0pOQ3FYVlV5cFA2STM4NUVjQ2dZQjlZUHVJajUwcmxwYlZVenRIVTFaZUpScDNsRGt4OHBNenh5UUYKcXFVcU9xME16UDllNE13VWdiMnUwU2RRQjVyenhPa05QNUNSQXVkQmNGWFY4SHdZSElxWmxPbDZHOEFCQ1k3OQpoK1Vwb3hiQmNrTjZ0U3ZBdGtPc0NjMjlGRDNyL0Rqb09sUXhFd3lVeGgrMTVtTXUybCtIb3o2RkR2Um9Gd3hTCldRZWdiUUtCZ0NHUmhBcUtORXJUQm5uNXlPWUFnVkJtdkxjQk4
xV3NEOVBGSDJXT3VJbUZWamtscTJyeFNBbW0KbTExWU9Yam1FWU9LTE1taEhwRVQ3MFJTeXd5UDcxa05ucjFjVVk5U2FXQmFxMS9sc2MxQkh4b05rYjJCNHVqVwo0L0ZUZ3JuUGlvRkNHT2IvaElCWXNRblh1VXp1ejJwKzBKdGJPblZodktGWDdJejV6Rm55Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" +CA_BUNDLE = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrekNDQWVPZ0F3SUJBZ0lVQmFHTGpXNFB6eE84S0RzS2dvRkRGbnk0WjZBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0RURUxNQWtHQTFVRUF3d0NRMEV3SGhjTk1qRXdNakEwTWpFd01qUXpXaGNOTWpFd016QTJNakV3TWpRegpXakFOTVFzd0NRWURWUVFEREFKRFFUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCCkFMWFpBNVkxc3FIU1pKS1RpTWh0aHJCbno2YzdzeTk1eFBHZzgwYm1rVXNlb3FmazU3dnJuRTE1NXZOOXlqVGkKYmFHRVZzR1prdjFKdnpaWFhST3hpOUNESlVOSXN5ZW5rdjdSbklIV3BCekZYTktwMlZIS1hGNERwV1BiOFJCcApndHNHaU9rYS83cHVYV3hqem5NRStiOUtvVVRYb1o4ek5XQURQSE9rNGFuUk11QmliUTNoNWdQbDArdWJRY0pQCnMvYUdVc21XdWNOOXlIV0kzYXAzY1NCeFloZUZQallDVnVMeitrMFVXaEFkQnlIWjVxaHNINWEvSUgzdGIwaWoKZ0RBM3FvWTVJZ3l0TEpzOXNiblpsTVBURW44SzFtbk9uOENqNlNaQ2ppTHZYMml5WkRKSjY0UEpYSHpUd2NzWAo5YTloUDl4aURpNjBFcDk3dDRuSHlla0NBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGRlFuQ3BBTEFhNGc5WXY0CmhjSW56N3BWeTg3YU1COEdBMVVkSXdRWU1CYUFGRlFuQ3BBTEFhNGc5WXY0aGNJbno3cFZ5ODdhTUE4R0ExVWQKRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLZUhNdjFXbkEweC8rM0dHNDBnNjZ6SQpYZlljQlRTWUszZCtRT2E0OGlZWjBENEdBSWhLNnpvYWxVcVpQSCs4U1g0Zy9GOS9OdktLano1MnJQRWNEM2FqClRkN2QralZzaVFLVVlVTnd2OFlBSllXaGZINGYzYjBCb1d5K3FOVEFLMW84ZHlBa3gyNDd4cGJOc1p2OWhkUzMKNUN6YlpXRE5LZXVpazdZcHNVMzJON25qRjVZOE4xMmhGbXNBNGlHSEZvTTAzK3QxU3Fsb1Q1NUp4YXpXTzJTdQpyUXF3dDRBM2RvTGorMlh2N0RyVjRBWGhDdzRidE82MytsUCtYd2ZocWs3ajM1SW9aVExFLzRiM1FFczcwMnl2CllpMXJ3bkNlSVF6L1AxYTNJc2UyS3R5OC9EWHNqMkhRUHpuZCt4L1ptS3ZVVFVwRzJNaXhYREtRT3pLR3Z5WT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + +SECRET = { + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": WEBHOOK_NAME, + "namespace": NAMESPACE, + }, + "type": "kubernetes.io/tls", + "data": { + "ca.crt": CA_BUNDLE, + "tls.crt": CRT, + "tls.key": KEY, + } +} + +GROUP = "akri.sh" +VERSION = "v0" +KIND = 
"Configuration" +NAME = "broker" + +RESOURCES = {"limits": {"{{PLACEHOLDER}}": "1"}} + +SERVICE = { + "type": "ClusterIP", + "ports": [{ + "name": "name", + "port": 0, + "targetPort": 0, + "protocol": "TCP" + }] +} + +TEMPLATE = { + "apiVersion": "{}/{}".format(GROUP, VERSION), + "kind": KIND, + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "" + }, + "creationTimestamp": "2021-01-01T00:00:00Z", + "generation": 1, + "managedFields": [], + "name": NAME, + "uid": "00000000-0000-0000-0000-000000000000" + }, + "spec": { + "protocol": { + "debugEcho": { + "descriptions": ["foo", "bar"], + "shared": True + } + }, + "brokerPodSpec": { + "containers": [{ + "name": "test-broker", + "image": "nginx:latest", + "imagePullPolicy": "Always", + }], + }, + "instanceServiceSpec": SERVICE, + "configurationServiceSpec": SERVICE, + "capacity": 1 + } +} + + +def main(): + print("End-to-end test using validating webhook") + + # If this is a PUSH, the test needs to wait for the new containers to be + # built/pushed. In this case, the workflow will set /tmp/sleep_duration.txt to + # the number of seconds to sleep. + # If this is a MANUALLY triggerd or a PULL-REQUEST, no new containers will + # be built/pushed, the workflows will not set /tmp/sleep_duration.txt and + # this test will execute immediately. 
+ shared_test_code.initial_sleep() + + # Webhook expects TLS-containing Secret (of the same name) mounted as a volume + kubeconfig_path = shared_test_code.get_kubeconfig_path() + print("Loading k8s config: {}".format(kubeconfig_path)) + config.load_kube_config(config_file=kubeconfig_path) + + print("Creating Secret: {namespace}/{name}".format(namespace=NAMESPACE, + name=WEBHOOK_NAME)) + client.CoreV1Api().create_namespaced_secret(body=SECRET, + namespace=NAMESPACE) + + # Update Helm and install this version's chart + os.system("helm repo update") + + # Get version of akri to test + test_version = shared_test_code.get_test_version() + print("Testing version: {}".format(test_version)) + + shared_test_code.major_version = "v" + test_version.split(".")[0] + print("Testing major version: {}".format(shared_test_code.major_version)) + + helm_chart_location = shared_test_code.get_helm_chart_location() + print("Get Akri Helm chart: {}".format(helm_chart_location)) + + cri_args = shared_test_code.get_cri_args() + print("Providing Akri Helm chart with CRI args: {}".format(cri_args)) + + extra_helm_args = shared_test_code.get_extra_helm_args() + print("Providing Akri Helm chart with extra helm args: {}".format( + extra_helm_args)) + + helm_install_command = "\ + helm install {chart_name} {location} \ + --namespace={namespace} \ + --set=agent.allowDebugEcho=true \ + {webhook_config} \ + {cri_args} \ + {helm_args} \ + --debug\ + ".format(chart_name=HELM_CHART_NAME, + location=helm_chart_location, + namespace=NAMESPACE, + webhook_config=get_webhook_helm_config(), + cri_args=cri_args, + helm_args=extra_helm_args) + print("Helm command: {}".format(helm_install_command)) + os.system(helm_install_command) + + res = False + try: + res = do_test() + except Exception as e: + print(e) + res = False + finally: + # Best effort cleanup work + try: + # Save Agent and controller logs + shared_test_code.save_agent_and_controller_logs( + namespace=NAMESPACE) + finally: + # Delete akri and check 
that controller and Agent pods deleted + os.system("\ + helm delete {chart_name} \ + --namespace={namespace}\ + ".format( + chart_name=HELM_CHART_NAME, + namespace=NAMESPACE, + )) + # Delete Webhook Secret + client.CoreV1Api().delete_namespaced_secret(name=WEBHOOK_NAME, + namespace=NAMESPACE) + if res: + # Only test cleanup if the test has succeeded up to now + if not shared_test_code.check_akri_state( + 0, 0, 0, 0, 0, 0, namespace=NAMESPACE): + print( + "Akri not running in expected state after helm delete") + raise RuntimeError("Scenario Failed") + + if not res: + raise RuntimeError("Scenario Failed") + + +def do_test() -> bool: + kubeconfig_path = shared_test_code.get_kubeconfig_path() + print("Loading k8s config: {}".format(kubeconfig_path)) + config.load_kube_config(config_file=kubeconfig_path) + + # Get kubectl command + kubectl_cmd = shared_test_code.get_kubectl_command() + + # Ensure Helm Akri installation applied CRDs and set up Agent and Controller + print("Checking for CRDs") + if not shared_test_code.crds_applied(): + print("CRDs not applied by helm chart") + return False + + print("Checking for initial Akri state") + + if not shared_test_code.check_akri_state(1, 1, 0, 0, 0, 0): + print("Akri not running in expected state") + run("sudo {kubectl} get pods,services,akric,akrii --show-labels". 
+ format(kubectl=kubectl_cmd)) + return False + + # Enumerate Webhook resources + print("Debugging:") + + print("Deployment:") + run("sudo {kubectl} describe deployment/{service}\ + --namespace={namespace}".format(kubectl=kubectl_cmd, + service=WEBHOOK_NAME, + namespace=NAMESPACE)) + + print("ReplicaSet:") + run("sudo {kubectl} describe replicaset \ + --selector=app={service} \ + --namespace={namespace}".format(kubectl=kubectl_cmd, + service=WEBHOOK_NAME, + namespace=NAMESPACE)) + + print("Pod:") + run("sudo {kubectl} describe pod \ + --selector=app={service} \ + --namespace={namespace}".format(kubectl=kubectl_cmd, + service=WEBHOOK_NAME, + namespace=NAMESPACE)) + + # Apply Valid Akri Configuration + print("Applying Valid Akri Configuration") + + # Use the template and place resources in the correct location + body = TEMPLATE + body["spec"]["brokerPodSpec"]["containers"][0]["resources"] = RESOURCES + + api = client.CustomObjectsApi() + api.create_namespaced_custom_object(group=GROUP, + version=VERSION, + namespace=NAMESPACE, + plural="configurations", + body=body) + + # Check + print("Retrieving Akri Configuration") + akri_config = api.get_namespaced_custom_object(group=GROUP, + version=VERSION, + name=NAME, + namespace=NAMESPACE, + plural="configurations") + print(akri_config) + + # Delete + api.delete_namespaced_custom_object( + group=GROUP, + version=VERSION, + name=NAME, + namespace=NAMESPACE, + plural="configurations", + body=client.V1DeleteOptions(), + ) + + # Apply Invalid Akri Configuration + res = False + try: + print("Applying Invalid (!) Akri Configuration") + + # Use the template but(!) 
place resources in an incorrect location + body = TEMPLATE + body["spec"]["brokerPodSpec"]["resources"] = RESOURCES + + api.create_namespaced_custom_object(group=GROUP, + version=VERSION, + namespace=NAMESPACE, + plural="configurations", + body=body) + except ApiException as e: + print( + "As expected, Invalid Akri Configuration generates API Exception") + print("Status Code: {} [{}]", e.status, e.reason) + print("Response: {}".format(e.body)) + res = True + else: + print("Expected APIException but none was thrown. This is an error!") + + # Debugging: check the Webhook's logs + print("Webhook logs") + run("sudo {kubectl} logs deployment/{service} --namespace={namespace}". + format(kubectl=kubectl_cmd, + service=WEBHOOK_NAME, + namespace=NAMESPACE)) + + res = False + + # Save Webhook logs + run("{kubectl} logs deployment/{service} --namespace={namespace} >> {file}" + .format(kubectl=kubectl_cmd, + service=WEBHOOK_NAME, + namespace=NAMESPACE, + file=WEBHOOK_LOG_PATH)) + + print("Akri Validating Webhook test: {}".format( + "Success" if res else "Failure")) + return res + + +def get_webhook_helm_config() -> str: + webhook = "\ + --set=webhookConfiguration.enabled=true \ + --set=webhookConfiguration.name={name} \ + --set=webhookConfiguration.caBundle={cabundle} \ + ".format( + name=WEBHOOK_NAME, + cabundle=CA_BUNDLE, + ) + print("Webhook configuration:\n{}".format(webhook)) + return webhook + + +def run(command): + print("Executing: {}".format(command)) + result = subprocess.run(command, + shell=True, + capture_output=True, + text=True) + print("returncode: {}".format(result.returncode)) + if result.stdout: + print("stdout:") + print(result.stdout) + if result.stderr: + print("stderr:") + print(result.stderr) + + +if __name__ == "__main__": + main() diff --git a/test/shared_test_code.py b/test/shared_test_code.py index d8eaa1d3f..b524b8d44 100644 --- a/test/shared_test_code.py +++ b/test/shared_test_code.py @@ -32,50 +32,66 @@ agent_pod_name = "" controller_pod_name = 
"" + def get_helm_chart_location(): # Get helm chart location passed in helm install command (i.e. `repo/chart --version X.Y.Z` or `./deployment/helm`) return open(HELM_CHART_LOCATION, "r").readline().rstrip() + def get_extra_helm_args(): # Get any extra helm args passed from workflow if os.path.exists(EXTRA_HELM_ARGS_FILE): return open(EXTRA_HELM_ARGS_FILE, "r").readline().rstrip() return "" + def initial_sleep(): # Sleep for amount of time specified in SLEEP_DURATION_FILE else don't sleep at all if os.path.exists(SLEEP_DURATION_FILE): - initial_sleep_duration = open(SLEEP_DURATION_FILE, "r").readline().rstrip() + initial_sleep_duration = open(SLEEP_DURATION_FILE, + "r").readline().rstrip() print("Sleeping for {} seconds".format(initial_sleep_duration)) time.sleep(int(initial_sleep_duration)) print("Done sleeping") + def helm_update(): # Update Helm and install this version's chart os.system("helm repo update") + def get_kubeconfig_path(): # Get kubeconfig path return open(KUBE_CONFIG_PATH_FILE, "r").readline().rstrip() + def get_kubectl_command(): # Get kubectl command return open(RUNTIME_COMMAND_FILE, "r").readline().rstrip() + def get_cri_args(): # Get CRI args for Akri Helm return open(HELM_CRI_ARGS_FILE, "r").readline().rstrip() + def get_test_version(): # Get version of akri to test if os.path.exists(VERSION_FILE): return open(VERSION_FILE, "r").readline().rstrip() - return open("version.txt", "r").readline().rstrip() + return open("version.txt", "r").readline().rstrip() + -def save_agent_and_controller_logs(): +def save_agent_and_controller_logs(namespace="default"): kubectl_cmd = get_kubectl_command() - os.system("sudo {} logs {} >> {}".format(kubectl_cmd, agent_pod_name, AGENT_LOG_PATH)) - os.system("sudo {} logs {} >> {}".format(kubectl_cmd, controller_pod_name, CONTROLLER_LOG_PATH)) + os.system("{} logs {} --namespace={} >> {}".format(kubectl_cmd, + agent_pod_name, + namespace, + AGENT_LOG_PATH)) + os.system("{} logs {} --namespace={} >> 
{}".format(kubectl_cmd, + controller_pod_name, + namespace, + CONTROLLER_LOG_PATH)) def crds_applied(): @@ -84,18 +100,26 @@ def crds_applied(): for x in range(5): if x != 0: time.sleep(SLEEP_INTERVAL) - current_crds = [x["spec"]["names"]["kind"].lower() for x in v1_ext.list_custom_resource_definition().to_dict()['items']] + current_crds = [ + x["spec"]["names"]["kind"].lower() for x in + v1_ext.list_custom_resource_definition().to_dict()['items'] + ] if "configuration" in current_crds and "instance" in current_crds: return True return False + def check_pods_running(v1, pod_label_selector, count): - print("Checking number of pods [{}] ... expected {}".format(pod_label_selector, count)) + print("Checking number of pods [{}] ... expected {}".format( + pod_label_selector, count)) for x in range(30): if x != 0: time.sleep(SLEEP_INTERVAL) - print("Sleep iteration {} ... been waiting for {} seconds for pod check".format(x+1, (x+1)*SLEEP_INTERVAL)) - pods = v1.list_pod_for_all_namespaces(label_selector=pod_label_selector).items + print( + "Sleep iteration {} ... been waiting for {} seconds for pod check" + .format(x + 1, (x + 1) * SLEEP_INTERVAL)) + pods = v1.list_pod_for_all_namespaces( + label_selector=pod_label_selector).items print("Found {} pods".format(len(pods))) if count == 0: # Expectation is that no pods are running @@ -118,16 +142,22 @@ def check_pods_running(v1, pod_label_selector, count): all_running = False break if all_running: return True - print("Wrong number of pods [{}] found ... expected {}".format(pod_label_selector, count)) + print("Wrong number of pods [{}] found ... expected {}".format( + pod_label_selector, count)) return False + def check_svcs_running(v1, svc_label_selector, count): - print("Checking number of svcs [{}] ... expected {}".format(svc_label_selector, count)) + print("Checking number of svcs [{}] ... 
expected {}".format( + svc_label_selector, count)) for x in range(30): if x != 0: time.sleep(SLEEP_INTERVAL) - print("Sleep iteration {} ... been waiting for {} seconds for svc check".format(x+1, (x+1)*SLEEP_INTERVAL)) - svcs = v1.list_service_for_all_namespaces(label_selector=svc_label_selector).items + print( + "Sleep iteration {} ... been waiting for {} seconds for svc check" + .format(x + 1, (x + 1) * SLEEP_INTERVAL)) + svcs = v1.list_service_for_all_namespaces( + label_selector=svc_label_selector).items print("Found {} pods".format(len(svcs))) if count == 0: # Expectation is that no svcs are running @@ -137,44 +167,62 @@ def check_svcs_running(v1, svc_label_selector, count): # Expectation is that `count` svcs are running if len(svcs) == count: return True - print("Wrong number of services [{}] found ... expected {}".format(svc_label_selector, count)) + print("Wrong number of services [{}] found ... expected {}".format( + svc_label_selector, count)) return False + def get_pod_name(pod_label_selector, index): v1 = client.CoreV1Api() print("Getting pod name [{}]".format(pod_label_selector)) - pods = v1.list_pod_for_all_namespaces(label_selector=pod_label_selector).items + pods = v1.list_pod_for_all_namespaces( + label_selector=pod_label_selector).items if len(pods) >= index: if pods[index].status.phase == "Running": return pods[index].metadata.name return "" + def get_running_pod_names_and_uids(pod_label_selector): v1 = client.CoreV1Api() map = {} print("Getting pod name [{}]".format(pod_label_selector)) - pods = v1.list_pod_for_all_namespaces(label_selector=pod_label_selector).items + pods = v1.list_pod_for_all_namespaces( + label_selector=pod_label_selector).items for pod in pods: if pod.status.phase == "Running": map[pod.metadata.name] = pod.metadata.uid return map -def check_instance_count(count): - print("Checking for instances ... 
version:{} count:{}".format(major_version, count)) + +def check_instance_count(count, namespace="default"): + print("Checking for instances ... version:{} count:{}".format( + major_version, count)) + if count == 0: + return True + api_instance = client.CustomObjectsApi() for x in range(20): if x != 0: time.sleep(SLEEP_INTERVAL) - print("Sleep iteration {} ... been waiting for {} seconds for instances".format(x+1, (x+1)*SLEEP_INTERVAL)) - instances = api_instance.list_namespaced_custom_object(GROUP, major_version, "default", "instances")['items'] + print( + "Sleep iteration {} ... been waiting for {} seconds for instances". + format(x + 1, (x + 1) * SLEEP_INTERVAL)) + instances = api_instance.list_namespaced_custom_object( + group=GROUP, + version=major_version, + namespace=namespace, + plural="instances")['items'] if len(instances) == count: return True return False + def check_agent_pods_state(v1, agents): global agent_pod_name print("Checking for agent pods ... expected {}".format(agents)) - agents_check_failed = check_pods_running(v1, AGENT_POD_LABEL_SELECTOR, agents) + agents_check_failed = check_pods_running(v1, AGENT_POD_LABEL_SELECTOR, + agents) if not agents_check_failed: print("Wrong number of agents found ... expected {}".format(agents)) else: @@ -186,49 +234,69 @@ def check_agent_pods_state(v1, agents): return agents_check_failed + def check_controller_pods_state(v1, controllers): global controller_pod_name print("Checking for controller pods ... expected {}".format(controllers)) - controllers_check_failed = check_pods_running(v1, CONTROLLER_POD_LABEL_SELECTOR, controllers) + controllers_check_failed = check_pods_running( + v1, CONTROLLER_POD_LABEL_SELECTOR, controllers) if not controllers_check_failed: - print("Wrong number of controllers found ... expected {}".format(controllers)) + print("Wrong number of controllers found ... 
expected {}".format( + controllers)) else: if controllers == 1: - controller_pod_name = get_pod_name(CONTROLLER_POD_LABEL_SELECTOR, 0) + controller_pod_name = get_pod_name(CONTROLLER_POD_LABEL_SELECTOR, + 0) if controller_pod_name == "": print("Controller pod name not found") return False return controllers_check_failed + def check_broker_pods_state(v1, brokers): print("Checking for broker pods ... expected {}".format(brokers)) - brokers_check_failed = check_pods_running(v1, BROKER_POD_LABEL_SELECTOR, brokers) + brokers_check_failed = check_pods_running(v1, BROKER_POD_LABEL_SELECTOR, + brokers) if not brokers_check_failed: print("Wrong number of brokers found ... expected {}".format(brokers)) return brokers_check_failed -def check_config_svcs_state(v1, count): + +def check_config_svcs_state(v1, count: int): print("Checking for configuration services ... expected {}".format(count)) - config_svcs_check_failed = check_svcs_running(v1, CONFIGURATION_SVC_LABEL_SELECTOR, count) + config_svcs_check_failed = check_svcs_running( + v1, CONFIGURATION_SVC_LABEL_SELECTOR, count) if not config_svcs_check_failed: - print("Wrong number of configuration services found ... expected {}".format(count)) + print("Wrong number of configuration services found ... expected {}". + format(count)) return config_svcs_check_failed -def check_instance_svcs_state(v1, count): + +def check_instance_svcs_state(v1, count: int): print("Checking for instance services ... expected {}".format(count)) - instance_svcs_check_failed = check_svcs_running(v1, INSTANCE_SVC_LABEL_SELECTOR, count) + instance_svcs_check_failed = check_svcs_running( + v1, INSTANCE_SVC_LABEL_SELECTOR, count) if not instance_svcs_check_failed: print("Wrong number of brokers found ... expected {}".format(count)) return instance_svcs_check_failed -def check_akri_state(agents, controllers, instances, brokers, config_svcs, instance_svcs): - print("Checking for Akri state ... 
expected agent(s):{}, controller(s):{}, instance(s):{}, broker(s):{}, config service(s):{}, and instance service(s):{} to exist".format(agents, controllers, instances, brokers, config_svcs, instance_svcs)) + +def check_akri_state(agents, + controllers, + instances, + brokers, + config_svcs, + instance_svcs, + namespace="default"): + print( + "Checking for Akri state ... expected agent(s):{}, controller(s):{}, instance(s):{}, broker(s):{}, config service(s):{}, and instance service(s):{} to exist" + .format(agents, controllers, instances, brokers, config_svcs, + instance_svcs)) v1 = client.CoreV1Api() return check_agent_pods_state(v1, agents) and \ - check_controller_pods_state(v1, controllers) and \ - check_instance_count(instances) and \ - check_broker_pods_state(v1, brokers) and \ - check_config_svcs_state(v1, config_svcs) and \ - check_instance_svcs_state(v1, instance_svcs) - + check_controller_pods_state(v1, controllers) and \ + check_instance_count(instances, namespace) and \ + check_broker_pods_state(v1, brokers) and \ + check_config_svcs_state(v1, config_svcs) and \ + check_instance_svcs_state(v1, instance_svcs) diff --git a/version.sh b/version.sh index ba38074e3..a867b5fd3 100755 --- a/version.sh +++ b/version.sh @@ -124,7 +124,7 @@ if [ "$CHECK" == "1" ]; then echo " Verified format: $BASEDIR/version.txt" fi - CARGO_FILES="$BASEDIR/shared/Cargo.toml $BASEDIR/controller/Cargo.toml $BASEDIR/agent/Cargo.toml $BASEDIR/samples/brokers/udev-video-broker/Cargo.toml" + CARGO_FILES="$BASEDIR/shared/Cargo.toml $BASEDIR/controller/Cargo.toml $BASEDIR/agent/Cargo.toml $BASEDIR/samples/brokers/udev-video-broker/Cargo.toml $BASEDIR/webhooks/validating/configuration/Cargo.toml" TOML_VERSION_PATTERN="^version" TOML_VERSION="\"$(echo $VERSION)\"" for CARGO_FILE in $CARGO_FILES @@ -187,7 +187,7 @@ then fi echo "Updating to version: $NEW_VERSION" - CARGO_FILES="$BASEDIR/shared/Cargo.toml $BASEDIR/controller/Cargo.toml $BASEDIR/agent/Cargo.toml 
$BASEDIR/samples/brokers/udev-video-broker/Cargo.toml" + CARGO_FILES="$BASEDIR/shared/Cargo.toml $BASEDIR/controller/Cargo.toml $BASEDIR/agent/Cargo.toml $BASEDIR/samples/brokers/udev-video-broker/Cargo.toml $BASEDIR/webhooks/validating/configuration/Cargo.toml" TOML_VERSION_PATTERN="^version = .*" TOML_VERSION_LINE="version = \"$NEW_VERSION\"" for CARGO_FILE in $CARGO_FILES diff --git a/version.txt b/version.txt index 44905e76e..0ea3a944b 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.1.18 \ No newline at end of file +0.2.0 diff --git a/webhooks/validating/configuration/Cargo.toml b/webhooks/validating/configuration/Cargo.toml new file mode 100644 index 000000000..a9fd5d645 --- /dev/null +++ b/webhooks/validating/configuration/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "webhook-configuration" +version = "0.2.0" +authors = ["DazWilkin "] +edition = "2018" + +[dependencies] +actix = "0.10.0" +actix-web = { version = "3.3.2", features = ["openssl"] } +actix-rt = "1.1.1" +akri-shared = { path = "../../../shared" } +clap = "3.0.0-beta.2" +k8s-openapi = { version = "0.6.0", features = ["v1_16"] } +kube = { version = "0.23.0", features = ["openapi"] } +openapi = { git = "https://github.com/DazWilkin/openapi-admission-v1", tag = "v1.1.0" } +openssl = "0.10" +rustls = "0.18.0" +serde = { version = "1.0.118", features = ["derive"] } +serde_json = "1.0.61" diff --git a/webhooks/validating/configuration/README.md b/webhooks/validating/configuration/README.md new file mode 100644 index 000000000..433c67b66 --- /dev/null +++ b/webhooks/validating/configuration/README.md @@ -0,0 +1,69 @@ +# Akri Admission Controller (Webhook) for validating Akri Configurations + +This Admission Controller (Webhook) validates Akri Configuration files. + +The HTTP service that implements the Webhook must be configured to use TLS. 
The Webhook expects its TLS certificate and private key to be stored within a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets). + +It is recommended to use [`cert-manager`](https://cert-manager.io) in Kubernetes. `cert-manager` makes it easy to generate TLS certificates and private keys and, because it's a Kubernetes-native app, `cert-manager` stores these in Kubernetes Secrets. You may use a self-signed (!) CA with `cert-manager` and certificates signed by this CA will work with the Webhook. + +If you wish to install the Webhook, before installing the Helm Chart for Akri, you will need to have PEM-encoded versions of CA certificate, Webhook certificate and private key. The Webhook handler expects a Secret, with the same name (!), containing its certificate and private key, to exist in the Namespace where it will be deployed. + +If you're using `cert-manager` and have an `Issuer` called `ca`, you may generate a Secret for a Webhook called `${WEBHOOK}` in Namespace `${NAMESPACE}` with the following commands: + +```bash +WEBHOOK="akri-webhook-configuration" # Default name if not provided +NAMESPACE="default" + +echo " +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: ${WEBHOOK} + namespace: ${NAMESPACE} +spec: + secretName: ${WEBHOOK} + duration: 8760h + renewBefore: 720h + isCA: false + privateKey: + algorithm: RSA + encoding: PKCS1 + size: 2048 + usages: + - server auth + dnsNames: + - ${WEBHOOK}.${NAMESPACE}.svc + - ${WEBHOOK}.${NAMESPACE}.svc.cluster.local + issuerRef: + name: ca + kind: Issuer + group: cert-manager.io +" | kubectl apply --filename=- +``` + +> **NOTE** You must provide the above with a `${NAMESPACE}` even if the value is `default` so that it may construct qualified DNS for the Webhook Service. + +When Kubernetes is configured to use the Webhook, it requires the base64-encoded PEM certificate of the CA. 
The CA certificate may be obtained from the Webhook's certificate using: + +```bash +CABUNDLE=$(\ + kubectl get secret/${WEBHOOK} \ + --namespace=${NAMESPACE} \ + --output=jsonpath="{.data.ca\.crt}") && echo ${CABUNDLE} +``` + +Now you may proceed to install the Helm Chart for Akri, enabling the Webhook and providing the `CABUNDLE`: + +```bash +WEBHOOK=... +NAMESPACE=... +CABUNDLE=... + +helm install webhook akri-helm-charts/akri-dev \ +--namespace=${NAMESPACE} \ +--set=webhookConfiguration.enabled=true \ +--set=webhookConfiguration.name=${WEBHOOK} \ +--set=webhookConfiguration.caBundle=${CABUNDLE} \ +--set=webhookConfiguration.image.repository=ghcr.io/deislabs/akri/webhook-configuration \ +--set=webhookConfiguration.image.tag=v1 +``` diff --git a/webhooks/validating/configuration/src/main.rs b/webhooks/validating/configuration/src/main.rs new file mode 100644 index 000000000..c074bd818 --- /dev/null +++ b/webhooks/validating/configuration/src/main.rs @@ -0,0 +1,767 @@ +use actix_web::{post, web, App, HttpResponse, HttpServer, Responder}; +use akri_shared::akri::configuration::KubeAkriConfig; +use clap::Arg; +use k8s_openapi::apimachinery::pkg::runtime::RawExtension; +use openapi::models::{ + V1AdmissionRequest as AdmissionRequest, V1AdmissionResponse as AdmissionResponse, + V1AdmissionReview as AdmissionReview, V1Status as Status, +}; +use openssl::ssl::{SslAcceptor, SslAcceptorBuilder, SslFiletype, SslMethod}; +use serde_json::{json, Value}; + +fn get_builder(key: &str, crt: &str) -> SslAcceptorBuilder { + let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); + builder.set_private_key_file(key, SslFiletype::PEM).unwrap(); + builder.set_certificate_chain_file(crt).unwrap(); + + builder +} +fn check( + v: &serde_json::Value, + deserialized: &serde_json::Value, +) -> Result<(), Box> { + if v != &serde_json::Value::Null && deserialized == &serde_json::Value::Null { + return Err(None.ok_or(format!("no matching value in `deserialized`"))?); + } 
+ + match v { + serde_json::Value::Object(o) => { + for (key, value) in o { + if let Err(e) = check(&value, &deserialized[key]) { + return Err(None.ok_or(format!( + "input key ({:?}) not equal to parsed: ({:?})", + key, e + ))?); + } + } + Ok(()) + } + serde_json::Value::Array(s) => { + for (pos, _e) in s.iter().enumerate() { + if let Err(e) = check(&s[pos], &deserialized[pos]) { + return Err(None.ok_or(format!( + "input index ({:?}) not equal to parsed: ({:?})", + pos, e + ))?); + } + } + Ok(()) + } + serde_json::Value::String(s) => match deserialized { + serde_json::Value::String(ds) => { + if s != ds { + Err(None.ok_or(format!("input ({:?}) not equal to parsed ({:?})", s, ds))?) + } else { + Ok(()) + } + } + _ => Err(None.ok_or(format!( + "input ({:?}) not equal to parsed ({:?})", + s, deserialized + ))?), + }, + serde_json::Value::Bool(b) => match deserialized { + serde_json::Value::Bool(db) => { + if b != db { + Err(None.ok_or(format!("input ({:?}) not equal to parsed ({:?})", b, db))?) + } else { + Ok(()) + } + } + _ => Err(None.ok_or(format!( + "input ({:?}) not equal to parsed ({:?})", + b, deserialized + ))?), + }, + serde_json::Value::Number(n) => match deserialized { + serde_json::Value::Number(dn) => { + if n != dn { + Err(None.ok_or(format!("input ({:?}) not equal to parsed ({:?})", n, dn))?) 
+ } else { + Ok(()) + } + } + _ => Err(None.ok_or(format!( + "input ({:?}) not equal to parsed ({:?})", + n, deserialized + ))?), + }, + serde_json::Value::Null => match deserialized { + serde_json::Value::Null => Ok(()), + _ => Err(None.ok_or(format!( + "input (Null) not equal to parsed ({:?})", + deserialized + ))?), + }, + } +} + +fn filter_configuration(mut v: Value) -> Value { + let metadata = v["metadata"].as_object_mut().unwrap(); + metadata.remove("creationTimestamp"); + metadata.remove("deletionTimestamp"); + metadata.remove("managedFields"); + + let generation = metadata.get_mut("generation").unwrap(); + *generation = json!(generation.as_f64().unwrap()); + + v +} +fn validate_configuration(rqst: &AdmissionRequest) -> AdmissionResponse { + println!("Validating Configuration"); + match &rqst.object { + Some(raw) => { + let x: RawExtension = serde_json::from_value(raw.clone()) + .expect("Could not parse as Kubernetes RawExtension"); + let y = serde_json::to_string(&x).unwrap(); + let c: KubeAkriConfig = + serde_json::from_str(y.as_str()).expect("Could not parse as Akri Configuration"); + let reserialized = serde_json::to_string(&c).unwrap(); + let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); + + let v: Value = filter_configuration(raw.clone()); + + // Do they match? 
+ match check(&v, &deserialized) { + Ok(_) => AdmissionResponse::new(true, rqst.uid.to_owned()), + Err(e) => AdmissionResponse { + allowed: false, + audit_annotations: None, + patch: None, + patch_type: None, + status: Some(Status { + api_version: None, + code: None, + details: None, + kind: None, + message: Some(e.to_string()), + metadata: None, + reason: None, + status: None, + }), + uid: rqst.uid.to_owned(), + warnings: None, + }, + } + } + None => AdmissionResponse { + allowed: false, + audit_annotations: None, + patch: None, + patch_type: None, + status: Some(Status { + api_version: None, + code: None, + details: None, + kind: None, + message: Some("AdmissionRequest object contains no data".to_owned()), + metadata: None, + reason: None, + status: None, + }), + uid: rqst.uid.to_owned(), + warnings: None, + }, + } +} + +#[post("/validate")] +async fn validate(rqst: web::Json) -> impl Responder { + println!("Handler invoked"); + match &rqst.request { + Some(rqst) => { + println!("Handler received: AdmissionRequest"); + let resp = validate_configuration(&rqst); + let resp: AdmissionReview = AdmissionReview { + api_version: Some("admission.k8s.io/v1".to_owned()), + kind: Some("AdmissionReview".to_owned()), + request: None, + response: Some(resp), + }; + let body = serde_json::to_string(&resp).expect("Valid AdmissionReview"); + return HttpResponse::Ok().body(body); + } + None => { + println!("Handler received: Nothing"); + return HttpResponse::BadRequest().body(""); + } + } +} + +#[actix_web::main] +async fn main() -> std::io::Result<()> { + let matches = clap::App::new("Akri Webhook") + .arg( + Arg::new("crt_file") + .long("tls-crt-file") + .takes_value(true) + .required(true) + .about("TLS certificate file"), + ) + .arg( + Arg::new("key_file") + .long("tls-key-file") + .takes_value(true) + .required(true) + .about("TLS private key file"), + ) + .arg( + Arg::new("port") + .long("port") + .takes_value(true) + .required(true) + .about("port"), + ) + .get_matches(); + 
+ let crt_file = matches.value_of("crt_file").expect("TLS certificate file"); + let key_file = matches.value_of("key_file").expect("TLS private key file"); + + let port = matches + .value_of("port") + .unwrap_or("8443") + .parse::() + .expect("valid port [0-65535]"); + + let endpoint = format!("0.0.0.0:{}", port); + println!("Started Webhook server: {}", endpoint); + + let builder = get_builder(key_file, crt_file); + HttpServer::new(|| App::new().service(validate)) + .bind_openssl(endpoint, builder)? + .run() + .await +} + +#[cfg(test)] +mod tests { + use super::*; + use actix_web::test; + + const VALID: &str = r#" + { + "kind": "AdmissionReview", + "apiVersion": "admission.k8s.io/v1", + "request": { + "uid": "00000000-0000-0000-0000-000000000000", + "kind": { + "group": "akri.sh", + "version": "v0", + "kind": "Configuration" + }, + "resource": { + "group": "akri.sh", + "version": "v0", + "resource": "configurations" + }, + "requestKind": { + "group": "akri.sh", + "version": "v0", + "kind": "Configuration" + }, + "requestResource": { + "group": "akri.sh", + "version": "v0", + "resource": "configurations" + }, + "name": "name", + "namespace": "default", + "operation": "CREATE", + "userInfo": { + "username": "admin", + "uid": "admin", + "groups": [] + }, + "object": { + "apiVersion": "akri.sh/v0", + "kind": "Configuration", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "" + }, + "creationTimestamp": "2021-01-01T00:00:00Z", + "generation": 1, + "managedFields": [], + "name": "name", + "namespace": "default", + "uid": "00000000-0000-0000-0000-000000000000" + }, + "spec": { + "protocol": { + "debugEcho": { + "descriptions": ["foo","bar"], + "shared": true + } + }, + "brokerPodSpec": { + "containers": [ + { + "image": "image", + "name": "name", + "resources": { + "limits": { + "{{PLACEHOLDER}}": "1" + } + } + } + ], + "imagePullSecrets": [ + { + "name": "name" + } + ] + }, + "capacity": 1 + } + }, + "oldObject": null, + 
"dryRun": false, + "options": { + "kind": "CreateOptions", + "apiVersion": "meta.k8s.io/v1" + } + } + } + "#; + + // Valid JSON but invalid akri.sh/v0/Configuration + // Misplaced `resources` + // Valid: .request.object.spec.brokerPodSpec.containers[*].resources + // Invalid: .request.object.spec.brokerPodSpec.resources + const INVALID: &str = r#" + { + "kind": "AdmissionReview", + "apiVersion": "admission.k8s.io/v1", + "request": { + "uid": "00000000-0000-0000-0000-000000000000", + "kind": { + "group": "akri.sh", + "version": "v0", + "kind": "Configuration" + }, + "resource": { + "group": "akri.sh", + "version": "v0", + "resource": "configurations" + }, + "requestKind": { + "group": "akri.sh", + "version": "v0", + "kind": "Configuration" + }, + "requestResource": { + "group": "akri.sh", + "version": "v0", + "resource": "configurations" + }, + "name": "name", + "namespace": "default", + "operation": "CREATE", + "userInfo": { + "username": "admin", + "uid": "admin", + "groups": [] + }, + "object": { + "apiVersion": "akri.sh/v0", + "kind": "Configuration", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "" + }, + "creationTimestamp": "2021-01-01T00:00:00Z", + "generation": 1, + "managedFields": [], + "name": "name", + "namespace": "default", + "uid": "00000000-0000-0000-0000-000000000000" + }, + "spec": { + "protocol": { + "debugEcho": { + "descriptions": ["foo","bar"], + "shared": true + } + }, + "brokerPodSpec": { + "containers": [ + { + "image": "image", + "name": "name" + } + ], + "resources": { + "limits": { + "{{PLACEHOLDER}}": "1" + } + }, + "imagePullSecrets": [ + { + "name": "name" + } + ] + }, + "capacity": 1 + } + }, + "oldObject": null, + "dryRun": false, + "options": { + "kind": "CreateOptions", + "apiVersion": "meta.k8s.io/v1" + } + } + } + "#; + + const EXTENDED: &str = r#" + { + "kind": "AdmissionReview", + "apiVersion": "admission.k8s.io/v1", + "request": { + "uid": "00000000-0000-0000-0000-000000000000", + 
"kind": { + "group": "akri.sh", + "version": "v0", + "kind": "Configuration" + }, + "resource": { + "group": "akri.sh", + "version": "v0", + "resource": "configurations" + }, + "requestKind": { + "group": "akri.sh", + "version": "v0", + "kind": "Configuration" + }, + "requestResource": { + "group": "akri.sh", + "version": "v0", + "resource": "configurations" + }, + "name": "name", + "namespace": "default", + "operation": "CREATE", + "userInfo": { + "username": "admin", + "uid": "admin", + "groups": [] + }, + "object": { + "apiVersion": "akri.sh/v0", + "kind": "Configuration", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "" + }, + "creationTimestamp": "2021-01-01T00:00:00Z", + "generation": 1, + "managedFields": [], + "name": "name", + "namespace": "default", + "uid": "00000000-0000-0000-0000-000000000000" + }, + "spec": { + "protocol": { + "debugEcho": { + "descriptions": ["foo","bar"], + "shared": true + } + }, + "brokerPodSpec": { + "containers": [ + { + "image": "image", + "name": "name", + "resources": { + "limits": { + "{{PLACEHOLDER}}": "1" + } + } + } + ], + "imagePullSecrets": [ + { + "name": "name" + } + ] + }, + "instanceServiceSpec": { + "type": "ClusterIP", + "ports": [{ + "name": "name", + "port": 0, + "targetPort": 0, + "protocol": "TCP" + }] + }, + "configurationServiceSpec": { + "type": "ClusterIP", + "ports": [{ + "name": "name", + "port": 0, + "targetPort": 0, + "protocol": "TCP" + }] + }, + "capacity": 1 + } + }, + "oldObject": null, + "dryRun": false, + "options": { + "kind": "CreateOptions", + "apiVersion": "meta.k8s.io/v1" + } + } + } + "#; + + const METADATA: &str = r#" + { + "apiVersion": "akri.sh/v0", + "kind": "Configuration", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "" + }, + "creationTimestamp": "2021-01-01T00:00:00Z", + "generation": 1, + "managedFields": [], + "name": "name", + "namespace": "default", + "uid": 
"00000000-0000-0000-0000-000000000000" + }, + "spec": {} + } + "#; + + // JSON Syntax Tests + #[test] + fn test_both_null() { + assert!(check(&serde_json::Value::Null, &serde_json::Value::Null).is_ok()); + } + + #[test] + fn test_value_is_null() { + let deserialized: Value = serde_json::from_str("{}").unwrap(); + assert!(check(&serde_json::Value::Null, &deserialized).is_err()); + } + + #[test] + fn test_deserialized_is_null() { + let v: Value = serde_json::from_str("{}").unwrap(); + assert!(check(&v, &serde_json::Value::Null).is_err()); + } + + #[test] + fn test_both_empty() { + let deserialized: Value = serde_json::from_str("{}").unwrap(); + let v: Value = serde_json::from_str("{}").unwrap(); + assert!(check(&v, &deserialized).is_ok()); + } + + #[test] + fn test_both_same() { + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3 ] } }"#) + .unwrap(); + let v: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3 ] } }"#) + .unwrap(); + assert!(check(&v, &deserialized).is_ok()); + } + + #[test] + fn test_deserialized_has_extra() { + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "hi" } }"#).unwrap(); + let v: Value = serde_json::from_str(r#"{ "a": 1, "b": { "c": 2 } }"#).unwrap(); + assert!(check(&v, &deserialized).is_ok()); + } + + #[test] + fn test_value_has_extra() { + let deserialized: Value = serde_json::from_str(r#"{ "a": 1, "b": { "c": 2 } }"#).unwrap(); + let v: Value = serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "hi" } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_different_types_int_to_str() { + // value=#, deser=str + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "hi" } }"#).unwrap(); + let v: Value = serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": 3 } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn 
test_value_has_different_types_str_to_bool() { + // value=str, deser=bool + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": true } }"#).unwrap(); + let v: Value = serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "3" } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_different_types_bool_to_int() { + // value=bool, deser=# + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": 2 } }"#).unwrap(); + let v: Value = serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": true } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_different_strings() { + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "hi" } }"#).unwrap(); + let v: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "hello" } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_different_numbers() { + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": 2, "d": "hi" } }"#).unwrap(); + let v: Value = serde_json::from_str(r#"{ "a": 2, "b": { "c": 2, "d": "hi" } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_different_bools() { + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi" } }"#).unwrap(); + let v: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": false, "d": "hi" } }"#).unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_different_array_element() { + let deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3 ] } }"#) + .unwrap(); + let v: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 5, 3 ] } }"#) + .unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_value_has_extra_array_element() { + let 
deserialized: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3 ] } }"#) + .unwrap(); + let v: Value = serde_json::from_str( + r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3, 4 ] } }"#, + ) + .unwrap(); + assert!(check(&v, &deserialized).is_err()); + } + + #[test] + fn test_deserialized_has_extra_array_element() { + let deserialized: Value = serde_json::from_str( + r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3, 4 ] } }"#, + ) + .unwrap(); + let v: Value = + serde_json::from_str(r#"{ "a": 1, "b": { "c": true, "d": "hi", "e": [ 1, 2, 3 ] } }"#) + .unwrap(); + assert!(check(&v, &deserialized).is_ok()); + } + + // Akri Configuration schema tests + use kube::api::{Object, Void}; + #[test] + fn test_creationtimestamp_is_filtered() { + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); + let reserialized = serde_json::to_string(&t).expect("bytes"); + let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); + let v = filter_configuration(deserialized); + assert_eq!(v["metadata"].get("creationTimestamp"), None); + } + + #[test] + fn test_deletiontimestamp_is_filtered() { + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); + let reserialized = serde_json::to_string(&t).expect("bytes"); + let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); + let v = filter_configuration(deserialized); + assert_eq!(v["metadata"].get("deletionTimestamp"), None); + } + + #[test] + fn test_managedfields_is_filtered() { + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); + let reserialized = serde_json::to_string(&t).expect("bytes"); + let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); + let v = filter_configuration(deserialized); + assert_eq!(v["metadata"].get("managedFields"), None); + } + + #[test] + fn test_generation_becomes_f64() { + let t: Object = 
serde_json::from_str(METADATA).expect("Valid Metadata"); + let reserialized = serde_json::to_string(&t).expect("bytes"); + let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); + let v = filter_configuration(deserialized); + assert!(v["metadata"].get("generation").unwrap().is_f64()); + } + + #[test] + fn test_validate_configuration_valid() { + let valid: AdmissionReview = serde_json::from_str(VALID).expect("v1.AdmissionReview JSON"); + let rqst = valid.request.expect("v1.AdmissionRequest JSON"); + let resp = validate_configuration(&rqst); + assert_eq!(resp.allowed, true); + } + + #[test] + fn test_validate_configuration_invalid() { + let invalid: AdmissionReview = + serde_json::from_str(INVALID).expect("v1.AdmissionReview JSON"); + let rqst = invalid.request.expect("v1.AdmissionRequest JSON"); + let resp = validate_configuration(&rqst); + assert_eq!(resp.allowed, false); + } + + #[test] + fn test_validate_configuration_extended() { + let valid: AdmissionReview = + serde_json::from_str(EXTENDED).expect("v1.AdmissionReview JSON"); + let rqst = valid.request.expect("v1.AdmissionRequest JSON"); + let resp = validate_configuration(&rqst); + assert_eq!(resp.allowed, true); + } + + #[actix_rt::test] + async fn test_validate_valid() { + let mut app = test::init_service(App::new().service(validate)).await; + let valid: AdmissionReview = serde_json::from_str(VALID).expect("v1.AdmissionReview JSON"); + let rqst = test::TestRequest::post() + .uri("/validate") + .set_json(&valid) + .to_request(); + let resp = test::call_service(&mut app, rqst).await; + assert_eq!(resp.status().is_success(), true); + } + + #[actix_rt::test] + async fn test_validate_invalid() { + let mut app = test::init_service(App::new().service(validate)).await; + let invalid: AdmissionReview = + serde_json::from_str(INVALID).expect("v1.AdmissionReview JSON"); + let rqst = test::TestRequest::post() + .uri("/validate") + .set_json(&invalid) + .to_request(); + let resp = 
test::call_service(&mut app, rqst).await; + assert_eq!(resp.status().is_success(), true); + } +}