From 0b24e595baa1f7f3bfca2bc40de3602b2b1f19bd Mon Sep 17 00:00:00 2001
From: Vibhu Prashar
Date: Wed, 5 Apr 2023 18:44:14 +0530
Subject: [PATCH] Implement CI using CircleCI (#421)

* Implement CI using Circleci
* Modify config.yaml and add missing new line
* Add build require in tests workflow
* Update crds in ci_test.sh
* Fix path of crd template
* Update env for disabling log rules in obsctl reloader
* Update prometheus-operator crd link
* Incorporate review comments
* Add check for restartCount in run_test
* Update go version
---
 .circleci/config.yml                            |  71 +++++++
 .../observatorium-metrics-template.yaml         |  34 ++-
 .../observatorium-metrics-template.jsonnet      |   6 +
 services/observatorium-metrics.libsonnet        |   8 +-
 tests/ci/ci_test.sh                             | 201 ++++++++++++++++++
 tests/ci/observatorium-metrics.ci.env           |  70 ++++++
 tests/ci/observatorium-up-metrics.yaml          |  70 ++++++
 tests/ci/observatorium.test.ci.env              |  38 ++++
 tests/ci/pre-requisites.yaml                    | 171 +++++++++++++++
 tests/ci/rbac.yaml                              |  29 +++
 tests/ci/telemeter.ci.env                       |  20 ++
 tests/ci/test-tenant.yaml                       |  30 +++
 12 files changed, 733 insertions(+), 15 deletions(-)
 create mode 100644 .circleci/config.yml
 create mode 100755 tests/ci/ci_test.sh
 create mode 100644 tests/ci/observatorium-metrics.ci.env
 create mode 100644 tests/ci/observatorium-up-metrics.yaml
 create mode 100644 tests/ci/observatorium.test.ci.env
 create mode 100644 tests/ci/pre-requisites.yaml
 create mode 100644 tests/ci/rbac.yaml
 create mode 100644 tests/ci/telemeter.ci.env
 create mode 100644 tests/ci/test-tenant.yaml

diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000000..c4187a2724
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,71 @@
version: 2.1
jobs:
  build:
    docker:
      - image: cimg/go:1.19
    steps:
      - checkout
      - run: make vendor_jsonnet
      - run: make grafana manifests prometheusrules
      - run: make format
      - run: make lint
      - run: make validate
      - run: git diff --exit-code
  tests:
    machine:
      image: ubuntu-2004:current
    resource_class: large
    environment:
      KUBECONFIG: kubeconfig
    steps:
      - checkout
      - run:
          name: Get dependencies
          command: |
            sudo apt-get update
            sudo apt-get install -y \
              ca-certificates \
              curl \
              wget \
              gnupg \
              lsb-release
            sudo mkdir -m 0755 -p /etc/apt/keyrings
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
            echo \
              "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
              $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
            sudo chmod a+r /etc/apt/keyrings/docker.gpg
            sudo apt-get update
      - run:
          name: Install Docker
          command: sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
      - run:
          name: Get OC CLI Binary
          command: |
            wget https://github.com/okd-project/okd/releases/download/4.12.0-0.okd-2023-02-18-033438/openshift-client-linux-4.12.0-0.okd-2023-02-18-033438.tar.gz
            tar xzvf openshift-client-linux-4.12.0-0.okd-2023-02-18-033438.tar.gz
            sudo mv oc kubectl /usr/local/bin
      - run:
          name: Spin up microshift container
          command: |
            docker run -d --name microshift --privileged -v microshift-data:/var/lib -p 6443:6443 -p 80:80 -p 443:443 quay.io/microshift/microshift-aio:latest
            sleep 60
      - run:
          name: Export the kubeconfig
          command: docker exec -i microshift cat /var/lib/microshift/resources/kubeadmin/kubeconfig > tests/ci/kubeconfig
      - run:
          name: Run tests
          command: |
            echo $KUBECONFIG
            cd tests/ci
            ./ci_test.sh metrics
      - store_artifacts:
          path: /tmp/artifacts
workflows:
  version: 2
  build-and-test:
    jobs:
      - build
      - tests:
          requires:
            - build
diff --git a/resources/services/observatorium-metrics-template.yaml b/resources/services/observatorium-metrics-template.yaml
index f72b0556d8..50dcb91570 100644
--- a/resources/services/observatorium-metrics-template.yaml
+++ b/resources/services/observatorium-metrics-template.yaml
@@ -364,11 +364,11 @@ objects:
           periodSeconds: 30
         resources:
           limits:
-            cpu: "4"
-            memory: 4Gi
+            cpu: ${OBSERVATORIUM_ALERTMANAGER_CPU_LIMIT}
+            memory: ${OBSERVATORIUM_ALERTMANAGER_MEMORY_LIMIT}
           requests:
-            cpu: "1"
-            memory: 1Gi
+            cpu: ${OBSERVATORIUM_ALERTMANAGER_CPU_REQUEST}
+            memory: ${OBSERVATORIUM_ALERTMANAGER_MEMORY_REQUEST}
         volumeMounts:
         - mountPath: /data
           name: alertmanager-data
@@ -435,7 +435,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 10Gi
+          storage: ${OBSERVATORIUM_ALERTMANAGER_PVC_STORAGE}
 - apiVersion: apps/v1
   kind: Deployment
   metadata:
@@ -3109,7 +3109,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 50Gi
+          storage: ${THANOS_STORE_PVC_STORAGE}
       storageClassName: ${STORAGE_CLASS}
 - apiVersion: v1
   kind: Service
@@ -3366,7 +3366,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 50Gi
+          storage: ${THANOS_STORE_PVC_STORAGE}
       storageClassName: ${STORAGE_CLASS}
 - apiVersion: v1
   kind: Service
@@ -3623,7 +3623,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 50Gi
+          storage: ${THANOS_STORE_PVC_STORAGE}
       storageClassName: ${STORAGE_CLASS}
 - apiVersion: v1
   kind: Service
@@ -3880,7 +3880,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 50Gi
+          storage: ${THANOS_STORE_PVC_STORAGE}
       storageClassName: ${STORAGE_CLASS}
 - apiVersion: v1
   kind: Service
@@ -4137,7 +4137,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 50Gi
+          storage: ${THANOS_STORE_PVC_STORAGE}
       storageClassName: ${STORAGE_CLASS}
 - apiVersion: v1
   kind: Service
@@ -4394,7 +4394,7 @@ objects:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 50Gi
+          storage: ${THANOS_STORE_PVC_STORAGE}
       storageClassName: ${STORAGE_CLASS}
 - apiVersion: monitoring.coreos.com/v1
   kind: ServiceMonitor
@@ -4478,6 +4478,16 @@ parameters:
   value: quay.io/prometheus/alertmanager
 - name: OBSERVATORIUM_ALERTMANAGER_IMAGE_TAG
   value: v0.25.0
+- name: OBSERVATORIUM_ALERTMANAGER_PVC_STORAGE
+  value: 10Gi
+- name: OBSERVATORIUM_ALERTMANAGER_CPU_REQUEST
+  value: "1"
+- name: OBSERVATORIUM_ALERTMANAGER_MEMORY_REQUEST
+  value: 1Gi
+- name: OBSERVATORIUM_ALERTMANAGER_MEMORY_LIMIT
+  value: 4Gi
+- name: OBSERVATORIUM_ALERTMANAGER_CPU_LIMIT
+  value: "4"
 - name: SERVICE_ACCOUNT_NAME
   value: prometheus-telemeter
 - name: STORAGE_CLASS
@@ -4644,6 +4654,8 @@ parameters:
   value: "2"
 - name: THANOS_STORE_CPU_REQUEST
   value: 500m
+- name: THANOS_STORE_PVC_STORAGE
+  value: 50Gi
 - name: THANOS_STORE_SERIES_TOUCHED_LIMIT
   value: "0"
 - name: THANOS_STORE_SERIES_SAMPLE_LIMIT
diff --git a/services/observatorium-metrics-template.jsonnet b/services/observatorium-metrics-template.jsonnet
index 750f701f2f..a094372ccf 100644
--- a/services/observatorium-metrics-template.jsonnet
+++ b/services/observatorium-metrics-template.jsonnet
@@ -41,6 +41,11 @@ local obs = import 'observatorium.libsonnet';
     { name: 'OBSERVATORIUM_ALERTMANAGER_LOG_LEVEL', value: 'info' },
     { name: 'OBSERVATORIUM_ALERTMANAGER_IMAGE', value: 'quay.io/prometheus/alertmanager' },
     { name: 'OBSERVATORIUM_ALERTMANAGER_IMAGE_TAG', value: 'v0.25.0' },
+    { name: 'OBSERVATORIUM_ALERTMANAGER_PVC_STORAGE', value: '10Gi' },
+    { name: 'OBSERVATORIUM_ALERTMANAGER_CPU_REQUEST', value: '1' },
+    { name: 'OBSERVATORIUM_ALERTMANAGER_MEMORY_REQUEST', value: '1Gi' },
+    { name: 'OBSERVATORIUM_ALERTMANAGER_MEMORY_LIMIT', value: '4Gi' },
+    { name: 'OBSERVATORIUM_ALERTMANAGER_CPU_LIMIT', value: '4' },
     { name: 'SERVICE_ACCOUNT_NAME', value: 'prometheus-telemeter' },
     { name: 'STORAGE_CLASS', value: 'gp2-csi' },
     { name: 'THANOS_COMPACTOR_CPU_LIMIT', value: '1' },
@@ -124,6 +129,7 @@ local obs = import 'observatorium.libsonnet';
     { name: 'THANOS_STORE_BUCKET_CACHE_REPLICAS', value: '3' },
     { name: 'THANOS_STORE_CPU_LIMIT', value: '2' },
     { name: 'THANOS_STORE_CPU_REQUEST', value: '500m' },
+    { name: 'THANOS_STORE_PVC_STORAGE', value: '50Gi' },
     { name: 'THANOS_STORE_SERIES_TOUCHED_LIMIT', value: '0' },
     { name: 'THANOS_STORE_SERIES_SAMPLE_LIMIT', value: '0' },
     { name: 'THANOS_STORE_INDEX_CACHE_CONNECTION_LIMIT', value: '3072' },
diff --git a/services/observatorium-metrics.libsonnet b/services/observatorium-metrics.libsonnet
index 591b235cdb..f070838542 100644
--- a/services/observatorium-metrics.libsonnet
+++ b/services/observatorium-metrics.libsonnet
@@ -236,7 +236,7 @@ local oauthProxy = import './sidecars/oauth-proxy.libsonnet';
         storageClassName: '${STORAGE_CLASS}',
         resources: {
           requests: {
-            storage: '50Gi',
+            storage: '${THANOS_STORE_PVC_STORAGE}',
           },
         },
       },
@@ -926,7 +926,7 @@ local oauthProxy = import './sidecars/oauth-proxy.libsonnet';
         accessModes: ['ReadWriteOnce'],
         resources: {
           requests: {
-            storage: '10Gi',
+            storage: '${OBSERVATORIUM_ALERTMANAGER_PVC_STORAGE}',
           },
         },
       },
@@ -985,8 +985,8 @@ local oauthProxy = import './sidecars/oauth-proxy.libsonnet';
             path: '/',
           } },
         resources: {
-          requests: { cpu: '1', memory: '1Gi' },
-          limits: { cpu: '4', memory: '4Gi' },
+          requests: { cpu: '${OBSERVATORIUM_ALERTMANAGER_CPU_REQUEST}', memory: '${OBSERVATORIUM_ALERTMANAGER_MEMORY_REQUEST}' },
+          limits: { cpu: '${OBSERVATORIUM_ALERTMANAGER_CPU_LIMIT}', memory: '${OBSERVATORIUM_ALERTMANAGER_MEMORY_LIMIT}' },
         },
       }],
       volumes: [
diff --git a/tests/ci/ci_test.sh b/tests/ci/ci_test.sh
new file mode 100755
index 0000000000..7faa50bf3d
--- /dev/null
+++ b/tests/ci/ci_test.sh
@@ -0,0 +1,201 @@
#!/bin/bash

set -euo pipefail

ARTIFACT_DIR="${ARTIFACT_DIR:-/tmp/artifacts}"
check_status() {
    oc rollout status $1 -n $2 --timeout=5m || {
        must_gather "$ARTIFACT_DIR"
        exit 1
    }
}

prereq() {
    oc apply -f pre-requisites.yaml
    oc create -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/main/bundle.yaml
    oc process -f ../../resources/crds/observatorium-logs-crds-template.yaml | oc apply -f -

}

minio() {
    oc create ns minio || true
    oc wait --for=jsonpath='{.status.phase}=Active' namespace/minio --timeout=5s
    oc process -f ../minio-template.yaml \
        -p MINIO_CPU_REQUEST=30m \
        -p MINIO_CPU_LIMITS=50m \
        -p MINIO_MEMORY_REQUEST=50Mi \
        -p MINIO_MEMORY_LIMITS=100Mi \
        --local -o yaml | \
        sed -e 's/storage: [0-9].Gi/storage: 0.25Gi/g' | \
        oc apply -n minio -f -
    check_status deployment/minio minio
}

dex() {
    oc create ns dex || true
    oc wait --for=jsonpath='{.status.phase}=Active' namespace/dex --timeout=5s
    oc process -f ../dex-template.yaml \
        -p DEX_CPU_REQUEST=30m \
        -p DEX_CPU_LIMITS=50m \
        -p DEX_MEMORY_REQUEST=50Mi \
        -p DEX_MEMORY_LIMITS=100Mi \
        --local -o yaml | \
        sed -e 's/storage: [0-9].Gi/storage: 0.25Gi/g' | \
        oc apply -n dex -f -
    check_status deployment/dex dex
}

observatorium_metrics() {
    oc create ns observatorium-metrics || true
    oc wait --for=jsonpath='{.status.phase}=Active' namespace/observatorium-metrics --timeout=5s
    oc process -f ../observatorium-metrics-thanos-objectstorage-secret-template.yaml | oc apply --namespace observatorium-metrics -f -
    oc apply -f ../observatorium-alertmanager-config-secret.yaml --namespace observatorium-metrics
    oc apply -f ../observatorium-cluster-role.yaml
    oc apply -f ../observatorium-cluster-role-binding.yaml
    oc apply --namespace observatorium-metrics -f ../observatorium-service-account.yaml
    oc process --param-file=observatorium-metrics.ci.env \
        -f ../../resources/services/observatorium-metrics-template.yaml | \
        oc apply --namespace observatorium-metrics -f -
    resources=$(
        oc get statefulsets -o name -n observatorium-metrics
        oc get deployments -o name -n observatorium-metrics
    )
    for res in $resources; do
        check_status $res observatorium-metrics
    done
}

observatorium() {
    oc create ns observatorium || true
    oc wait --for=jsonpath='{.status.phase}=Active' namespace/observatorium --timeout=5s
    oc apply -f ../observatorium-rules-objstore-secret.yaml --namespace observatorium
    oc apply -f ../observatorium-rhobs-tenant-secret.yaml --namespace observatorium
    oc process --param-file=observatorium.test.ci.env \
        -f ../../resources/services/observatorium-template.yaml | \
        oc apply --namespace observatorium -f -
    resources=$(
        oc get statefulsets -o name -n observatorium
        oc get deployments -o name -n observatorium
    )
    for res in $resources; do
        check_status $res observatorium
    done

}

telemeter() {
    oc create ns telemeter || true
    oc wait --for=jsonpath='{.status.phase}=Active' namespace/telemeter --timeout=5s
    oc apply --namespace telemeter -f ../telemeter-token-refersher-oidc-secret.yaml
    oc process --param-file=telemeter.ci.env \
        -f ../../resources/services/telemeter-template.yaml | \
        oc apply --namespace telemeter -f -
    resources=$(
        oc get statefulsets -o name -n telemeter
        oc get deployments -o name -n telemeter
    )
    for res in $resources; do
        check_status $res telemeter
    done
}

run_test() {
    oc apply -n observatorium -f test-tenant.yaml
    oc apply -n observatorium -f rbac.yaml
    oc rollout restart deployment/observatorium-observatorium-api -n observatorium
    check_status deployment/observatorium-observatorium-api observatorium
    oc apply -n observatorium -f observatorium-up-metrics.yaml
    oc wait --for=condition=complete --timeout=5m \
        -n observatorium job/observatorium-up-metrics || {
        must_gather "$ARTIFACT_DIR"
        exit 1
    }
    for namespace in minio dex observatorium observatorium-metrics telemeter; do
        out=$(oc get pods -n $namespace -o jsonpath='{.items[?(@.status.containerStatuses[].restartCount>=3)].metadata.name}')
        if [ -n "$out" ]; then
            must_gather "$ARTIFACT_DIR"
            exit 1
        fi
    done
}

must_gather() {
    local artifact_dir="$1"

    for namespace in minio dex observatorium observatorium-metrics telemeter; do
        mkdir -p "$artifact_dir/$namespace"

        for name in $(oc get pods -n "$namespace" -o jsonpath='{.items[*].metadata.name}') ; do
            oc -n "$namespace" describe pod "$name" > "$artifact_dir/$namespace/$name.describe"
            oc -n "$namespace" get pod "$name" -o yaml > "$artifact_dir/$namespace/$name.yaml"

            for initContainer in $(oc -n "$namespace" get pod "$name" -o jsonpath='{.spec.initContainers[*].name}') ; do
                oc -n "$namespace" logs "$name" -c "$initContainer" > "$artifact_dir/$namespace/$name-$initContainer.logs"
            done

            for container in $(oc -n "$namespace" get pod "$name" -o jsonpath='{.spec.containers[*].name}') ; do
                oc -n "$namespace" logs "$name" -c "$container" > "$artifact_dir/$namespace/$name-$container.logs"
            done
        done
    done

    oc describe nodes > "$artifact_dir/nodes"
    oc get pods --all-namespaces > "$artifact_dir/pods"
    oc get deploy --all-namespaces > "$artifact_dir/deployments"
    oc get statefulset --all-namespaces > "$artifact_dir/statefulsets"
    oc get services --all-namespaces > "$artifact_dir/services"
    oc get endpoints --all-namespaces > "$artifact_dir/endpoints"
}

ci.metrics() {
    prereq
    minio
    dex
    observatorium_metrics
    observatorium
    run_test
    telemeter
}

ci.logs() {
    #TODO
    :
}

ci.traces() {
    #TODO
    :
}

ci.help() {
    local fns=$(declare -F -p | cut -f3 -d ' ' | grep '^ci\.' | cut -f2- -d.)
    read -d '^' -r docstring <<EOF_HELP
Usage:
  $(basename "$0") <task>

task:
$(for fn in ${fns[@]};do printf "  - %s\n" "${fn}";done)
^
EOF_HELP
    echo -e "$docstring"
    exit 1
}

is_function() {
    local fn=$1
    [[ $(type -t "$fn") == "function" ]]
    return $?
}

main() {
    local fn=${1:-''}
    local ci_fn="ci.$fn"
    if ! is_function "$ci_fn"; then
        ci.help
    fi
    $ci_fn
    return $?
}
main "$@"

diff --git a/tests/ci/observatorium-metrics.ci.env b/tests/ci/observatorium-metrics.ci.env
new file mode 100644
index 0000000000..bb1b760dc3
--- /dev/null
+++ b/tests/ci/observatorium-metrics.ci.env
@@ -0,0 +1,70 @@
SERVICE_ACCOUNT_NAME=observatorium
THANOS_S3_SECRET=thanos-test-s3
JAEGER_AGENT_IMAGE=jaegertracing/jaeger-agent
STORAGE_CLASS=kubevirt-hostpath-provisioner

MEMCACHED_EXPORTER_MEMORY_REQUEST=200Mi
MEMCACHED_CPU_LIMIT=50m
MEMCACHED_CPU_REQUEST=50m
MEMCACHED_EXPORTER_CPU_LIMIT=50m
MEMCACHED_MEMORY_LIMIT=200Mi
MEMCACHED_MEMORY_REQUEST=200Mi
OAUTH_PROXY_CPU_LIMITS=50m
OAUTH_PROXY_CPU_REQUEST=50m
OAUTH_PROXY_MEMORY_REQUEST=200Mi
THANOS_COMPACTOR_CPU_LIMIT=50m
THANOS_COMPACTOR_CPU_REQUEST=50m
THANOS_COMPACTOR_MEMORY_LIMIT=200Mi
THANOS_COMPACTOR_MEMORY_REQUEST=200Mi
THANOS_COMPACTOR_PVC_REQUEST=0.25Gi
THANOS_QUERIER_REPLICAS=1
THANOS_QUERY_FRONTEND_REPLICAS=1
THANOS_QUERIER_CPU_LIMIT=50m
THANOS_QUERIER_CPU_REQUEST=50m
THANOS_QUERIER_MEMORY_LIMIT=200Mi
THANOS_QUERIER_MEMORY_REQUEST=200Mi
THANOS_QUERY_FRONTEND_CPU_LIMIT=50m
THANOS_QUERY_FRONTEND_CPU_REQUEST=50m
THANOS_QUERY_FRONTEND_MEMORY_LIMIT=200Mi
THANOS_QUERY_FRONTEND_MEMORY_REQUEST=200Mi
THANOS_QUERY_FRONTEND_QUERY_CACHE_MEMCACHED_CPU_LIMIT=50m
THANOS_QUERY_FRONTEND_QUERY_CACHE_MEMCACHED_CPU_REQUEST=50m
THANOS_QUERY_FRONTEND_QUERY_CACHE_MEMCACHED_MEMORY_LIMIT=200Mi
THANOS_QUERY_FRONTEND_QUERY_CACHE_MEMCACHED_MEMORY_REQUEST=200Mi
THANOS_STORE_INDEX_CACHE_MEMCACHED_CPU_LIMIT=50m
THANOS_STORE_INDEX_CACHE_MEMCACHED_CPU_REQUEST=50m
THANOS_STORE_INDEX_CACHE_MEMCACHED_MEMORY_LIMIT=200Mi
THANOS_STORE_INDEX_CACHE_MEMCACHED_MEMORY_REQUEST=200Mi
THANOS_STORE_BUCKET_CACHE_MEMCACHED_CPU_LIMIT=50m
THANOS_STORE_BUCKET_CACHE_MEMCACHED_CPU_REQUEST=50m
THANOS_STORE_BUCKET_CACHE_MEMCACHED_MEMORY_LIMIT=200Mi
THANOS_STORE_BUCKET_CACHE_MEMCACHED_MEMORY_REQUEST=200Mi
THANOS_STORE_BUCKET_CACHE_REPLICAS=1
THANOS_STORE_INDEX_CACHE_REPLICAS=1
THANOS_STORE_REPLICAS=1
THANOS_STORE_CPU_LIMIT=50m
THANOS_STORE_CPU_REQUEST=50m
THANOS_STORE_MEMORY_LIMIT=200Mi
THANOS_STORE_MEMORY_REQUEST=200Mi
THANOS_STORE_PVC_STORAGE=0.25Gi
THANOS_RECEIVE_CPU_LIMIT=50m
THANOS_RECEIVE_CPU_REQUEST=50m
THANOS_RECEIVE_MEMORY_LIMIT=200Mi
THANOS_RECEIVE_MEMORY_REQUEST=200Mi
THANOS_RECEIVE_REPLICAS=3
THANOS_RECEIVE_PVC_STORAGE=0.25Gi
THANOS_RULER_CPU_LIMIT=50m
THANOS_RULER_CPU_REQUEST=50m
THANOS_RULER_MEMORY_LIMIT=200Mi
THANOS_RULER_MEMORY_REQUEST=200Mi
THANOS_RULER_REPLICAS=1
THANOS_RULER_PVC_REQUEST=0.25Gi
THANOS_VOLCANO_CPU_LIMIT=50m
THANOS_VOLCANO_CPU_REQUEST=50m
THANOS_VOLCANO_MEMORY_LIMIT=200Mi
THANOS_VOLCANO_MEMORY_REQUEST=200Mi
OBSERVATORIUM_ALERTMANAGER_CPU_REQUEST=30m
OBSERVATORIUM_ALERTMANAGER_MEMORY_REQUEST=50Mi
OBSERVATORIUM_ALERTMANAGER_MEMORY_LIMIT=100Mi
OBSERVATORIUM_ALERTMANAGER_CPU_LIMIT=50m
OBSERVATORIUM_ALERTMANAGER_PVC_STORAGE=0.25Gi
diff --git a/tests/ci/observatorium-up-metrics.yaml b/tests/ci/observatorium-up-metrics.yaml
new file mode 100644
index 0000000000..b0e5a3ed3d
--- /dev/null
+++ b/tests/ci/observatorium-up-metrics.yaml
@@ -0,0 +1,70 @@
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: test
    app.kubernetes.io/instance: e2e-test
    app.kubernetes.io/name: observatorium-up
    app.kubernetes.io/version: master-2022-10-27-d8bb06f
  name: observatorium-up-metrics
spec:
  backoffLimit: 5
  template:
    metadata:
      labels:
        app.kubernetes.io/component: test
        app.kubernetes.io/instance: e2e-test
        app.kubernetes.io/name: observatorium-up
        app.kubernetes.io/version: master-2022-10-27-d8bb06f
    spec:
      containers:
      - args:
        - --endpoint-type=metrics
        - --endpoint-write=http://observatorium-observatorium-api.observatorium.svc.cluster.local:8080/api/metrics/v1/test/api/v1/receive
        - --endpoint-read=http://observatorium-observatorium-api.observatorium.svc.cluster.local:8080/api/metrics/v1/test
        - --period=5s
        - --duration=2m
        - --name=mydummymetrics
        - --labels=dummyid="12"
        - --latency=30s
        - --initial-query-delay=10s
        - --threshold=0.90
        - --token-file=/var/shared/token
        image: quay.io/observatorium/up:master-2022-10-27-d8bb06f
        name: observatorium-up
        resources:
          limits:
            cpu: 50m
            memory: 50Mi
          requests:
            cpu: 50m
            memory: 50Mi
        volumeMounts:
        - mountPath: /var/shared
          name: shared
          readOnly: true
      initContainers:
      - command:
        - /bin/sh
        - -c
        - |
          curl --request POST \
            --silent \
            --url http://dex.dex.svc.cluster.local:5556/dex/token \
            --header 'content-type: application/x-www-form-urlencoded' \
            --data grant_type=password \
            --data username=admin@example.com \
            --data password=password \
            --data client_id=test \
            --data client_secret=ZXhhbXBsZS1hcHAtc2VjcmV0 \
            --data scope="openid email" | sed 's/^{.*"id_token":[^"]*"\([^"]*\)".*}/\1/' > /var/shared/token
        image: docker.io/curlimages/curl
        name: curl
        volumeMounts:
        - mountPath: /var/shared
          name: shared
          readOnly: false
      restartPolicy: OnFailure
      volumes:
      - emptyDir: {}
        name: shared
diff --git a/tests/ci/observatorium.test.ci.env b/tests/ci/observatorium.test.ci.env
new file mode 100644
index 0000000000..c9e968d3a5
--- /dev/null
+++ b/tests/ci/observatorium.test.ci.env
@@ -0,0 +1,38 @@
SERVICE_ACCOUNT_NAME=default
JAEGER_AGENT_IMAGE=jaegertracing/jaeger-agent
RULES_OBJSTORE_S3_SECRET=rules-objstore-s3
MANAGED_TENANTS=rhobs
OBSERVATORIUM_URL=http://observatorium-observatorium-api.observatorium.svc.cluster.local:8080

GUBERNATOR_IMAGE=thrawn01/gubernator
GUBERNATOR_REPLICAS=1
GUBERNATOR_CPU_LIMIT=50m
GUBERNATOR_CPU_REQUEST=30m
GUBERNATOR_MEMORY_LIMIT=100Mi
GUBERNATOR_MEMORY_REQUEST=50Mi
OBSERVATORIUM_API_REPLICAS=1
OBSERVATORIUM_API_CPU_LIMIT=50m
OBSERVATORIUM_API_CPU_REQUEST=30m
OBSERVATORIUM_API_MEMORY_LIMIT=100Mi
OBSERVATORIUM_API_MEMORY_REQUEST=50Mi
UP_REPLICAS=1
UP_CPU_REQUEST=30m
UP_CPU_LIMIT=50m
UP_MEMORY_REQUEST=50Mi
UP_MEMORY_LIMIT=100Mi
MEMCACHED_CPU_LIMIT=50m
MEMCACHED_CPU_REQUEST=30m
MEMCACHED_EXPORTER_CPU_LIMIT=50m
MEMCACHED_EXPORTER_CPU_REQUEST=30m
MEMCACHED_EXPORTER_MEMORY_LIMIT=100Mi
MEMCACHED_MEMORY_LIMIT=100Mi
MEMCACHED_MEMORY_REQUEST=50Mi
OAUTH_PROXY_CPU_LIMITS=50m
OAUTH_PROXY_CPU_REQUEST=30m
OAUTH_PROXY_MEMORY_LIMITS=100Mi
OAUTH_PROXY_MEMORY_REQUEST=50Mi
OPA_AMS_CPU_LIMIT=50m
OPA_AMS_CPU_REQUEST=30m
OPA_AMS_MEMORY_LIMIT=100Mi
OPA_AMS_MEMORY_REQUEST=50Mi

diff --git a/tests/ci/pre-requisites.yaml b/tests/ci/pre-requisites.yaml
new file mode 100644
index 0000000000..e18ce22fd4
--- /dev/null
+++ b/tests/ci/pre-requisites.yaml
@@ -0,0 +1,171 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: openshift-ingress-router
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - route.openshift.io
  resources:
  - routes
  verbs:
  - list
  - watch
- apiGroups:
  - route.openshift.io
  resources:
  - routes/status
  verbs:
  - get
  - patch
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: openshift-ingress-router
roleRef:
  apiGroup: ""
  kind: ClusterRole
  name: openshift-ingress-router
subjects:
- kind: ServiceAccount
  namespace: openshift-ingress
  name: ingress-router
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: openshift-ingress-router-auth-delegator
roleRef:
  apiGroup: ""
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  namespace: openshift-ingress
  name: ingress-router
---
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-ingress
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-router
  namespace: openshift-ingress
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  # name must match the spec fields below, and be in the form: <plural>.<group>
  name: routes.route.openshift.io
spec:
  # group name to use for REST API: /apis/<group>/<version>
  group: route.openshift.io
  # list of versions supported by this CustomResourceDefinition
  versions:
  - name: v1
    # Each version can be enabled/disabled by Served flag.
    served: true
    # One and only one version must be marked as the storage version.
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        x-kubernetes-preserve-unknown-fields: true
    additionalPrinterColumns:
    - name: Host
      type: string
      jsonPath: .status.ingress[0].host
    - name: Admitted
      type: string
      jsonPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status
    - name: Service
      type: string
      jsonPath: .spec.to.name
    - name: TLS
      type: string
      jsonPath: .spec.tls.type
    subresources:
      # enable spec/status
      status: {}
  # either Namespaced or Cluster
  scope: Namespaced
  names:
    # plural name to be used in the URL: /apis/<group>/<version>/<plural>
    plural: routes
    # singular name to be used as an alias on the CLI and for display
    singular: route
    # kind is normally the CamelCased singular type. Your resource manifests use this.
    kind: Route
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-router
  namespace: openshift-ingress
  labels:
    k8s-app: ingress-router
spec:
  selector:
    matchLabels:
      k8s-app: ingress-router
  template:
    metadata:
      labels:
        k8s-app: ingress-router
    spec:
      serviceAccountName: ingress-router
      containers:
      - env:
        - name: ROUTER_LISTEN_ADDR
          value: 0.0.0.0:1936
        - name: ROUTER_METRICS_TYPE
          value: haproxy
        - name: ROUTER_SERVICE_HTTPS_PORT
          value: "443"
        - name: ROUTER_SERVICE_HTTP_PORT
          value: "80"
        - name: ROUTER_THREADS
          value: "4"
        - name: ROUTER_SUBDOMAIN
          value: "${name}-${namespace}.apps.127.0.0.1.nip.io"
        - name: ROUTER_ALLOW_WILDCARD_ROUTES
          value: "true"
        image: openshift/origin-haproxy-router:v4.0.0
        livenessProbe:
          httpGet:
            host: localhost
            path: /healthz
            port: 1936
          initialDelaySeconds: 10
        name: router
        ports:
        - containerPort: 80
        - containerPort: 443
        - containerPort: 1936
          name: stats
          protocol: TCP
        readinessProbe:
          httpGet:
            host: localhost
            path: /healthz/ready
            port: 1936
          initialDelaySeconds: 10
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
      hostNetwork: true
diff --git a/tests/ci/rbac.yaml b/tests/ci/rbac.yaml
new file mode 100644
index 0000000000..280cd2ec18
--- /dev/null
+++ b/tests/ci/rbac.yaml
@@ -0,0 +1,29 @@
apiVersion: v1
data:
  rbac.yaml: |-
    "roleBindings":
    - "name": "test"
      "roles":
      - "read-write"
      "subjects":
      - "kind": "user"
        "name": "admin@example.com"
    "roles":
    - "name": "read-write"
      "permissions":
      - "read"
      - "write"
      "resources":
      - "logs"
      - "metrics"
      "tenants":
      - "test"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: api
    app.kubernetes.io/instance: observatorium
    app.kubernetes.io/name: observatorium-api
    app.kubernetes.io/part-of: observatorium
  name: observatorium-observatorium-api
  namespace: observatorium
diff --git a/tests/ci/telemeter.ci.env b/tests/ci/telemeter.ci.env
new file mode 100644
index 0000000000..8aae1c70d2
--- /dev/null
+++ b/tests/ci/telemeter.ci.env
@@ -0,0 +1,20 @@
SERVICE_ACCOUNT_NAME=default
TELEMETER_FORWARD_URL=http://observatorium-observatorium-api.observatorium.svc.cluster.local:8080/api/metrics/v1/telemeter/api/v1/receive

REPLICAS=1
MEMCACHED_CPU_LIMIT=50m
MEMCACHED_CPU_REQUEST=30m
MEMCACHED_EXPORTER_CPU_LIMIT=50m
MEMCACHED_EXPORTER_CPU_REQUEST=30m
MEMCACHED_EXPORTER_MEMORY_LIMIT=100Mi
MEMCACHED_MEMORY_LIMIT=100Mi
MEMCACHED_MEMORY_REQUEST=50Mi
OAUTH_PROXY_CPU_REQUEST=30m
OAUTH_PROXY_MEMORY_REQUEST=50Mi
OAUTH_PROXY_CPU_LIMITS=50m
OAUTH_PROXY_MEMORY_LIMITS=100Mi
TELEMETER_SERVER_CPU_LIMIT=50m
TELEMETER_SERVER_CPU_REQUEST=30m
TELEMETER_SERVER_MEMORY_LIMIT=100Mi
TELEMETER_SERVER_MEMORY_REQUEST=50Mi
diff --git a/tests/ci/test-tenant.yaml b/tests/ci/test-tenant.yaml
new file mode 100644
index 0000000000..84393a0962
--- /dev/null
+++ b/tests/ci/test-tenant.yaml
@@ -0,0 +1,30 @@
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/component: api
    app.kubernetes.io/instance: observatorium
    app.kubernetes.io/name: observatorium-api
    app.kubernetes.io/part-of: observatorium
  name: observatorium-observatorium-api
  namespace: observatorium
stringData:
  client-id: test
  client-secret: ZXhhbXBsZS1hcHAtc2VjcmV0
  issuer-url: http://dex.dex.svc.cluster.local:5556/dex
  tenants.yaml: |-
    "tenants":
    - "id": "1610b0c3-c509-4592-a256-a1871353dbfa"
      "name": "test"
      "oidc":
        "clientID": "test"
        "clientSecret": "ZXhhbXBsZS1hcHAtc2VjcmV0"
        "issuerURL": "http://dex.dex.svc.cluster.local:5556/dex"
        "usernameClaim": "email"
      "rateLimits":
      - "endpoint": "/api/metrics/v1/.+/api/v1/receive"
        "limit": 1000
        "window": "1s"
      - "endpoint": "/api/logs/v1/.*"
        "limit": 1000
        "window": "1s"