From c87a612f2d45af5ef6f4820e8ffc43d499873647 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 07:50:42 +0100 Subject: [PATCH 01/37] scaffold the project kubebuilder init --domain dis.altinn.cloud --repo github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator --- .../.devcontainer/devcontainer.json | 25 ++ .../.devcontainer/post-install.sh | 23 ++ .../dis-promrulegroups-operator/.dockerignore | 3 + .../.github/workflows/lint.yml | 23 ++ .../.github/workflows/test-e2e.yml | 35 ++ .../.github/workflows/test.yml | 23 ++ .../dis-promrulegroups-operator/.gitignore | 27 ++ .../dis-promrulegroups-operator/.golangci.yml | 47 +++ .../dis-promrulegroups-operator/Dockerfile | 33 ++ services/dis-promrulegroups-operator/Makefile | 212 ++++++++++++ services/dis-promrulegroups-operator/PROJECT | 10 + .../dis-promrulegroups-operator/README.md | 114 +++++++ .../dis-promrulegroups-operator/cmd/main.go | 159 +++++++++ .../config/default/kustomization.yaml | 177 ++++++++++ .../config/default/manager_metrics_patch.yaml | 4 + .../config/default/metrics_service.yaml | 17 + .../config/manager/kustomization.yaml | 2 + .../config/manager/manager.yaml | 95 ++++++ .../network-policy/allow-metrics-traffic.yaml | 26 ++ .../config/network-policy/kustomization.yaml | 2 + .../config/prometheus/kustomization.yaml | 2 + .../config/prometheus/monitor.yaml | 30 ++ .../config/rbac/kustomization.yaml | 20 ++ .../config/rbac/leader_election_role.yaml | 40 +++ .../rbac/leader_election_role_binding.yaml | 15 + .../config/rbac/metrics_auth_role.yaml | 17 + .../rbac/metrics_auth_role_binding.yaml | 12 + .../config/rbac/metrics_reader_role.yaml | 9 + .../config/rbac/role.yaml | 11 + .../config/rbac/role_binding.yaml | 15 + .../config/rbac/service_account.yaml | 8 + services/dis-promrulegroups-operator/go.mod | 98 ++++++ services/dis-promrulegroups-operator/go.sum | 251 ++++++++++++++ .../hack/boilerplate.go.txt | 15 + .../test/e2e/e2e_suite_test.go | 120 +++++++ .../test/e2e/e2e_test.go | 307 ++++++++++++++++++ .../test/utils/utils.go | 251 ++++++++++++++ 37 files changed, 2278 insertions(+) create mode 100644 services/dis-promrulegroups-operator/.devcontainer/devcontainer.json create mode 100644 services/dis-promrulegroups-operator/.devcontainer/post-install.sh create mode 100644 services/dis-promrulegroups-operator/.dockerignore create mode 100644 services/dis-promrulegroups-operator/.github/workflows/lint.yml create mode 100644 services/dis-promrulegroups-operator/.github/workflows/test-e2e.yml create mode 100644 services/dis-promrulegroups-operator/.github/workflows/test.yml create mode 100644 services/dis-promrulegroups-operator/.gitignore create mode 100644 services/dis-promrulegroups-operator/.golangci.yml create mode 100644 services/dis-promrulegroups-operator/Dockerfile create mode 100644 services/dis-promrulegroups-operator/Makefile create mode 100644 services/dis-promrulegroups-operator/PROJECT create mode 100644 services/dis-promrulegroups-operator/README.md create mode 100644 services/dis-promrulegroups-operator/cmd/main.go create mode 100644 services/dis-promrulegroups-operator/config/default/kustomization.yaml create mode 100644 services/dis-promrulegroups-operator/config/default/manager_metrics_patch.yaml create mode 100644 services/dis-promrulegroups-operator/config/default/metrics_service.yaml create mode 100644 services/dis-promrulegroups-operator/config/manager/kustomization.yaml create mode 100644 
services/dis-promrulegroups-operator/config/manager/manager.yaml create mode 100644 services/dis-promrulegroups-operator/config/network-policy/allow-metrics-traffic.yaml create mode 100644 services/dis-promrulegroups-operator/config/network-policy/kustomization.yaml create mode 100644 services/dis-promrulegroups-operator/config/prometheus/kustomization.yaml create mode 100644 services/dis-promrulegroups-operator/config/prometheus/monitor.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/kustomization.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/leader_election_role.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/leader_election_role_binding.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/metrics_auth_role.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/metrics_auth_role_binding.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/metrics_reader_role.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/role.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/role_binding.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/service_account.yaml create mode 100644 services/dis-promrulegroups-operator/go.mod create mode 100644 services/dis-promrulegroups-operator/go.sum create mode 100644 services/dis-promrulegroups-operator/hack/boilerplate.go.txt create mode 100644 services/dis-promrulegroups-operator/test/e2e/e2e_suite_test.go create mode 100644 services/dis-promrulegroups-operator/test/e2e/e2e_test.go create mode 100644 services/dis-promrulegroups-operator/test/utils/utils.go diff --git a/services/dis-promrulegroups-operator/.devcontainer/devcontainer.json b/services/dis-promrulegroups-operator/.devcontainer/devcontainer.json new file mode 100644 index 00000000..e2cdc09c --- /dev/null +++ b/services/dis-promrulegroups-operator/.devcontainer/devcontainer.json @@ -0,0 +1,25 @@ +{ + "name": "Kubebuilder DevContainer", + "image": "golang:1.22", + "features": { + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/git:1": {} + }, + + "runArgs": ["--network=host"], + + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.shell.linux": "/bin/bash" + }, + "extensions": [ + "ms-kubernetes-tools.vscode-kubernetes-tools", + "ms-azuretools.vscode-docker" + ] + } + }, + + "onCreateCommand": "bash .devcontainer/post-install.sh" +} + diff --git a/services/dis-promrulegroups-operator/.devcontainer/post-install.sh b/services/dis-promrulegroups-operator/.devcontainer/post-install.sh new file mode 100644 index 00000000..265c43ee --- /dev/null +++ b/services/dis-promrulegroups-operator/.devcontainer/post-install.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -x + +curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 +chmod +x ./kind +mv ./kind /usr/local/bin/kind + +curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/amd64 +chmod +x kubebuilder +mv kubebuilder /usr/local/bin/ + +KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) +curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl" +chmod +x kubectl +mv kubectl /usr/local/bin/kubectl + +docker network create -d=bridge --subnet=172.19.0.0/24 kind + +kind version +kubebuilder version +docker --version +go version +kubectl version --client diff --git a/services/dis-promrulegroups-operator/.dockerignore 
b/services/dis-promrulegroups-operator/.dockerignore new file mode 100644 index 00000000..a3aab7af --- /dev/null +++ b/services/dis-promrulegroups-operator/.dockerignore @@ -0,0 +1,3 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. +bin/ diff --git a/services/dis-promrulegroups-operator/.github/workflows/lint.yml b/services/dis-promrulegroups-operator/.github/workflows/lint.yml new file mode 100644 index 00000000..b6967b35 --- /dev/null +++ b/services/dis-promrulegroups-operator/.github/workflows/lint.yml @@ -0,0 +1,23 @@ +name: Lint + +on: + push: + pull_request: + +jobs: + lint: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.22' + + - name: Run linter + uses: golangci/golangci-lint-action@v6 + with: + version: v1.59 diff --git a/services/dis-promrulegroups-operator/.github/workflows/test-e2e.yml b/services/dis-promrulegroups-operator/.github/workflows/test-e2e.yml new file mode 100644 index 00000000..87806440 --- /dev/null +++ b/services/dis-promrulegroups-operator/.github/workflows/test-e2e.yml @@ -0,0 +1,35 @@ +name: E2E Tests + +on: + push: + pull_request: + +jobs: + test-e2e: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.22' + + - name: Install the latest version of kind + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + + - name: Verify kind installation + run: kind version + + - name: Create kind cluster + run: kind create cluster + + - name: Running Test e2e + run: | + go mod tidy + make test-e2e diff --git a/services/dis-promrulegroups-operator/.github/workflows/test.yml b/services/dis-promrulegroups-operator/.github/workflows/test.yml new file mode 100644 index 00000000..7baf6579 --- /dev/null +++ b/services/dis-promrulegroups-operator/.github/workflows/test.yml @@ -0,0 +1,23 @@ +name: Tests + +on: + push: + pull_request: + +jobs: + test: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.22' + + - name: Running Tests + run: | + go mod tidy + make test diff --git a/services/dis-promrulegroups-operator/.gitignore b/services/dis-promrulegroups-operator/.gitignore new file mode 100644 index 00000000..ada68ff0 --- /dev/null +++ b/services/dis-promrulegroups-operator/.gitignore @@ -0,0 +1,27 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin/* +Dockerfile.cross + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Go workspace file +go.work + +# Kubernetes Generated files - skip generated files, except for vendored files +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +.vscode +*.swp +*.swo +*~ diff --git a/services/dis-promrulegroups-operator/.golangci.yml b/services/dis-promrulegroups-operator/.golangci.yml new file mode 100644 index 00000000..aac8a13f --- /dev/null +++ b/services/dis-promrulegroups-operator/.golangci.yml @@ -0,0 +1,47 @@ +run: + timeout: 5m + allow-parallel-runners: true + +issues: + # don't skip warning about doc comments + # don't exclude the default set of lint + 
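+ # (Setting exclude-use-default to false below makes golangci-lint report issues + # that its default exclusion patterns would otherwise hide, e.g. doc-comment warnings.)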
exclude-use-default: false + # restore some of the defaults + # (fill in the rest as needed) + exclude-rules: + - path: "api/*" + linters: + - lll + - path: "internal/*" + linters: + - dupl + - lll +linters: + disable-all: true + enable: + - dupl + - errcheck + - exportloopref + - ginkgolinter + - goconst + - gocyclo + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + +linters-settings: + revive: + rules: + - name: comment-spacings diff --git a/services/dis-promrulegroups-operator/Dockerfile b/services/dis-promrulegroups-operator/Dockerfile new file mode 100644 index 00000000..4ba18b68 --- /dev/null +++ b/services/dis-promrulegroups-operator/Dockerfile @@ -0,0 +1,33 @@ +# Build the manager binary +FROM golang:1.22 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +COPY api/ api/ +COPY internal/ internal/ + +# Build +# GOARCH is deliberately left without a default value so that the binary is built for the host where the +# command was called. For example, if we call make docker-build in a local env on Apple Silicon (M1), +# the docker BUILDPLATFORM arg will be linux/arm64, whereas for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we ensure that the container and the binary shipped in it have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/services/dis-promrulegroups-operator/Makefile b/services/dis-promrulegroups-operator/Makefile new file mode 100644 index 00000000..6ce70049 --- /dev/null +++ b/services/dis-promrulegroups-operator/Makefile @@ -0,0 +1,212 @@ +# Image URL to use for all image build/push targets +IMG ?= controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by the envtest binary. +ENVTEST_K8S_VERSION = 1.31.0 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker, which is +# scaffolded by default. However, you might want to replace it with another +# tool (e.g. podman). +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'.
The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# form xyz: ## something, and then pretty-formats the target and its help text. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# TODO(user): To use a different vendor for e2e tests, modify the setup under 'test/e2e'. +# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. +# Prometheus and CertManager are installed by default; skip with: +# - PROMETHEUS_INSTALL_SKIP=true +# - CERT_MANAGER_INSTALL_SKIP=true +.PHONY: test-e2e +test-e2e: manifests generate fmt vet ## Run the e2e tests. Expects an isolated environment using Kind. + @command -v kind >/dev/null 2>&1 || { \ + echo "Kind is not installed. Please install Kind manually."; \ + exit 1; \ + } + @kind get clusters | grep -q 'kind' || { \ + echo "No Kind cluster is running. Please start a Kind cluster before running the e2e tests."; \ + exit 1; \ + } + go test ./test/e2e/ -v -ginkgo.v + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (e.g. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms the manager image is built for, providing support for multiple +# architectures (e.g. make docker-buildx IMG=myregistry/myoperator:0.0.1).
To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:tag> then the export will fail) +# Consider using this option to provide solutions that are compatible with multiple platforms. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name dis-promrulegroups-operator-builder + $(CONTAINER_TOOL) buildx use dis-promrulegroups-operator-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm dis-promrulegroups-operator-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.4.3 +CONTROLLER_TOOLS_VERSION ?= v0.16.4 +ENVTEST_VERSION ?= release-0.19 +GOLANGCI_LINT_VERSION ?= v1.59.1 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
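+# Note: the tool versions above are declared with '?=', so they can be overridden
+# per invocation without editing this file, e.g. (hypothetical version):
+#   make kustomize KUSTOMIZE_VERSION=v5.4.2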
+$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# go-install-tool will 'go install' any package with a custom target and binary name, if it doesn't already exist +# $1 - target path with name of binary +# $2 - package URL to install +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef diff --git a/services/dis-promrulegroups-operator/PROJECT b/services/dis-promrulegroups-operator/PROJECT new file mode 100644 index 00000000..851ea3ac --- /dev/null +++ b/services/dis-promrulegroups-operator/PROJECT @@ -0,0 +1,10 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and to allow the plugins to work properly. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: dis.altinn.cloud +layout: +- go.kubebuilder.io/v4 +projectName: dis-promrulegroups-operator +repo: github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator +version: "3" diff --git a/services/dis-promrulegroups-operator/README.md b/services/dis-promrulegroups-operator/README.md new file mode 100644 index 00000000..2d5bd4e6 --- /dev/null +++ b/services/dis-promrulegroups-operator/README.md @@ -0,0 +1,114 @@ +# dis-promrulegroups-operator +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth paragraph about your project and overview of use + +## Getting Started + +### Prerequisites +- go version v1.22.0+ +- docker version 17.03+ +- kubectl version v1.11.3+ +- Access to a Kubernetes v1.11.3+ cluster + +### To Deploy on the cluster +**Build and push your image to the location specified by `IMG`:** + +```sh +make docker-build docker-push IMG=<some-registry>/dis-promrulegroups-operator:tag +``` + +**NOTE:** This image must be published to the registry you specified, +and your working environment must be able to pull it. +Make sure you have the proper permissions for the registry if the above commands don't work. + +**Install the CRDs into the cluster:** + +```sh +make install +``` + +**Deploy the Manager to the cluster with the image specified by `IMG`:** + +```sh +make deploy IMG=<some-registry>/dis-promrulegroups-operator:tag +``` + +> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin +privileges or be logged in as admin. + +**Create instances of your solution** +You can apply the samples (examples) from config/samples: + +```sh +kubectl apply -k config/samples/ +``` + +>**NOTE**: Ensure that the samples have default values to test them out.
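+
+To tie the steps above together, a complete build-and-deploy pass might look like this (hypothetical registry and tag; substitute your own):
+
+```sh
+make docker-build docker-push IMG=ghcr.io/example-org/dis-promrulegroups-operator:v0.1.0
+make install
+make deploy IMG=ghcr.io/example-org/dis-promrulegroups-operator:v0.1.0
+```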
+ +### To Uninstall +**Delete the instances (CRs) from the cluster:** + +```sh +kubectl delete -k config/samples/ +``` + +**Delete the APIs (CRDs) from the cluster:** + +```sh +make uninstall +``` + +**Undeploy the controller from the cluster:** + +```sh +make undeploy +``` + +## Project Distribution + +Following are the steps to build the installer and distribute this project to users. + +1. Build the installer for the image built and published in the registry: + +```sh +make build-installer IMG=<some-registry>/dis-promrulegroups-operator:tag +``` + +NOTE: The makefile target mentioned above generates an 'install.yaml' +file in the dist directory. This file contains all the resources built +with Kustomize, which are necessary to install this project without +its dependencies. + +2. Using the installer + +Users can just run `kubectl apply -f <URL for YAML BUNDLE>` to install the project, e.g.: + +```sh +kubectl apply -f https://raw.githubusercontent.com/<org>/dis-promrulegroups-operator/<tag or branch>/dist/install.yaml +``` + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +**NOTE:** Run `make help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/services/dis-promrulegroups-operator/cmd/main.go b/services/dis-promrulegroups-operator/cmd/main.go new file mode 100644 index 00000000..11d50f21 --- /dev/null +++ b/services/dis-promrulegroups-operator/cmd/main.go @@ -0,0 +1,159 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "crypto/tls" + "flag" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them.
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + var tlsOpts []func(*tls.Config) + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: tlsOpts, + }) + + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + // not provided, self-signed certificates will be generated by default. This option is not recommended for + // production environments as self-signed certificates do not offer the same level of trust and security + // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing + // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName + // to provide certificates, ensuring the server communicates using trusted and secure certificates. 
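+ // A minimal sketch of that alternative (hypothetical mount path; the + // certificate would typically come from cert-manager or a mounted Secret): + //   CertDir:  "/tmp/k8s-metrics-server/serving-certs", + //   CertName: "tls.crt", + //   KeyName:  "tls.key",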
+ TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC rules are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "26513e23.dis.altinn.cloud", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so it would be fine to enable this option. However, + // if you are doing or intend to do any operation, such as performing cleanups, + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/services/dis-promrulegroups-operator/config/default/kustomization.yaml b/services/dis-promrulegroups-operator/config/default/kustomization.yaml new file mode 100644 index 00000000..e5448012 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/default/kustomization.yaml @@ -0,0 +1,177 @@ +# Adds namespace to all resources. +namespace: dis-promrulegroups-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match the prefix (text before '-') of the namespace +# field above. +namePrefix: dis-promrulegroups-operator- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +#- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
+# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# 
delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you enable cert-manager +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true diff --git a/services/dis-promrulegroups-operator/config/default/manager_metrics_patch.yaml b/services/dis-promrulegroups-operator/config/default/manager_metrics_patch.yaml new file mode 100644 index 00000000..2aaef653 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/services/dis-promrulegroups-operator/config/default/metrics_service.yaml b/services/dis-promrulegroups-operator/config/default/metrics_service.yaml new file mode 100644 index 00000000..cb7525ed --- /dev/null +++ b/services/dis-promrulegroups-operator/config/default/metrics_service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager diff --git a/services/dis-promrulegroups-operator/config/manager/kustomization.yaml b/services/dis-promrulegroups-operator/config/manager/kustomization.yaml new file mode 100644 index 00000000..5c5f0b84 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/services/dis-promrulegroups-operator/config/manager/manager.yaml b/services/dis-promrulegroups-operator/config/manager/manager.yaml new file mode 100644 index 00000000..b69ebbec --- /dev/null +++ b/services/dis-promrulegroups-operator/config/manager/manager.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. 
You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendor versions which do NOT support this field by default (e.g. OpenShift < 4.11). + # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/services/dis-promrulegroups-operator/config/network-policy/allow-metrics-traffic.yaml b/services/dis-promrulegroups-operator/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 00000000..52a52ab3 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,26 @@ +# This NetworkPolicy allows ingress traffic +# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those +# namespaces are able to gather data from the metrics endpoint.
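+# For example (assuming your Prometheus scrape clients run in a namespace named
+# 'monitoring'), that namespace can be opted in with:
+#   kubectl label namespace monitoring metrics=enabled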
+apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/services/dis-promrulegroups-operator/config/network-policy/kustomization.yaml b/services/dis-promrulegroups-operator/config/network-policy/kustomization.yaml new file mode 100644 index 00000000..ec0fb5e5 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/services/dis-promrulegroups-operator/config/prometheus/kustomization.yaml b/services/dis-promrulegroups-operator/config/prometheus/kustomization.yaml new file mode 100644 index 00000000..ed137168 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/services/dis-promrulegroups-operator/config/prometheus/monitor.yaml b/services/dis-promrulegroups-operator/config/prometheus/monitor.yaml new file mode 100644 index 00000000..fa87ba3d --- /dev/null +++ b/services/dis-promrulegroups-operator/config/prometheus/monitor.yaml @@ -0,0 +1,30 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification. This poses a significant security risk by making the system vulnerable to + # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between + # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, + # compromising the integrity and confidentiality of the information. + # Please use the following options for secure configurations: + # caFile: /etc/metrics-certs/ca.crt + # certFile: /etc/metrics-certs/tls.crt + # keyFile: /etc/metrics-certs/tls.key + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml b/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml new file mode 100644 index 00000000..5619aa00 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml @@ -0,0 +1,20 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. 
+- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. +# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml diff --git a/services/dis-promrulegroups-operator/config/rbac/leader_election_role.yaml b/services/dis-promrulegroups-operator/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..61543355 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/services/dis-promrulegroups-operator/config/rbac/leader_election_role_binding.yaml b/services/dis-promrulegroups-operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..8d319ea1 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/services/dis-promrulegroups-operator/config/rbac/metrics_auth_role.yaml b/services/dis-promrulegroups-operator/config/rbac/metrics_auth_role.yaml new file mode 100644 index 00000000..32d2e4ec --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/services/dis-promrulegroups-operator/config/rbac/metrics_auth_role_binding.yaml b/services/dis-promrulegroups-operator/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 00000000..e775d67f --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/services/dis-promrulegroups-operator/config/rbac/metrics_reader_role.yaml 
b/services/dis-promrulegroups-operator/config/rbac/metrics_reader_role.yaml new file mode 100644 index 00000000..51a75db4 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/services/dis-promrulegroups-operator/config/rbac/role.yaml b/services/dis-promrulegroups-operator/config/rbac/role.yaml new file mode 100644 index 00000000..d6b6321d --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/role.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: manager-role +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] diff --git a/services/dis-promrulegroups-operator/config/rbac/role_binding.yaml b/services/dis-promrulegroups-operator/config/rbac/role_binding.yaml new file mode 100644 index 00000000..a4203be1 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/services/dis-promrulegroups-operator/config/rbac/service_account.yaml b/services/dis-promrulegroups-operator/config/rbac/service_account.yaml new file mode 100644 index 00000000..3d885340 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/services/dis-promrulegroups-operator/go.mod b/services/dis-promrulegroups-operator/go.mod new file mode 100644 index 00000000..992171da --- /dev/null +++ b/services/dis-promrulegroups-operator/go.mod @@ -0,0 +1,98 @@ +module github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator + +go 1.22.0 + +require ( + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 + k8s.io/apimachinery v0.31.0 + k8s.io/client-go v0.31.0 + sigs.k8s.io/controller-runtime v0.19.0 +) + +require ( + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference 
v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.20.1 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/apiserver v0.31.0 // indirect + k8s.io/component-base v0.31.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + 
sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/services/dis-promrulegroups-operator/go.sum b/services/dis-promrulegroups-operator/go.sum new file mode 100644 index 00000000..a8ec01da --- /dev/null +++ b/services/dis-promrulegroups-operator/go.sum @@ -0,0 +1,251 @@ +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 
h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= 
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= 
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 
h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= +k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= +k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/services/dis-promrulegroups-operator/hack/boilerplate.go.txt b/services/dis-promrulegroups-operator/hack/boilerplate.go.txt new file mode 100644 index 00000000..ff72ff2a --- /dev/null +++ b/services/dis-promrulegroups-operator/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/services/dis-promrulegroups-operator/test/e2e/e2e_suite_test.go b/services/dis-promrulegroups-operator/test/e2e/e2e_suite_test.go new file mode 100644 index 00000000..82cc3350 --- /dev/null +++ b/services/dis-promrulegroups-operator/test/e2e/e2e_suite_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2024. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator/test/utils"
+)
+
+var (
+	// Optional Environment Variables:
+	// - PROMETHEUS_INSTALL_SKIP=true: Skips Prometheus Operator installation during test setup.
+	// - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
+	// These variables are useful if Prometheus or CertManager is already installed, avoiding
+	// re-installation and conflicts; an example invocation is shown in BeforeSuite below.
+	skipPrometheusInstall  = os.Getenv("PROMETHEUS_INSTALL_SKIP") == "true"
+	skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
+	// isPrometheusOperatorAlreadyInstalled will be set to true when Prometheus CRDs are found on the cluster
+	isPrometheusOperatorAlreadyInstalled = false
+	// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
+	isCertManagerAlreadyInstalled = false
+
+	// projectImage is the name of the image which will be built and loaded
+	// with the source code changes to be tested.
+	projectImage = "example.com/dis-promrulegroups-operator:v0.0.1"
+)
+
+// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
+// temporary environment to validate project changes and are intended to be run in CI jobs.
+// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
+// CertManager and Prometheus.
+func TestE2E(t *testing.T) {
+	RegisterFailHandler(Fail)
+	_, _ = fmt.Fprintf(GinkgoWriter, "Starting dis-promrulegroups-operator integration test suite\n")
+	RunSpecs(t, "e2e suite")
+}
+
+var _ = BeforeSuite(func() {
+	By("Ensure that Prometheus is enabled")
+	_ = utils.UncommentCode("config/default/kustomization.yaml", "#- ../prometheus", "#")
+
+	By("generating files")
+	cmd := exec.Command("make", "generate")
+	_, err := utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make generate")
+
+	By("generating manifests")
+	cmd = exec.Command("make", "manifests")
+	_, err = utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make manifests")
+
+	By("building the manager(Operator) image")
+	cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
+	_, err = utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")
+
+	// TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
+	// built and available before running the tests. Also, remove the following block.
+	By("loading the manager(Operator) image on Kind")
+	err = utils.LoadImageToKindClusterWithName(projectImage)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")
+
+	// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
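+	// When targeting a long-lived cluster instead, the install steps below can be skipped,
+	// e.g. (illustrative invocation, assuming the scaffolded Makefile's test-e2e target):
+	//
+	//	PROMETHEUS_INSTALL_SKIP=true CERT_MANAGER_INSTALL_SKIP=true make test-e2e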
+ // To prevent errors when tests run in environments with Prometheus or CertManager already installed, + // we check for their presence before execution. + // Setup Prometheus and CertManager before the suite if not skipped and if not already installed + if !skipPrometheusInstall { + By("checking if prometheus is installed already") + isPrometheusOperatorAlreadyInstalled = utils.IsPrometheusCRDsInstalled() + if !isPrometheusOperatorAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Installing Prometheus Operator...\n") + Expect(utils.InstallPrometheusOperator()).To(Succeed(), "Failed to install Prometheus Operator") + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "WARNING: Prometheus Operator is already installed. Skipping installation...\n") + } + } + if !skipCertManagerInstall { + By("checking if cert manager is installed already") + isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled() + if !isCertManagerAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n") + Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager") + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n") + } + } +}) + +var _ = AfterSuite(func() { + // Teardown Prometheus and CertManager after the suite if not skipped and if they were not already installed + if !skipPrometheusInstall && !isPrometheusOperatorAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling Prometheus Operator...\n") + utils.UninstallPrometheusOperator() + } + if !skipCertManagerInstall && !isCertManagerAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n") + utils.UninstallCertManager() + } +}) diff --git a/services/dis-promrulegroups-operator/test/e2e/e2e_test.go b/services/dis-promrulegroups-operator/test/e2e/e2e_test.go new file mode 100644 index 00000000..bfc6a91d --- /dev/null +++ b/services/dis-promrulegroups-operator/test/e2e/e2e_test.go @@ -0,0 +1,307 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator/test/utils" +) + +// namespace where the project is deployed in +const namespace = "dis-promrulegroups-operator-system" + +// serviceAccountName created for the project +const serviceAccountName = "dis-promrulegroups-operator-controller-manager" + +// metricsServiceName is the name of the metrics service of the project +const metricsServiceName = "dis-promrulegroups-operator-controller-manager-metrics-service" + +// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data +const metricsRoleBindingName = "dis-promrulegroups-operator-metrics-binding" + +var _ = Describe("Manager", Ordered, func() { + var controllerPodName string + + // Before running the tests, set up the environment by creating the namespace, + // installing CRDs, and deploying the controller. + BeforeAll(func() { + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", namespace) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create namespace") + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs") + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager") + }) + + // After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs, + // and deleting the namespace. + AfterAll(func() { + By("cleaning up the curl pod for metrics") + cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace) + _, _ = utils.Run(cmd) + + By("undeploying the controller-manager") + cmd = exec.Command("make", "undeploy") + _, _ = utils.Run(cmd) + + By("uninstalling CRDs") + cmd = exec.Command("make", "uninstall") + _, _ = utils.Run(cmd) + + By("removing manager namespace") + cmd = exec.Command("kubectl", "delete", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + // After each test, check for failures and collect logs, events, + // and pod descriptions for debugging. 
+	AfterEach(func() {
+		specReport := CurrentSpecReport()
+		if specReport.Failed() {
+			By("Fetching controller manager pod logs")
+			cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
+			controllerLogs, err := utils.Run(cmd)
+			if err == nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs)
+			} else {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err)
+			}
+
+			By("Fetching Kubernetes events")
+			cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp")
+			eventsOutput, err := utils.Run(cmd)
+			if err == nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput)
+			} else {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err)
+			}
+
+			By("Fetching curl-metrics logs")
+			cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
+			metricsOutput, err := utils.Run(cmd)
+			if err == nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput)
+			} else {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err)
+			}
+
+			By("Fetching controller manager pod description")
+			cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace)
+			podDescription, err := utils.Run(cmd)
+			if err == nil {
+				fmt.Println("Pod description:\n", podDescription)
+			} else {
+				fmt.Println("Failed to describe controller pod")
+			}
+		}
+	})
+
+	SetDefaultEventuallyTimeout(2 * time.Minute)
+	SetDefaultEventuallyPollingInterval(time.Second)
+
+	Context("Manager", func() {
+		It("should run successfully", func() {
+			By("validating that the controller-manager pod is running as expected")
+			verifyControllerUp := func(g Gomega) {
+				// Get the name of the controller-manager pod
+				cmd := exec.Command("kubectl", "get",
+					"pods", "-l", "control-plane=controller-manager",
+					"-o", "go-template={{ range .items }}"+
+						"{{ if not .metadata.deletionTimestamp }}"+
+						"{{ .metadata.name }}"+
+						"{{ \"\\n\" }}{{ end }}{{ end }}",
+					"-n", namespace,
+				)
+
+				podOutput, err := utils.Run(cmd)
+				g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information")
+				podNames := utils.GetNonEmptyLines(podOutput)
+				g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running")
+				controllerPodName = podNames[0]
+				g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))
+
+				// Validate the pod's status
+				cmd = exec.Command("kubectl", "get",
+					"pods", controllerPodName, "-o", "jsonpath={.status.phase}",
+					"-n", namespace,
+				)
+				output, err := utils.Run(cmd)
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status")
+			}
+			Eventually(verifyControllerUp).Should(Succeed())
+		})
+
+		It("should ensure the metrics endpoint is serving metrics", func() {
+			By("creating a ClusterRoleBinding for the service account to allow access to metrics")
+			cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName,
+				"--clusterrole=dis-promrulegroups-operator-metrics-reader",
+				fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName),
+			)
+			_, err := utils.Run(cmd)
+			Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding")
+
+			By("validating that the metrics service is available")
+			cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace)
+			_, err = utils.Run(cmd)
+			
Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("validating that the ServiceMonitor for Prometheus is applied in the namespace") + cmd = exec.Command("kubectl", "get", "ServiceMonitor", "-n", namespace) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "ServiceMonitor should exist") + + By("getting the service account token") + token, err := serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("waiting for the metrics endpoint to be ready") + verifyMetricsEndpointReady := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready") + } + Eventually(verifyMetricsEndpointReady).Should(Succeed()) + + By("verifying that the controller manager is serving the metrics server") + verifyMetricsServerStarted := func(g Gomega) { + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"), + "Metrics server not yet started") + } + Eventually(verifyMetricsServerStarted).Should(Succeed()) + + By("creating the curl-metrics pod to access the metrics endpoint") + cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", + "--namespace", namespace, + "--image=curlimages/curl:7.78.0", + "--", "/bin/sh", "-c", fmt.Sprintf( + "curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics", + token, metricsServiceName, namespace)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", + "-o", "jsonpath={.status.phase}", + "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + metricsOutput := getMetricsOutput() + Expect(metricsOutput).To(ContainSubstring( + "controller_runtime_reconcile_total", + )) + }) + + // +kubebuilder:scaffold:e2e-webhooks-checks + + // TODO: Customize the e2e test suite with scenarios specific to your project. + // Consider applying sample/CR(s) and check their status and/or verifying + // the reconciliation by using the metrics, i.e.: + // metricsOutput := getMetricsOutput() + // Expect(metricsOutput).To(ContainSubstring( + // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, + // strings.ToLower(), + // )) + }) +}) + +// serviceAccountToken returns a token for the specified service account in the given namespace. +// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request +// and parsing the resulting token from the API response. 
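+// The request issued below is roughly equivalent to this manual invocation (values are illustrative):
+//
+//	kubectl create --raw /api/v1/namespaces/<namespace>/serviceaccounts/<name>/token -f tokenrequest.json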
+func serviceAccountToken() (string, error) { + const tokenRequestRawString = `{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenRequest" + }` + + // Temporary file to store the token request + secretName := fmt.Sprintf("%s-token-request", serviceAccountName) + tokenRequestFile := filepath.Join("/tmp", secretName) + err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644)) + if err != nil { + return "", err + } + + var out string + verifyTokenCreation := func(g Gomega) { + // Execute kubectl command to create the token + cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( + "/api/v1/namespaces/%s/serviceaccounts/%s/token", + namespace, + serviceAccountName, + ), "-f", tokenRequestFile) + + output, err := cmd.CombinedOutput() + g.Expect(err).NotTo(HaveOccurred()) + + // Parse the JSON output to extract the token + var token tokenRequest + err = json.Unmarshal([]byte(output), &token) + g.Expect(err).NotTo(HaveOccurred()) + + out = token.Status.Token + } + Eventually(verifyTokenCreation).Should(Succeed()) + + return out, err +} + +// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint. +func getMetricsOutput() string { + By("getting the curl-metrics logs") + cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + metricsOutput, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) + return metricsOutput +} + +// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response, +// containing only the token field that we need to extract. +type tokenRequest struct { + Status struct { + Token string `json:"token"` + } `json:"status"` +} diff --git a/services/dis-promrulegroups-operator/test/utils/utils.go b/services/dis-promrulegroups-operator/test/utils/utils.go new file mode 100644 index 00000000..c3d51cec --- /dev/null +++ b/services/dis-promrulegroups-operator/test/utils/utils.go @@ -0,0 +1,251 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + . 
"github.com/onsi/ginkgo/v2" //nolint:golint,revive +) + +const ( + prometheusOperatorVersion = "v0.77.1" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.16.0" + certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) (string, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) + } + + return string(output), nil +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. +func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "create", "-f", url) + _, err := Run(cmd) + return err +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed +// by verifying the existence of key CRDs related to Prometheus. +func IsPrometheusCRDsInstalled() bool { + // List of common Prometheus CRDs + prometheusCRDs := []string{ + "prometheuses.monitoring.coreos.com", + "prometheusrules.monitoring.coreos.com", + "prometheusagents.monitoring.coreos.com", + } + + cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name") + output, err := Run(cmd) + if err != nil { + return false + } + crdList := GetNonEmptyLines(string(output)) + for _, crd := range prometheusCRDs { + for _, line := range crdList { + if strings.Contains(line, crd) { + return true + } + } + } + + return false +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. + cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed +// by verifying the existence of key CRDs related to Cert Manager. 
+func IsCertManagerCRDsInstalled() bool {
+	// List of common Cert Manager CRDs
+	certManagerCRDs := []string{
+		"certificates.cert-manager.io",
+		"issuers.cert-manager.io",
+		"clusterissuers.cert-manager.io",
+		"certificaterequests.cert-manager.io",
+		"orders.acme.cert-manager.io",
+		"challenges.acme.cert-manager.io",
+	}
+
+	// Execute the kubectl command to get all CRDs
+	cmd := exec.Command("kubectl", "get", "crds")
+	output, err := Run(cmd)
+	if err != nil {
+		return false
+	}
+
+	// Check if any of the Cert Manager CRDs are present
+	crdList := GetNonEmptyLines(output)
+	for _, crd := range certManagerCRDs {
+		for _, line := range crdList {
+			if strings.Contains(line, crd) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// LoadImageToKindClusterWithName loads a local docker image into the kind cluster
+func LoadImageToKindClusterWithName(name string) error {
+	cluster := "kind"
+	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+		cluster = v
+	}
+	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+	cmd := exec.Command("kind", kindOptions...)
+	_, err := Run(cmd)
+	return err
+}
+
+// GetNonEmptyLines splits the given command output on line breaks
+// and returns the non-empty lines.
+func GetNonEmptyLines(output string) []string {
+	var res []string
+	elements := strings.Split(output, "\n")
+	for _, element := range elements {
+		if element != "" {
+			res = append(res, element)
+		}
+	}
+
+	return res
+}
+
+// GetProjectDir returns the directory where the project is rooted
+func GetProjectDir() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return wd, err
+	}
+	wd = strings.Replace(wd, "/test/e2e", "", -1)
+	return wd, nil
+}
+
+// UncommentCode searches for target in the file and removes the comment prefix
+// from the target content. The target content may span multiple lines.
+func UncommentCode(filename, target, prefix string) error {
+	// false positive
+	// nolint:gosec
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return err
+	}
+	strContent := string(content)
+
+	idx := strings.Index(strContent, target)
+	if idx < 0 {
+		return fmt.Errorf("unable to find the code %s to be uncommented", target)
+	}
+
+	out := new(bytes.Buffer)
+	_, err = out.Write(content[:idx])
+	if err != nil {
+		return err
+	}
+
+	scanner := bufio.NewScanner(bytes.NewBufferString(target))
+	if !scanner.Scan() {
+		return nil
+	}
+	for {
+		_, err := out.WriteString(strings.TrimPrefix(scanner.Text(), prefix))
+		if err != nil {
+			return err
+		}
+		// Avoid writing a newline in case the previous line was the last in target.
+ if !scanner.Scan() { + break + } + if _, err := out.WriteString("\n"); err != nil { + return err + } + } + + _, err = out.Write(content[idx+len(target):]) + if err != nil { + return err + } + // false positive + // nolint:gosec + return os.WriteFile(filename, out.Bytes(), 0644) +} From 63151d518a27c69c2b54b01274e443a937860b5c Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 07:53:01 +0100 Subject: [PATCH 02/37] generate the dist folder make build-installer --- .../config/manager/kustomization.yaml | 6 + .../dist/install.yaml | 226 ++++++++++++++++++ 2 files changed, 232 insertions(+) create mode 100644 services/dis-promrulegroups-operator/dist/install.yaml diff --git a/services/dis-promrulegroups-operator/config/manager/kustomization.yaml b/services/dis-promrulegroups-operator/config/manager/kustomization.yaml index 5c5f0b84..ad13e96b 100644 --- a/services/dis-promrulegroups-operator/config/manager/kustomization.yaml +++ b/services/dis-promrulegroups-operator/config/manager/kustomization.yaml @@ -1,2 +1,8 @@ resources: - manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: controller + newTag: latest diff --git a/services/dis-promrulegroups-operator/dist/install.yaml b/services/dis-promrulegroups-operator/dist/install.yaml new file mode 100644 index 00000000..4e102f31 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/install.yaml @@ -0,0 +1,226 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + control-plane: controller-manager + name: dis-promrulegroups-operator-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + name: dis-promrulegroups-operator-controller-manager + namespace: dis-promrulegroups-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + name: dis-promrulegroups-operator-leader-election-role + namespace: dis-promrulegroups-operator-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + name: dis-promrulegroups-operator-manager-role +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dis-promrulegroups-operator-metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dis-promrulegroups-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding 
+metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + name: dis-promrulegroups-operator-leader-election-rolebinding + namespace: dis-promrulegroups-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: dis-promrulegroups-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: dis-promrulegroups-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + name: dis-promrulegroups-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dis-promrulegroups-operator-manager-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: dis-promrulegroups-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: dis-promrulegroups-operator-metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dis-promrulegroups-operator-metrics-auth-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: dis-promrulegroups-operator-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + control-plane: controller-manager + name: dis-promrulegroups-operator-controller-manager-metrics-service + namespace: dis-promrulegroups-operator-system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: dis-promrulegroups-operator + control-plane: controller-manager + name: dis-promrulegroups-operator-controller-manager + namespace: dis-promrulegroups-operator-system +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + containers: + - args: + - --metrics-bind-address=:8443 + - --leader-elect + - --health-probe-bind-address=:8081 + command: + - /manager + image: controller:latest + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: dis-promrulegroups-operator-controller-manager + terminationGracePeriodSeconds: 10 From 7728238eb34594d309bdc56e72a3115663e59c01 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:30:05 +0100 Subject: [PATCH 03/37] create the controller kubebuilder create api --group monitoring.coreos.com --version v1 --kind PrometheusRule --controller --resource=false --external-api-path="github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" make build-installer --- 
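Note: because the command passes --resource=false with --external-api-path, no new CRD is
scaffolded; the controller is wired against the existing PrometheusRule type from
prometheus-operator. As a rough sketch, the generated reconciler skeleton for such an
external type is expected to look like the following (illustrative only; the actual
generated internal/controller/prometheusrule_controller.go may differ):

package controller

import (
	"context"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// PrometheusRuleReconciler reconciles the external PrometheusRule objects.
type PrometheusRuleReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Fetch the PrometheusRule; the actual reconciliation logic lands later in the series.
	var rule monitoringv1.PrometheusRule
	if err := r.Get(ctx, req.NamespacedName, &rule); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	return ctrl.Result{}, nil
}

func (r *PrometheusRuleReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&monitoringv1.PrometheusRule{}).
		Complete(r)
}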
services/dis-promrulegroups-operator/PROJECT | 7 ++ .../dis-promrulegroups-operator/cmd/main.go | 11 +++ .../config/rbac/role.yaml | 33 +++++-- .../dist/install.yaml | 25 ++++- services/dis-promrulegroups-operator/go.mod | 29 +++--- services/dis-promrulegroups-operator/go.sum | 50 +++++----- .../controller/prometheusrule_controller.go | 62 ++++++++++++ .../prometheusrule_controller_test.go | 32 +++++++ .../internal/controller/suite_test.go | 95 +++++++++++++++++++ 9 files changed, 296 insertions(+), 48 deletions(-) create mode 100644 services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go create mode 100644 services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go create mode 100644 services/dis-promrulegroups-operator/internal/controller/suite_test.go diff --git a/services/dis-promrulegroups-operator/PROJECT b/services/dis-promrulegroups-operator/PROJECT index 851ea3ac..b00589d7 100644 --- a/services/dis-promrulegroups-operator/PROJECT +++ b/services/dis-promrulegroups-operator/PROJECT @@ -7,4 +7,11 @@ layout: - go.kubebuilder.io/v4 projectName: dis-promrulegroups-operator repo: github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator +resources: +- controller: true + external: true + group: monitoring.coreos.com + kind: PrometheusRule + path: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 + version: v1 version: "3" diff --git a/services/dis-promrulegroups-operator/cmd/main.go b/services/dis-promrulegroups-operator/cmd/main.go index 11d50f21..92111691 100644 --- a/services/dis-promrulegroups-operator/cmd/main.go +++ b/services/dis-promrulegroups-operator/cmd/main.go @@ -25,6 +25,7 @@ import ( // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" + monitoringcoreoscomv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -34,6 +35,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" + + "github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator/internal/controller" // +kubebuilder:scaffold:imports ) @@ -45,6 +48,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(monitoringcoreoscomv1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -140,6 +144,13 @@ func main() { os.Exit(1) } + if err = (&controller.PrometheusRuleReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "PrometheusRule") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/services/dis-promrulegroups-operator/config/rbac/role.yaml b/services/dis-promrulegroups-operator/config/rbac/role.yaml index d6b6321d..4d45543c 100644 --- a/services/dis-promrulegroups-operator/config/rbac/role.yaml +++ b/services/dis-promrulegroups-operator/config/rbac/role.yaml @@ -1,11 +1,32 @@ +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - labels: - app.kubernetes.io/name: dis-promrulegroups-operator - app.kubernetes.io/managed-by: kustomize name: manager-role rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/finalizers + verbs: + - update +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/status + verbs: + - get + - patch + - update diff --git a/services/dis-promrulegroups-operator/dist/install.yaml b/services/dis-promrulegroups-operator/dist/install.yaml index 4e102f31..e88c6cba 100644 --- a/services/dis-promrulegroups-operator/dist/install.yaml +++ b/services/dis-promrulegroups-operator/dist/install.yaml @@ -60,19 +60,34 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: dis-promrulegroups-operator name: dis-promrulegroups-operator-manager-role rules: - apiGroups: - - "" + - monitoring.coreos.com resources: - - pods + - prometheusrules verbs: + - create + - delete - get - list + - patch + - update - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/finalizers + verbs: + - update +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/services/dis-promrulegroups-operator/go.mod b/services/dis-promrulegroups-operator/go.mod index 992171da..a7eb890d 100644 --- a/services/dis-promrulegroups-operator/go.mod +++ b/services/dis-promrulegroups-operator/go.mod @@ -1,12 +1,15 @@ module github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator -go 1.22.0 +go 1.23 + +toolchain go1.23.3 require ( github.com/onsi/ginkgo/v2 v2.19.0 
github.com/onsi/gomega v1.33.1 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 sigs.k8s.io/controller-runtime v0.19.0 ) @@ -68,12 +71,12 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -84,13 +87,13 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.31.0 // indirect - k8s.io/apiextensions-apiserver v0.31.0 // indirect - k8s.io/apiserver v0.31.0 // indirect - k8s.io/component-base v0.31.0 // indirect + k8s.io/api v0.31.2 // indirect + k8s.io/apiextensions-apiserver v0.31.2 // indirect + k8s.io/apiserver v0.31.2 // indirect + k8s.io/component-base v0.31.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect diff --git a/services/dis-promrulegroups-operator/go.sum b/services/dis-promrulegroups-operator/go.sum index a8ec01da..27d83ccd 100644 --- a/services/dis-promrulegroups-operator/go.sum +++ b/services/dis-promrulegroups-operator/go.sum @@ -101,6 +101,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 h1:Fm9Z+FabnB+6EoGq15j+pyLmaK6hYrYOpBlTzOLTQ+E= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -165,26 +167,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -221,24 +223,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= 
-k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= -k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= -k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= -k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4= +k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA= +k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go new file mode 100644 index 00000000..11ceb170 --- /dev/null +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go @@ -0,0 +1,62 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + + monitoringcoreoscomv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// PrometheusRuleReconciler reconciles a PrometheusRule object +type PrometheusRuleReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the PrometheusRule object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile +func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PrometheusRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&monitoringcoreoscomv1.PrometheusRule{}). + Named("prometheusrule"). + Complete(r) +} diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go new file mode 100644 index 00000000..4fcf203f --- /dev/null +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("PrometheusRule Controller", func() { + Context("When reconciling a resource", func() { + + It("should successfully reconcile the resource", func() { + + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/services/dis-promrulegroups-operator/internal/controller/suite_test.go b/services/dis-promrulegroups-operator/internal/controller/suite_test.go new file mode 100644 index 00000000..a237e818 --- /dev/null +++ b/services/dis-promrulegroups-operator/internal/controller/suite_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2024. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	monitoringcoreoscomv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var ctx context.Context
+var cancel context.CancelFunc
+
+func TestControllers(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	ctx, cancel = context.WithCancel(context.TODO())
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: false,
+
+		// The BinaryAssetsDirectory is only required if you want to run the tests directly
+		// without calling the makefile target test. If not set, it will look for the
+		// default path defined in controller-runtime, which is /usr/local/kubebuilder/.
+		// Note that you must have the required binaries set up under the bin directory to run
+		// the tests directly. When we run make test, they are set up and used automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
+			fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
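+	// Note: envtest does not talk to a real cluster; Start() launches a local
+	// kube-apiserver and etcd using the binaries under BinaryAssetsDirectory
+	// and returns a rest.Config pointing at that API server.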
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = monitoringcoreoscomv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) From 28396c75c9b88166a113b10f5fa7777cb2db8a0e Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:49:42 +0100 Subject: [PATCH 04/37] update the rbac permissions rename monitoringcoreoscomv1 to monitoringv1 make build-installer --- services/dis-promrulegroups-operator/cmd/main.go | 4 ++-- .../dis-promrulegroups-operator/config/rbac/role.yaml | 4 ---- services/dis-promrulegroups-operator/dist/install.yaml | 4 ---- .../internal/controller/prometheusrule_controller.go | 8 ++++---- .../internal/controller/suite_test.go | 4 ++-- 5 files changed, 8 insertions(+), 16 deletions(-) diff --git a/services/dis-promrulegroups-operator/cmd/main.go b/services/dis-promrulegroups-operator/cmd/main.go index 92111691..7b4ceceb 100644 --- a/services/dis-promrulegroups-operator/cmd/main.go +++ b/services/dis-promrulegroups-operator/cmd/main.go @@ -25,7 +25,7 @@ import ( // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" - monitoringcoreoscomv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -48,7 +48,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(monitoringcoreoscomv1.AddToScheme(scheme)) + utilruntime.Must(monitoringv1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } diff --git a/services/dis-promrulegroups-operator/config/rbac/role.yaml b/services/dis-promrulegroups-operator/config/rbac/role.yaml index 4d45543c..61d43680 100644 --- a/services/dis-promrulegroups-operator/config/rbac/role.yaml +++ b/services/dis-promrulegroups-operator/config/rbac/role.yaml @@ -9,8 +9,6 @@ rules: resources: - prometheusrules verbs: - - create - - delete - get - list - patch @@ -28,5 +26,3 @@ rules: - prometheusrules/status verbs: - get - - patch - - update diff --git a/services/dis-promrulegroups-operator/dist/install.yaml b/services/dis-promrulegroups-operator/dist/install.yaml index e88c6cba..7875fe0b 100644 --- a/services/dis-promrulegroups-operator/dist/install.yaml +++ b/services/dis-promrulegroups-operator/dist/install.yaml @@ -67,8 +67,6 @@ rules: resources: - prometheusrules verbs: - - create - - delete - get - list - patch @@ -86,8 +84,6 @@ rules: - prometheusrules/status verbs: - get - - patch - - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go index 11ceb170..743d34a7 100644 --- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go @@ 
-19,7 +19,7 @@ package controller import ( "context" - monitoringcoreoscomv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,8 +32,8 @@ type PrometheusRuleReconciler struct { Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/status,verbs=get // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -56,7 +56,7 @@ func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reque // SetupWithManager sets up the controller with the Manager. func (r *PrometheusRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&monitoringcoreoscomv1.PrometheusRule{}). + For(&monitoringv1.PrometheusRule{}). Named("prometheusrule"). Complete(r) } diff --git a/services/dis-promrulegroups-operator/internal/controller/suite_test.go b/services/dis-promrulegroups-operator/internal/controller/suite_test.go index a237e818..423c7672 100644 --- a/services/dis-promrulegroups-operator/internal/controller/suite_test.go +++ b/services/dis-promrulegroups-operator/internal/controller/suite_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - monitoringcoreoscomv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -76,7 +76,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = monitoringcoreoscomv1.AddToScheme(scheme.Scheme) + err = monitoringv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme From 10e84384f2e70338374d0cb88eb4877586cfe218 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:54:36 +0100 Subject: [PATCH 05/37] update Dockerfile to include the Azure tool --- .../dis-promrulegroups-operator/Dockerfile | 18 +++++++--- .../Dockerfile.bak | 33 +++++++++++++++++++ 2 files changed, 46 insertions(+), 5 deletions(-) create mode 100644 services/dis-promrulegroups-operator/Dockerfile.bak diff --git a/services/dis-promrulegroups-operator/Dockerfile b/services/dis-promrulegroups-operator/Dockerfile index 4ba18b68..9b65c6fb 100644 --- a/services/dis-promrulegroups-operator/Dockerfile +++ b/services/dis-promrulegroups-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.22 AS builder +FROM golang:1.23 AS builder1 ARG TARGETOS ARG TARGETARCH @@ -13,7 +13,6 @@ RUN go mod download # Copy the go source COPY cmd/main.go cmd/main.go -COPY api/ api/ COPY internal/ internal/ # Build @@ -23,11 +22,20 @@ COPY internal/ internal/ # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +FROM node:20 AS builder2 +WORKDIR /tool +RUN npm i --omit=dev https://gitpkg.now.sh/monteiro-renato/prometheus-collector/tools/az-prom-rules-converter?with-piped-input + + # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +FROM gcr.io/distroless/nodejs22-debian12:nonroot WORKDIR / -COPY --from=builder /workspace/manager . +COPY --from=builder1 /workspace/manager . +COPY --from=builder2 /tool /tool/. +ENV PATH="$PATH:nodejs/bin/" +ENV ENVIRONMENT="prod" USER 65532:65532 -ENTRYPOINT ["/manager"] +ENTRYPOINT ["/manager"] \ No newline at end of file diff --git a/services/dis-promrulegroups-operator/Dockerfile.bak b/services/dis-promrulegroups-operator/Dockerfile.bak new file mode 100644 index 00000000..4ba18b68 --- /dev/null +++ b/services/dis-promrulegroups-operator/Dockerfile.bak @@ -0,0 +1,33 @@ +# Build the manager binary +FROM golang:1.22 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +COPY api/ api/ +COPY internal/ internal/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. 
For example, if we call make docker-build in a local env with an Apple Silicon (M1) CPU,
+# the docker BUILDPLATFORM arg will be linux/arm64, while for Apple x86 it will be linux/amd64. Therefore,
+# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
+RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go
+
+# Use distroless as minimal base image to package the manager binary
+# Refer to https://github.com/GoogleContainerTools/distroless for more details
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY --from=builder /workspace/manager .
+USER 65532:65532
+
+ENTRYPOINT ["/manager"]

From f3f6f2d84767aa110f1e1efbf515cad754408b25 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Wed, 13 Nov 2024 10:22:30 +0100
Subject: [PATCH 06/37] update the README

---
 .../dis-promrulegroups-operator/README.md     | 66 +++++++++++++++++--
 1 file changed, 62 insertions(+), 4 deletions(-)

diff --git a/services/dis-promrulegroups-operator/README.md b/services/dis-promrulegroups-operator/README.md
index 2d5bd4e6..b7fc71d6 100644
--- a/services/dis-promrulegroups-operator/README.md
+++ b/services/dis-promrulegroups-operator/README.md
@@ -1,8 +1,67 @@
 # dis-promrulegroups-operator
-// TODO(user): Add simple overview of use/purpose
-
+This controller is intended to sync PrometheusRule CRs created by Pyrra (which are not automatically picked up by the Managed Prometheus) with Azure PrometheusRuleGroups.
+
+```mermaid
+sequenceDiagram
+    participant User
+    participant K8s
+    participant Pyrra
+    participant ThisController
+    participant Azure
+    User->>K8s: Deploy a Pyrra CR
+    K8s->>Pyrra: A new SLO CR has been created
+    Pyrra->>K8s: Create PrometheusRule CR
+    K8s ->> ThisController: A new PrometheusRule CR has been created
+    ThisController ->> K8s: Fetch the object
+    alt Object is marked for deletion
+        alt Object has our finalizer
+            ThisController->> Azure: Delete PrometheusRuleGroups
+            alt Successfully deleted all PrometheusRuleGroups
+                ThisController->> ThisController: Remove finalizer from Object
+                ThisController->> K8s: Update Object
+            else Did not successfully delete all PrometheusRuleGroups
+                ThisController->> ThisController: Requeue the request
+            end
+        else Object does not have our finalizer
+            ThisController->> ThisController: Stop the reconciliation
+        end
+    else Object is not marked for deletion
+        alt Object has been marked with our finalizer
+            alt Object has our annotations
+                ThisController->> ThisController: Fetch metadata stored in the Annotations
+                ThisController->> ThisController: Regenerate the ARM Template based on the PrometheusRules
+                alt The new template and the old are the same
+                    ThisController->> ThisController: Stop reconciliation
+                else The new and old templates are not the same
+                    alt Resources have been deleted from PrometheusRule
+                        ThisController->> Azure: Delete the corresponding PrometheusRuleGroups
+                    else Resources have been added to the PrometheusRule
+                        ThisController->> Azure: Apply the new ARM template
+                    end
+                end
+            else Object does not have our annotations
+                ThisController ->> ThisController: Generate ARM template from PrometheusRules
+                ThisController ->> Azure: Deploy the ARM template
+                alt ARM deployment was successful
+                    ThisController->> ThisController: Add metadata as Annotations to the CR
+                    ThisController->> K8s: Update CR
+                else ARM deployment was not successful
+                    ThisController->>ThisController: Requeue
+                end
+            end
+        else Object
has not been marked with our finalizer
+            ThisController ->> ThisController: Add our finalizer to the object
+            ThisController ->> K8s: Update Object
+        end
+    end
+```
 ## Description
-// TODO(user): An in-depth paragraph about your project and overview of use
+We are using the [Managed Prometheus from Azure](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/prometheus-metrics-overview) instead of operating a Prometheus instance ourselves.
+This makes some things easier but adds other complexities.
+One problem we currently have is that the resources generated by [Pyrra](https://github.com/pyrra-dev/pyrra) ([PrometheusRule](https://prometheus-operator.dev/docs/getting-started/design/#prometheusrule) CRs or ConfigMaps) are not automatically picked up by the managed Prometheus.
+This custom controller provides the glue required to connect the two systems by watching the PrometheusRule CRs generated by Pyrra and managing the equivalent resource in Azure ([PrometheusRuleGroup](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/prometheus-rule-groups)).
+
+The controller does not manage any Custom Resource itself; it only watches the PrometheusRule CRs and adds a finalizer to them so that it can clean up the Azure resources related to the CR.
 ## Getting Started
@@ -111,4 +170,3 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-

From a1d5ee28c9fbc1a7228c3a07600fdc0a1aac2849 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Wed, 13 Nov 2024 11:49:14 +0100
Subject: [PATCH 07/37] examples of input and output of the conversion tool

---
 .../test/example_arm_template.json            | 267 ++++++++++++++++++
 .../test/example_prometheusRuleGroup.json     | 170 +++++++++++
 2 files changed, 437 insertions(+)
 create mode 100644 services/dis-promrulegroups-operator/test/example_arm_template.json
 create mode 100644 services/dis-promrulegroups-operator/test/example_prometheusRuleGroup.json

diff --git a/services/dis-promrulegroups-operator/test/example_arm_template.json b/services/dis-promrulegroups-operator/test/example_arm_template.json
new file mode 100644
index 00000000..b30dbfbf
--- /dev/null
+++ b/services/dis-promrulegroups-operator/test/example_arm_template.json
@@ -0,0 +1,267 @@
+{
+  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "location": {
+      "type": "string",
+      "defaultValue": "ResourceGroupLocation"
+    },
+    "clusterName": {
+      "type": "string",
+      "metadata": {
+        "description": "Cluster name"
+      },
+      "defaultValue": "ClusterName"
+    },
+    "actionGroupId": {
+      "type": "string",
+      "metadata": {
+        "description": "Action Group ResourceId"
+      }
+    },
+    "azureMonitorWorkspace": {
+      "type": "string",
+      "metadata": {
+        "description": "ResourceId of Azure monitor workspace to associate to"
+      },
+      "defaultValue": "AzureMonitorWorkspace"
+    }
+  },
+  "variables": {},
+  "resources": [
+    {
+      "name": "http-increase",
+      "type": "Microsoft.AlertsManagement/prometheusRuleGroups",
+      "apiVersion": "2023-03-01",
+      "location": "[parameters('location')]",
+      "properties": {
+        "interval": "PT2M30S",
+        "scopes": [
+          "[parameters('azureMonitorWorkspace')]"
+        ],
+        "clusterName": "[parameters('clusterName')]",
+        "rules": [
+          {
+            "record": "http_requests:increase4w",
"labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum by (status) (increase(http_requests_total{job=\"app\"}[4w]))" + }, + { + "severity": 3, + "resolveConfiguration": { + "autoResolved": true, + "timeToResolve": "PT10M" + }, + "actions": [ + { + "actionGroupId": "[parameters('actionGroupId')]" + } + ], + "alert": "SLOMetricAbsent", + "for": "PT2M", + "labels": { + "job": "app", + "severity": "critical", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + }, + "expression": "absent(http_requests_total{job=\"app\"}) == 1" + } + ] + } + }, + { + "name": "http", + "type": "Microsoft.AlertsManagement/prometheusRuleGroups", + "apiVersion": "2023-03-01", + "location": "[parameters('location')]", + "properties": { + "interval": "PT1M", + "scopes": [ + "[parameters('azureMonitorWorkspace')]" + ], + "clusterName": "[parameters('clusterName')]", + "rules": [ + { + "record": "http_requests:burnrate5m", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[5m])) / sum(rate(http_requests_total{job=\"app\"}[5m]))" + }, + { + "record": "http_requests:burnrate30m", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[30m])) / sum(rate(http_requests_total{job=\"app\"}[30m]))" + }, + { + "record": "http_requests:burnrate1h", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[1h])) / sum(rate(http_requests_total{job=\"app\"}[1h]))" + }, + { + "record": "http_requests:burnrate2h", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[2h])) / sum(rate(http_requests_total{job=\"app\"}[2h]))" + }, + { + "record": "http_requests:burnrate6h", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[6h])) / sum(rate(http_requests_total{job=\"app\"}[6h]))" + }, + { + "record": "http_requests:burnrate1d", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[1d])) / sum(rate(http_requests_total{job=\"app\"}[1d]))" + }, + { + "record": "http_requests:burnrate4d", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + }, + "expression": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[4d])) / sum(rate(http_requests_total{job=\"app\"}[4d]))" + }, + { + "severity": 3, + "resolveConfiguration": { + "autoResolved": true, + "timeToResolve": "PT10M" + }, + "actions": [ + { + "actionGroupId": "[parameters('actionGroupId')]" + } + ], + "alert": "ErrorBudgetBurn", + "for": "PT2M", + "labels": { + "exhaustion": "2d", + "job": "app", + "long": "1h", + "severity": "critical", + "short": "5m", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + }, + "expression": "http_requests:burnrate5m{job=\"app\",slo=\"http\"} > (14 * (1-0.995)) and http_requests:burnrate1h{job=\"app\",slo=\"http\"} > (14 * (1-0.995))" + }, + { + "severity": 3, + "resolveConfiguration": { + "autoResolved": true, + "timeToResolve": "PT10M" + }, + "actions": [ + { + "actionGroupId": "[parameters('actionGroupId')]" + } + ], + "alert": "ErrorBudgetBurn", + "for": "PT15M", + "labels": { + "exhaustion": "4d", + "job": "app", + 
"long": "6h", + "severity": "critical", + "short": "30m", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + }, + "expression": "http_requests:burnrate30m{job=\"app\",slo=\"http\"} > (7 * (1-0.995)) and http_requests:burnrate6h{job=\"app\",slo=\"http\"} > (7 * (1-0.995))" + }, + { + "severity": 3, + "resolveConfiguration": { + "autoResolved": true, + "timeToResolve": "PT10M" + }, + "actions": [ + { + "actionGroupId": "[parameters('actionGroupId')]" + } + ], + "alert": "ErrorBudgetBurn", + "for": "PT1H", + "labels": { + "exhaustion": "2w", + "job": "app", + "long": "1d", + "severity": "warning", + "short": "2h", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + }, + "expression": "http_requests:burnrate2h{job=\"app\",slo=\"http\"} > (2 * (1-0.995)) and http_requests:burnrate1d{job=\"app\",slo=\"http\"} > (2 * (1-0.995))" + }, + { + "severity": 3, + "resolveConfiguration": { + "autoResolved": true, + "timeToResolve": "PT10M" + }, + "actions": [ + { + "actionGroupId": "[parameters('actionGroupId')]" + } + ], + "alert": "ErrorBudgetBurn", + "for": "PT3H", + "labels": { + "exhaustion": "4w", + "job": "app", + "long": "4d", + "severity": "warning", + "short": "6h", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + }, + "expression": "http_requests:burnrate6h{job=\"app\",slo=\"http\"} > (1 * (1-0.995)) and http_requests:burnrate4d{job=\"app\",slo=\"http\"} > (1 * (1-0.995))" + } + ] + } + } + ] +} diff --git a/services/dis-promrulegroups-operator/test/example_prometheusRuleGroup.json b/services/dis-promrulegroups-operator/test/example_prometheusRuleGroup.json new file mode 100644 index 00000000..ead53f1e --- /dev/null +++ b/services/dis-promrulegroups-operator/test/example_prometheusRuleGroup.json @@ -0,0 +1,170 @@ +{ + "groups": [ + { + "name": "http-increase", + "interval": "2m30s", + "rules": [ + { + "record": "http_requests:increase4w", + "expr": "sum by (status) (increase(http_requests_total{job=\"app\"}[4w]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "alert": "SLOMetricAbsent", + "expr": "absent(http_requests_total{job=\"app\"}) == 1", + "for": "2m", + "labels": { + "job": "app", + "severity": "critical", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + } + } + ] + }, + { + "name": "http", + "interval": "30s", + "rules": [ + { + "record": "http_requests:burnrate5m", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[5m])) / sum(rate(http_requests_total{job=\"app\"}[5m]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "record": "http_requests:burnrate30m", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[30m])) / sum(rate(http_requests_total{job=\"app\"}[30m]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "record": "http_requests:burnrate1h", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[1h])) / sum(rate(http_requests_total{job=\"app\"}[1h]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "record": "http_requests:burnrate2h", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[2h])) / sum(rate(http_requests_total{job=\"app\"}[2h]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "record": "http_requests:burnrate6h", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[6h])) / 
sum(rate(http_requests_total{job=\"app\"}[6h]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "record": "http_requests:burnrate1d", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[1d])) / sum(rate(http_requests_total{job=\"app\"}[1d]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "record": "http_requests:burnrate4d", + "expr": "sum(rate(http_requests_total{job=\"app\",status=~\"5..\"}[4d])) / sum(rate(http_requests_total{job=\"app\"}[4d]))", + "labels": { + "job": "app", + "slo": "http", + "team": "foo" + } + }, + { + "alert": "ErrorBudgetBurn", + "expr": "http_requests:burnrate5m{job=\"app\",slo=\"http\"} > (14 * (1-0.995)) and http_requests:burnrate1h{job=\"app\",slo=\"http\"} > (14 * (1-0.995))", + "for": "2m0s", + "labels": { + "exhaustion": "2d", + "job": "app", + "long": "1h", + "severity": "critical", + "short": "5m", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + } + }, + { + "alert": "ErrorBudgetBurn", + "expr": "http_requests:burnrate30m{job=\"app\",slo=\"http\"} > (7 * (1-0.995)) and http_requests:burnrate6h{job=\"app\",slo=\"http\"} > (7 * (1-0.995))", + "for": "15m0s", + "labels": { + "exhaustion": "4d", + "job": "app", + "long": "6h", + "severity": "critical", + "short": "30m", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + } + }, + { + "alert": "ErrorBudgetBurn", + "expr": "http_requests:burnrate2h{job=\"app\",slo=\"http\"} > (2 * (1-0.995)) and http_requests:burnrate1d{job=\"app\",slo=\"http\"} > (2 * (1-0.995))", + "for": "1h0m0s", + "labels": { + "exhaustion": "2w", + "job": "app", + "long": "1d", + "severity": "warning", + "short": "2h", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + } + }, + { + "alert": "ErrorBudgetBurn", + "expr": "http_requests:burnrate6h{job=\"app\",slo=\"http\"} > (1 * (1-0.995)) and http_requests:burnrate4d{job=\"app\",slo=\"http\"} > (1 * (1-0.995))", + "for": "3h0m0s", + "labels": { + "exhaustion": "4w", + "job": "app", + "long": "4d", + "severity": "warning", + "short": "6h", + "slo": "http", + "team": "foo" + }, + "annotations": { + "description": "foo" + } + } + ] + } + ] +} \ No newline at end of file From 13fea1201e6e306f25fcd73c0942734fb8bc374a Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:55:18 +0100 Subject: [PATCH 08/37] add npm and az-prom-rules-converter as dependencies --- services/dis-promrulegroups-operator/Makefile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/services/dis-promrulegroups-operator/Makefile b/services/dis-promrulegroups-operator/Makefile index 6ce70049..e4abb9fe 100644 --- a/services/dis-promrulegroups-operator/Makefile +++ b/services/dis-promrulegroups-operator/Makefile @@ -61,6 +61,15 @@ vet: ## Run go vet against code. .PHONY: test test: manifests generate fmt vet envtest ## Run tests. + @command -v npm >/dev/null 2>&1 || { \ + echo "npm is not installed. 
Please install npm manually."; \ + exit 1; \ + } + @command -v $(LOCALBIN)/az-tool/node_modules/.bin/az-prom-rules-converter >/dev/null 2>&1 || { \ + echo "Installing az-prom-rules-converter in $(LOCALBIN)/az-tool"; \ + mkdir -p $(LOCALBIN)/az-tool; \ + npm i --prefix $(LOCALBIN)/az-tool --omit=dev https://gitpkg.now.sh/monteiro-renato/prometheus-collector/tools/az-prom-rules-converter?with-piped-input; \ + } KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out # TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. From fa10f01f9064dade2b12ecd3e1bc8078c28321cb Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:56:57 +0100 Subject: [PATCH 09/37] include the PrometheusRule CRD --- ...rometheusruleCustomResourceDefinition.yaml | 141 ++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 services/dis-promrulegroups-operator/config/external-crds/prometheusruleCustomResourceDefinition.yaml diff --git a/services/dis-promrulegroups-operator/config/external-crds/prometheusruleCustomResourceDefinition.yaml b/services/dis-promrulegroups-operator/config/external-crds/prometheusruleCustomResourceDefinition.yaml new file mode 100644 index 00000000..633a2c4a --- /dev/null +++ b/services/dis-promrulegroups-operator/config/external-crds/prometheusruleCustomResourceDefinition.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + operator.prometheus.io/version: 0.77.1 + name: prometheusrules.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PrometheusRule + listKind: PrometheusRuleList + plural: prometheusrules + shortNames: + - promrule + singular: prometheusrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + The `PrometheusRule` custom resource definition (CRD) defines [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules to be evaluated by `Prometheus` or `ThanosRuler` objects. + `Prometheus` and `ThanosRuler` objects select `PrometheusRule` objects using label and namespace selectors. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Specification of desired alerting rule definitions for Prometheus. + properties: + groups: + description: Content of Prometheus rule file + items: + description: RuleGroup is a list of sequentially evaluated recording and alerting rules. 
+ properties: + interval: + description: Interval determines how often rules in the group are evaluated. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + limit: + description: |- + Limit the number of alerts an alerting rule and series a recording + rule can produce. + Limit is supported starting with Prometheus >= 2.31 and Thanos Ruler >= 0.24. + type: integer + name: + description: Name of the rule group. + minLength: 1 + type: string + partial_response_strategy: + description: |- + PartialResponseStrategy is only used by ThanosRuler and will + be ignored by Prometheus instances. + More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response + pattern: ^(?i)(abort|warn)?$ + type: string + query_offset: + description: |- + Defines the offset the rule evaluation timestamp of this particular group by the specified duration into the past. + It requires Prometheus >= v2.53.0. + It is not supported for ThanosRuler. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + rules: + description: List of alerting and recording rules. + items: + description: |- + Rule describes an alerting or recording rule + See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) rule + properties: + alert: + description: |- + Name of the alert. Must be a valid label value. + Only one of `record` and `alert` must be set. + type: string + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to each alert. + Only valid for alerting rules. + type: object + expr: + anyOf: + - type: integer + - type: string + description: PromQL expression to evaluate. + x-kubernetes-int-or-string: true + for: + description: Alerts are considered firing once they have been returned for this long. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + keep_firing_for: + description: KeepFiringFor defines how long an alert will continue firing after the condition that triggered it has cleared. + minLength: 1 + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + labels: + additionalProperties: + type: string + description: Labels to add or overwrite. + type: object + record: + description: |- + Name of the time series to output to. Must be a valid metric name. + Only one of `record` and `alert` must be set. 
+ type: string + required: + - expr + type: object + type: array + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true From afd7fd8b52a1cc239df496fa80c0f21274842421 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:59:19 +0100 Subject: [PATCH 10/37] add an example .env file --- .../dis-promrulegroups-operator/.env_example | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 services/dis-promrulegroups-operator/.env_example diff --git a/services/dis-promrulegroups-operator/.env_example b/services/dis-promrulegroups-operator/.env_example new file mode 100644 index 00000000..d7d9ae52 --- /dev/null +++ b/services/dis-promrulegroups-operator/.env_example @@ -0,0 +1,16 @@ +# The azure subscription where the resources are located. +AZ_SUBSCRIPTION_ID= +# The resource group name which contains the resources managed by this controller. +AZ_RESOURCE_GROUP_NAME= +# The location of the resource group which contains the resources managed by this controller. +AZ_RESOURCE_GROUP_LOCATION= +# TODO: remove me. The action group needs to be decided based on user config. TBD how (maybe an extra label on the CR) +AZ_ACTION_GROUP_ID= +# The azure monitor workspace that will be associated with the PrometheusRuleGroups +AZ_AZURE_MONITOR_WORKSPACE= +# # The 'clusterName' property must match the cluster label that is added to the metrics when scraped from a specific cluster. By default, this label is set to the last part (resource name) of the cluster ID. +AZ_CLUSTER_NAME= +# Path to node executable +NODE_PATH= +# Path to az-prom-rules-converter tool +AZ_PROM_RULES_CONVERTER_PATH= \ No newline at end of file From 48db0e58981aa981bea234cc2bc889a7cfaf9213 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:07:54 +0100 Subject: [PATCH 11/37] setup the controller dependencies --- .../dis-promrulegroups-operator/cmd/main.go | 62 +++++++++++++++++-- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/services/dis-promrulegroups-operator/cmd/main.go b/services/dis-promrulegroups-operator/cmd/main.go index 7b4ceceb..f77f0b61 100644 --- a/services/dis-promrulegroups-operator/cmd/main.go +++ b/services/dis-promrulegroups-operator/cmd/main.go @@ -19,13 +19,20 @@ package main import ( "crypto/tls" "flag" + "fmt" "os" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
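+	// godotenv loads environment variables from a local .env file; main() below
+	// calls it for local development when ENVIRONMENT is unset (see .env_example).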
+ "github.com/joho/godotenv" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" _ "k8s.io/client-go/plugin/pkg/client/auth" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + + "github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator/internal/controller" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -35,8 +42,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - - "github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator/internal/controller" // +kubebuilder:scaffold:imports ) @@ -52,7 +57,24 @@ func init() { // +kubebuilder:scaffold:scheme } +func getEnvVarOrExit(name string) string { + value, ok := os.LookupEnv(name) + if !ok { + setupLog.Error(fmt.Errorf("missing required env var"), "Missing required env var", "env var", name) + os.Exit(1) + } + return value +} + func main() { + if os.Getenv("ENVIRONMENT") == "" { + err := godotenv.Load() + if err != nil { + setupLog.Error(err, "Error loading .env file") + os.Exit(1) + } + } + var metricsAddr string var enableLeaderElection bool var probeAddr string @@ -144,9 +166,39 @@ func main() { os.Exit(1) } + // Initialize Reconciler dependencies + subscriptionId := getEnvVarOrExit("AZ_SUBSCRIPTION_ID") + + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + setupLog.Error(err, "failed to obtain a credential") + os.Exit(1) + } + + deploymentsClient, err := armresources.NewDeploymentsClient(subscriptionId, cred, nil) + if err != nil { + setupLog.Error(err, "failed to create the DeploymentsClient") + os.Exit(1) + } + + prometheusRuleGroupsClient, err := armalertsmanagement.NewPrometheusRuleGroupsClient(subscriptionId, cred, nil) + if err != nil { + setupLog.Error(err, "failed to create the PrometheusRuleGroupsClient") + os.Exit(1) + } + + // Create the reconciler and Register the reconciler with the Manager if err = (&controller.PrometheusRuleReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + DeploymentClient: deploymentsClient, + PrometheusRuleGroupsClient: prometheusRuleGroupsClient, + AzResourceGroupName: getEnvVarOrExit("AZ_RESOURCE_GROUP_NAME"), + AzResourceGroupLocation: getEnvVarOrExit("AZ_RESOURCE_GROUP_LOCATION"), + AzAzureMonitorWorkspace: getEnvVarOrExit("AZ_AZURE_MONITOR_WORKSPACE"), + AzClusterName: getEnvVarOrExit("AZ_CLUSTER_NAME"), + NodePath: getEnvVarOrExit("NODE_PATH"), + AzPromRulesConverterPath: getEnvVarOrExit("AZ_PROM_RULES_CONVERTER_PATH"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "PrometheusRule") os.Exit(1) From e0cf42a8dcf9b25eb967aeccd3cd3b4de0e69fda Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:09:32 +0100 Subject: [PATCH 12/37] only resources in the monitoring namespace --- services/dis-promrulegroups-operator/cmd/main.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/services/dis-promrulegroups-operator/cmd/main.go 
b/services/dis-promrulegroups-operator/cmd/main.go index f77f0b61..03d73497 100644 --- a/services/dis-promrulegroups-operator/cmd/main.go +++ b/services/dis-promrulegroups-operator/cmd/main.go @@ -37,6 +37,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/metrics/filters" @@ -160,6 +161,15 @@ func main() { // if you are doing or is intended to do any operation such as perform cleanups // after the manager stops then its usage might be unsafe. // LeaderElectionReleaseOnCancel: true, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + // TODO: We need to decide on how we are going to handle this. + // Should the resources be created in a specific namespace? Or should developers just push into their own namespaces? + // Whatever we decide, it must be coherent with what we configure in the relabel config rules in Prometheus, + // otherwise the SLO metrics will not be pushed into the centralized monitoring. + "monitoring": {}, + }, + }, }) if err != nil { setupLog.Error(err, "unable to start manager") From 50b824fe269004c9ff33e3b8dce750c201f686c9 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:10:21 +0100 Subject: [PATCH 13/37] include the PrometheusRule CRD in the test suite --- .../internal/controller/suite_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/dis-promrulegroups-operator/internal/controller/suite_test.go b/services/dis-promrulegroups-operator/internal/controller/suite_test.go index 423c7672..e72d0707 100644 --- a/services/dis-promrulegroups-operator/internal/controller/suite_test.go +++ b/services/dis-promrulegroups-operator/internal/controller/suite_test.go @@ -58,7 +58,7 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases"), filepath.Join("..", "..", "config", "external-crds")}, ErrorIfCRDPathMissing: false, // The BinaryAssetsDirectory is only required if you want to run the tests directly From f138a091e8ed5bdd154d7dca06ab134dbf5f9c1f Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:10:34 +0100 Subject: [PATCH 14/37] add a file with test utils --- .../internal/controller/test_util.go | 177 ++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 services/dis-promrulegroups-operator/internal/controller/test_util.go diff --git a/services/dis-promrulegroups-operator/internal/controller/test_util.go b/services/dis-promrulegroups-operator/internal/controller/test_util.go new file mode 100644 index 00000000..fbd52dc5 --- /dev/null +++ b/services/dis-promrulegroups-operator/internal/controller/test_util.go @@ -0,0 +1,177 @@ +package controller + +import ( + "context" + "net/http" + + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + + armalertsmanagement "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement" + alertsmanagement_fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement/fake" + 
armresources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + armresources_fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/fake" + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + pyrrav1alpha1 "github.com/pyrra-dev/pyrra/kubernetes/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Fetched and adapted from: https://github.com/pyrra-dev/pyrra/blob/main/kubernetes/controllers/servicelevelobjective_test.go + +func NewExamplePrometheusRule() *monitoringv1.PrometheusRule { + trueBool := true + return &monitoringv1.PrometheusRule{ + TypeMeta: metav1.TypeMeta{ + APIVersion: monitoring.GroupName + "/" + monitoringv1.Version, + Kind: monitoringv1.PrometheusRuleKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "http", + Namespace: "monitoring", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: pyrrav1alpha1.GroupVersion.Version, + Kind: "ServiceLevelObjective", + Name: "http", + UID: "123", + Controller: &trueBool, + }, + }, + Labels: map[string]string{ + "pyrra.dev/team": "foo", + "team": "bar", + }, + }, + Spec: monitoringv1.PrometheusRuleSpec{ + Groups: []monitoringv1.RuleGroup{ + { + Name: "http-increase", + Interval: monitoringDuration("2m30s"), + Rules: []monitoringv1.Rule{ + { + Record: "http_requests:increase4w", + Expr: intstr.FromString(`sum by (status) (increase(http_requests_total{job="app"}[4w]))`), + Labels: map[string]string{ + "job": "app", + "slo": "http", + "team": "foo", + }, + }, + { + Alert: "SLOMetricAbsent", + Expr: intstr.FromString(`absent(http_requests_total{job="app"}) == 1`), + For: monitoringDuration("2m"), + Annotations: map[string]string{ + "description": "foo", + }, + Labels: map[string]string{ + "severity": "critical", + "job": "app", + "slo": "http", + "team": "foo", + }, + }, + }, + }, + { + Name: "http", + Interval: monitoringDuration("30s"), + Rules: []monitoringv1.Rule{ + { + Record: "http_requests:burnrate5m", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[5m])) / sum(rate(http_requests_total{job="app"}[5m]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Record: "http_requests:burnrate30m", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[30m])) / sum(rate(http_requests_total{job="app"}[30m]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Record: "http_requests:burnrate1h", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[1h])) / sum(rate(http_requests_total{job="app"}[1h]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Record: "http_requests:burnrate2h", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[2h])) / sum(rate(http_requests_total{job="app"}[2h]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Record: "http_requests:burnrate6h", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[6h])) / sum(rate(http_requests_total{job="app"}[6h]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Record: "http_requests:burnrate1d", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[1d])) / 
sum(rate(http_requests_total{job="app"}[1d]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Record: "http_requests:burnrate4d", + Expr: intstr.FromString(`sum(rate(http_requests_total{job="app",status=~"5.."}[4d])) / sum(rate(http_requests_total{job="app"}[4d]))`), + Labels: map[string]string{"job": "app", "slo": "http", "team": "foo"}, + }, + { + Alert: "ErrorBudgetBurn", + Expr: intstr.FromString(`http_requests:burnrate5m{job="app",slo="http"} > (14 * (1-0.995)) and http_requests:burnrate1h{job="app",slo="http"} > (14 * (1-0.995))`), + For: monitoringDuration("2m0s"), + Labels: map[string]string{"severity": "critical", "job": "app", "long": "1h", "slo": "http", "short": "5m", "team": "foo", "exhaustion": "2d"}, + Annotations: map[string]string{"description": "foo"}, + }, + { + Alert: "ErrorBudgetBurn", + Expr: intstr.FromString(`http_requests:burnrate30m{job="app",slo="http"} > (7 * (1-0.995)) and http_requests:burnrate6h{job="app",slo="http"} > (7 * (1-0.995))`), + For: monitoringDuration("15m0s"), + Labels: map[string]string{"severity": "critical", "job": "app", "long": "6h", "slo": "http", "short": "30m", "team": "foo", "exhaustion": "4d"}, + Annotations: map[string]string{"description": "foo"}, + }, + { + Alert: "ErrorBudgetBurn", + Expr: intstr.FromString(`http_requests:burnrate2h{job="app",slo="http"} > (2 * (1-0.995)) and http_requests:burnrate1d{job="app",slo="http"} > (2 * (1-0.995))`), + For: monitoringDuration("1h0m0s"), + Labels: map[string]string{"severity": "warning", "job": "app", "long": "1d", "slo": "http", "short": "2h", "team": "foo", "exhaustion": "2w"}, + Annotations: map[string]string{"description": "foo"}, + }, + { + Alert: "ErrorBudgetBurn", + Expr: intstr.FromString(`http_requests:burnrate6h{job="app",slo="http"} > (1 * (1-0.995)) and http_requests:burnrate4d{job="app",slo="http"} > (1 * (1-0.995))`), + For: monitoringDuration("3h0m0s"), + Labels: map[string]string{"severity": "warning", "job": "app", "long": "4d", "slo": "http", "short": "6h", "team": "foo", "exhaustion": "4w"}, + Annotations: map[string]string{"description": "foo"}, + }, + }, + }, + }, + }, + } +} + +func monitoringDuration(d string) *monitoringv1.Duration { + md := monitoringv1.Duration(d) + return &md +} + +func NewFakeDeploymentsServer() armresources_fake.DeploymentsServer { + return armresources_fake.DeploymentsServer{ + BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, deploymentName string, parameters armresources.Deployment, options *armresources.DeploymentsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armresources.DeploymentsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) { + // Set the values for the success response + resp.SetTerminalResponse(http.StatusCreated, armresources.DeploymentsClientCreateOrUpdateResponse{}, nil) + // Set the values for the error response; mutually exclusive. 
If both configured, the error response prevails + // errResp.SetResponseError(http.StatusBadRequest, "ThisIsASimulatedError") + return + }, + } +} + +func NewFakeNewPrometheusRuleGroupsServer() alertsmanagement_fake.PrometheusRuleGroupsServer { + return alertsmanagement_fake.PrometheusRuleGroupsServer{ + Delete: func(ctx context.Context, resourceGroupName, ruleGroupName string, options *armalertsmanagement.PrometheusRuleGroupsClientDeleteOptions) (resp azfake.Responder[armalertsmanagement.PrometheusRuleGroupsClientDeleteResponse], errResp azfake.ErrorResponder) { + resp.SetResponse(http.StatusOK, armalertsmanagement.PrometheusRuleGroupsClientDeleteResponse{}, nil) + // errResp.SetResponseError(http.StatusNotFound, http.StatusText(http.StatusNotFound)) + return + }, + } +} From 04aac848945e565c80b0bc57da8900711a7ce071 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:16:00 +0100 Subject: [PATCH 15/37] first implementation basic tests go mod tidy --- services/dis-promrulegroups-operator/go.mod | 33 +- services/dis-promrulegroups-operator/go.sum | 89 +++- .../controller/prometheusrule_controller.go | 416 +++++++++++++++++- .../prometheusrule_controller_test.go | 195 +++++++- 4 files changed, 699 insertions(+), 34 deletions(-) diff --git a/services/dis-promrulegroups-operator/go.mod b/services/dis-promrulegroups-operator/go.mod index a7eb890d..1e0d1f72 100644 --- a/services/dis-promrulegroups-operator/go.mod +++ b/services/dis-promrulegroups-operator/go.mod @@ -5,35 +5,49 @@ go 1.23 toolchain go1.23.3 require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement v0.10.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 + github.com/joho/godotenv v1.5.1 github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.33.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 + github.com/prometheus/common v0.55.0 + github.com/pyrra-dev/pyrra v0.7.7 + k8s.io/api v0.31.2 k8s.io/apimachinery v0.31.2 k8s.io/client-go v0.31.2 sigs.k8s.io/controller-runtime v0.19.0 ) require ( + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dennwc/varint v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect 
github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/cel-go v0.20.1 // indirect @@ -42,23 +56,28 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect github.com/google/uuid v1.6.0 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prometheus v0.46.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/stretchr/testify v1.9.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect @@ -68,9 +87,12 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect @@ -87,7 +109,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.31.2 // indirect k8s.io/apiextensions-apiserver v0.31.2 // indirect k8s.io/apiserver v0.31.2 // indirect k8s.io/component-base v0.31.2 // indirect diff --git a/services/dis-promrulegroups-operator/go.sum b/services/dis-promrulegroups-operator/go.sum index 27d83ccd..a1de4d6b 100644 --- a/services/dis-promrulegroups-operator/go.sum +++ b/services/dis-promrulegroups-operator/go.sum @@ -1,7 +1,32 @@ +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod 
h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement v0.10.0 h1:zSc/9W0WKtH/qpFlN1K2pQjMJLMTMJpWyexdjtexHtA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement v0.10.0/go.mod h1:AQQcirAE2A3+knMh7Voguwx+0txBqeDBaWu+ML1aMSc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= +github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -16,10 +41,14 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -28,6 +57,10 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -35,8 +68,9 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference v0.20.2 
h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= @@ -46,10 +80,14 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -64,18 +102,30 @@ github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2 github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -83,6 +133,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -92,10 +144,16 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ 
-109,8 +167,16 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw= +github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4= +github.com/pyrra-dev/pyrra v0.7.7 h1:yoUOO6636nReWcu099h37+4iFYCaVwBJCrlXw61J5K8= +github.com/pyrra-dev/pyrra v0.7.7/go.mod h1:BuHlOQMd/hoHKqznZ2g61ZvxVTEEt7DeJvCqAdGbULE= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -150,6 +216,8 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -159,8 +227,10 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -179,6 +249,7 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go index 743d34a7..21cf7ca8 100644 --- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go @@ -17,42 +17,426 @@ limitations under the License. package controller import ( + "bytes" "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + azcoreruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + prometheusmodel "github.com/prometheus/common/model" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/status,verbs=get +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/finalizers,verbs=update + +const ( + finalizerName = "prometheusrule.dis.altinn.cloud/finalizer" + // This annotation has a comma separated string with the names of the resources created in azure. + azPrometheusRuleGroupResourceNamesAnnotation = "prometheusrule.dis.altinn.cloud/az-prometheusrulegroups-names" + // This annotation has the has of the latest applied ARM template. 
+ azArmTemplateHashAnnotation = "prometheusrule.dis.altinn.cloud/latest-arm-template-deployed-hash" + // This annotation has the name of the latest ARM template deployment + azArmDeploymentNameAnnotation = "prometheusrule.dis.altinn.cloud/az-arm-deployment-name" + // Last time a successful deployment was done + azArmDeploymentLastSuccessfulTimestampAnnotation = "prometheusrule.dis.altinn.cloud/az-arm-deployment-last-successful-deployment-timestamp" +) + +var ( + allAnnotations = [4]string{ + azPrometheusRuleGroupResourceNamesAnnotation, + azArmTemplateHashAnnotation, + azArmDeploymentNameAnnotation, + azArmDeploymentLastSuccessfulTimestampAnnotation, + } ) -// PrometheusRuleReconciler reconciles a PrometheusRule object type PrometheusRuleReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + DeploymentClient *armresources.DeploymentsClient + PrometheusRuleGroupsClient *armalertsmanagement.PrometheusRuleGroupsClient + AzResourceGroupName string + AzResourceGroupLocation string + AzAzureMonitorWorkspace string + AzClusterName string + NodePath string + AzPromRulesConverterPath string } -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules,verbs=get;list;watch;update;patch -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/status,verbs=get -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules/finalizers,verbs=update +func (r *PrometheusRuleReconciler) handleCreation(ctx context.Context, req ctrl.Request, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) { + log := log.FromContext(ctx) + armTemplateJsonString, err := r.generateArmTemplateFromPromRule(ctx, promRule) + if err != nil { + log.Error(err, "failed to convert the PrometheusRule into an ARM template", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{Requeue: false}, nil + } + + ruleGroupNames := generateRuleGroupNamesAnnotationString(promRule) + suffix := timestamp() + deploymentName := generateArmDeploymentName(req, suffix) + + err = r.deployArmTemplate( + ctx, + deploymentName, + armTemplateJsonString, + os.Getenv("AZ_ACTION_GROUP_ID"), + ) + if err != nil { + log.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 30 * time.Second}, err + } + // Update the annotations on the CR + return r.updateAnnotations(ctx, promRule, ruleGroupNames, hashArmTemplate([]byte(armTemplateJsonString)), deploymentName, suffix) +} + +func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Request, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) { + log := log.FromContext(ctx) + + annotations := promRule.GetAnnotations() + lastGeneratedArmtemplateHash := annotations[azArmTemplateHashAnnotation] + suffix := timestamp() + armDeploymentName := generateArmDeploymentName(req, suffix) + regeneratedArmTemplate, err := r.generateArmTemplateFromPromRule(ctx, promRule) + if err != nil { + log.Error(err, "failed to convert the PrometheusRule into an ARM template", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{Requeue: false}, nil + } + + regeneratedArmTemplateHash := hashArmTemplate([]byte(regeneratedArmTemplate)) + if !(regeneratedArmTemplateHash == lastGeneratedArmtemplateHash) { + ruleGroupNames := generateRuleGroupNamesAnnotationString(promRule) + + annotations := promRule.GetAnnotations() + oldPromRuleGroupNames := 
strings.Split(annotations[azPrometheusRuleGroupResourceNamesAnnotation], ",") + newPromRuleGroupNames := strings.Split(ruleGroupNames, ",") + toDelete := removedGroups(oldPromRuleGroupNames, newPromRuleGroupNames) + + for _, td := range toDelete { + _, err := r.deletePrometheusRuleGroup(ctx, td) + if err != nil { + log.Error(err, "failed to delete PrometheusRuleGroup", "PrometheusRuleGroupName", td) + } + } + + err = r.deployArmTemplate( + ctx, + armDeploymentName, + regeneratedArmTemplate, + os.Getenv("AZ_ACTION_GROUP_ID"), + ) + if err != nil { + log.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 30 * time.Second}, err + } + + return r.updateAnnotations(ctx, promRule, ruleGroupNames, regeneratedArmTemplateHash, armDeploymentName, suffix) + } else { + // TODO: Might make sense to double-check that the Azure resources haven't been deleted/modified outside the controller too. + } + return ctrl.Result{}, err +} + +func (r *PrometheusRuleReconciler) handleDelete(ctx context.Context, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) { + log := log.FromContext(ctx) + log.Info("deletion of PrometheusRule CR detected", "namespace", promRule.Namespace, "name", promRule.Name) + + if controllerutil.ContainsFinalizer(&promRule, finalizerName) { + if err := r.deleteExternalResources(ctx, promRule); err != nil { + log.Info("failed to delete Azure resources", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, err + } + log.Info("removing our finalizer", "namespace", promRule.Namespace, "name", promRule.Name) + ok := controllerutil.RemoveFinalizer(&promRule, finalizerName) + if ok { + if err := r.Update(ctx, &promRule); err != nil { + log.Info("failed to update object", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, err + } + } else { + log.Info("failed to remove our finalizer from object", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, errors.New("failed to remove finalizer from object") + } + } + return ctrl.Result{}, nil +} + +func (r *PrometheusRuleReconciler) addOurFinalizer(ctx context.Context, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) { + log := log.FromContext(ctx) + log.Info("updating the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name) + ok := controllerutil.AddFinalizer(&promRule, finalizerName) + if ok { + if err := r.Update(ctx, &promRule); err != nil { + log.Error(err, "failed to update the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, err + } + return ctrl.Result{}, nil + } else { + log.Info("failed to add our finalizer to the object", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, errors.New("failed to add our finalizer to the object") + } +} -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the PrometheusRule object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user.
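+ // Reconcile, below, dispatches on the observed state of the CR: a non-zero deletion timestamp routes to handleDelete; + // a missing finalizer is added first, and the resulting update event re-triggers reconciliation; + // a CR that lacks our annotations is handled as a creation; anything else is handled as a potential update, + // which only redeploys when the regenerated ARM template hash differs from the stored one.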
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + log := log.FromContext(ctx) - // TODO(user): your logic here + var prometheusRule monitoringv1.PrometheusRule + if err := r.Get(ctx, req.NamespacedName, &prometheusRule); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + log.Error(err, "unable to fetch PrometheusRule", "namespace", req.Namespace, "name", req.Name) + return ctrl.Result{}, err + } + // The resource is not marked for deletion. + if prometheusRule.GetDeletionTimestamp().IsZero() { + // We need to make sure we add a finalizer to the PrometheusRule CR so we can cleanup Azure resources when the CR is deleted. + if !controllerutil.ContainsFinalizer(&prometheusRule, finalizerName) { + return r.addOurFinalizer(ctx, prometheusRule) + } + // Look into the object's annotations for annotations we own. + annotations := prometheusRule.GetAnnotations() + ok := hasAllAnnotations(annotations) + if !ok { + log.Info("new PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name) + return r.handleCreation(ctx, req, prometheusRule) + } else { + log.Info("update to PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name) + return r.handleUpdate(ctx, req, prometheusRule) + } + } else { + return r.handleDelete(ctx, prometheusRule) + } +} + +func (r *PrometheusRuleReconciler) updateAnnotations(ctx context.Context, promRule monitoringv1.PrometheusRule, groupNames, armTemplateHash, armDeploymentName, timestamp string) (reconcile.Result, error) { + log := log.FromContext(ctx) + annotations := promRule.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[azPrometheusRuleGroupResourceNamesAnnotation] = groupNames + annotations[azArmTemplateHashAnnotation] = armTemplateHash + annotations[azArmDeploymentNameAnnotation] = armDeploymentName + annotations[azArmDeploymentLastSuccessfulTimestampAnnotation] = timestamp + + promRule.SetAnnotations(annotations) + err := r.Client.Update(ctx, &promRule) + if err != nil { + log.Error(err, "failed to update the PrometheusRule CR with new annotations", "namespace", promRule.Namespace, "name", promRule.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, err + } return ctrl.Result{}, nil } +func (r *PrometheusRuleReconciler) deployArmTemplate(ctx context.Context, deploymentName string, jsonTemplate string, actionGroupId string) error { + log := log.FromContext(ctx) + + contents := make(map[string]interface{}) + _ = json.Unmarshal([]byte(jsonTemplate), &contents) + + deploy, err := r.DeploymentClient.BeginCreateOrUpdate( + ctx, + r.AzResourceGroupName, + deploymentName, + armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: contents, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + Parameters: map[string]interface{}{ + "location": map[string]string{ + "value": r.AzResourceGroupLocation, + }, + "actionGroupId": map[string]string{ + "value": actionGroupId, + }, + "azureMonitorWorkspace": map[string]string{ + "value": r.AzAzureMonitorWorkspace, + }, + "clusterName": map[string]string{ + "value": r.AzClusterName, + }, + }, + }, + }, + nil, + ) + + if err != nil { + log.Error(err, "failed BeginCreateOrUpdate", "deploymentName", deploymentName) + return 
err + } + // TODO: Check the best practices here. I doubt we want to do this synchronously. + // From my tests it usually takes less than 5s though, so it might actually be ok. + _, err = deploy.PollUntilDone(ctx, &azcoreruntime.PollUntilDoneOptions{Frequency: 5 * time.Second}) + if err != nil { + return fmt.Errorf("cannot get the create deployment future response: %v", err) + } + return nil +} +func (r *PrometheusRuleReconciler) deleteExternalResources(ctx context.Context, promRule monitoringv1.PrometheusRule) error { + annotations := promRule.GetAnnotations() + resourceNames, ok := annotations[azPrometheusRuleGroupResourceNamesAnnotation] + if ok { + for _, rn := range strings.Split(resourceNames, ",") { + _, err := r.deletePrometheusRuleGroup(ctx, rn) + if err != nil { + // TODO: Should we try to delete the rest in case one deletion fails? Or simply retry again? + return err + } + } + } + return nil +} + +func (r *PrometheusRuleReconciler) deletePrometheusRuleGroup(ctx context.Context, ruleGroupName string) (*armalertsmanagement.PrometheusRuleGroupsClientDeleteResponse, error) { + log := log.FromContext(ctx) + resp, err := r.PrometheusRuleGroupsClient.Delete(ctx, r.AzResourceGroupName, ruleGroupName, nil) + + if err != nil { + log.Error(err, "failed to delete the prometheus rule group", "ruleGroupName", ruleGroupName) + return nil, err + } + log.Info("Successfully deleted PrometheusRuleGroup", "ruleGroupName", ruleGroupName) + return &resp, nil +} + +func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.Context, promRule monitoringv1.PrometheusRule) (string, error) { + log := log.FromContext(ctx) + + for _, ruleGroup := range promRule.Spec.Groups { + // Interval is optional on a PrometheusRule group; skip the clamping below when it is unset. + if ruleGroup.Interval == nil { + continue + } + interval, err := prometheusmodel.ParseDuration(string(*ruleGroup.Interval)) + if err != nil { + log.Error(err, "Failed to parse the Interval from the PrometheusRule Spec") + return "", err + } + // Can't be lower than 1m.
+ if interval < prometheusmodel.Duration(1*time.Minute) { + *ruleGroup.Interval = monitoringv1.Duration("1m") + } + } + + marshalledPromRule, err := json.Marshal(promRule.Spec) + + if err != nil { + log.Error(err, "Failed to marshal the promRule") + return "", err + } + + tmp := strings.Split(r.AzAzureMonitorWorkspace, "/") + azureMonitorWorkspaceName := tmp[len(tmp)-1] + + cmd := exec.Command( + r.NodePath, + r.AzPromRulesConverterPath, + "--azure-monitor-workspace", + azureMonitorWorkspaceName, + "--location", + r.AzResourceGroupLocation, + "--cluster-name", + r.AzClusterName, + ) + + var in bytes.Buffer + var out, errb strings.Builder + + in.Write([]byte(marshalledPromRule)) + + cmd.Stdin = &in + cmd.Stdout = &out + cmd.Stderr = &errb + err = cmd.Run() + if err != nil { + log.Error(err, "Failed to convert PrometheusRule into PrometheusRuleGroup", "Stderr", errb.String()) + return "", err + } + jsonString := out.String() + + return jsonString, nil +} + +func timestamp() string { + now := time.Now() + + var sb strings.Builder + sb.WriteString(strconv.Itoa(now.Year())) + sb.WriteString(strconv.Itoa(int(now.Month()))) + sb.WriteString(strconv.Itoa(now.Day())) + sb.WriteString(strconv.Itoa(now.Hour())) + sb.WriteString(strconv.Itoa(now.Minute())) + sb.WriteString(strconv.Itoa(now.Second())) + + return sb.String() +} + +func hasAllAnnotations(annotations map[string]string) bool { + boolRes := true + for _, a := range allAnnotations { + _, ok := annotations[a] + boolRes = boolRes && ok + } + return boolRes +} + +func generateArmDeploymentName(req ctrl.Request, suffix string) string { + // Limit of 64 characters + return req.Namespace + "-" + req.Name + "-" + suffix +} +func removedGroups(old, new []string) []string { + groupsToRemove := make([]string, 0) + for _, viOld := range old { + found := false + for _, viNew := range new { + if viNew == viOld { + found = true + break + } + } + if !found { + groupsToRemove = append(groupsToRemove, viOld) + } + } + return groupsToRemove +} + +func generateRuleGroupNamesAnnotationString(promRule monitoringv1.PrometheusRule) string { + resourceNames := "" + for idx, p := range promRule.Spec.Groups { + if idx+1 < len(promRule.Spec.Groups) { + resourceNames = resourceNames + p.Name + "," + } else { + resourceNames = resourceNames + p.Name + } + } + return resourceNames +} + +func hashArmTemplate(bytes []byte) string { + h := sha256.New() + h.Write(bytes) + return base64.URLEncoding.EncodeToString(h.Sum(nil)) +} + // SetupWithManager sets up the controller with the Manager. func (r *PrometheusRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go index 4fcf203f..eea4c6c6 100644 --- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller_test.go @@ -17,16 +17,205 @@ limitations under the License. 
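+ // These tests drive the reconciler against envtest with the Azure SDK clients pointed at the in-memory fakes + // from test_util.go, so no real ARM requests are made. Generating the ARM template still shells out to node + // with az-prom-rules-converter, so the NodePath and AzPromRulesConverterPath values used below assume that + // tool has been installed locally before the suite runs.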
package controller import ( + "context" + "log" + "os" + "slices" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement" + + armalertsmanagement_fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement/fake" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + armresources_fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/fake" . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var _ = Describe("PrometheusRule Controller", func() { + + const ( + PrometheusRuleName = "http" + PrometheusRuleNamespace = "monitoring" + + timeout = time.Second * 20 + duration = time.Second * 10 + interval = time.Millisecond * 250 + eventuallyTimeout = 2 * time.Minute + eventuallyPollingInterval = time.Second + ) + Context("When reconciling a resource", func() { + ctx := context.Background() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: PrometheusRuleNamespace, + Namespace: PrometheusRuleNamespace, + }, + } + typeNamespacedName := types.NamespacedName{ + Name: PrometheusRuleName, + Namespace: PrometheusRuleNamespace, + } + + SetDefaultEventuallyTimeout(eventuallyTimeout) + SetDefaultEventuallyPollingInterval(eventuallyPollingInterval) + + BeforeEach(func() { + By("Creating the Namespace to perform the tests") + err := k8sClient.Create(ctx, namespace) + Expect(err).NotTo(HaveOccurred()) - It("should successfully reconcile the resource", func() { + By("Creating the custom resource for the Kind PrometheusRule") + promRule := NewExamplePrometheusRule() - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ err = k8sClient.Create(ctx, promRule) + Expect(err).NotTo(HaveOccurred()) + + promRuleFromCluster := &monitoringv1.PrometheusRule{} + err = k8sClient.Get(ctx, typeNamespacedName, promRuleFromCluster) + Expect(err).NotTo(HaveOccurred()) + Expect(len(promRuleFromCluster.Spec.Groups)).To(Equal(2)) + }) + + AfterEach(func() { + By("Removing the custom resource for the Kind PrometheusRule") + found := &monitoringv1.PrometheusRule{} + err := k8sClient.Get(ctx, typeNamespacedName, found) + if !errors.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred()) + Eventually(func(g Gomega) { + g.Expect(k8sClient.Delete(context.TODO(), found)).To(Succeed()) + }).Should(Succeed()) + } + By("Deleting the Namespace to perform the tests") + _ = k8sClient.Delete(ctx, namespace) + }) + + It("should successfully reconcile a custom resource for PrometheusRule", func() { + // Fake servers + + fakeDeploymentsServer := NewFakeDeploymentsServer() + fakePrometheusRuleGroupsServer := NewFakeNewPrometheusRuleGroupsServer() + // Clients + deploymentsClient, err := armresources.NewDeploymentsClient("subscriptionID", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: armresources_fake.NewDeploymentsServerTransport(&fakeDeploymentsServer), + }, + }) + if err != nil { + log.Fatal(err) + } + + prometheusRuleGroupsClient, err := armalertsmanagement.NewPrometheusRuleGroupsClient("subscriptionID", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: armalertsmanagement_fake.NewPrometheusRuleGroupsServerTransport(&fakePrometheusRuleGroupsServer), + }, + }) + if err != nil { + log.Fatal(err) + } + + controllerReconciler := &PrometheusRuleReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + DeploymentClient: deploymentsClient, + PrometheusRuleGroupsClient: prometheusRuleGroupsClient, + AzResourceGroupName: "ResourceGroupName", + AzResourceGroupLocation: "ResourceGroupLocation", + AzAzureMonitorWorkspace: "AzureMonitorWorkspace", + AzClusterName: "ClusterName", + NodePath: "node", + AzPromRulesConverterPath: "../../bin/az-tool/node_modules/az-prom-rules-converter/dist/cli.js", + } + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + promRuleFromCluster := &monitoringv1.PrometheusRule{} + + By("checking that our finalizer is added to the prometheusrule") + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(ctx, typeNamespacedName, promRuleFromCluster)).To(Succeed()) + g.Expect(slices.Contains(promRuleFromCluster.GetFinalizers(), finalizerName)).To(BeTrue()) + }, timeout, interval).Should(Succeed()) + + By("checking that our annotations are added to the prometheusrule") + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(ctx, typeNamespacedName, promRuleFromCluster)).To(Succeed()) + g.Expect(len(promRuleFromCluster.GetAnnotations())).To(Equal(4)) + g.Expect(azArmDeploymentLastSuccessfulTimestampAnnotation).To(BeKeyOf(promRuleFromCluster.GetAnnotations())) + g.Expect(azArmDeploymentNameAnnotation).To(BeKeyOf(promRuleFromCluster.GetAnnotations())) + g.Expect(azArmTemplateHashAnnotation).To(BeKeyOf(promRuleFromCluster.GetAnnotations())) + g.Expect(azPrometheusRuleGroupResourceNamesAnnotation).To(BeKeyOf(promRuleFromCluster.GetAnnotations())) + }, timeout, interval).Should(Succeed()) + + 
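// Bumping a group's interval below changes the generated ARM template, + // so the template hash annotation stored on the CR is expected to change after the next reconcile. +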
By("checking that changes to the prometheusrule are detected") + err = k8sClient.Get(ctx, typeNamespacedName, promRuleFromCluster) + Expect(err).NotTo(HaveOccurred()) + templateHash := strings.Clone(promRuleFromCluster.GetAnnotations()[azArmTemplateHashAnnotation]) + + *promRuleFromCluster.Spec.Groups[0].Interval = monitoringv1.Duration("5m") + err = k8sClient.Update(ctx, promRuleFromCluster) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(ctx, typeNamespacedName, promRuleFromCluster)).To(Succeed()) + g.Expect(templateHash).To(Not(Equal(promRuleFromCluster.GetAnnotations()[azArmTemplateHashAnnotation]))) + }, timeout, interval).Should(Succeed()) + + By("checking that resources marked to be deleted, are deleted") + err = k8sClient.Delete(ctx, promRuleFromCluster) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func(g Gomega) { + g.Expect(errors.IsNotFound(k8sClient.Get(ctx, typeNamespacedName, promRuleFromCluster))).To(BeTrue()) + }, timeout, interval).Should(Succeed()) + + By("Checking that the ARM template is correctly generated") + tmplt, err := controllerReconciler.generateArmTemplateFromPromRule(context.TODO(), *NewExamplePrometheusRule()) + Expect(err).NotTo(HaveOccurred()) + reftmplt, err := os.ReadFile("../../test/example_arm_template.json") + Expect(err).NotTo(HaveOccurred()) + Expect(string(reftmplt)).To(Equal(tmplt)) }) }) }) + +var _ = DescribeTable("Detecting PrometheusRuleGroups to delete", func(old, new, toDelete []string) { + Expect(removedGroups(old, new)).To(Equal(toDelete)) +}, + Entry(nil, []string{"a", "b", "c"}, []string{"a", "b", "c"}, []string{}), + Entry(nil, []string{"a", "b", "c"}, []string{"a", "b", "d"}, []string{"c"}), + Entry(nil, []string{"a", "b", "c"}, []string{"a", "b"}, []string{"c"}), + Entry(nil, []string{"a", "b", "c"}, []string{"a", "b", "d", "e"}, []string{"c"}), + Entry(nil, []string{"a", "b", "c"}, []string{"b", "a"}, []string{"c"}), + Entry(nil, []string{"a", "b", "c"}, []string{"a", "b", "c", "d"}, []string{}), + Entry(nil, []string{"a", "b", "c"}, []string{}, []string{"a", "b", "c"}), +) From da40ae5b2e65c5cb76d8f07b07e697ca9d770749 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:38:56 +0100 Subject: [PATCH 16/37] k8s deployment configs --- .../config/manager/kustomization.yaml | 7 ++++++ .../config/manager/manager_env_patch.yaml | 22 +++++++++++++++++++ .../config/manager/manager_pod_patch.yaml | 6 +++++ .../config/rbac/kustomization.yaml | 4 ++++ .../config/rbac/service_account_patch.yaml | 6 +++++ .../dist/install.yaml | 22 +++++++++++++++++++ 6 files changed, 67 insertions(+) create mode 100644 services/dis-promrulegroups-operator/config/manager/manager_env_patch.yaml create mode 100644 services/dis-promrulegroups-operator/config/manager/manager_pod_patch.yaml create mode 100644 services/dis-promrulegroups-operator/config/rbac/service_account_patch.yaml diff --git a/services/dis-promrulegroups-operator/config/manager/kustomization.yaml b/services/dis-promrulegroups-operator/config/manager/kustomization.yaml index ad13e96b..ae6d4263 100644 --- a/services/dis-promrulegroups-operator/config/manager/kustomization.yaml +++ 
b/services/dis-promrulegroups-operator/config/manager/kustomization.yaml @@ -1,5 +1,12 @@ resources: - manager.yaml +patches: +- path: manager_env_patch.yaml + target: + kind: Deployment +- path: manager_pod_patch.yaml + target: + kind: Deployment apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: diff --git a/services/dis-promrulegroups-operator/config/manager/manager_env_patch.yaml b/services/dis-promrulegroups-operator/config/manager/manager_env_patch.yaml new file mode 100644 index 00000000..0be0e21c --- /dev/null +++ b/services/dis-promrulegroups-operator/config/manager/manager_env_patch.yaml @@ -0,0 +1,22 @@ +# This patch adds the required env vars +- op: add + path: /spec/template/spec/containers/0/env + value: + - name: AZ_SUBSCRIPTION_ID + value: "" # TODO: Should be populated based on the output from Terraform + - name: AZ_RESOURCE_GROUP_NAME + value: "" # TODO: Should be populated based on the output from Terraform + - name: AZ_MONITOR_WORKSPACE_NAME + value: "" # TODO: Should be populated based on the output from Terraform + - name: AZ_RESOURCE_GROUP_LOCATION + value: "" # TODO: Should be populated based on the output from Terraform + - name: AZ_ACTION_GROUP_ID + value: "" # TODO: Should be configurable with information coming from the PrometheusRule CR + - name: AZ_AZURE_MONITOR_WORKSPACE + value: "" # TODO: Should be populated based on the output from Terraform + - name: AZ_CLUSTER_NAME + value: "" # TODO: Should be populated based on the output from Terraform + - name: NODE_PATH + value: /nodejs/bin/node # From the Dockerfile + - name: AZ_PROM_RULES_CONVERTER_PATH + value: /tool/node_modules/az-prom-rules-converter/dist/cli.js diff --git a/services/dis-promrulegroups-operator/config/manager/manager_pod_patch.yaml b/services/dis-promrulegroups-operator/config/manager/manager_pod_patch.yaml new file mode 100644 index 00000000..5fcab25d --- /dev/null +++ b/services/dis-promrulegroups-operator/config/manager/manager_pod_patch.yaml @@ -0,0 +1,6 @@ +# This patch adds the pod label required by Azure Workload Identity +- op: add + path: /spec/template/metadata/labels + value: + control-plane: controller-manager + azure.workload.identity/use: "true" diff --git a/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml b/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml index 5619aa00..c6838731 100644 --- a/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml +++ b/services/dis-promrulegroups-operator/config/rbac/kustomization.yaml @@ -18,3 +18,7 @@ resources: - metrics_auth_role.yaml - metrics_auth_role_binding.yaml - metrics_reader_role.yaml +patches: +- path: service_account_patch.yaml + target: + kind: ServiceAccount diff --git a/services/dis-promrulegroups-operator/config/rbac/service_account_patch.yaml b/services/dis-promrulegroups-operator/config/rbac/service_account_patch.yaml new file mode 100644 index 00000000..cd74cc56 --- /dev/null +++ b/services/dis-promrulegroups-operator/config/rbac/service_account_patch.yaml @@ -0,0 +1,6 @@ +# This patch adds the annotation required by Azure Workload Identity +# to the service account, holding the application client ID +- op: add + path: /metadata/annotations + value: + azure.workload.identity/client-id: "" # TODO: Should be populated based on the output from Terraform diff --git a/services/dis-promrulegroups-operator/dist/install.yaml b/services/dis-promrulegroups-operator/dist/install.yaml index 7875fe0b..89331e6e 100644 ---
a/services/dis-promrulegroups-operator/dist/install.yaml +++ b/services/dis-promrulegroups-operator/dist/install.yaml @@ -10,6 +10,8 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: + annotations: + azure.workload.identity/client-id: "" labels: app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: dis-promrulegroups-operator @@ -196,6 +198,7 @@ spec: annotations: kubectl.kubernetes.io/default-container: manager labels: + azure.workload.identity/use: "true" control-plane: controller-manager spec: containers: @@ -205,6 +208,25 @@ spec: - --health-probe-bind-address=:8081 command: - /manager + env: + - name: AZ_SUBSCRIPTION_ID + value: "" + - name: AZ_RESOURCE_GROUP_NAME + value: "" + - name: AZ_MONITOR_WORKSPACE_NAME + value: "" + - name: AZ_RESOURCE_GROUP_LOCATION + value: "" + - name: AZ_ACTION_GROUP_ID + value: "" + - name: AZ_AZURE_MONITOR_WORKSPACE + value: "" + - name: AZ_CLUSTER_NAME + value: "" + - name: NODE_PATH + value: /nodejs/bin/node + - name: AZ_PROM_RULES_CONVERTER_PATH + value: /tool/node_modules/az-prom-rules-converter/dist/cli.js image: controller:latest livenessProbe: httpGet: From bd48b3bad8c2a6359bf0aa4289c92c2b721e16ae Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:55:47 +0100 Subject: [PATCH 17/37] add a prefix to the rule names to make them unique in the centralized monitoring subscription --- .../internal/controller/prometheusrule_controller.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go index 21cf7ca8..7779a4b6 100644 --- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go @@ -323,8 +323,12 @@ func (r *PrometheusRuleReconciler) deletePrometheusRuleGroup(ctx context.Context func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.Context, promRule monitoringv1.PrometheusRule) (string, error) { log := log.FromContext(ctx) - - for _, ruleGroup := range promRule.Spec.Groups { + promRuleCopy := promRule.DeepCopy() + for idx, ruleGroup := range promRuleCopy.Spec.Groups { + // The names are the same for every cluster so we need to prefix them + if !strings.Contains(promRuleCopy.Spec.Groups[idx].Name, r.AzClusterName) { + promRuleCopy.Spec.Groups[idx].Name = r.AzClusterName + "-" + ruleGroup.Name + } interval, err := prometheusmodel.ParseDuration(string(*ruleGroup.Interval)) if err != nil { log.Error(err, "Failed to parse the Interval from the PrometheusRule Spec") @@ -336,7 +340,7 @@ func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.C } } - marshalledPromRule, err := json.Marshal(promRule.Spec) + marshalledPromRule, err := json.Marshal(promRuleCopy.Spec) if err != nil { log.Error(err, "Failed to marshal the promRule") From 0e3a64717022bed2f5a9675bb8b92eb2bcec695f Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:16:04 +0100 Subject: [PATCH 18/37] kubebuilder edit --plugins=go/v4,helm/v1-alpha --- .../.github/workflows/test-chart.yml | 90 +++++++++++++++++++ services/dis-promrulegroups-operator/PROJECT | 2 + .../dist/chart/.helmignore | 25 ++++++ .../dist/chart/Chart.yaml | 7 ++ .../dist/chart/templates/_helpers.tpl | 50 
+++++++++++ .../templates/certmanager/certificate.yaml | 60 +++++++++++++ .../dist/chart/templates/manager/manager.yaml | 71 +++++++++++++++ .../templates/metrics/metrics-service.yaml | 17 ++++ .../network-policy/allow-metrics-traffic.yaml | 27 ++++++ .../chart/templates/prometheus/monitor.yaml | 38 ++++++++ .../templates/rbac/leader_election_role.yaml | 42 +++++++++ .../rbac/leader_election_role_binding.yaml | 17 ++++ .../templates/rbac/metrics_auth_role.yaml | 21 +++++ .../rbac/metrics_auth_role_binding.yaml | 16 ++++ .../templates/rbac/metrics_reader_role.yaml | 13 +++ .../dist/chart/templates/rbac/role.yaml | 32 +++++++ .../chart/templates/rbac/role_binding.yaml | 16 ++++ .../chart/templates/rbac/service_account.yaml | 15 ++++ .../dist/chart/values.yaml | 76 ++++++++++++++++ 19 files changed, 635 insertions(+) create mode 100644 services/dis-promrulegroups-operator/.github/workflows/test-chart.yml create mode 100644 services/dis-promrulegroups-operator/dist/chart/.helmignore create mode 100644 services/dis-promrulegroups-operator/dist/chart/Chart.yaml create mode 100644 services/dis-promrulegroups-operator/dist/chart/templates/_helpers.tpl create mode 100644 services/dis-promrulegroups-operator/dist/chart/templates/certmanager/certificate.yaml create mode 100644 services/dis-promrulegroups-operator/dist/chart/templates/manager/manager.yaml create mode 100644 services/dis-promrulegroups-operator/dist/chart/templates/metrics/metrics-service.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/network-policy/allow-metrics-traffic.yaml create mode 100644 services/dis-promrulegroups-operator/dist/chart/templates/prometheus/monitor.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role_binding.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role_binding.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_reader_role.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/role.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/role_binding.yaml create mode 100755 services/dis-promrulegroups-operator/dist/chart/templates/rbac/service_account.yaml create mode 100644 services/dis-promrulegroups-operator/dist/chart/values.yaml diff --git a/services/dis-promrulegroups-operator/.github/workflows/test-chart.yml b/services/dis-promrulegroups-operator/.github/workflows/test-chart.yml new file mode 100644 index 00000000..63ed287a --- /dev/null +++ b/services/dis-promrulegroups-operator/.github/workflows/test-chart.yml @@ -0,0 +1,90 @@ +name: Test Chart + +on: + push: + pull_request: + +jobs: + test-e2e: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Install the latest version of kind + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + + - name: Verify kind installation + run: kind version + + - name: Create kind cluster + run: kind create cluster + + - name: Prepare dis-promrulegroups-operator + run: | + go mod tidy + 
make docker-build IMG=dis-promrulegroups-operator:v0.1.0 + kind load docker-image dis-promrulegroups-operator:v0.1.0 + + - name: Install Helm + run: | + curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + + - name: Verify Helm installation + run: helm version + + - name: Lint Helm Chart + run: | + helm lint ./dist/chart + +# TODO: Uncomment if cert-manager is enabled +# - name: Install cert-manager via Helm +# run: | +# helm repo add jetstack https://charts.jetstack.io +# helm repo update +# helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set installCRDs=true +# +# - name: Wait for cert-manager to be ready +# run: | +# kubectl wait --namespace cert-manager --for=condition=available --timeout=300s deployment/cert-manager +# kubectl wait --namespace cert-manager --for=condition=available --timeout=300s deployment/cert-manager-cainjector +# kubectl wait --namespace cert-manager --for=condition=available --timeout=300s deployment/cert-manager-webhook + +# TODO: Uncomment if Prometheus is enabled +# - name: Install Prometheus Operator CRDs +# run: | +# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +# helm repo update +# helm install prometheus-crds prometheus-community/prometheus-operator-crds +# +# - name: Install Prometheus via Helm +# run: | +# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +# helm repo update +# helm install prometheus prometheus-community/prometheus --namespace monitoring --create-namespace +# +# - name: Wait for Prometheus to be ready +# run: | +# kubectl wait --namespace monitoring --for=condition=available --timeout=300s deployment/prometheus-server + + - name: Install Helm chart for project + run: | + helm install my-release ./dist/chart --create-namespace --namespace dis-promrulegroups-operator-system + + - name: Check Helm release status + run: | + helm status my-release --namespace dis-promrulegroups-operator-system + +# TODO: Uncomment if prometheus.enabled is set to true to confirm that the ServiceMonitor gets created +# - name: Check Presence of ServiceMonitor +# run: | +# kubectl wait --namespace dis-promrulegroups-operator-system --for=jsonpath='{.kind}'=ServiceMonitor servicemonitor/dis-promrulegroups-operator-controller-manager-metrics-monitor diff --git a/services/dis-promrulegroups-operator/PROJECT b/services/dis-promrulegroups-operator/PROJECT index b00589d7..c2a60bd9 100644 --- a/services/dis-promrulegroups-operator/PROJECT +++ b/services/dis-promrulegroups-operator/PROJECT @@ -5,6 +5,8 @@ domain: dis.altinn.cloud layout: - go.kubebuilder.io/v4 +plugins: + helm.kubebuilder.io/v1-alpha: {} projectName: dis-promrulegroups-operator repo: github.com/Altinn/altinn-platform/services/dis-promrulegroups-operator resources: diff --git a/services/dis-promrulegroups-operator/dist/chart/.helmignore b/services/dis-promrulegroups-operator/dist/chart/.helmignore new file mode 100644 index 00000000..7d92f7fb --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building Helm packages. 
+# Operating system files +.DS_Store + +# Version control directories +.git/ +.gitignore +.bzr/ +.hg/ +.hgignore +.svn/ + +# Backup and temporary files +*.swp +*.tmp +*.bak +*.orig +*~ + +# IDE and editor-related files +.idea/ +.vscode/ + +# Helm chart artifacts +dist/chart/*.tgz diff --git a/services/dis-promrulegroups-operator/dist/chart/Chart.yaml b/services/dis-promrulegroups-operator/dist/chart/Chart.yaml new file mode 100644 index 00000000..d5a74c48 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: dis-promrulegroups-operator +description: A Helm chart to distribute the project dis-promrulegroups-operator +type: application +version: 0.1.0 +appVersion: "0.1.0" +icon: "https://example.com/icon.png" diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/_helpers.tpl b/services/dis-promrulegroups-operator/dist/chart/templates/_helpers.tpl new file mode 100644 index 00000000..2f09d1da --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/_helpers.tpl @@ -0,0 +1,50 @@ +{{- define "chart.name" -}} +{{- if .Chart }} + {{- if .Chart.Name }} + {{- .Chart.Name | trunc 63 | trimSuffix "-" }} + {{- else if .Values.nameOverride }} + {{ .Values.nameOverride | trunc 63 | trimSuffix "-" }} + {{- else }} + dis-promrulegroups-operator + {{- end }} +{{- else }} + dis-promrulegroups-operator +{{- end }} +{{- end }} + + +{{- define "chart.labels" -}} +{{- if .Chart.AppVersion -}} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Chart.Version }} +helm.sh/chart: {{ .Chart.Version | quote }} +{{- end }} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + + +{{- define "chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + + +{{- define "chart.hasMutatingWebhooks" -}} +{{- $hasMutating := false }} +{{- range . }} + {{- if eq .type "mutating" }} + {{- $hasMutating = true }}{{- end }} +{{- end }} +{{ $hasMutating }}{{- end }} + + +{{- define "chart.hasValidatingWebhooks" -}} +{{- $hasValidating := false }} +{{- range . }} + {{- if eq .type "validating" }} + {{- $hasValidating = true }}{{- end }} +{{- end }} +{{ $hasValidating }}{{- end }} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/certmanager/certificate.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/certmanager/certificate.yaml new file mode 100644 index 00000000..31c2f4d6 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/certmanager/certificate.yaml @@ -0,0 +1,60 @@ +{{- if .Values.certmanager.enable }} +# Self-signed Issuer +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: selfsigned-issuer + namespace: {{ .Release.Namespace }} +spec: + selfSigned: {} +{{- if .Values.webhook.enable }} +--- +# Certificate for the webhook +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + annotations: + {{- if .Values.crd.keep }} + "helm.sh/resource-policy": keep + {{- end }} + name: serving-cert + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . 
| nindent 4 }} +spec: + dnsNames: + - dis-promrulegroups-operator.{{ .Release.Namespace }}.svc + - dis-promrulegroups-operator.{{ .Release.Namespace }}.svc.cluster.local + - dis-promrulegroups-operator-webhook-service.{{ .Release.Namespace }}.svc + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert +{{- end }} +{{- if and .Values.metrics.enable .Values.certmanager.enable }} +--- +# Certificate for the metrics +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + annotations: + {{- if .Values.crd.keep }} + "helm.sh/resource-policy": keep + {{- end }} + labels: + {{- include "chart.labels" . | nindent 4 }} + name: metrics-certs + namespace: {{ .Release.Namespace }} +spec: + dnsNames: + - dis-promrulegroups-operator.{{ .Release.Namespace }}.svc + - dis-promrulegroups-operator.{{ .Release.Namespace }}.svc.cluster.local + - dis-promrulegroups-operator-metrics-service.{{ .Release.Namespace }}.svc + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: metrics-server-cert +{{- end }} +{{- end }} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/manager/manager.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/manager/manager.yaml new file mode 100644 index 00000000..caa65b3c --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/manager/manager.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dis-promrulegroups-operator-controller-manager + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . | nindent 4 }} + control-plane: controller-manager +spec: + replicas: {{ .Values.controllerManager.replicas }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + {{- include "chart.labels" . | nindent 8 }} + control-plane: controller-manager + {{- if and .Values.controllerManager.pod .Values.controllerManager.pod.labels }} + {{- range $key, $value := .Values.controllerManager.pod.labels }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + containers: + - name: manager + args: + {{- range .Values.controllerManager.container.args }} + - {{ . 
}} + {{- end }} + command: + - /manager + image: {{ .Values.controllerManager.container.image.repository }}:{{ .Values.controllerManager.container.image.tag }} + {{- if .Values.controllerManager.container.env }} + env: + {{- range $key, $value := .Values.controllerManager.container.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + {{- end }} + livenessProbe: + {{- toYaml .Values.controllerManager.container.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.controllerManager.container.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.controllerManager.container.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.controllerManager.container.securityContext | nindent 12 }} + {{- if and .Values.certmanager.enable (or .Values.webhook.enable .Values.metrics.enable) }} + volumeMounts: + {{- if and .Values.metrics.enable .Values.certmanager.enable }} + - name: metrics-certs + mountPath: /tmp/k8s-metrics-server/metrics-certs + readOnly: true + {{- end }} + {{- end }} + securityContext: + {{- toYaml .Values.controllerManager.securityContext | nindent 8 }} + serviceAccountName: {{ .Values.controllerManager.serviceAccountName }} + terminationGracePeriodSeconds: {{ .Values.controllerManager.terminationGracePeriodSeconds }} + {{- if and .Values.certmanager.enable (or .Values.webhook.enable .Values.metrics.enable) }} + volumes: + {{- if and .Values.metrics.enable .Values.certmanager.enable }} + - name: metrics-certs + secret: + secretName: metrics-server-cert + {{- end }} + {{- end }} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/metrics/metrics-service.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/metrics/metrics-service.yaml new file mode 100644 index 00000000..8f1550e2 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/metrics/metrics-service.yaml @@ -0,0 +1,17 @@ +{{- if .Values.metrics.enable }} +apiVersion: v1 +kind: Service +metadata: + name: dis-promrulegroups-operator-controller-manager-metrics-service + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + name: https + selector: + control-plane: controller-manager +{{- end }} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/network-policy/allow-metrics-traffic.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/network-policy/allow-metrics-traffic.yaml new file mode 100755 index 00000000..8dfbaf4b --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enable }} +# This NetworkPolicy allows ingress traffic +# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those +# namespaces are able to gather data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + name: allow-metrics-traffic + namespace: {{ .Release.Namespace }} +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/prometheus/monitor.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/prometheus/monitor.yaml new file mode 100644 index 00000000..134ca687 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/prometheus/monitor.yaml @@ -0,0 +1,38 @@ +# To integrate with Prometheus. +{{- if .Values.prometheus.enable }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: dis-promrulegroups-operator-controller-manager-metrics-monitor + namespace: {{ .Release.Namespace }} +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + {{- if .Values.certmanager.enable }} + # Apply secure TLS configuration with cert-manager + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key + {{- else }} + # Development/Test mode (insecure configuration) + insecureSkipVerify: true + {{- end }} + selector: + matchLabels: + control-plane: controller-manager +{{- end }} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role.yaml new file mode 100755 index 00000000..c7c96f60 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role.yaml @@ -0,0 +1,42 @@ +{{- if .Values.rbac.enable }} +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace }} + name: dis-promrulegroups-operator-leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role_binding.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role_binding.yaml new file mode 100755 index 00000000..d6d752e8 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/leader_election_role_binding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + namespace: {{ .Release.Namespace }} + name: dis-promrulegroups-operator-leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: dis-promrulegroups-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role.yaml new file mode 100755 index 00000000..5ab32f6d --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.enable .Values.metrics.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: dis-promrulegroups-operator-metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role_binding.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role_binding.yaml new file mode 100755 index 00000000..fc7ee9c8 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.enable .Values.metrics.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: dis-promrulegroups-operator-metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dis-promrulegroups-operator-metrics-auth-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_reader_role.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_reader_role.yaml new file mode 100755 index 00000000..72a56320 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/metrics_reader_role.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.rbac.enable .Values.metrics.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: dis-promrulegroups-operator-metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/role.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/role.yaml new file mode 100755 index 00000000..27322789 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/role.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rbac.enable }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + name: dis-promrulegroups-operator-manager-role +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/finalizers + verbs: + - update +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/status + verbs: + - get +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/role_binding.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/role_binding.yaml new file mode 100755 index 00000000..0f744873 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/role_binding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: dis-promrulegroups-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dis-promrulegroups-operator-manager-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/templates/rbac/service_account.yaml b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/service_account.yaml new file mode 100755 index 00000000..4d8ed464 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/templates/rbac/service_account.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbac.enable }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- if and .Values.controllerManager.serviceAccount .Values.controllerManager.serviceAccount.annotations }} + annotations: + {{- range $key, $value := .Values.controllerManager.serviceAccount.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + name: dis-promrulegroups-operator-controller-manager + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/services/dis-promrulegroups-operator/dist/chart/values.yaml b/services/dis-promrulegroups-operator/dist/chart/values.yaml new file mode 100644 index 00000000..729103e5 --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/chart/values.yaml @@ -0,0 +1,76 @@ +# [MANAGER]: Manager Deployment Configurations +controllerManager: + replicas: 1 + container: + image: + repository: controller + tag: latest + args: + - "--leader-elect" + - "--metrics-bind-address=:8443" + - "--health-probe-bind-address=:8081" + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 20 + httpGet: + path: /healthz + port: 8081 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + httpGet: + path: /readyz + port: 8081 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 10 + serviceAccountName: dis-promrulegroups-operator-controller-manager + +# [RBAC]: To enable RBAC (Permissions) configurations +rbac: + enable: true + +# [CRDs]: To enable the CRDs +crd: + # This option determines whether the CRDs are included + # in the installation process. 
+ enable: true + + # Enabling this option adds the "helm.sh/resource-policy": keep + # annotation to the CRD, ensuring it remains installed even when + # the Helm release is uninstalled. + # NOTE: Removing the CRDs will also remove all cert-manager CR(s) + # (Certificates, Issuers, ...) due to garbage collection. + keep: true + +# [METRICS]: Set to true to generate manifests for exporting metrics. +# To disable metrics export set false, and ensure that the +# ControllerManager argument "--metrics-bind-address=:8443" is removed. +metrics: + enable: true + +# [PROMETHEUS]: To enable a ServiceMonitor to export metrics to Prometheus set true +prometheus: + enable: false + +# [CERT-MANAGER]: To enable cert-manager injection to webhooks set true +certmanager: + enable: false + +# [NETWORK POLICIES]: To enable NetworkPolicies set true +networkPolicy: + enable: false From 1a2baa2a30e7f2a631af6975d83bf3b121a0e5fc Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:16:19 +0100 Subject: [PATCH 19/37] helm template --- .../dist/helm-install.yaml | 293 ++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 services/dis-promrulegroups-operator/dist/helm-install.yaml diff --git a/services/dis-promrulegroups-operator/dist/helm-install.yaml b/services/dis-promrulegroups-operator/dist/helm-install.yaml new file mode 100644 index 00000000..20eec14c --- /dev/null +++ b/services/dis-promrulegroups-operator/dist/helm-install.yaml @@ -0,0 +1,293 @@ +--- +# Source: dis-promrulegroups-operator/templates/rbac/service_account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-controller-manager + namespace: hvasomhelst +--- +# Source: dis-promrulegroups-operator/templates/rbac/metrics_auth_role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +# Source: dis-promrulegroups-operator/templates/rbac/metrics_reader_role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get +--- +# Source: dis-promrulegroups-operator/templates/rbac/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-manager-role +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - get + - list + - patch + - 
update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/finalizers + verbs: + - update +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules/status + verbs: + - get +--- +# Source: dis-promrulegroups-operator/templates/rbac/metrics_auth_role_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dis-promrulegroups-operator-metrics-auth-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: hvasomhelst +--- +# Source: dis-promrulegroups-operator/templates/rbac/role_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dis-promrulegroups-operator-manager-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: hvasomhelst +--- +# Source: dis-promrulegroups-operator/templates/rbac/leader_election_role.yaml +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + namespace: hvasomhelst + name: dis-promrulegroups-operator-leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: dis-promrulegroups-operator/templates/rbac/leader_election_role_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + namespace: hvasomhelst + name: dis-promrulegroups-operator-leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: dis-promrulegroups-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: dis-promrulegroups-operator-controller-manager + namespace: hvasomhelst +--- +# Source: dis-promrulegroups-operator/templates/metrics/metrics-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: dis-promrulegroups-operator-controller-manager-metrics-service + namespace: hvasomhelst + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +spec: + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP 
+ name: https + selector: + control-plane: controller-manager +--- +# Source: dis-promrulegroups-operator/templates/manager/manager.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dis-promrulegroups-operator-controller-manager + namespace: hvasomhelst + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + control-plane: controller-manager +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + control-plane: controller-manager + spec: + containers: + - name: manager + args: + - --leader-elect + - --metrics-bind-address=:8443 + - --health-probe-bind-address=:8081 + command: + - /manager + image: controller:latest + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: dis-promrulegroups-operator-controller-manager + terminationGracePeriodSeconds: 10 +--- +# Source: dis-promrulegroups-operator/templates/prometheus/monitor.yaml +# To integrate with Prometheus. 
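A note on how the AZ_* environment variables from PATCH 16 (and, in the next patch, the chart's values.yaml) are consumed: cmd/main.go is not touched by this part of the series, but the reconciler fields set in the controller test earlier (DeploymentClient, PrometheusRuleGroupsClient, AzResourceGroupName, and friends) and the os.Getenv("AZ_ACTION_GROUP_ID") call visible in later hunks suggest startup wiring roughly like the sketch below. This is a sketch under assumptions, not code from the patches; in particular the azidentity.DefaultAzureCredential setup is an assumption, while the two ARM client constructors and the variable names are taken from the series itself.

```go
// Sketch only: plausible manager startup wiring for the AZ_* variables.
// Not part of the patch series.
package main

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/alertsmanagement/armalertsmanagement"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
)

func main() {
	subscriptionID := os.Getenv("AZ_SUBSCRIPTION_ID")

	// In-cluster, DefaultAzureCredential resolves the federated workload
	// identity enabled by the pod label and service-account annotation added
	// in PATCH 16; locally it falls back to environment or CLI credentials.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain Azure credential: %v", err)
	}

	deploymentsClient, err := armresources.NewDeploymentsClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatalf("failed to create deployments client: %v", err)
	}

	ruleGroupsClient, err := armalertsmanagement.NewPrometheusRuleGroupsClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatalf("failed to create PrometheusRuleGroups client: %v", err)
	}

	// These clients would be handed to controller.PrometheusRuleReconciler
	// together with AZ_RESOURCE_GROUP_NAME, AZ_RESOURCE_GROUP_LOCATION,
	// AZ_AZURE_MONITOR_WORKSPACE, AZ_CLUSTER_NAME, NODE_PATH and
	// AZ_PROM_RULES_CONVERTER_PATH, mirroring the fields set in the test.
	_ = deploymentsClient
	_ = ruleGroupsClient
}
```

The split between identity (workload identity via label and annotation) and plain configuration (env vars) matches what the surrounding patches template into the Deployment and ServiceAccount.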
From 730858baf18e742038be1ef6bb44c607ca47c7db Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:17:57 +0100 Subject: [PATCH 20/37] add .env to gitignore --- services/dis-promrulegroups-operator/.gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/dis-promrulegroups-operator/.gitignore b/services/dis-promrulegroups-operator/.gitignore index ada68ff0..7fcefeb3 100644 --- a/services/dis-promrulegroups-operator/.gitignore +++ b/services/dis-promrulegroups-operator/.gitignore @@ -25,3 +25,6 @@ go.work *.swp *.swo *~ + +# Ignore .env +.env \ No newline at end of file From ff46f57b8df9d99120ef857a3403a5b34796bee2 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:27:37 +0100 Subject: [PATCH 21/37] update values.yaml --- .../dist/chart/values.yaml | 22 +++++++-- .../dist/helm-install.yaml | 47 ++++++++++++++++++- 2 files changed, 65 insertions(+), 4 deletions(-) diff --git a/services/dis-promrulegroups-operator/dist/chart/values.yaml b/services/dis-promrulegroups-operator/dist/chart/values.yaml index 729103e5..5322e6d9 100644 --- a/services/dis-promrulegroups-operator/dist/chart/values.yaml +++ b/services/dis-promrulegroups-operator/dist/chart/values.yaml @@ -1,7 +1,23 @@ # [MANAGER]: Manager Deployment Configurations controllerManager: + serviceAccount: + annotations: + azure.workload.identity/client-id: "" # TODO: Should be populated based on the output from Terraform + pod: + labels: + azure.workload.identity/use: "true" replicas: 1 container: + env: + AZ_SUBSCRIPTION_ID: "" # TODO: Should be populated based on the output from Terraform + AZ_RESOURCE_GROUP_NAME: "" # TODO: Should be populated based on the output from Terraform + AZ_MONITOR_WORKSPACE_NAME: "" # TODO: Should be populated based on the output from Terraform + AZ_RESOURCE_GROUP_LOCATION: "" # TODO: Should be populated based on the output from Terraform + AZ_ACTION_GROUP_ID: "" # TODO: Should be configurable with information coming from the PrometheusRule CR + AZ_AZURE_MONITOR_WORKSPACE: "" # TODO: Should be populated based on the output from Terraform + AZ_CLUSTER_NAME: "" # TODO: Should be populated based on the output from Terraform + NODE_PATH: /nodejs/bin/node # From the Dockerfile + AZ_PROM_RULES_CONVERTER_PATH: /tool/node_modules/az-prom-rules-converter/dist/cli.js image: repository: controller tag: latest @@ -37,7 +53,7 @@ controllerManager: runAsNonRoot: true seccompProfile: type: RuntimeDefault - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: 30 serviceAccountName: dis-promrulegroups-operator-controller-manager # [RBAC]: To enable RBAC (Permissions) configurations rbac: enable: true # [CRDs]: To enable the CRDs crd: # This option determines whether the CRDs are included # in the installation process. 
- enable: true + enable: false # Enabling this option adds the "helm.sh/resource-policy": keep # annotation to the CRD, ensuring it remains installed even when @@ -65,7 +81,7 @@ metrics: # [PROMETHEUS]: To enable a ServiceMonitor to export metrics to Prometheus set true prometheus: - enable: false + enable: true # [CERT-MANAGER]: To enable cert-manager injection to webhooks set true certmanager: diff --git a/services/dis-promrulegroups-operator/dist/helm-install.yaml b/services/dis-promrulegroups-operator/dist/helm-install.yaml index 20eec14c..5d1d33a9 100644 --- a/services/dis-promrulegroups-operator/dist/helm-install.yaml +++ b/services/dis-promrulegroups-operator/dist/helm-install.yaml @@ -9,6 +9,8 @@ metadata: app.kubernetes.io/name: dis-promrulegroups-operator app.kubernetes.io/instance: release-name app.kubernetes.io/managed-by: Helm + annotations: + azure.workload.identity/client-id: name: dis-promrulegroups-operator-controller-manager namespace: hvasomhelst --- @@ -248,6 +250,7 @@ spec: app.kubernetes.io/instance: release-name app.kubernetes.io/managed-by: Helm control-plane: controller-manager + azure.workload.identity/use: true spec: containers: - name: manager @@ -258,6 +261,25 @@ spec: command: - /manager image: controller:latest + env: + - name: AZ_ACTION_GROUP_ID + value: + - name: AZ_AZURE_MONITOR_WORKSPACE + value: + - name: AZ_CLUSTER_NAME + value: + - name: AZ_MONITOR_WORKSPACE_NAME + value: + - name: AZ_PROM_RULES_CONVERTER_PATH + value: /tool/node_modules/az-prom-rules-converter/dist/cli.js + - name: AZ_RESOURCE_GROUP_LOCATION + value: + - name: AZ_RESOURCE_GROUP_NAME + value: + - name: AZ_SUBSCRIPTION_ID + value: + - name: NODE_PATH + value: /nodejs/bin/node livenessProbe: httpGet: path: /healthz @@ -287,7 +309,30 @@ spec: seccompProfile: type: RuntimeDefault serviceAccountName: dis-promrulegroups-operator-controller-manager - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: 30 --- # Source: dis-promrulegroups-operator/templates/prometheus/monitor.yaml # To integrate with Prometheus. 
+apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app.kubernetes.io/version: "0.1.0" + helm.sh/chart: "0.1.0" + app.kubernetes.io/name: dis-promrulegroups-operator + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + name: dis-promrulegroups-operator-controller-manager-metrics-monitor + namespace: hvasomhelst +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # Development/Test mode (insecure configuration) + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager From a1377b7851ec25cdbd79c0dd10b2384bd628422f Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Thu, 28 Nov 2024 12:49:30 +0100 Subject: [PATCH 22/37] Remove Dockerfile.bak --- .../Dockerfile.bak | 33 ------------------- 1 file changed, 33 deletions(-) delete mode 100644 services/dis-promrulegroups-operator/Dockerfile.bak diff --git a/services/dis-promrulegroups-operator/Dockerfile.bak b/services/dis-promrulegroups-operator/Dockerfile.bak deleted file mode 100644 index 4ba18b68..00000000 --- a/services/dis-promrulegroups-operator/Dockerfile.bak +++ /dev/null @@ -1,33 +0,0 @@ -# Build the manager binary -FROM golang:1.22 AS builder -ARG TARGETOS -ARG TARGETARCH - -WORKDIR /workspace -# Copy the Go Modules manifests -COPY go.mod go.mod -COPY go.sum go.sum -# cache deps before building and copying source so that we don't need to re-download as much -# and so that source changes don't invalidate our downloaded layer -RUN go mod download - -# Copy the go source -COPY cmd/main.go cmd/main.go -COPY api/ api/ -COPY internal/ internal/ - -# Build -# the GOARCH has not a default value to allow the binary be built according to the host where the command -# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO -# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, -# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. -RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go - -# Use distroless as minimal base image to package the manager binary -# Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot -WORKDIR / -COPY --from=builder /workspace/manager . 
-USER 65532:65532 - -ENTRYPOINT ["/manager"] From c0066c9fc8ddd2abaf9ab5034584cdf62e7acee3 Mon Sep 17 00:00:00 2001 From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com> Date: Thu, 28 Nov 2024 13:01:28 +0100 Subject: [PATCH 23/37] return error as nil when RequeueAfter is configured --- .../controller/prometheusrule_controller.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go index 7779a4b6..c1853042 100644 --- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go +++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go @@ -22,7 +22,6 @@ import ( "crypto/sha256" "encoding/base64" "encoding/json" - "errors" "fmt" "os" "os/exec" @@ -103,7 +102,7 @@ func (r *PrometheusRuleReconciler) handleCreation(ctx context.Context, req ctrl. ) if err != nil { log.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name) - return ctrl.Result{RequeueAfter: 30 * time.Second}, err + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } // Update the annotations on the CR return r.updateAnnotations(ctx, promRule, ruleGroupNames, hashArmTemplate([]byte(armTemplateJsonString)), deploymentName, suffix) @@ -146,7 +145,7 @@ func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Re ) if err != nil { log.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name) - return ctrl.Result{RequeueAfter: 30 * time.Second}, err + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } return r.updateAnnotations(ctx, promRule, ruleGroupNames, regeneratedArmTemplateHash, armDeploymentName, suffix) @@ -163,18 +162,18 @@ func (r *PrometheusRuleReconciler) handleDelete(ctx context.Context, promRule mo if controllerutil.ContainsFinalizer(&promRule, finalizerName) { if err := r.deleteExternalResources(ctx, promRule); err != nil { log.Info("failed to delete Azure resources", "namespace", promRule.Namespace, "name", promRule.Name) - return ctrl.Result{RequeueAfter: 5 * time.Second}, err + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } log.Info("removing our finalizer", "namespace", promRule.Namespace, "name", promRule.Name) ok := controllerutil.RemoveFinalizer(&promRule, finalizerName) if ok { if err := r.Update(ctx, &promRule); err != nil { log.Info("failed to update object", "namespace", promRule.Namespace, "name", promRule.Name) - return ctrl.Result{RequeueAfter: 5 * time.Second}, err + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } } else { log.Info("failed to remove out finalizer from object", "namespace", promRule.Namespace, "name", promRule.Name) - return ctrl.Result{RequeueAfter: 5 * time.Second}, errors.New("failed to remove finalizer from object") + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } } return ctrl.Result{}, nil @@ -187,12 +186,12 @@ func (r *PrometheusRuleReconciler) addOurFinalizer(ctx context.Context, promRule if ok { if err := r.Update(ctx, &promRule); err != nil { log.Error(err, "failed to update the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name) - return ctrl.Result{RequeueAfter: 5 * time.Second}, err + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } return ctrl.Result{}, nil } else { log.Info("failed to add our 
From 3227610bb3cc2451534c6e3e66b075bc3f706055 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 28 Nov 2024 13:03:09 +0100
Subject: [PATCH 24/37] simplify if condition

---
 .../internal/controller/prometheusrule_controller.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
index c1853042..6b0f872d 100644
--- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
+++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
@@ -122,7 +122,7 @@ func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Re
 	}
 
 	regeneratedArmTemplateHash := hashArmTemplate([]byte(regeneratedArmTemplate))
-	if !(regeneratedArmTemplateHash == lastGeneratedArmtemplateHash) {
+	if regeneratedArmTemplateHash != lastGeneratedArmtemplateHash {
 		ruleGroupNames := generateRuleGroupNamesAnnotationString(promRule)
 
 		annotations := promRule.GetAnnotations()

From 6a25df2e1ac5730bb9ab1fb992316c7696ccbaf2 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 28 Nov 2024 13:09:54 +0100
Subject: [PATCH 25/37] log errors as errors and not as info

---
 .../internal/controller/prometheusrule_controller.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
index 6b0f872d..4edc8f18 100644
--- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
+++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
@@ -22,6 +22,7 @@ import (
 	"crypto/sha256"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"
 	"os/exec"
@@ -161,18 +162,18 @@ func (r *PrometheusRuleReconciler) handleDelete(ctx context.Context, promRule mo
 	if controllerutil.ContainsFinalizer(&promRule, finalizerName) {
 		if err := r.deleteExternalResources(ctx, promRule); err != nil {
-			log.Info("failed to delete Azure resources", "namespace", promRule.Namespace, "name", promRule.Name)
+			log.Error(err, "failed to delete Azure resources", "namespace", promRule.Namespace, "name", promRule.Name)
 			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 		}
 		log.Info("removing our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
 		ok := controllerutil.RemoveFinalizer(&promRule, finalizerName)
 		if ok {
 			if err := r.Update(ctx, &promRule); err != nil {
-				log.Info("failed to update object", "namespace", promRule.Namespace, "name", promRule.Name)
+				log.Error(err, "failed to update object", "namespace", promRule.Namespace, "name", promRule.Name)
 				return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 			}
 		} else {
-			log.Info("failed to remove out finalizer from object", "namespace", promRule.Namespace, "name", promRule.Name)
+			log.Error(errors.New("failed to remove out finalizer from object"), "failed to remove out finalizer from object", "namespace", promRule.Namespace, "name", promRule.Name)
 			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 		}
 	}
@@ -190,7 +191,7 @@ func (r *PrometheusRuleReconciler) addOurFinalizer(ctx context.Context, promRule
 		}
 		return ctrl.Result{}, nil
 	} else {
-		log.Info("failed to add our finalizer to the object", "namespace", promRule.Namespace, "name", promRule.Name)
+		log.Error(errors.New("failed to add our finalizer to the object"), "failed to add our finalizer to the object", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 	}
 }
From c325b96da839d383d7d1c6a4684b60de693bb2a8 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 28 Nov 2024 13:16:26 +0100
Subject: [PATCH 26/37] simplify if else block

---
 .../controller/prometheusrule_controller.go  | 32 +++++++++----------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
index 4edc8f18..194c113b 100644
--- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
+++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
@@ -207,25 +207,23 @@ func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		log.Error(err, "unable to fetch PrometheusRule", "namespace", req.Namespace, "name", req.Name)
 		return ctrl.Result{}, err
 	}
-	// The resource is not marked for deletion.
-	if prometheusRule.GetDeletionTimestamp().IsZero() {
-		// We need to make sure we add a finalizer to the PrometheusRule CR so we can cleanup Azure resources when the CR is deleted.
-		if !controllerutil.ContainsFinalizer(&prometheusRule, finalizerName) {
-			return r.addOurFinalizer(ctx, prometheusRule)
-		}
-		// Look into the object's annotations for annotations we own.
-		annotations := prometheusRule.GetAnnotations()
-		ok := hasAllAnnotations(annotations)
-		if !ok {
-			log.Info("new PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
-			return r.handleCreation(ctx, req, prometheusRule)
-		} else {
-			log.Info("update to PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
-			return r.handleUpdate(ctx, req, prometheusRule)
-		}
-	} else {
+	// The resource is marked for deletion.
+	if !prometheusRule.GetDeletionTimestamp().IsZero() {
 		return r.handleDelete(ctx, prometheusRule)
 	}
+	// We need to make sure we add a finalizer to the PrometheusRule CR so we can cleanup Azure resources when the CR is deleted.
+	if !controllerutil.ContainsFinalizer(&prometheusRule, finalizerName) {
+		return r.addOurFinalizer(ctx, prometheusRule)
+	}
+	// Look into the object's annotations for annotations we own.
+	annotations := prometheusRule.GetAnnotations()
+	ok := hasAllAnnotations(annotations)
+	if ok {
+		log.Info("update to PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
+		return r.handleUpdate(ctx, req, prometheusRule)
+	}
+	log.Info("new PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
+	return r.handleCreation(ctx, req, prometheusRule)
 }
 
 func (r *PrometheusRuleReconciler) updateAnnotations(ctx context.Context, promRule monitoringv1.PrometheusRule, groupNames, armTemplateHash, armDeploymentName, timestamp string) (reconcile.Result, error) {

From 257877f510356b572627328c735e97cf6a9d3f04 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 28 Nov 2024 13:29:25 +0100
Subject: [PATCH 27/37] rename variable to logger to avoid confusion with the
 package name

---
 .../controller/prometheusrule_controller.go  | 64 +++++++++----------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
index 194c113b..177c7c44 100644
--- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
+++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
@@ -84,10 +84,10 @@ type PrometheusRuleReconciler struct {
 }
 
 func (r *PrometheusRuleReconciler) handleCreation(ctx context.Context, req ctrl.Request, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 	armTemplateJsonString, err := r.generateArmTemplateFromPromRule(ctx, promRule)
 	if err != nil {
-		log.Error(err, "failed to convert the PrometheusRule into an ARM template", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Error(err, "failed to convert the PrometheusRule into an ARM template", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{Requeue: false}, nil
 	}
 
@@ -102,7 +102,7 @@ func (r *PrometheusRuleReconciler) handleCreation(ctx context.Context, req ctrl.
 		os.Getenv("AZ_ACTION_GROUP_ID"),
 	)
 	if err != nil {
-		log.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
 	}
 	// Update the annotations on the CR
@@ -110,7 +110,7 @@ func (r *PrometheusRuleReconciler) handleCreation(ctx context.Context, req ctrl.
 	return r.updateAnnotations(ctx, promRule, ruleGroupNames, hashArmTemplate([]byte(armTemplateJsonString)), deploymentName, suffix)
 }
 
 func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Request, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 	annotations := promRule.GetAnnotations()
 
 	lastGeneratedArmtemplateHash := annotations[azArmTemplateHashAnnotation]
@@ -118,7 +118,7 @@ func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Re
 	armDeploymentName := generateArmDeploymentName(req, suffix)
 	regeneratedArmTemplate, err := r.generateArmTemplateFromPromRule(ctx, promRule)
 	if err != nil {
-		log.Error(err, "failed to convert the PrometheusRule into an ARM template", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Error(err, "failed to convert the PrometheusRule into an ARM template", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{Requeue: false}, nil
 	}
 
@@ -134,7 +134,7 @@ func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Re
 		for _, td := range toDelete {
 			_, err := r.deletePrometheusRuleGroup(ctx, td)
 			if err != nil {
-				log.Error(err, "failed to delete PrometheusRuleGroup", "PrometheusRuleGroupName", td)
+				logger.Error(err, "failed to delete PrometheusRuleGroup", "PrometheusRuleGroupName", td)
 			}
 		}
 
@@ -145,7 +145,7 @@ func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Re
 		os.Getenv("AZ_ACTION_GROUP_ID"),
 	)
 	if err != nil {
-		log.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Error(err, "failed to deploy arm template", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
 	}
 
@@ -157,23 +157,23 @@ func (r *PrometheusRuleReconciler) handleUpdate(ctx context.Context, req ctrl.Re
 }
 
 func (r *PrometheusRuleReconciler) handleDelete(ctx context.Context, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) {
-	log := log.FromContext(ctx)
-	log.Info("deletion of PrometheusRule CR detected", "namespace", promRule.Namespace, "name", promRule.Name)
+	logger := log.FromContext(ctx)
+	logger.Info("deletion of PrometheusRule CR detected", "namespace", promRule.Namespace, "name", promRule.Name)
 
 	if controllerutil.ContainsFinalizer(&promRule, finalizerName) {
 		if err := r.deleteExternalResources(ctx, promRule); err != nil {
-			log.Error(err, "failed to delete Azure resources", "namespace", promRule.Namespace, "name", promRule.Name)
+			logger.Error(err, "failed to delete Azure resources", "namespace", promRule.Namespace, "name", promRule.Name)
 			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 		}
-		log.Info("removing our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Info("removing our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
 		ok := controllerutil.RemoveFinalizer(&promRule, finalizerName)
 		if ok {
 			if err := r.Update(ctx, &promRule); err != nil {
-				log.Error(err, "failed to update object", "namespace", promRule.Namespace, "name", promRule.Name)
+				logger.Error(err, "failed to update object", "namespace", promRule.Namespace, "name", promRule.Name)
 				return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 			}
 		} else {
-			log.Error(errors.New("failed to remove out finalizer from object"), "failed to remove out finalizer from object", "namespace", promRule.Namespace, "name", promRule.Name)
+			logger.Error(errors.New("failed to remove out finalizer from object"), "failed to remove out finalizer from object", "namespace", promRule.Namespace, "name", promRule.Name)
 			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 		}
 	}
@@ -181,30 +181,30 @@ func (r *PrometheusRuleReconciler) handleDelete(ctx context.Context, promRule mo
 }
 
 func (r *PrometheusRuleReconciler) addOurFinalizer(ctx context.Context, promRule monitoringv1.PrometheusRule) (reconcile.Result, error) {
-	log := log.FromContext(ctx)
-	log.Info("updating the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
+	logger := log.FromContext(ctx)
+	logger.Info("updating the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
 	ok := controllerutil.AddFinalizer(&promRule, finalizerName)
 	if ok {
 		if err := r.Update(ctx, &promRule); err != nil {
-			log.Error(err, "failed to update the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
+			logger.Error(err, "failed to update the PrometheusRule CR with our finalizer", "namespace", promRule.Namespace, "name", promRule.Name)
 			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 		}
 		return ctrl.Result{}, nil
 	} else {
-		log.Error(errors.New("failed to add our finalizer to the object"), "failed to add our finalizer to the object", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Error(errors.New("failed to add our finalizer to the object"), "failed to add our finalizer to the object", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 	}
 }
 
 func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 	var prometheusRule monitoringv1.PrometheusRule
 	if err := r.Get(ctx, req.NamespacedName, &prometheusRule); err != nil {
 		if apierrors.IsNotFound(err) {
 			return ctrl.Result{}, nil
 		}
-		log.Error(err, "unable to fetch PrometheusRule", "namespace", req.Namespace, "name", req.Name)
+		logger.Error(err, "unable to fetch PrometheusRule", "namespace", req.Namespace, "name", req.Name)
 		return ctrl.Result{}, err
 	}
 	// The resource is marked for deletion.
@@ -219,15 +219,15 @@ func (r *PrometheusRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	annotations := prometheusRule.GetAnnotations()
 	ok := hasAllAnnotations(annotations)
 	if ok {
-		log.Info("update to PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
+		logger.Info("update to PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
 		return r.handleUpdate(ctx, req, prometheusRule)
 	}
-	log.Info("new PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
+	logger.Info("new PrometheusRule CR detected", "namespace", prometheusRule.Namespace, "name", prometheusRule.Name)
 	return r.handleCreation(ctx, req, prometheusRule)
 }
 
 func (r *PrometheusRuleReconciler) updateAnnotations(ctx context.Context, promRule monitoringv1.PrometheusRule, groupNames, armTemplateHash, armDeploymentName, timestamp string) (reconcile.Result, error) {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 	annotations := promRule.GetAnnotations()
 
 	if annotations == nil {
@@ -241,14 +241,14 @@ func (r *PrometheusRuleReconciler) updateAnnotations(ctx context.Context, promRu
 	promRule.SetAnnotations(annotations)
 	err := r.Client.Update(ctx, &promRule)
 	if err != nil {
-		log.Error(err, "failed to update the PrometheusRule CR with new annotations", "namespace", promRule.Namespace, "name", promRule.Name)
+		logger.Error(err, "failed to update the PrometheusRule CR with new annotations", "namespace", promRule.Namespace, "name", promRule.Name)
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 	}
 	return ctrl.Result{}, nil
 }
 
 func (r *PrometheusRuleReconciler) deployArmTemplate(ctx context.Context, deploymentName string, jsonTemplate string, actionGroupId string) error {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 	contents := make(map[string]interface{})
 	_ = json.Unmarshal([]byte(jsonTemplate), &contents)
@@ -281,7 +281,7 @@ func (r *PrometheusRuleReconciler) deployArmTemplate(ctx context.Context, deploy
 	)
 
 	if err != nil {
-		log.Error(err, "failed BeginCreateOrUpdate", "deploymentName", deploymentName)
+		logger.Error(err, "failed BeginCreateOrUpdate", "deploymentName", deploymentName)
 		return err
 	}
 	// TODO: Check the best practices here. I doubt we want to do this synchronously.
@@ -308,19 +308,19 @@ func (r *PrometheusRuleReconciler) deleteExternalResources(ctx context.Context,
 }
 
 func (r *PrometheusRuleReconciler) deletePrometheusRuleGroup(ctx context.Context, ruleGroupName string) (*armalertsmanagement.PrometheusRuleGroupsClientDeleteResponse, error) {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 
 	resp, err := r.PrometheusRuleGroupsClient.Delete(ctx, r.AzResourceGroupName, ruleGroupName, nil)
 	if err != nil {
-		log.Error(err, "failed to delete the prometheus rule group", "ruleGroupName", ruleGroupName)
+		logger.Error(err, "failed to delete the prometheus rule group", "ruleGroupName", ruleGroupName)
 		return nil, err
 	}
-	log.Info("Sucessfully deleted PrometheusRuleGroup", "ruleGroupName", ruleGroupName)
+	logger.Info("Sucessfully deleted PrometheusRuleGroup", "ruleGroupName", ruleGroupName)
 	return &resp, nil
 }
 
 func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.Context, promRule monitoringv1.PrometheusRule) (string, error) {
-	log := log.FromContext(ctx)
+	logger := log.FromContext(ctx)
 	promRuleCopy := promRule.DeepCopy()
 	for idx, ruleGroup := range promRuleCopy.Spec.Groups {
 		// The names are the same for every cluster so we need to prefix them
@@ -329,7 +329,7 @@ func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.C
 		}
 		interval, err := prometheusmodel.ParseDuration(string(*ruleGroup.Interval))
 		if err != nil {
-			log.Error(err, "Failed to parse the Interval from the PrometheusRule Spec")
+			logger.Error(err, "Failed to parse the Interval from the PrometheusRule Spec")
 			return "", err
 		}
 		// Can't be lower than 1m.
@@ -341,7 +341,7 @@ func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.C
 
 	marshalledPromRule, err := json.Marshal(promRuleCopy.Spec)
 	if err != nil {
-		log.Error(err, "Failed to marshal the promRule")
+		logger.Error(err, "Failed to marshal the promRule")
 		return "", err
 	}
 
@@ -369,7 +369,7 @@ func (r *PrometheusRuleReconciler) generateArmTemplateFromPromRule(ctx context.C
 	cmd.Stderr = &errb
 	err = cmd.Run()
 	if err != nil {
-		log.Error(err, "Failed to convert PrometheusRule into PrometheusRuleGroup", "Stderr", errb.String())
+		logger.Error(err, "Failed to convert PrometheusRule into PrometheusRuleGroup", "Stderr", errb.String())
 		return "", err
 	}
 	jsonString := out.String()
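The rename in this patch addresses identifier shadowing: the scaffolded `log := log.FromContext(ctx)` rebinds the name log from the sigs.k8s.io/controller-runtime/pkg/log package to a logr.Logger variable, making the package unreachable for the rest of the function. A compact sketch of the collision and the fix; ExampleReconciler is an illustrative stand-in:

    package controller

    import (
    	"context"

    	ctrl "sigs.k8s.io/controller-runtime"
    	"sigs.k8s.io/controller-runtime/pkg/log"
    )

    type ExampleReconciler struct{}

    func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    	// With `log := log.FromContext(ctx)`, the identifier "log" would
    	// refer to the logr.Logger variable from here on, shadowing the
    	// imported package; a later log.FromContext(ctx) in this scope
    	// would not compile.
    	logger := log.FromContext(ctx) // the rename keeps the package name usable
    	logger.Info("reconciling", "namespace", req.Namespace, "name", req.Name)
    	return ctrl.Result{}, nil
    }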
From fd2b33c18c99530c50cd543aa3636629951a7023 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 28 Nov 2024 13:43:19 +0100
Subject: [PATCH 28/37] try to delete the rest of the resources even if one
 deletion fails

---
 .../controller/prometheusrule_controller.go  | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
index 177c7c44..ee29a9c6 100644
--- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
+++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
@@ -293,18 +293,20 @@ func (r *PrometheusRuleReconciler) deployArmTemplate(ctx context.Context, deploy
 	return nil
 }
 
 func (r *PrometheusRuleReconciler) deleteExternalResources(ctx context.Context, promRule monitoringv1.PrometheusRule) error {
+	allResourcesDeleted := true
 	annotations := promRule.GetAnnotations()
-	resourceNames, ok := annotations[azPrometheusRuleGroupResourceNamesAnnotation]
-	if ok {
+	if resourceNames, ok := annotations[azPrometheusRuleGroupResourceNamesAnnotation]; ok {
 		for _, rn := range strings.Split(resourceNames, ",") {
-			_, err := r.deletePrometheusRuleGroup(ctx, rn)
-			if err != nil {
-				// TODO: Should we try to delete the rest in case one deletion fails? Or simply retry again?
-				return err
+			if _, err := r.deletePrometheusRuleGroup(ctx, rn); err != nil {
+				allResourcesDeleted = false
 			}
 		}
 	}
-	return nil
+	if allResourcesDeleted {
+		return nil
+	} else {
+		return fmt.Errorf("failed to delete all Azure resources associated with PrometheusRule %s", promRule.Name)
+	}
 }

From ad2cf3c4b74314db417b99662f91780ed1d39136 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 28 Nov 2024 13:48:19 +0100
Subject: [PATCH 29/37] return false as soon as the first missing annotation is
 seen

---
 .../internal/controller/prometheusrule_controller.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
index ee29a9c6..39c9d1b7 100644
--- a/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
+++ b/services/dis-promrulegroups-operator/internal/controller/prometheusrule_controller.go
@@ -394,12 +394,13 @@ func timestamp() string {
 }
 
 func hasAllAnnotations(annotations map[string]string) bool {
-	boolRes := true
 	for _, a := range allAnnotations {
 		_, ok := annotations[a]
-		boolRes = boolRes && ok
+		if !ok {
+			return false
+		}
 	}
-	return boolRes
+	return true
 }
 
 func generateArmDeploymentName(req ctrl.Request, suffix string) string {
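An aside on the aggregation strategy in PATCH 28: it records a boolean and returns a single summary error, discarding the individual causes. Since Go 1.20, errors.Join offers an alternative that attempts every deletion and preserves each failure. A sketch under that assumption; deleteAll and deleteOne are hypothetical names, not the operator's API:

    package controller

    import (
    	"context"
    	"errors"
    	"fmt"
    )

    // deleteAll attempts every deletion and joins the failures, so one
    // bad resource does not stop the cleanup and no cause is lost.
    func deleteAll(ctx context.Context, names []string, deleteOne func(context.Context, string) error) error {
    	var errs []error
    	for _, name := range names {
    		if err := deleteOne(ctx, name); err != nil {
    			errs = append(errs, fmt.Errorf("delete %q: %w", name, err))
    		}
    	}
    	// errors.Join returns nil when errs is empty, matching the
    	// "all resources deleted" case.
    	return errors.Join(errs...)
    }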
From d164a3bd6298a7937639bf6f270dc2863ef9fe25 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Mon, 9 Dec 2024 08:57:55 +0100
Subject: [PATCH 30/37] point to Azure repo now that the PRs have been merged

---
 services/dis-promrulegroups-operator/Dockerfile | 2 +-
 services/dis-promrulegroups-operator/Makefile   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/services/dis-promrulegroups-operator/Dockerfile b/services/dis-promrulegroups-operator/Dockerfile
index 9b65c6fb..7388dcac 100644
--- a/services/dis-promrulegroups-operator/Dockerfile
+++ b/services/dis-promrulegroups-operator/Dockerfile
@@ -25,7 +25,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o ma
 
 FROM node:20 AS builder2
 WORKDIR /tool
-RUN npm i --omit=dev https://gitpkg.now.sh/monteiro-renato/prometheus-collector/tools/az-prom-rules-converter?with-piped-input
+RUN npm i --omit=dev https://gitpkg.now.sh/Azure/prometheus-collector/tools/az-prom-rules-converter?main
 
 
 # Use distroless as minimal base image to package the manager binary
diff --git a/services/dis-promrulegroups-operator/Makefile b/services/dis-promrulegroups-operator/Makefile
index e4abb9fe..a51200a9 100644
--- a/services/dis-promrulegroups-operator/Makefile
+++ b/services/dis-promrulegroups-operator/Makefile
@@ -68,7 +68,7 @@ test: manifests generate fmt vet envtest ## Run tests.
 	@command -v $(LOCALBIN)/az-tool/node_modules/.bin/az-prom-rules-converter >/dev/null 2>&1 || { \
 		echo "Installing az-prom-rules-converter in $(LOCALBIN)/az-tool"; \
 		mkdir -p $(LOCALBIN)/az-tool; \
-		npm i --prefix $(LOCALBIN)/az-tool --omit=dev https://gitpkg.now.sh/monteiro-renato/prometheus-collector/tools/az-prom-rules-converter?with-piped-input; \
+		npm i --prefix $(LOCALBIN)/az-tool --omit=dev https://gitpkg.now.sh/Azure/prometheus-collector/tools/az-prom-rules-converter?main; \
 	}
 	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out

From b83adad7740245a8e3c382361e8171bf1522f03f Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Tue, 10 Dec 2024 12:59:27 +0100
Subject: [PATCH 31/37] add new line at the end of the file

---
 services/dis-promrulegroups-operator/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/services/dis-promrulegroups-operator/Dockerfile b/services/dis-promrulegroups-operator/Dockerfile
index 7388dcac..98ff2c22 100644
--- a/services/dis-promrulegroups-operator/Dockerfile
+++ b/services/dis-promrulegroups-operator/Dockerfile
@@ -38,4 +38,4 @@ ENV PATH="$PATH:nodejs/bin/"
 ENV ENVIRONMENT="prod"
 
 USER 65532:65532
-ENTRYPOINT ["/manager"]
\ No newline at end of file
+ENTRYPOINT ["/manager"]

From 3adda5aa72a3169f83393e03aa275e51a44b335d Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 2 Jan 2025 07:45:10 +0100
Subject: [PATCH 32/37] prefix promrulegroups workflow files

---
 .../.github/workflows/{lint.yml => dis-promrulegroups-lint.yml}   | 0
 .../{test-chart.yml => dis-promrulegroups-test-chart.yml}         | 0
 .../workflows/{test-e2e.yml => dis-promrulegroups-test-e2e.yml}   | 0
 .../.github/workflows/{test.yml => dis-promrulegroups-test.yml}   | 0
 4 files changed, 0 insertions(+), 0 deletions(-)
 rename services/dis-promrulegroups-operator/.github/workflows/{lint.yml => dis-promrulegroups-lint.yml} (100%)
 rename services/dis-promrulegroups-operator/.github/workflows/{test-chart.yml => dis-promrulegroups-test-chart.yml} (100%)
 rename services/dis-promrulegroups-operator/.github/workflows/{test-e2e.yml => dis-promrulegroups-test-e2e.yml} (100%)
 rename services/dis-promrulegroups-operator/.github/workflows/{test.yml => dis-promrulegroups-test.yml} (100%)

diff --git a/services/dis-promrulegroups-operator/.github/workflows/lint.yml b/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-lint.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/lint.yml
rename to services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-lint.yml
diff --git a/services/dis-promrulegroups-operator/.github/workflows/test-chart.yml b/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-chart.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/test-chart.yml
rename to services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-chart.yml
diff --git a/services/dis-promrulegroups-operator/.github/workflows/test-e2e.yml b/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-e2e.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/test-e2e.yml
rename to services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-e2e.yml
diff --git a/services/dis-promrulegroups-operator/.github/workflows/test.yml b/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/test.yml
rename to services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test.yml

From e48c06e9dde5603fde42dbe2c3d461d00b58fb49 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 2 Jan 2025 07:46:57 +0100
Subject: [PATCH 33/37] move promrulegroups workflow files to root .github
 directory

---
 .../.github => .github}/workflows/dis-promrulegroups-lint.yml     | 0
 .../workflows/dis-promrulegroups-test-chart.yml                   | 0
 .../.github => .github}/workflows/dis-promrulegroups-test-e2e.yml | 0
 .../.github => .github}/workflows/dis-promrulegroups-test.yml     | 0
 4 files changed, 0 insertions(+), 0 deletions(-)
 rename {services/dis-promrulegroups-operator/.github => .github}/workflows/dis-promrulegroups-lint.yml (100%)
 rename {services/dis-promrulegroups-operator/.github => .github}/workflows/dis-promrulegroups-test-chart.yml (100%)
 rename {services/dis-promrulegroups-operator/.github => .github}/workflows/dis-promrulegroups-test-e2e.yml (100%)
 rename {services/dis-promrulegroups-operator/.github => .github}/workflows/dis-promrulegroups-test.yml (100%)

diff --git a/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-lint.yml b/.github/workflows/dis-promrulegroups-lint.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-lint.yml
rename to .github/workflows/dis-promrulegroups-lint.yml
diff --git a/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-chart.yml b/.github/workflows/dis-promrulegroups-test-chart.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-chart.yml
rename to .github/workflows/dis-promrulegroups-test-chart.yml
diff --git a/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-e2e.yml b/.github/workflows/dis-promrulegroups-test-e2e.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test-e2e.yml
rename to .github/workflows/dis-promrulegroups-test-e2e.yml
diff --git a/services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test.yml b/.github/workflows/dis-promrulegroups-test.yml
similarity index 100%
rename from services/dis-promrulegroups-operator/.github/workflows/dis-promrulegroups-test.yml
rename to .github/workflows/dis-promrulegroups-test.yml
From 64426bd5b41dc1dfecebcb58f086f3b4dff17482 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 2 Jan 2025 08:15:00 +0100
Subject: [PATCH 34/37] tweaks to workflow files

---
 .github/workflows/dis-promrulegroups-lint.yml   | 17 +++++++++++++++--
 .../workflows/dis-promrulegroups-test-chart.yml | 17 +++++++++++++++--
 .../workflows/dis-promrulegroups-test-e2e.yml   | 14 ++++++++++++--
 .github/workflows/dis-promrulegroups-test.yml   | 17 +++++++++++++++--
 4 files changed, 57 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/dis-promrulegroups-lint.yml b/.github/workflows/dis-promrulegroups-lint.yml
index b6967b35..9379f685 100644
--- a/.github/workflows/dis-promrulegroups-lint.yml
+++ b/.github/workflows/dis-promrulegroups-lint.yml
@@ -1,13 +1,26 @@
-name: Lint
+name: dis-promrulegroups Lint
 
 on:
   push:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-lint.yml
   pull_request:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-lint.yml
 
 jobs:
   lint:
     name: Run on Ubuntu
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: services/dis-promrulegroups-operator
     steps:
       - name: Clone the code
         uses: actions/checkout@v4
@@ -15,7 +28,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '~1.22'
+          go-version-file: 'services/dis-promrulegroups-operator/go.mod'
 
       - name: Run linter
         uses: golangci/golangci-lint-action@v6
diff --git a/.github/workflows/dis-promrulegroups-test-chart.yml b/.github/workflows/dis-promrulegroups-test-chart.yml
index 63ed287a..ac7d1f3a 100644
--- a/.github/workflows/dis-promrulegroups-test-chart.yml
+++ b/.github/workflows/dis-promrulegroups-test-chart.yml
@@ -1,13 +1,26 @@
-name: Test Chart
+name: dis-promrulegroups Test Chart
 
 on:
   push:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-test-chart.yml
   pull_request:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-test-chart.yml
 
 jobs:
   test-e2e:
     name: Run on Ubuntu
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: services/dis-promrulegroups-operator
     steps:
       - name: Clone the code
         uses: actions/checkout@v4
@@ -15,7 +28,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version-file: go.mod
+          go-version-file: 'services/dis-promrulegroups-operator/go.mod'
 
       - name: Install the latest version of kind
         run: |
diff --git a/.github/workflows/dis-promrulegroups-test-e2e.yml b/.github/workflows/dis-promrulegroups-test-e2e.yml
index 87806440..e8df067e 100644
--- a/.github/workflows/dis-promrulegroups-test-e2e.yml
+++ b/.github/workflows/dis-promrulegroups-test-e2e.yml
@@ -1,8 +1,18 @@
-name: E2E Tests
+name: dis-promrulegroups E2E Tests
 
 on:
   push:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-test-e2e.yml
   pull_request:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-test-e2e.yml
 
 jobs:
   test-e2e:
@@ -15,7 +25,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '~1.22'
+          go-version-file: 'services/dis-promrulegroups-operator/go.mod'
 
       - name: Install the latest version of kind
         run: |
diff --git a/.github/workflows/dis-promrulegroups-test.yml b/.github/workflows/dis-promrulegroups-test.yml
index 7baf6579..550d2abf 100644
--- a/.github/workflows/dis-promrulegroups-test.yml
+++ b/.github/workflows/dis-promrulegroups-test.yml
@@ -1,13 +1,26 @@
-name: Tests
+name: dis-promrulegroups Tests
 
 on:
   push:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-test.yml
   pull_request:
+    branches:
+      - main
+    paths:
+      - services/dis-promrulegroups-operator/**
+      - .github/workflows/dis-promrulegroups-test.yml
 
 jobs:
   test:
     name: Run on Ubuntu
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: services/dis-promrulegroups-operator
     steps:
       - name: Clone the code
         uses: actions/checkout@v4
@@ -15,7 +28,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '~1.22'
+          go-version-file: 'services/dis-promrulegroups-operator/go.mod'
 
       - name: Running Tests
         run: |
From ab448a119f8cd756981f2cbebbf8016055a62771 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 2 Jan 2025 09:19:08 +0100
Subject: [PATCH 35/37] release workflows

---
 .../dis-promrulegroups-chart-release.yml     | 27 ++++++++++++
 .../workflows/dis-promrulegroups-release.yml | 44 +++++++++++++++++++
 2 files changed, 71 insertions(+)
 create mode 100644 .github/workflows/dis-promrulegroups-chart-release.yml
 create mode 100644 .github/workflows/dis-promrulegroups-release.yml

diff --git a/.github/workflows/dis-promrulegroups-chart-release.yml b/.github/workflows/dis-promrulegroups-chart-release.yml
new file mode 100644
index 00000000..4822d7f4
--- /dev/null
+++ b/.github/workflows/dis-promrulegroups-chart-release.yml
@@ -0,0 +1,27 @@
+name: "Release charts dis-promrulegroups"
+
+on:
+  push:
+    tags:
+      - "dis-promrulegroups-chart*"
+
+permissions:
+  contents: read
+  packages: write
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.6.0
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/dis-promrulegroups-release.yml b/.github/workflows/dis-promrulegroups-release.yml
new file mode 100644
index 00000000..93154b7a
--- /dev/null
+++ b/.github/workflows/dis-promrulegroups-release.yml
@@ -0,0 +1,44 @@
+name: Release dis-promrulegroups
+
+on:
+  push:
+    tags:
+      - "dis-promrulegroups-*"
+
+permissions:
+  contents: read
+  packages: write
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Get variables from tags
+        id: vars
+        run: |
+          tag=${GITHUB_REF/refs\/tags\/dis-promrulegroups-/}
+          echo "version=${tag%-demo}" >> $GITHUB_OUTPUT
+          echo "reponame=${GITHUB_REPOSITORY,,}" >>${GITHUB_OUTPUT}
+      - name: Get git commit timestamps
+        run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to Docker
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push
+        id: docker_build
+        uses: docker/build-push-action@v6
+        with:
+          context: ./services/dis-promrulegroups-operator/
+          push: true
+          platforms: linux/amd64,linux/arm64
+          tags: ghcr.io/${{ steps.vars.outputs.reponame }}/dis-promrulegroups:v${{ steps.vars.outputs.version }}
+        env:
+          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}

From 026505927a3e383120724445536b6759285b6244 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 2 Jan 2025 11:10:32 +0100
Subject: [PATCH 36/37] config working-directory in golangci-lint-action

---
 .github/workflows/dis-promrulegroups-lint.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/dis-promrulegroups-lint.yml b/.github/workflows/dis-promrulegroups-lint.yml
index 9379f685..48ba56da 100644
--- a/.github/workflows/dis-promrulegroups-lint.yml
+++ b/.github/workflows/dis-promrulegroups-lint.yml
@@ -34,3 +34,4 @@ jobs:
         uses: golangci/golangci-lint-action@v6
         with:
           version: v1.59
+          working-directory: services/dis-promrulegroups-operator
\ No newline at end of file
From 67beed37a3ca342ba928c8798f25dd48becdf2f0 Mon Sep 17 00:00:00 2001
From: Renato Monteiro <45536168+monteiro-renato@users.noreply.github.com>
Date: Thu, 2 Jan 2025 11:12:33 +0100
Subject: [PATCH 37/37] set the default working-directory

---
 .github/workflows/dis-promrulegroups-test-e2e.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/dis-promrulegroups-test-e2e.yml b/.github/workflows/dis-promrulegroups-test-e2e.yml
index e8df067e..37c03881 100644
--- a/.github/workflows/dis-promrulegroups-test-e2e.yml
+++ b/.github/workflows/dis-promrulegroups-test-e2e.yml
@@ -18,6 +18,9 @@ jobs:
   test-e2e:
     name: Run on Ubuntu
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: services/dis-apim-operator
     steps:
       - name: Clone the code
         uses: actions/checkout@v4