From 39d4875c2cbc90ae75e01d60be59144748793c0a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Kalke?= <56382792+MichalKalke@users.noreply.github.com>
Date: Tue, 30 Apr 2024 10:21:21 +0200
Subject: [PATCH] Add docker registry operator (#1)

* Extract docker-registry source code from serverless module.

* prepare images to be usable in ADO build jobs

---------

Co-authored-by: Marcin Dobrochowski
---
 .github/actions/setup-libgit2/action.yaml | 28 +
 .github/dependabot.yml | 38 ++
 .github/scripts/create_changelog.sh | 56 ++
 .github/scripts/create_draft_release.sh | 37 ++
 .github/scripts/publish_release.sh | 24 +
 .github/scripts/release.sh | 66 ++
 .../scripts/upgrade-sec-scanners-config.sh | 22 +
 .github/scripts/verify-actions-status.sh | 26 +
 .../verify-docker-registry-jobs-status.sh | 34 +
 .github/scripts/verify-image-changes.sh | 20 +
 .github/stale.yml | 19 +
 .github/workflows/create-release.yaml | 118 ++++
 .github/workflows/images-verify.yaml | 44 ++
 .github/workflows/lint-markdown-links.yml | 14 -
 .github/workflows/markdown.yaml | 18 +
 .github/workflows/operator-verify.yaml | 82 +++
 .github/workflows/pull-gitleaks.yml | 23 -
 .github/workflows/stale.yml | 31 -
 .gitignore | 44 ++
 .markdownlint.yaml | 26 +
 .markdownlintignore | 1 +
 .mlc.config.json | 24 +-
 .reuse/dep5 | 26 +-
 CODEOWNERS | 42 +-
 CODE_OF_CONDUCT.md | 4 +-
 CONTRIBUTING.md | 4 +-
 Makefile | 31 +
 README.md | 124 ++--
 SECURITY.md | 3 -
 components/operator/.dockerignore | 10 +
 components/operator/.golangci.yaml | 15 +
 components/operator/Dockerfile | 34 +
 components/operator/Makefile | 142 +++++
 .../api/v1alpha1/dockerregistry_types.go | 162 +++++
 .../api/v1alpha1/groupversion_info.go | 41 ++
 components/operator/api/v1alpha1/helpers.go | 26 +
 .../api/v1alpha1/zz_generated.deepcopy.go | 137 ++++
 components/operator/controllers/controller.go | 80 +++
 .../operator/controllers/controller_rbac.go | 35 +
 .../operator/controllers/controller_test.go | 99 +++
 components/operator/controllers/suite_test.go | 120 ++++
 .../operator/controllers/testhelper_test.go | 285 +++++++++
 components/operator/hack/boilerplate.go.txt | 15 +
 .../hack/verify_dockerregistry_status.sh | 18 +
 .../internal/annotation/disclaimer.go | 22 +
 .../internal/annotation/disclaimer_test.go | 17 +
 components/operator/internal/chart/cache.go | 142 +++++
 .../operator/internal/chart/cache_test.go | 262 ++++++++
 components/operator/internal/chart/chart.go | 143 +++++
 .../operator/internal/chart/chart_test.go | 94 +++
 components/operator/internal/chart/check.go | 94 +++
 .../operator/internal/chart/check_test.go | 179 ++++++
 .../operator/internal/chart/client_getter.go | 44 ++
 components/operator/internal/chart/flags.go | 94 +++
 .../operator/internal/chart/flags_test.go | 85 +++
 components/operator/internal/chart/install.go | 102 +++
 .../operator/internal/chart/install_test.go | 247 +++++++
 components/operator/internal/chart/pvc.go | 64 ++
 .../operator/internal/chart/pvc_test.go | 133 ++++
 .../operator/internal/chart/uninstall.go | 160 +++++
 .../operator/internal/chart/uninstall_test.go | 99 +++
 components/operator/internal/chart/verify.go | 72 +++
 .../operator/internal/chart/verify_test.go | 146 +++++
 components/operator/internal/config/config.go | 14 +
 .../kubernetes/configmap_service.go | 98 +++
 .../kubernetes/namespace_controller.go | 116 ++++
 .../kubernetes/secret_controller.go | 103 +++
 .../controllers/kubernetes/secret_service.go | 175 +++++
 .../kubernetes/serviceaccount_service.go | 130 ++++
 .../internal/controllers/kubernetes/shared.go | 55 ++
.../internal/gitrepository/cleanup.go | 31 + .../internal/gitrepository/cleanup_test.go | 61 ++ .../operator/internal/predicate/predicate.go | 33 + .../internal/predicate/predicate_test.go | 98 +++ .../operator/internal/registry/node_port.go | 158 +++++ .../internal/registry/node_port_test.go | 144 +++++ .../operator/internal/registry/secret.go | 61 ++ .../operator/internal/resource/resource.go | 107 ++++ .../operator/internal/state/add_finalizer.go | 32 + .../internal/state/add_finalizer_test.go | 84 +++ components/operator/internal/state/apply.go | 37 ++ .../operator/internal/state/apply_test.go | 104 +++ .../state/controller_configuration.go | 45 ++ .../state/controller_configuration_test.go | 110 ++++ components/operator/internal/state/delete.go | 99 +++ .../operator/internal/state/delete_test.go | 121 ++++ .../operator/internal/state/emit_event.go | 41 ++ .../internal/state/emit_event_test.go | 94 +++ components/operator/internal/state/fsm.go | 143 +++++ .../operator/internal/state/fsm_test.go | 145 +++++ .../operator/internal/state/initialize.go | 18 + .../internal/state/initialize_test.go | 67 ++ components/operator/internal/state/new.go | 36 ++ .../operator/internal/state/registry.go | 66 ++ .../operator/internal/state/registry_test.go | 39 ++ .../internal/state/remove_finalizer.go | 17 + .../internal/state/remove_finalizer_test.go | 68 ++ .../operator/internal/state/served_filter.go | 50 ++ .../internal/state/served_filter_test.go | 136 ++++ components/operator/internal/state/state.go | 65 ++ .../operator/internal/state/state_test.go | 110 ++++ .../operator/internal/state/update_status.go | 25 + components/operator/internal/state/utils.go | 46 ++ components/operator/internal/state/verify.go | 49 ++ .../operator/internal/state/verify_test.go | 178 ++++++ .../operator/internal/tracing/watcher.go | 57 ++ .../operator/internal/warning/warning.go | 27 + .../operator/internal/warning/warning_test.go | 22 + components/operator/main.go | 218 +++++++ config.yaml | 7 + config/docker-registry/.helmignore | 22 + config/docker-registry/Chart.yaml | 8 + .../charts/docker-registry/.helmignore | 21 + .../charts/docker-registry/Chart.yaml | 14 + .../charts/docker-registry/README.md | 79 +++ .../docker-registry/templates/_helpers.tpl | 24 + .../docker-registry/templates/configmap.yaml | 13 + .../docker-registry/templates/deployment.yaml | 231 +++++++ .../docker-registry/templates/ingress.yaml | 37 ++ .../templates/poddisruptionbudget.yaml | 18 + .../templates/priorityclass.yaml | 7 + .../charts/docker-registry/templates/pvc.yaml | 27 + .../docker-registry/templates/secret.yaml | 31 + .../docker-registry/templates/service.yaml | 25 + .../charts/docker-registry/values.yaml | 171 +++++ config/docker-registry/templates/_helpers.tpl | 49 ++ .../templates/registry-config.yaml | 21 + config/docker-registry/values.yaml | 47 ++ ...ator.kyma-project.io_dockerregistries.yaml | 165 +++++ config/operator/base/crd/kustomization.yaml | 10 + config/operator/base/crd/kustomizeconfig.yaml | 19 + .../operator/base/deployment/deployment.yaml | 64 ++ .../base/deployment/kustomization.yaml | 8 + config/operator/base/kustomization.yaml | 20 + config/operator/base/rbac/editor_role.yaml | 31 + config/operator/base/rbac/kustomization.yaml | 10 + config/operator/base/rbac/role.yaml | 288 +++++++++ config/operator/base/rbac/role_binding.yaml | 19 + .../operator/base/rbac/service_account.yaml | 12 + config/operator/base/rbac/viewer_role.yaml | 27 + .../base/ui-extensions/dockerregistry/details | 91 +++ 
.../base/ui-extensions/dockerregistry/form | 25 + .../base/ui-extensions/dockerregistry/general | 15 + .../dockerregistry/kustomization.yaml | 14 + .../base/ui-extensions/dockerregistry/list | 6 + .../base/ui-extensions/kustomization.yaml | 2 + config/operator/dev/.gitignore | 1 + config/operator/dev/kustomization.yaml.tpl | 9 + config/samples/default-dockerregistry-cr.yaml | 6 + docs/README.md | 39 -- docs/contributor/README.md | 1 - docs/user/README.md | 14 - docs/user/_sidebar.md | 1 - go.mod | 164 +++++ go.sum | 602 ++++++++++++++++++ hack/Makefile | 7 + hack/boilerplate.go.txt | 15 + hack/gardener.mk | 27 + hack/get_kyma_file_name.sh | 16 + hack/help.mk | 5 + hack/k3d.mk | 26 + hack/makefile-strategy.md | 39 ++ hack/tools.mk | 77 +++ markdown_heading_capitalization.js | 31 + module-config-template.yaml | 8 + sec-scanners-config.yaml | 11 + 166 files changed, 10724 insertions(+), 233 deletions(-) create mode 100644 .github/actions/setup-libgit2/action.yaml create mode 100644 .github/dependabot.yml create mode 100755 .github/scripts/create_changelog.sh create mode 100755 .github/scripts/create_draft_release.sh create mode 100755 .github/scripts/publish_release.sh create mode 100755 .github/scripts/release.sh create mode 100755 .github/scripts/upgrade-sec-scanners-config.sh create mode 100755 .github/scripts/verify-actions-status.sh create mode 100755 .github/scripts/verify-docker-registry-jobs-status.sh create mode 100755 .github/scripts/verify-image-changes.sh create mode 100644 .github/stale.yml create mode 100644 .github/workflows/create-release.yaml create mode 100644 .github/workflows/images-verify.yaml delete mode 100644 .github/workflows/lint-markdown-links.yml create mode 100644 .github/workflows/markdown.yaml create mode 100644 .github/workflows/operator-verify.yaml delete mode 100644 .github/workflows/pull-gitleaks.yml delete mode 100644 .github/workflows/stale.yml create mode 100644 .gitignore create mode 100644 .markdownlint.yaml create mode 100644 .markdownlintignore mode change 100755 => 100644 .reuse/dep5 create mode 100644 Makefile delete mode 100644 SECURITY.md create mode 100644 components/operator/.dockerignore create mode 100644 components/operator/.golangci.yaml create mode 100644 components/operator/Dockerfile create mode 100644 components/operator/Makefile create mode 100644 components/operator/api/v1alpha1/dockerregistry_types.go create mode 100644 components/operator/api/v1alpha1/groupversion_info.go create mode 100644 components/operator/api/v1alpha1/helpers.go create mode 100644 components/operator/api/v1alpha1/zz_generated.deepcopy.go create mode 100644 components/operator/controllers/controller.go create mode 100644 components/operator/controllers/controller_rbac.go create mode 100644 components/operator/controllers/controller_test.go create mode 100644 components/operator/controllers/suite_test.go create mode 100644 components/operator/controllers/testhelper_test.go create mode 100755 components/operator/hack/boilerplate.go.txt create mode 100755 components/operator/hack/verify_dockerregistry_status.sh create mode 100644 components/operator/internal/annotation/disclaimer.go create mode 100644 components/operator/internal/annotation/disclaimer_test.go create mode 100644 components/operator/internal/chart/cache.go create mode 100644 components/operator/internal/chart/cache_test.go create mode 100644 components/operator/internal/chart/chart.go create mode 100644 components/operator/internal/chart/chart_test.go create mode 100644 
components/operator/internal/chart/check.go create mode 100644 components/operator/internal/chart/check_test.go create mode 100644 components/operator/internal/chart/client_getter.go create mode 100644 components/operator/internal/chart/flags.go create mode 100644 components/operator/internal/chart/flags_test.go create mode 100644 components/operator/internal/chart/install.go create mode 100644 components/operator/internal/chart/install_test.go create mode 100644 components/operator/internal/chart/pvc.go create mode 100644 components/operator/internal/chart/pvc_test.go create mode 100644 components/operator/internal/chart/uninstall.go create mode 100644 components/operator/internal/chart/uninstall_test.go create mode 100644 components/operator/internal/chart/verify.go create mode 100644 components/operator/internal/chart/verify_test.go create mode 100644 components/operator/internal/config/config.go create mode 100644 components/operator/internal/controllers/kubernetes/configmap_service.go create mode 100644 components/operator/internal/controllers/kubernetes/namespace_controller.go create mode 100644 components/operator/internal/controllers/kubernetes/secret_controller.go create mode 100644 components/operator/internal/controllers/kubernetes/secret_service.go create mode 100644 components/operator/internal/controllers/kubernetes/serviceaccount_service.go create mode 100644 components/operator/internal/controllers/kubernetes/shared.go create mode 100644 components/operator/internal/gitrepository/cleanup.go create mode 100644 components/operator/internal/gitrepository/cleanup_test.go create mode 100644 components/operator/internal/predicate/predicate.go create mode 100644 components/operator/internal/predicate/predicate_test.go create mode 100644 components/operator/internal/registry/node_port.go create mode 100644 components/operator/internal/registry/node_port_test.go create mode 100644 components/operator/internal/registry/secret.go create mode 100644 components/operator/internal/resource/resource.go create mode 100644 components/operator/internal/state/add_finalizer.go create mode 100644 components/operator/internal/state/add_finalizer_test.go create mode 100644 components/operator/internal/state/apply.go create mode 100644 components/operator/internal/state/apply_test.go create mode 100644 components/operator/internal/state/controller_configuration.go create mode 100644 components/operator/internal/state/controller_configuration_test.go create mode 100644 components/operator/internal/state/delete.go create mode 100644 components/operator/internal/state/delete_test.go create mode 100644 components/operator/internal/state/emit_event.go create mode 100644 components/operator/internal/state/emit_event_test.go create mode 100644 components/operator/internal/state/fsm.go create mode 100644 components/operator/internal/state/fsm_test.go create mode 100644 components/operator/internal/state/initialize.go create mode 100644 components/operator/internal/state/initialize_test.go create mode 100644 components/operator/internal/state/new.go create mode 100644 components/operator/internal/state/registry.go create mode 100644 components/operator/internal/state/registry_test.go create mode 100644 components/operator/internal/state/remove_finalizer.go create mode 100644 components/operator/internal/state/remove_finalizer_test.go create mode 100644 components/operator/internal/state/served_filter.go create mode 100644 components/operator/internal/state/served_filter_test.go create mode 100644 
components/operator/internal/state/state.go create mode 100644 components/operator/internal/state/state_test.go create mode 100644 components/operator/internal/state/update_status.go create mode 100644 components/operator/internal/state/utils.go create mode 100644 components/operator/internal/state/verify.go create mode 100644 components/operator/internal/state/verify_test.go create mode 100644 components/operator/internal/tracing/watcher.go create mode 100644 components/operator/internal/warning/warning.go create mode 100644 components/operator/internal/warning/warning_test.go create mode 100644 components/operator/main.go create mode 100644 config.yaml create mode 100644 config/docker-registry/.helmignore create mode 100644 config/docker-registry/Chart.yaml create mode 100644 config/docker-registry/charts/docker-registry/.helmignore create mode 100644 config/docker-registry/charts/docker-registry/Chart.yaml create mode 100644 config/docker-registry/charts/docker-registry/README.md create mode 100644 config/docker-registry/charts/docker-registry/templates/_helpers.tpl create mode 100644 config/docker-registry/charts/docker-registry/templates/configmap.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/deployment.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/ingress.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/priorityclass.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/pvc.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/secret.yaml create mode 100644 config/docker-registry/charts/docker-registry/templates/service.yaml create mode 100644 config/docker-registry/charts/docker-registry/values.yaml create mode 100644 config/docker-registry/templates/_helpers.tpl create mode 100644 config/docker-registry/templates/registry-config.yaml create mode 100644 config/docker-registry/values.yaml create mode 100644 config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml create mode 100644 config/operator/base/crd/kustomization.yaml create mode 100644 config/operator/base/crd/kustomizeconfig.yaml create mode 100644 config/operator/base/deployment/deployment.yaml create mode 100644 config/operator/base/deployment/kustomization.yaml create mode 100644 config/operator/base/kustomization.yaml create mode 100644 config/operator/base/rbac/editor_role.yaml create mode 100644 config/operator/base/rbac/kustomization.yaml create mode 100644 config/operator/base/rbac/role.yaml create mode 100644 config/operator/base/rbac/role_binding.yaml create mode 100644 config/operator/base/rbac/service_account.yaml create mode 100644 config/operator/base/rbac/viewer_role.yaml create mode 100644 config/operator/base/ui-extensions/dockerregistry/details create mode 100644 config/operator/base/ui-extensions/dockerregistry/form create mode 100644 config/operator/base/ui-extensions/dockerregistry/general create mode 100644 config/operator/base/ui-extensions/dockerregistry/kustomization.yaml create mode 100644 config/operator/base/ui-extensions/dockerregistry/list create mode 100644 config/operator/base/ui-extensions/kustomization.yaml create mode 100644 config/operator/dev/.gitignore create mode 100644 config/operator/dev/kustomization.yaml.tpl create mode 100644 config/samples/default-dockerregistry-cr.yaml delete mode 100644 docs/README.md delete 
mode 100644 docs/contributor/README.md delete mode 100644 docs/user/README.md delete mode 100644 docs/user/_sidebar.md create mode 100644 go.mod create mode 100644 go.sum create mode 100644 hack/Makefile create mode 100755 hack/boilerplate.go.txt create mode 100644 hack/gardener.mk create mode 100755 hack/get_kyma_file_name.sh create mode 100644 hack/help.mk create mode 100644 hack/k3d.mk create mode 100644 hack/makefile-strategy.md create mode 100644 hack/tools.mk create mode 100644 markdown_heading_capitalization.js create mode 100644 module-config-template.yaml create mode 100644 sec-scanners-config.yaml diff --git a/.github/actions/setup-libgit2/action.yaml b/.github/actions/setup-libgit2/action.yaml new file mode 100644 index 00000000..7028c797 --- /dev/null +++ b/.github/actions/setup-libgit2/action.yaml @@ -0,0 +1,28 @@ +name: 'Setup libgit2' +description: 'Action for the libgit2 setup' + +inputs: + version: + description: 'libgit2 version to checkout' + required: true + default: 'v1.5.2' + +runs: + using: 'composite' + steps: + - name: Install libssh2 + run: | + sudo apt update + sudo apt install libssh2-1-dev -y + shell: bash + + - name: Install libgit2 + run: | + git clone https://github.com/libgit2/libgit2.git + cd libgit2 + git checkout ${{ inputs.version }} + cmake . -DBUILD_TESTS=OFF -DBUILD_CLI=OFF -DUSE_SSH=ON + sudo make install + sudo ldconfig + shell: bash + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..aa1e19da --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,38 @@ +# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + labels: + - "area/dependency" + - "kind/chore" + schedule: + interval: "weekly" + commit-message: + prefix: "gomod" + include: "scope" + ignore: + # ignore minor k8s updates, e.g. 1.27.x -> 1.28.x + - dependency-name: "k8s.io/*" + update-types: ["version-update:semver-minor"] + - dependency-name: "sigs.k8s.io/*" + update-types: ["version-update:semver-minor"] + - dependency-name: "helm.sh/helm/v3" + update-types: ["version-update:semver-minor"] + groups: + k8s-io: + patterns: + - "k8s.io/*" + + - package-ecosystem: "docker" + directory: "/components/operator" + labels: + - "area/dependency" + - "kind/chore" + schedule: + interval: "weekly" + commit-message: + prefix: "operator" + include: "scope" + diff --git a/.github/scripts/create_changelog.sh b/.github/scripts/create_changelog.sh new file mode 100755 index 00000000..ace47064 --- /dev/null +++ b/.github/scripts/create_changelog.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +PREVIOUS_RELEASE=$2 # for testability + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. 
+set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +RELEASE_TAG=$1 + +REPOSITORY=${REPOSITORY:-kyma-project/docker-registry} +GITHUB_URL=https://api.github.com/repos/${REPOSITORY} +GITHUB_AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}" +CHANGELOG_FILE="CHANGELOG.md" + +if [ "${PREVIOUS_RELEASE}" == "" ] +then + PREVIOUS_RELEASE=$(git describe --tags --abbrev=0) +fi + +echo "## What has changed" >> ${CHANGELOG_FILE} + +git log ${PREVIOUS_RELEASE}..HEAD --pretty=tformat:"%h" --reverse | while read -r commit +do + COMMIT_AUTHOR=$(curl -H "${GITHUB_AUTH_HEADER}" -sS "${GITHUB_URL}/commits/${commit}" | jq -r '.author.login') + if [ "${COMMIT_AUTHOR}" != "kyma-bot" ]; then + git show -s ${commit} --format="* %s by @${COMMIT_AUTHOR}" >> ${CHANGELOG_FILE} + fi +done + +NEW_CONTRIB=$$.new + +join -v2 \ +<(curl -H "${GITHUB_AUTH_HEADER}" -sS "${GITHUB_URL}/compare/$(git rev-list --max-parents=0 HEAD)...${PREVIOUS_RELEASE}" | jq -r '.commits[].author.login' | sort -u) \ +<(curl -H "${GITHUB_AUTH_HEADER}" -sS "${GITHUB_URL}/compare/${PREVIOUS_RELEASE}...HEAD" | jq -r '.commits[].author.login' | sort -u) >${NEW_CONTRIB} + +if [ -s ${NEW_CONTRIB} ] +then + echo -e "\n## New contributors" >> ${CHANGELOG_FILE} + while read -r user + do + REF_PR=$(grep "@${user}" ${CHANGELOG_FILE} | head -1 | grep -o " (#[0-9]\+)" || true) + if [ -n "${REF_PR}" ] #reference found + then + REF_PR=" in ${REF_PR}" + fi + echo "* @${user} made first contribution${REF_PR}" >> ${CHANGELOG_FILE} + done <${NEW_CONTRIB} +fi + +echo -e "\n**Full changelog**: https://github.com/$REPOSITORY/compare/${PREVIOUS_RELEASE}...${RELEASE_TAG}" >> ${CHANGELOG_FILE} + +# cleanup +rm ${NEW_CONTRIB} || echo "cleaned up" \ No newline at end of file diff --git a/.github/scripts/create_draft_release.sh b/.github/scripts/create_draft_release.sh new file mode 100755 index 00000000..75957632 --- /dev/null +++ b/.github/scripts/create_draft_release.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# This script returns the id of the draft release + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. +set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +RELEASE_TAG=$1 + +REPOSITORY=${REPOSITORY:-kyma-project/docker-registry} +GITHUB_URL=https://api.github.com/repos/${REPOSITORY} +GITHUB_AUTH_HEADER="Authorization: Bearer ${GITHUB_TOKEN}" +CHANGELOG_FILE=$(cat CHANGELOG.md) + +JSON_PAYLOAD=$(jq -n \ + --arg tag_name "$RELEASE_TAG" \ + --arg name "$RELEASE_TAG" \ + --arg body "$CHANGELOG_FILE" \ + '{ + "tag_name": $tag_name, + "name": $name, + "body": $body, + "draft": true + }') + +CURL_RESPONSE=$(curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "${GITHUB_AUTH_HEADER}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + ${GITHUB_URL}/releases \ + -d "$JSON_PAYLOAD") + +echo "$(echo $CURL_RESPONSE | jq -r ".id")" diff --git a/.github/scripts/publish_release.sh b/.github/scripts/publish_release.sh new file mode 100755 index 00000000..e56797ab --- /dev/null +++ b/.github/scripts/publish_release.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# This script publishes a draft release + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. 
+set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +RELEASE_ID=$1 +IS_LATEST_RELEASE=$2 + +REPOSITORY=${REPOSITORY:-kyma-project/docker-registry} +GITHUB_URL=https://api.github.com/repos/${REPOSITORY} +GITHUB_AUTH_HEADER="Authorization: Bearer ${GITHUB_TOKEN}" + +CURL_RESPONSE=$(curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "${GITHUB_AUTH_HEADER}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + ${GITHUB_URL}/releases/${RELEASE_ID} \ + -d '{"draft": false, "make_latest": '"$IS_LATEST_RELEASE"'}') diff --git a/.github/scripts/release.sh b/.github/scripts/release.sh new file mode 100755 index 00000000..cc583cb5 --- /dev/null +++ b/.github/scripts/release.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. +set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +# Expected variables: +IMG=${IMG?"Define IMG env"} # operator image +PULL_BASE_REF=${PULL_BASE_REF?"Define PULL_BASE_REF env"} # name of the tag +GITHUB_TOKEN=${GITHUB_TOKEN?"Define GITHUB_TOKEN env"} # github token used to upload the template yaml + +uploadFile() { + filePath=${1} + ghAsset=${2} + + echo "Uploading ${filePath} as ${ghAsset}" + response=$(curl -s -o output.txt -w "%{http_code}" \ + --request POST --data-binary @"$filePath" \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: text/yaml" \ + $ghAsset) + if [[ "$response" != "201" ]]; then + echo "Unable to upload the asset ($filePath): " + echo "HTTP Status: $response" + cat output.txt + exit 1 + else + echo "$filePath uploaded" + fi +} + +echo "IMG: ${IMG}" +IMG=${IMG} make -C components/operator/ render-manifest + +echo "Generated dockerregistry-operator.yaml:" +cat dockerregistry-operator.yaml + +echo "Fetching releases" +CURL_RESPONSE=$(curl -w "%{http_code}" -sL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GITHUB_TOKEN"\ + https://api.github.com/repos/kyma-project/docker-registry/releases) +JSON_RESPONSE=$(sed '$ d' <<< "${CURL_RESPONSE}") +HTTP_CODE=$(tail -n1 <<< "${CURL_RESPONSE}") +if [[ "${HTTP_CODE}" != "200" ]]; then + echo "${CURL_RESPONSE}" + exit 1 +fi + +echo "Finding release id for: ${PULL_BASE_REF}" +RELEASE_ID=$(jq <<< ${JSON_RESPONSE} --arg tag "${PULL_BASE_REF}" '.[] | select(.tag_name == $ARGS.named.tag) | .id') + +echo "Got '${RELEASE_ID}' release id" +if [ -z "${RELEASE_ID}" ] +then + echo "No release with tag = ${PULL_BASE_REF}" + exit 1 +fi + +echo "Updating github release with assets" +UPLOAD_URL="https://uploads.github.com/repos/kyma-project/docker-registry/releases/${RELEASE_ID}/assets" + +uploadFile "dockerregistry-operator.yaml" "${UPLOAD_URL}?name=dockerregistry-operator.yaml" +uploadFile "config/samples/default-dockerregistry-cr.yaml" "${UPLOAD_URL}?name=default-dockerregistry-cr.yaml" diff --git a/.github/scripts/upgrade-sec-scanners-config.sh b/.github/scripts/upgrade-sec-scanners-config.sh new file mode 100755 index 00000000..a9b2fdec --- /dev/null +++ b/.github/scripts/upgrade-sec-scanners-config.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +IMG_VERSION=${IMG_VERSION?"Define IMG_VERSION env"} + +yq eval-all --inplace ' + select(fileIndex == 0).protecode=[ + select(fileIndex == 1) + | .global.containerRegistry.path as $registryPath + | ( + { + "dockerregistry_operator" : { + "name" : 
"dockerregistry-operator", + "directory" : "prod", + "version" : env(IMG_VERSION) + } + } + + .global.images + )[] + | $registryPath + "/" + .directory + "/" + .name + ":" + .version + ] + | select(fileIndex == 0) + ' sec-scanners-config.yaml config/docker-registry/values.yaml \ No newline at end of file diff --git a/.github/scripts/verify-actions-status.sh b/.github/scripts/verify-actions-status.sh new file mode 100755 index 00000000..598000d0 --- /dev/null +++ b/.github/scripts/verify-actions-status.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +echo "Checking status of github actions for docker-registry" + +REF_NAME="${1:-"main"}" +RAW_EXPECTED_SHA=$(git log "${REF_NAME}" --max-count 1 --format=format:%H) +REPOSITORY_ID="563346860" + +STATUS_URL="https://api.github.com/repositories/${REPOSITORY_ID}/actions/workflows/gardener-integration.yaml/runs?head_sha=${RAW_EXPECTED_SHA}" +GET_STATUS_JQ_QUERY=".workflow_runs[0] | \"\(.status)-\(.conclusion)\"" +GET_COUNT_JQ_QUERY=".total_count" + +response=`curl -s ${STATUS_URL}` + +count=`echo $response | jq -r "${GET_COUNT_JQ_QUERY}"` +if [[ "$count" == "0" ]]; then + echo "No actions to verify" +else + fullstatus=`echo $response | jq -r "${GET_STATUS_JQ_QUERY}"` + if [[ "$fullstatus" == "completed-success" ]]; then + echo "All actions succeeded" + else + echo "Actions failed or pending - Check github actions status" + exit 1 + fi +fi diff --git a/.github/scripts/verify-docker-registry-jobs-status.sh b/.github/scripts/verify-docker-registry-jobs-status.sh new file mode 100755 index 00000000..5adf1528 --- /dev/null +++ b/.github/scripts/verify-docker-registry-jobs-status.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +### Verify post-submit prow jobs status +# +# Optional input args: +# - REF_NAME - branch/tag/commit +# Return status: +# - return 0 - if status is "success" +# - return 1 - if status is "failure" or after timeout (~25min) + +# wait until Prow trigger pipelines +sleep 10 + +echo "Checking status of POST Jobs for docker-registry" + +REF_NAME="${1:-"main"}" +STATUS_URL="https://api.github.com/repos/kyma-project/docker-registry/commits/${REF_NAME}/status" + +function verify_github_jobs_status () { + local number=1 + while [[ $number -le 100 ]] ; do + echo ">--> checking docker-registry job status #$number" + local STATUS=`curl -L -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" ${STATUS_URL} | jq -r .state ` + echo "jobs status: ${STATUS:='UNKNOWN'}" + [[ "$STATUS" == "success" ]] && return 0 + [[ "$STATUS" == "failure" ]] && return 1 + sleep 15 + ((number = number + 1)) + done + + exit 1 +} + +verify_github_jobs_status \ No newline at end of file diff --git a/.github/scripts/verify-image-changes.sh b/.github/scripts/verify-image-changes.sh new file mode 100755 index 00000000..8e03bdff --- /dev/null +++ b/.github/scripts/verify-image-changes.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +MAIN_IMAGES=(${MAIN_IMAGES?"Define MAIN_IMAGES env"}) +PR_NOT_MAIN_IMAGES=(${PR_NOT_MAIN_IMAGES?"Define PR_NOT_MAIN_IMAGES env"}) + +FAIL=false +for main_image in "${MAIN_IMAGES[@]}"; do + echo "${main_image} checking..." + + for pr_image in "${PR_NOT_MAIN_IMAGES[@]}"; do + if [ "${main_image}" == "${pr_image}" ]; then + echo " warning: ${pr_image} tag/version seems to be modified (should be main)!" 
+ FAIL=true + fi + done +done + +if $FAIL; then + exit 1 +fi diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 00000000..b70d9bad --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,19 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 60 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 7 +# Label to use when marking an issue as stale +staleLabel: lifecycle/stale +# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable +exemptLabels: + - lifecycle/frozen + - lifecycle/active +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: | + This issue has been automatically marked as stale due to the lack of recent activity. It will soon be closed if no further activity occurs. + Thank you for your contributions. +# Comment to post when closing a stale issue. Set to `false` to disable +closeComment: | + This issue has been automatically closed due to the lack of recent activity. + /lifecycle rotten + diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml new file mode 100644 index 00000000..b9650864 --- /dev/null +++ b/.github/workflows/create-release.yaml @@ -0,0 +1,118 @@ +name: "Create release" + +on: + workflow_dispatch: + inputs: + name: + description: 'Release name ( e.g. "2.1.3" )' + default: "" + required: true + latest_release: + description: 'Latest release' + type: boolean + default: false + +jobs: + verify-head-status: + name: Verify HEAD + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Verify prow post jobs + run: ./.github/scripts/verify-docker-registry-jobs-status.sh ${{ github.ref_name }} + + - name: Verify github actions + run: ./.github/scripts/verify-actions-status.sh ${{ github.ref_name }} + + upgrade-images: + name: Upgrade main images + needs: verify-head-status + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + token: ${{ secrets.BOT_TOKEN }} + fetch-depth: 0 + + - name: Bump sec-scanners-config.yaml based on values.yaml + run: ./.github/scripts/upgrade-sec-scanners-config.sh + env: + IMG_VERSION: ${{ github.event.inputs.name }} + + - name: Commit&Push + run: | + git config --local user.email "team-otters@sap.com" + git config --local user.name "ottersbot" + + git add . 
+ git commit --allow-empty -m "upgrade dependencies" + git push origin ${{ github.ref_name }} + + create-draft: + name: Create draft release + needs: upgrade-images + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.ref_name }} # checkout to latest branch changes ( by default this action checkouts to the SHA that triggers action ) + + - name: Create changelog + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PULL_BASE_REF: ${{ github.event.inputs.name }} + run: ./.github/scripts/create_changelog.sh ${{ github.event.inputs.name }} + + - name: Create draft release + id: create-draft + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + RELEASE_ID=$(./.github/scripts/create_draft_release.sh ${{ github.event.inputs.name }}) + echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT + + - name: Create lightweight tag + run: | + git tag ${{ github.event.inputs.name }} + git push origin ${{ github.event.inputs.name }} + + - name: Create release assets + id: create-assets + env: + IMG: "europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:${{ github.event.inputs.name }}" + PULL_BASE_REF: ${{ github.event.inputs.name }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: ./.github/scripts/release.sh + + - name: Verify prow release jobs + run: ./.github/scripts/verify-docker-registry-jobs-status.sh ${{ github.ref_name }} + + outputs: + release_id: ${{ steps.create-draft.outputs.release_id }} + + publish-release: + name: Publish release + needs: create-draft + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.name }} # checkout to latest branch changes ( by default this action checkouts to the SHA that triggers action ) + + - name: Publish release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: ./.github/scripts/publish_release.sh ${{ needs.create-draft.outputs.release_id }} ${{ github.event.inputs.latest_release }} diff --git a/.github/workflows/images-verify.yaml b/.github/workflows/images-verify.yaml new file mode 100644 index 00000000..5bd1a746 --- /dev/null +++ b/.github/workflows/images-verify.yaml @@ -0,0 +1,44 @@ +name: Images verify +run-name: ${{github.event.pull_request.title}} +on: + pull_request: + branches: + - main + paths: + - sec-scanners-config.yaml + - config/docker-registry/values.yaml + +jobs: + # check if developer doesn't change `main` images in the values.yaml and sec-scanners-config.yaml files + check-main-tags: + runs-on: ubuntu-latest + steps: + - name: Checkout to main + uses: actions/checkout@v4 + with: + ref: main + + - name: Export main images from the main ref + run: | + # export sec-scanners-config.yaml images with the main tag as github env + echo SSC_MAIN_IMAGES=$(yq '.protecode[] | select(contains(":main")) | sub(":.*", "")' sec-scanners-config.yaml) >> $GITHUB_ENV + + # export values. 
images with the main tag as github env + echo VALUES_MAIN_IMAGES=$(yq '.global.images[] | select(.version == "main") | .name' config/docker-registry/values.yaml) >> $GITHUB_ENV + + - name: Checkout to context + uses: actions/checkout@v4 + + - name: Verify sec-scanners-config.yaml images + run: | + PR_NOT_MAIN_IMAGES=$(yq '.protecode[] | select(contains(":main") | not ) | sub(":.*", "")' sec-scanners-config.yaml) \ + .github/scripts/verify-image-changes.sh + env: + MAIN_IMAGES: ${{ env.SSC_MAIN_IMAGES }} + + - name: Verify values.yaml images + run: | + PR_NOT_MAIN_IMAGES=$(yq '.global.images[] | select(.version != "main") | .name' config/docker-registry/values.yaml) \ + .github/scripts/verify-image-changes.sh + env: + MAIN_IMAGES: ${{ env.VALUES_MAIN_IMAGES }} diff --git a/.github/workflows/lint-markdown-links.yml b/.github/workflows/lint-markdown-links.yml deleted file mode 100644 index d1790224..00000000 --- a/.github/workflows/lint-markdown-links.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Lint Markdown Links -run-name: ${{github.event.pull_request.title}} -on: [ pull_request ] -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: gaurav-nelson/github-action-markdown-link-check@v1 - with: - use-verbose-mode: 'no' - config-file: '.mlc.config.json' - folder-path: '.' - max-depth: -1 diff --git a/.github/workflows/markdown.yaml b/.github/workflows/markdown.yaml new file mode 100644 index 00000000..a871742d --- /dev/null +++ b/.github/workflows/markdown.yaml @@ -0,0 +1,18 @@ +name: Markdown +run-name: ${{github.event.pull_request.title}} +on: + schedule: + - cron: "0 5 * * *" # Run everyday at 5:00 AM + pull_request: +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 +# - uses: gaurav-nelson/github-action-markdown-link-check@v1 //TODO: after adjusting *.md bring the test back +# with: +# use-quiet-mode: 'yes' +# use-verbose-mode: 'yes' +# config-file: '.mlc.config.json' +# folder-path: '.' 
+# max-depth: -1 diff --git a/.github/workflows/operator-verify.yaml b/.github/workflows/operator-verify.yaml new file mode 100644 index 00000000..f2a14b10 --- /dev/null +++ b/.github/workflows/operator-verify.yaml @@ -0,0 +1,82 @@ +name: Operator verify + +on: + push: + branches: [ "main", "release-*" ] + pull_request: + paths-ignore: + - 'docs/**' + - 'examples/**' + types: + - opened + - reopened + - synchronize + - ready_for_review + - converted_to_draft + +jobs: + lint: + if: github.event_name == 'pull_request' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: golangci/golangci-lint-action@v3 + with: + version: latest + working-directory: 'components/operator' + + unit-test: + if: github.event_name == 'pull_request' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: run test + run: make -C components/operator test + + +# upgrade-test: //TODO: change it to run dockerregistry verify (need to be implemented) +# runs-on: ubuntu-latest +# if: github.event_name == 'push' +# steps: +# - uses: actions/checkout@v4 +# - name: create single cluster +# uses: AbsaOSS/k3d-action@4e8b3239042be1dc0aed6c5eb80c13b18200fc79 #v2.4.0 +# with: +# cluster-name: "k3dCluster" +# args: >- +# --agents 1 +# --image rancher/k3s:v1.28.6-k3s1 +# --port 80:80@loadbalancer +# --port 443:443@loadbalancer +# --wait +# - name: upgrade test +# run: make -C hack upgrade-test +# env: +# IMG: europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:${{ github.sha }} + +# gardener-integration-test: //TODO: change it to run dockerregistry verify (need to be implemented) +# if: github.event_name == 'push' +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v4 +# - name: save sa +# shell: bash +# run: 'echo "$GARDENER_SA" > /tmp/gardener-sa.yaml' +# env: +# GARDENER_SA: ${{ secrets.GARDENER_SA }} +# - name: provision gardener +# run: make -C hack provision-gardener +# env: +# GARDENER_SECRET_NAME: ${{ secrets.GARDENER_SECRET_NAME }} +# GARDENER_PROJECT: ${{ secrets.GARDENER_PROJECT }} +# GARDENER_SA_PATH: /tmp/gardener-sa.yaml +# - name: run test +# run: make -C hack integration-test-on-cluster +# env: +# IMG: europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:${{ github.sha }} +# - name: deprovision gardener +## https://docs.github.com/en/actions/learn-github-actions/expressions#always +# if: ${{ always() }} +# run: make -C hack deprovision-gardener +# env: +# GARDENER_SA_PATH: /tmp/gardener-sa.yaml diff --git a/.github/workflows/pull-gitleaks.yml b/.github/workflows/pull-gitleaks.yml deleted file mode 100644 index ee92cf44..00000000 --- a/.github/workflows/pull-gitleaks.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: pull-gitleaks -on: - pull_request: - types: [opened, edited, synchronize, reopened, ready_for_review] - -env: - GITLEAKS_VERSION: 8.18.2 - -jobs: - scan: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Fetch gitleaks ${{ env.GITLEAKS_VERSION }} - run: curl -Lso gitleaks.tar.gz https://github.com/gitleaks/gitleaks/releases/download/v${{ env.GITLEAKS_VERSION }}/gitleaks_${{ env.GITLEAKS_VERSION }}_linux_x64.tar.gz && tar -xvzf ./gitleaks.tar.gz - - name: Run gitleaks - # Scan commits between base and head of the pull request - run: ./gitleaks detect --log-opts=${PULL_BASE_SHA}...${PULL_HEAD_SHA} --verbose --redact - env: - PULL_BASE_SHA: ${{ github.event.pull_request.base.sha }} - PULL_HEAD_SHA: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/stale.yml 
b/.github/workflows/stale.yml deleted file mode 100644 index 1d415fed..00000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: 'Manage Stale Issues and Pull Requests' - -on: - schedule: - - cron: '0 0 * * *' # Runs daily at midnight - workflow_dispatch: # Allows manual triggering of the workflow - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9 - with: - days-before-stale: 60 - days-before-close: 7 - stale-issue-label: 'lifecycle/stale' - stale-pr-label: 'lifecycle/stale' - exempt-issue-labels: 'lifecycle/frozen,lifecycle/active' - exempt-pr-labels: 'lifecycle/frozen,lifecycle/active' - stale-issue-message: | - This issue has been automatically marked as stale due to the lack of recent activity. It will soon be closed if no further activity occurs. - Thank you for your contributions. - stale-pr-message: | - This pull request has been automatically marked as stale due to the lack of recent activity. It will soon be closed if no further activity occurs. - Thank you for your contributions. - close-issue-message: | - This issue has been automatically closed due to the lack of recent activity. - /lifecycle rotten - close-pr-message: | - This pull request has been automatically closed due to the lack of recent activity. - /lifecycle rotten diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..7d4dd69e --- /dev/null +++ b/.gitignore @@ -0,0 +1,44 @@ +# IDEs +.vscode +.idea +*.swp +*.swo +*~ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* +Dockerfile.cross + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +module-chart +module-chart-test +mod +default.yaml +moduletemplate.yaml +moduletemplate-k3d.yaml +docs/.DS_Store +.DS_Store +__debug_bin +vendor + +moduletemplate-latest.yaml +module-config.yaml +dockerregistry-operator.yaml +dockerregistry.yaml + +examples/python-text2img/resources/secrets/deepai.env \ No newline at end of file diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 00000000..51f4cf9c --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,26 @@ +# This is a configuration file for the markdownlint. You can use this file to overwrite the default settings. +# MD013 is set to false by default because many files include lines longer than the conventional 80 character limit +MD013: false +# Disable the Multiple headings with the same content rule +MD024: false +# MD029 is set to false because it generated some issues with longer lists +MD029: false +# MD044 is used to set capitalization for particular words. 
You can determine whether it should be used also for code blocks and HTML elements +MD044: + code_blocks: false + html_elements: false + names: + - Kyma + - Kubernetes + - ConfigMap + - CronJob + - CustomResourceDefinition + - Ingress + - Node + - PodPreset + - Pod + - ProwJob + - Secret + - ServiceBinding + - ServiceClass + - ServiceInstance \ No newline at end of file diff --git a/.markdownlintignore b/.markdownlintignore new file mode 100644 index 00000000..578db9cb --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1 @@ +_sidebar.md \ No newline at end of file diff --git a/.mlc.config.json b/.mlc.config.json index 9e9e139e..70d4d516 100644 --- a/.mlc.config.json +++ b/.mlc.config.json @@ -1,14 +1,12 @@ { - "_comment": "This is a configuration file for the [Markdown link check](https://github.com/tcort/markdown-link-check).", - "_comment": "All `/kyma-project` repositories in GitHub use [Markdown link check](https://github.com/tcort/markdown-link-check) to check their Markdown files for broken links.", - "_comment": "Configuration and maintenance of the Markdown link check tool is the responsibility of a repository owner.", - "_comment": "See the following configuration example.", - "_comment": "For more details read the [repository guidelines](https://github.com/kyma-project/community/blob/main/docs/guidelines/repository-guidelines/01-new-repository-settings.md).", - "replacementPatterns": [ - { - "_comment": "a replacement rule for all the in-repository references", - "pattern": "^/", - "replacement": "{{BASEURL}}/" - } - ] -} + "replacementPatterns": [ + { + "_comment": "a replacement rule for all the in-repository references", + "pattern": "^/", + "replacement": "{{BASEURL}}/" + } + ], + "timeout": "20s", + "retryCount": 5, + "fallbackRetryDelay": "30s" + } \ No newline at end of file diff --git a/.reuse/dep5 b/.reuse/dep5 old mode 100755 new mode 100644 index 96d499f7..62c36819 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -1,11 +1,11 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: -Upstream-Contact: -Source: -Disclaimer: The code in this project may include calls to APIs ("API Calls") of +Upstream-Name: docker-registry +Upstream-Contact: krzysztof.kwiatosz@sap.com +Source: https://github.com/kyma-project/docker-registry +Disclaimer: The code in this project may include calls to APIs (“API Calls”) of SAP or third-party products or services developed outside of this project - ("External Products"). - "APIs" means application programming interfaces, as well as their respective + (“External Products”). + “APIs” means application programming interfaces, as well as their respective specifications and implementing code that allows software to communicate with other software. API Calls to External Products are not licensed under the open source license @@ -16,7 +16,7 @@ Disclaimer: The code in this project may include calls to APIs ("API Calls") of alter, expand or supersede any terms of the applicable additional agreements. If you have a valid license agreement with SAP for the use of a particular SAP External Product, then you may make use of any API Calls included in this - project's code for that SAP External Product, subject to the terms of such + project’s code for that SAP External Product, subject to the terms of such license agreement. 
If you do not have a valid license agreement for the use of a particular SAP External Product, then you may only make use of any API Calls in this project for that SAP External Product for your internal, non-productive @@ -24,14 +24,6 @@ Disclaimer: The code in this project may include calls to APIs ("API Calls") of you any rights to use or access any SAP External Product, or provide any third parties the right to use of access any SAP External Product, through API Calls. -Files: -Copyright: SAP SE or an SAP affiliate company and contributors +Files: * +Copyright: 2023 SAP SE or an SAP affiliate company and Kyma contributors License: Apache-2.0 - -Files: -Copyright: -License: - -Files: -Copyright: -License: \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS index 35d99741..164aba18 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,38 +1,8 @@ -# Overview +# These are the default owners for the whole content of the `dockerregistry` repository. The default owners are automatically added as reviewers when you open a pull request unless different owners are specified in the file. +* @kyma-project/otters -# The CODEOWNERS file is a GitHub's feature which allows you to create an overview of the code ownership structure in your repository. -# Specify the default owners of your repository and code owners of particular repository parts to define who is automatically requested for a review each time a contributor creates a pull request to the main branch. -# Modify the default settings of the repository and select the "Require review from Code Owners" option on the protected main branch to require one approval from the owners of every part of the repository included in the pull request. For more details, read the following article on GitHub: https://help.github.com/articles/enabling-required-reviews-for-pull-requests/. +# All files and subdirectories in /docs +/docs/ @kyma-project/technical-writers -# Details - -# The CODEOWNERS file is located at the root of your repository and includes a specification of the code ownership structure of the repository. -# It is up to you to decide who is responsible for the review of particular parts and types of files in your repository. - -# When defining the file, keep in mind the following rules: - -# Lines starting with a hash (#) are comments. -# Each line of the file is a file pattern followed by one or more owners. -# You can use individual GitHub usernames, e-mail addresses, or team names to define owners. To define the owners with a team name, first add the team to your repository as collaborators with write access permissions. For more details, read the following article on GitHub: https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/. -# Define the default owners of the repository. They are automatically requested for a review of any content at the root of the repository and any content for which no owners are specified in this file. -# Provide granular ownership specification for folders and subfolders. You can also define the owners of specific file types in your repository. -# The order is important. The last matching pattern in the file has the most precedence. - -# Examples - -# These are the default owners for the whole content of the repository, including the content for which no owners are specified in the file. 
-# * @global-owner1 globalowner@example.com @org/team-name -# The following rule indicates that if a pull request affects folder1 at the root of the repository and any content in that folder, only this owner is requested for a review. -# /folder1/ @testuser1 -# When you use the following pattern, you specify that @testuser2 is responsible for the review of any file in folder2, excluding subfolders located therein. -# /folder2/* @testuser2 -# In this example, you define @testuser3 as the owner of any content in every "docs" folder in the repository. -# docs/ @testuser3 -# When you open a pull request that modifies the "yaml" files, only @testuser4 is requested for a review, and the global owner(s) are not. -# *.yaml @testuser4 - -# Reference - -# For more details, read the following articles on GitHub: -# https://help.github.com/articles/about-codeowners/ -# https://github.com/blog/2392-introducing-code-owners/ +# All .md files +*.md @kyma-project/technical-writers diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ed7a6b01..b86285c7 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -# Code of Conduct +# Code of conduct -Each contributor and maintainer of this project agrees to follow the community [Code of Conduct](https://github.com/kyma-project/community/blob/main/docs/contributing/01-code-of-conduct.md) that relies on the CNCF Code of Conduct. Read it to learn about the agreed standards of behavior, shared values that govern our community, and details on how to report any suspected Code of Conduct violations. +Each contributor and maintainer of this project agrees to follow the [community Code of Conduct](https://github.com/kyma-project/community/blob/main/docs/contributing/01-code-of-conduct.md) that relies on the CNCF Code of Conduct. Read it to learn about the agreed standards of behavior, shared values that govern our community, and details on how to report any suspected Code of Conduct violations. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1ac0012c..71bc0fc6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -# Contributing +## Overview -To contribute to this project, follow the general [Contributing Rules](https://github.com/kyma-project/community/blob/main/docs/contributing/02-contributing.md). +To contribute to this project, follow the general [contributing](https://github.com/kyma-project/community/blob/main/docs/contributing/02-contributing.md) guidelines. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a2ecafa1 --- /dev/null +++ b/Makefile @@ -0,0 +1,31 @@ +PROJECT_ROOT=. 
+OPERATOR_ROOT=./components/operator +include ${PROJECT_ROOT}/hack/help.mk +include ${PROJECT_ROOT}/hack/k3d.mk + +##@ Installation +.PHONY: install-dockerregistry-main +install-dockerregistry-main: ## Install dockerregistry with operator using default dockerregistry cr + make -C ${OPERATOR_ROOT} deploy-main apply-default-dockerregistry-cr check-dockerregistry-installation + +.PHONY: install-dockerregistry-custom-operator +install-dockerregistry-custom-operator: ## Install dockerregistry with operator from IMG env using default dockerregistry cr + $(call check-var,IMG) + make -C ${OPERATOR_ROOT} deploy apply-default-dockerregistry-cr check-dockerregistry-installation + +.PHONY: install-dockerregistry-latest-release +install-dockerregistry-latest-release: ## Install dockerregistry from latest release + kubectl create namespace kyma-system || true + kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/dockerregistry-operator.yaml + kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/default-dockerregistry-cr.yaml -n kyma-system + make -C ${OPERATOR_ROOT} check-dockerregistry-installation + +.PHONY: remove-dockerregistry +remove-dockerregistry: ## Remove dockerregistry-cr and dockerregistry operator + make -C ${OPERATOR_ROOT} remove-dockerregistry undeploy + +.PHONY: run +run: create-k3d install-dockerregistry-main ## Create k3d cluster and install dockerregistry from main + +check-var = $(if $(strip $($1)),,$(error "$1" is not defined)) + diff --git a/README.md b/README.md index 5714e17b..f4daabbd 100644 --- a/README.md +++ b/README.md @@ -1,55 +1,105 @@ -> **NOTE:** This is a general template that you can use for a project README.md. Except for the mandatory sections, use only those sections that suit your use case but keep the proposed section order. -> -> Mandatory sections: -> - `Overview` -> - `Prerequisites`, if there are any requirements regarding hard- or software -> - `Installation` -> - `Contributing` - do not change this! -> - `Code of Conduct` - do not change this! -> - `Licensing` - do not change this! - -# {Project Title} - -> Modify the title and insert the name of your project. Use Heading 1 (H1). +# Dockerregistry + +## Status +![GitHub tag checks state](https://img.shields.io/github/checks-status/kyma-project/serverless-manager/main?label=serverless-operator&link=https%3A%2F%2Fgithub.com%2Fkyma-project%2Fserverless-manager%2Fcommits%2Fmain) + +[![REUSE status](https://api.reuse.software/badge/github.com/kyma-project/docker-registry)](https://api.reuse.software/info/github.com/kyma-project/docker-registry) + ## Overview - -> Provide a description of the project's functionality. -> -> If it is an example README.md, describe what the example illustrates. +Serverless Operator allows deploying the [Serverless](https://kyma-project.io/docs/kyma/latest/01-overview/serverless/) component in the Kyma cluster in compatibility with [Lifecycle Manager](https://github.com/kyma-project/lifecycle-manager). -## Prerequisites +## Install -> List the requirements to run the project or example. +Create the `kyma-system` namespace: -## Installation +```bash +kubectl create namespace kyma-system +``` -> Explain the steps to install your project. If there are multiple installation options, mention the recommended one and include others in a separate document. Create an ordered list for each installation task. -> -> If it is an example README.md, describe how to build, run locally, and deploy the example. 
Format the example as code blocks and specify the language, highlighting where possible. Explain how you can validate that the example ran successfully. For example, define the expected output or commands to run which check a successful deployment. -> -> Add subsections (H3) for better readability. +Apply the following manifest to install Docker Registry Operator: -## Usage +```bash +kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/dockerregistry-operator.yaml +``` -> Explain how to use the project. You can create multiple subsections (H3). Include the instructions or provide links to the related documentation. +To get Docker Registry installed, apply the sample DockerRegistry CR: + +```bash +kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/default-dockerregistry-cr.yaml +``` ## Development -> Add instructions on how to develop the project or example. It must be clear what to do and, for example, how to trigger the tests so that other contributors know how to make their pull requests acceptable. Include the instructions or provide links to related documentation. +### Prerequisites + +- Access to a Kubernetes (v1.24 or higher) cluster +- [Go](https://go.dev/) +- [k3d](https://k3d.io/) +- [Docker](https://www.docker.com/) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Kubebuilder](https://book.kubebuilder.io/) + + +## Installation in the k3d Cluster Using Make Targets + +1. Clone the project. + + ```bash + git clone https://github.com/kyma-project/docker-registry.git && cd docker-registry/ + ``` + +2. Build Docker Registry Operator locally and run it in the k3d cluster. + + ```bash + make run + ``` + +> **NOTE:** To clean up the k3d cluster, use the `delete-k3d` make target. + + +## Using Docker Registry Operator + +- Create a DockerRegistry instance. + + ```bash + kubectl apply -f config/samples/default-dockerregistry-cr.yaml + ``` + +- Delete a DockerRegistry instance. -## Contributing - + ```bash + kubectl delete -f config/samples/default-dockerregistry-cr.yaml + ``` -See the [Contributing Rules](CONTRIBUTING.md). +- Use an external registry. -## Code of Conduct - + The following example shows how you can modify the Docker registry address using the `dockerregistries.operator.kyma-project.io` CR: -See the [Code of Conduct](CODE_OF_CONDUCT.md) document. + ```bash + kubectl create secret generic my-secret \ + --namespace kyma-system \ + --from-literal username="" \ + --from-literal password="" \ + --from-literal serverAddress="" \ + --from-literal registryAddress="" + ``` -## Licensing - + > **NOTE:** For DockerHub: + + > - SERVER_ADDRESS is "https://index.docker.io/v1/", + > - USERNAME and REGISTRY_ADDRESS must be identical. -See the [license](./LICENSE) file. + ```bash + cat < $(PROJECT_ROOT)/dockerregistry-operator.yaml + + +.PHONY: apply-default-dockerregistry-cr +apply-default-dockerregistry-cr: ## Apply the k3d dockerregistry CR. + kubectl apply \ + -f ${PROJECT_ROOT}/config/samples/default-dockerregistry-cr.yaml + +.PHONY: remove-dockerregistry +remove-dockerregistry: ## Remove Dockerregistry CR + kubectl delete dockerregistry -n kyma-system default --timeout 2m || (kubectl get dockerregistry -n kyma-system -oyaml && false) + + +.PHONY: check-dockerregistry-installation +check-dockerregistry-installation: ## Wait for Dockerregistry CR to be in Ready state.
+ # wait some time to make sure operator starts the reconciliation first + sleep 10 + + ./hack/verify_dockerregistry_status.sh || \ + (make print-dockerregistry-details && false) + + kubectl wait --for condition=Available -n kyma-system deployment dockerregistry-operator --timeout=60s || \ + (make print-dockerregistry-details && false) + +.PHONY: print-dockerregistry-details +print-dockerregistry-details: ## Print all pods, deploys and dockerregistry CRs in the kyma-system namespace. + kubectl get dockerregistry -n kyma-system -oyaml + kubectl get deploy -n kyma-system -oyaml + kubectl get pods -n kyma-system -oyaml + + +##@ Module +.PHONY: module-image +module-image: docker-build docker-push ## Build the Module Image and push it to a registry defined in IMG. + echo "built and pushed module image $(IMG)" diff --git a/components/operator/api/v1alpha1/dockerregistry_types.go b/components/operator/api/v1alpha1/dockerregistry_types.go new file mode 100644 index 00000000..4d3b574a --- /dev/null +++ b/components/operator/api/v1alpha1/dockerregistry_types.go @@ -0,0 +1,162 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Endpoint struct { + Endpoint string `json:"endpoint"` +} + +// DockerRegistrySpec defines the desired state of DockerRegistry +type DockerRegistrySpec struct { + // Sets the timeout for the Function health check. 
The default value in seconds is `10` + HealthzLivenessTimeout string `json:"healthzLivenessTimeout,omitempty"` +} + +type State string + +type Served string + +type ConditionReason string + +type ConditionType string + +const ( + StateReady State = "Ready" + StateProcessing State = "Processing" + StateWarning State = "Warning" + StateError State = "Error" + StateDeleting State = "Deleting" + + ServedTrue Served = "True" + ServedFalse Served = "False" + + // installation and deletion details + ConditionTypeInstalled = ConditionType("Installed") + + // prerequisites and soft dependencies + ConditionTypeConfigured = ConditionType("Configured") + + // deletion + ConditionTypeDeleted = ConditionType("Deleted") + + ConditionReasonConfiguration = ConditionReason("Configuration") + ConditionReasonConfigurationErr = ConditionReason("ConfigurationErr") + ConditionReasonConfigured = ConditionReason("Configured") + ConditionReasonInstallation = ConditionReason("Installation") + ConditionReasonInstallationErr = ConditionReason("InstallationErr") + ConditionReasonInstalled = ConditionReason("Installed") + ConditionReasonDuplicated = ConditionReason("Duplicated") + ConditionReasonDeletion = ConditionReason("Deletion") + ConditionReasonDeletionErr = ConditionReason("DeletionErr") + ConditionReasonDeleted = ConditionReason("Deleted") + + Finalizer = "dockerregistry-operator.kyma-project.io/deletion-hook" +) + +type DockerRegistryStatus struct { + SecretName string `json:"secretName,omitempty"` + + HealthzLivenessTimeout string `json:"healthzLivenessTimeout,omitempty"` + + // State signifies current state of DockerRegistry. + // Value can be one of ("Ready", "Processing", "Error", "Deleting"). + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=Processing;Deleting;Ready;Error;Warning + State State `json:"state,omitempty"` + + // Served signifies that current DockerRegistry is managed. + // Value can be one of ("True", "False"). + // +kubebuilder:validation:Enum=True;False + Served Served `json:"served"` + + // Conditions associated with CustomStatus. 
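+ // The condition types used by this operator are Installed, Configured, and Deleted (see the ConditionType constants above).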
+ Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen=true + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="Configured",type="string",JSONPath=".status.conditions[?(@.type=='Configured')].status" +//+kubebuilder:printcolumn:name="Installed",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" +//+kubebuilder:printcolumn:name="generation",type="integer",JSONPath=".metadata.generation" +//+kubebuilder:printcolumn:name="age",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="state",type="string",JSONPath=".status.state" + +// DockerRegistry is the Schema for the dockerregistry API +type DockerRegistry struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DockerRegistrySpec `json:"spec,omitempty"` + Status DockerRegistryStatus `json:"status,omitempty"` +} + +func (s *DockerRegistry) UpdateConditionFalse(c ConditionType, r ConditionReason, err error) { + condition := metav1.Condition{ + Type: string(c), + Status: "False", + LastTransitionTime: metav1.Now(), + Reason: string(r), + Message: err.Error(), + } + meta.SetStatusCondition(&s.Status.Conditions, condition) +} + +func (s *DockerRegistry) UpdateConditionUnknown(c ConditionType, r ConditionReason, msg string) { + condition := metav1.Condition{ + Type: string(c), + Status: "Unknown", + LastTransitionTime: metav1.Now(), + Reason: string(r), + Message: msg, + } + meta.SetStatusCondition(&s.Status.Conditions, condition) +} + +func (s *DockerRegistry) UpdateConditionTrue(c ConditionType, r ConditionReason, msg string) { + condition := metav1.Condition{ + Type: string(c), + Status: "True", + LastTransitionTime: metav1.Now(), + Reason: string(r), + Message: msg, + } + meta.SetStatusCondition(&s.Status.Conditions, condition) +} + +func (s *DockerRegistry) IsServedEmpty() bool { + return s.Status.Served == "" +} + +//+kubebuilder:object:root=true + +// DockerRegistryList contains a list of DockerRegistry +type DockerRegistryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DockerRegistry `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DockerRegistry{}, &DockerRegistryList{}) +} diff --git a/components/operator/api/v1alpha1/groupversion_info.go b/components/operator/api/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..9a7c8cb2 --- /dev/null +++ b/components/operator/api/v1alpha1/groupversion_info.go @@ -0,0 +1,41 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the operator v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=operator.kyma-project.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const ( + DockerregistryGroup = "operator.kyma-project.io" + DockerregistryVersion = "v1alpha1" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: DockerregistryGroup, Version: DockerregistryVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/components/operator/api/v1alpha1/helpers.go b/components/operator/api/v1alpha1/helpers.go new file mode 100644 index 00000000..eea1d0e1 --- /dev/null +++ b/components/operator/api/v1alpha1/helpers.go @@ -0,0 +1,26 @@ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (s *DockerRegistry) IsInState(state State) bool { + return s.Status.State == state +} + +func (s *DockerRegistry) IsCondition(conditionType ConditionType) bool { + return meta.FindStatusCondition( + s.Status.Conditions, string(conditionType), + ) != nil +} + +func (s *DockerRegistry) IsConditionTrue(conditionType ConditionType) bool { + condition := meta.FindStatusCondition(s.Status.Conditions, string(conditionType)) + return condition != nil && condition.Status == metav1.ConditionTrue +} + +const ( + DefaultEnableInternal = false + EndpointDisabled = "" +) diff --git a/components/operator/api/v1alpha1/zz_generated.deepcopy.go b/components/operator/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..74d48ab7 --- /dev/null +++ b/components/operator/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistry) DeepCopyInto(out *DockerRegistry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistry. +func (in *DockerRegistry) DeepCopy() *DockerRegistry { + if in == nil { + return nil + } + out := new(DockerRegistry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DockerRegistry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistryList) DeepCopyInto(out *DockerRegistryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DockerRegistry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistryList. +func (in *DockerRegistryList) DeepCopy() *DockerRegistryList { + if in == nil { + return nil + } + out := new(DockerRegistryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerRegistryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistrySpec) DeepCopyInto(out *DockerRegistrySpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistrySpec. +func (in *DockerRegistrySpec) DeepCopy() *DockerRegistrySpec { + if in == nil { + return nil + } + out := new(DockerRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistryStatus) DeepCopyInto(out *DockerRegistryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistryStatus. +func (in *DockerRegistryStatus) DeepCopy() *DockerRegistryStatus { + if in == nil { + return nil + } + out := new(DockerRegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} diff --git a/components/operator/controllers/controller.go b/components/operator/controllers/controller.go new file mode 100644 index 00000000..658ee537 --- /dev/null +++ b/components/operator/controllers/controller.go @@ -0,0 +1,80 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/kyma-project/docker-registry/components/operator/internal/predicate" + "github.com/kyma-project/docker-registry/components/operator/internal/state" + "github.com/kyma-project/docker-registry/components/operator/internal/tracing" + "github.com/pkg/errors" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// dockerRegistryReconciler reconciles a DockerRegistry object +type dockerRegistryReconciler struct { + initStateMachine func(*zap.SugaredLogger) state.StateReconciler + client client.Client + log *zap.SugaredLogger +} + +func NewDockerRegistryReconciler(client client.Client, config *rest.Config, recorder record.EventRecorder, log *zap.SugaredLogger, chartPath string) *dockerRegistryReconciler { + cache := chart.NewSecretManifestCache(client) + + return &dockerRegistryReconciler{ + initStateMachine: func(log *zap.SugaredLogger) state.StateReconciler { + return state.NewMachine(client, config, recorder, log, cache, chartPath) + }, + client: client, + log: log, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (sr *dockerRegistryReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.DockerRegistry{}, builder.WithPredicates(predicate.NoStatusChangePredicate{})). + Watches(&corev1.Service{}, tracing.ServiceCollectorWatcher()). + Complete(sr) +} + +func (sr *dockerRegistryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := sr.log.With("request", req) + log.Info("reconciliation started") + + instance, err := state.GetDockerRegistryOrServed(ctx, req, sr.client) + if err != nil { + log.Warnf("while getting dockerregistry, got error: %s", err.Error()) + return ctrl.Result{}, errors.Wrap(err, "while fetching dockerregistry instance") + } + if instance == nil { + log.Info("Couldn't find proper instance of dockerregistry") + return ctrl.Result{}, nil + } + + r := sr.initStateMachine(log) + return r.Reconcile(ctx, *instance) +} diff --git a/components/operator/controllers/controller_rbac.go b/components/operator/controllers/controller_rbac.go new file mode 100644 index 00000000..4d94ff8a --- /dev/null +++ b/components/operator/controllers/controller_rbac.go @@ -0,0 +1,35 @@ +package controllers + +// TODO: dockerregistry-manager doesn't need almost half of these rbscs. It uses them only to create another rbacs ( is there any onther option? 
- investigate ) + +//+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;patch +//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services;secrets;serviceaccounts;configmaps,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=nodes,verbs=list;watch;get +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=apps,resources=replicasets,verbs=list +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get +//+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=batch,resources=jobs/status,verbs=get + +//+kubebuilder:rbac:groups=policy,resources=podsecuritypolicies,verbs=use + +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings;roles,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=operator.kyma-project.io,resources=dockerregistries,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=operator.kyma-project.io,resources=dockerregistries/status,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=operator.kyma-project.io,resources=dockerregistries/finalizers,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations;mutatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=scheduling.k8s.io,resources=priorityclasses,verbs=get;list;watch;create;update;patch;delete;deletecollection diff --git a/components/operator/controllers/controller_test.go b/components/operator/controllers/controller_test.go new file mode 100644 index 00000000..30c26993 --- /dev/null +++ b/components/operator/controllers/controller_test.go @@ -0,0 +1,99 @@ +package controllers + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/utils/ptr" +) + +var _ = Describe("DockerRegistry controller", func() { + Context("When creating fresh instance", func() { + const ( + namespaceName = "kyma-system" + crName = "cr-test" + deploymentName = "internal-docker-registry" + registrySecret = registry.SecretName + ) + + var ( + defaultData = dockerRegistryData{ + TraceCollectorURL: ptr.To[string](v1alpha1.EndpointDisabled), + EnableInternal: ptr.To[bool](v1alpha1.DefaultEnableInternal), + } + ) + + It("The status should be Success", func() { + h := testHelper{ + ctx: context.Background(), + namespaceName: namespaceName, + } + h.createNamespace() + + { + emptyData := v1alpha1.DockerRegistrySpec{} + shouldCreateDockerRegistry(h, crName, deploymentName, emptyData) + shouldPropagateSpecProperties(h, registrySecret, defaultData) + } + + shouldDeleteDockerRegistry(h, crName, deploymentName) + }) + }) +}) + +func shouldCreateDockerRegistry(h testHelper, name, deploymentName string, spec v1alpha1.DockerRegistrySpec) { + // act + h.createDockerRegistry(name, spec) + + // we have to update deployment status manually + h.updateDeploymentStatus(deploymentName) + + // assert + Eventually(h.getDockerRegistryStatusFunc(name)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 20). + Should(ConditionTrueMatcher()) +} + +func shouldPropagateSpecProperties(h testHelper, registrySecretName string, expected dockerRegistryData) { + Eventually(h.createCheckRegistrySecretFunc(registrySecretName, expected.registrySecretData)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) +} + +func shouldDeleteDockerRegistry(h testHelper, name, deploymentName string) { + // initial assert + var deployList appsv1.DeploymentList + Eventually(h.listKubernetesObjectFunc(&deployList)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) + + Expect(deployList.Items).To(HaveLen(1)) + + // act + var dockerRegistry v1alpha1.DockerRegistry + Eventually(h.getKubernetesObjectFunc(name, &dockerRegistry)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) + + Expect(k8sClient.Delete(h.ctx, &dockerRegistry)).To(Succeed()) + + Eventually(h.getKubernetesObjectFunc(name, &dockerRegistry)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) + + // assert + Eventually(h.getKubernetesObjectFunc(deploymentName, &appsv1.Deployment{})). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) +} diff --git a/components/operator/controllers/suite_test.go b/components/operator/controllers/suite_test.go new file mode 100644 index 00000000..64dca95e --- /dev/null +++ b/components/operator/controllers/suite_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + uberzap "go.uber.org/zap" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + operatorv1alpha1 "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + config *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + + suiteCtx context.Context + cancelSuiteCtx context.CancelFunc +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "operator", "base", "crd", "bases"), + }, + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "..", "..", "bin", "k8s", "kubebuilder_assets"), + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + config, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(config).NotTo(BeNil()) + + err = operatorv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(config, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + k8sManager, err := ctrl.NewManager(config, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + config := uberzap.NewProductionConfig() + reconcilerLogger, err := config.Build() + Expect(err).NotTo(HaveOccurred()) + + chartPath := filepath.Join("..", "..", "..", "config", "docker-registry") + err = (NewDockerRegistryReconciler( + k8sManager.GetClient(), + k8sManager.GetConfig(), + record.NewFakeRecorder(100), + reconcilerLogger.Sugar(), + chartPath)). + SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + + suiteCtx, cancelSuiteCtx = context.WithCancel(context.Background()) + + err = k8sManager.Start(suiteCtx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancelSuiteCtx() + + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/components/operator/controllers/testhelper_test.go b/components/operator/controllers/testhelper_test.go new file mode 100644 index 00000000..bb5b299d --- /dev/null +++ b/components/operator/controllers/testhelper_test.go @@ -0,0 +1,285 @@ +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + gomegatypes "github.com/onsi/gomega/types" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type conditionMatcher struct { + expectedState v1alpha1.State + expectedConditionStatus metav1.ConditionStatus +} + +func ConditionTrueMatcher() gomegatypes.GomegaMatcher { + return &conditionMatcher{ + expectedState: v1alpha1.StateReady, + expectedConditionStatus: metav1.ConditionTrue, + } +} + +func (matcher *conditionMatcher) Match(actual interface{}) (success bool, err error) { + status, ok := actual.(v1alpha1.DockerRegistryStatus) + if !ok { + return false, fmt.Errorf("ConditionMatcher matcher expects an v1alpha1.DockerRegistryStatus") + } + + if status.State != matcher.expectedState { + return false, nil + } + + for _, condition := range status.Conditions { + if condition.Status != matcher.expectedConditionStatus { + return false, nil + } + } + + return true, nil +} + +func (matcher *conditionMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n\t%#v\nto be in %s state with all %s conditions", + actual, matcher.expectedState, matcher.expectedConditionStatus) +} + +func (matcher *conditionMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n\t%#v\nto be in %s state with all %s conditions", + actual, matcher.expectedState, matcher.expectedConditionStatus) +} + +type testHelper struct { + ctx context.Context + namespaceName string +} + +func (h *testHelper) updateDeploymentStatus(deploymentName string) { + By(fmt.Sprintf("Updating deployment status: %s", deploymentName)) + var deployment appsv1.Deployment + Eventually(h.getKubernetesObjectFunc(deploymentName, &deployment)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 30). + Should(BeTrue()) + + deployment.Status.Conditions = append(deployment.Status.Conditions, appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + Reason: "test-reason", + Message: "test-message", + }) + deployment.Status.Replicas = 1 + Expect(k8sClient.Status().Update(h.ctx, &deployment)).To(Succeed()) + + replicaSetName := h.createReplicaSetForDeployment(deployment) + + var replicaSet appsv1.ReplicaSet + Eventually(h.getKubernetesObjectFunc(replicaSetName, &replicaSet)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 30). 
+ Should(BeTrue()) + + replicaSet.Status.ReadyReplicas = 1 + replicaSet.Status.Replicas = 1 + Expect(k8sClient.Status().Update(h.ctx, &replicaSet)).To(Succeed()) + + By(fmt.Sprintf("Deployment status updated: %s", deploymentName)) +} + +func (h *testHelper) createReplicaSetForDeployment(deployment appsv1.Deployment) string { + replicaSetName := fmt.Sprintf("%s-replica-set", deployment.Name) + By(fmt.Sprintf("Creating replica set (for deployment): %s", replicaSetName)) + var ( + trueValue = true + one = int32(1) + ) + replicaSet := appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: replicaSetName, + Namespace: h.namespaceName, + Labels: deployment.Spec.Selector.MatchLabels, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: deployment.Name, + UID: deployment.GetUID(), + Controller: &trueValue, + }, + }, + }, + // dummy values + Spec: appsv1.ReplicaSetSpec{ + Replicas: &one, + Selector: deployment.Spec.Selector, + Template: deployment.Spec.Template, + }, + } + Expect(k8sClient.Create(h.ctx, &replicaSet)).To(Succeed()) + By(fmt.Sprintf("Replica set (for deployment) created: %s", replicaSetName)) + return replicaSetName +} + +func (h *testHelper) createDockerRegistry(crName string, spec v1alpha1.DockerRegistrySpec) { + By(fmt.Sprintf("Creating cr: %s", crName)) + dockerRegistry := v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: crName, + Namespace: h.namespaceName, + Labels: map[string]string{ + "operator.kyma-project.io/kyma-name": "test", + }, + }, + Spec: spec, + } + Expect(k8sClient.Create(h.ctx, &dockerRegistry)).To(Succeed()) + By(fmt.Sprintf("Crd created: %s", crName)) +} + +func (h *testHelper) createNamespace() { + By(fmt.Sprintf("Creating namespace: %s", h.namespaceName)) + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: h.namespaceName, + }, + } + Expect(k8sClient.Create(h.ctx, &namespace)).To(Succeed()) + By(fmt.Sprintf("Namespace created: %s", h.namespaceName)) +} + +func (h *testHelper) getKubernetesObjectFunc(objectName string, obj client.Object) func() (bool, error) { + return func() (bool, error) { + return h.getKubernetesObject(objectName, obj) + } +} + +func (h *testHelper) getKubernetesObject(objectName string, obj client.Object) (bool, error) { + key := types.NamespacedName{ + Name: objectName, + Namespace: h.namespaceName, + } + + err := k8sClient.Get(h.ctx, key, obj) + if err != nil { + return false, err + } + return true, err +} + +func (h *testHelper) listKubernetesObjectFunc(list client.ObjectList) func() (bool, error) { + return func() (bool, error) { + return h.listKubernetesObject(list) + } +} + +func (h *testHelper) listKubernetesObject(list client.ObjectList) (bool, error) { + opts := client.ListOptions{ + Namespace: h.namespaceName, + } + + err := k8sClient.List(h.ctx, list, &opts) + if err != nil { + return false, err + } + return true, err +} + +func (h *testHelper) getDockerRegistryStatusFunc(name string) func() (v1alpha1.DockerRegistryStatus, error) { + return func() (v1alpha1.DockerRegistryStatus, error) { + return h.getDockerRegistryStatus(name) + } +} + +func (h *testHelper) getDockerRegistryStatus(name string) (v1alpha1.DockerRegistryStatus, error) { + var dockerRegistry v1alpha1.DockerRegistry + key := types.NamespacedName{ + Name: name, + Namespace: h.namespaceName, + } + err := k8sClient.Get(h.ctx, key, &dockerRegistry) + if err != nil { + return v1alpha1.DockerRegistryStatus{}, err + } + return dockerRegistry.Status, nil +} + +type 
dockerRegistryData struct { + EventPublisherProxyURL *string + TraceCollectorURL *string + EnableInternal *bool + registrySecretData +} + +type registrySecretData struct { + Username *string + Password *string + ServerAddress *string + RegistryAddress *string +} + +func (d *registrySecretData) toMap() map[string]string { + result := map[string]string{} + if d.Username != nil { + result["username"] = *d.Username + } + if d.Password != nil { + result["password"] = *d.Password + } + if d.ServerAddress != nil { + result["serverAddress"] = *d.ServerAddress + } + if d.RegistryAddress != nil { + result["registryAddress"] = *d.RegistryAddress + } + return result +} + +func (h *testHelper) createCheckRegistrySecretFunc(registrySecret string, expected registrySecretData) func() (bool, error) { + return func() (bool, error) { + var configurationSecret corev1.Secret + + if ok, err := h.getKubernetesObject( + registrySecret, &configurationSecret); !ok || err != nil { + return ok, err + } + if ok, err := secretContainsSameValues( + expected.toMap(), configurationSecret); err != nil { + return ok, err + } + if ok, err := secretContainsRequired(configurationSecret); err != nil { + return ok, err + } + return true, nil + } +} + +func secretContainsRequired(configurationSecret corev1.Secret) (bool, error) { + for _, k := range []string{"username", "password", "pullRegAddr", "pushRegAddr", ".dockerconfigjson"} { + _, ok := configurationSecret.Data[k] + if !ok { + return false, fmt.Errorf("values not propagated (%s is required)", k) + } + } + return false, nil +} + +func secretContainsSameValues(expected map[string]string, configurationSecret corev1.Secret) (bool, error) { + for k, expectedV := range expected { + v, okV := configurationSecret.Data[k] + if okV == false { + return false, fmt.Errorf("values not propagated (%s: nil != %s )", k, expectedV) + } + if expectedV != string(v) { + return false, fmt.Errorf("values not propagated (%s: %s != %s )", k, string(v), expectedV) + } + } + return false, nil +} diff --git a/components/operator/hack/boilerplate.go.txt b/components/operator/hack/boilerplate.go.txt new file mode 100755 index 00000000..29c55ecd --- /dev/null +++ b/components/operator/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ \ No newline at end of file diff --git a/components/operator/hack/verify_dockerregistry_status.sh b/components/operator/hack/verify_dockerregistry_status.sh new file mode 100755 index 00000000..5726552c --- /dev/null +++ b/components/operator/hack/verify_dockerregistry_status.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +function get_dockerregistry_status () { + local number=1 + while [[ $number -le 100 ]] ; do + echo ">--> checking dockerregistry status #$number" + local STATUS=$(kubectl get dockerregistry -n kyma-system default -o jsonpath='{.status.state}') + echo "dockerregistry status: ${STATUS:='UNKNOWN'}" + [[ "$STATUS" == "Ready" ]] && return 0 + sleep 5 + ((number = number + 1)) + done + + kubectl get all --all-namespaces + exit 1 +} + +get_dockerregistry_status diff --git a/components/operator/internal/annotation/disclaimer.go b/components/operator/internal/annotation/disclaimer.go new file mode 100644 index 00000000..1ed584fc --- /dev/null +++ b/components/operator/internal/annotation/disclaimer.go @@ -0,0 +1,22 @@ +package annotation + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +const ( + annotation = "dockerregistry-manager.kyma-project.io/managed-by-dockerregistry-manager-disclaimer" + message = "DO NOT EDIT - This resource is managed by DockerRegistry-Manager.\nAny modifications are discarded and the resource is reverted to the original state." +) + +func AddDoNotEditDisclaimer(obj unstructured.Unstructured) unstructured.Unstructured { + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + + annotations[annotation] = message + obj.SetAnnotations(annotations) + + return obj +} diff --git a/components/operator/internal/annotation/disclaimer_test.go b/components/operator/internal/annotation/disclaimer_test.go new file mode 100644 index 00000000..26f013f1 --- /dev/null +++ b/components/operator/internal/annotation/disclaimer_test.go @@ -0,0 +1,17 @@ +package annotation + +import ( + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func TestAddDoNotEditDisclaimer(t *testing.T) { + t.Run("add disclaimer", func(t *testing.T) { + obj := unstructured.Unstructured{} + obj = AddDoNotEditDisclaimer(obj) + + require.Equal(t, message, obj.GetAnnotations()[annotation]) + }) +} diff --git a/components/operator/internal/chart/cache.go b/components/operator/internal/chart/cache.go new file mode 100644 index 00000000..3582308f --- /dev/null +++ b/components/operator/internal/chart/cache.go @@ -0,0 +1,142 @@ +package chart + +import ( + "context" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + _ ManifestCache = (*inMemoryManifestCache)(nil) + _ ManifestCache = (*secretManifestCache)(nil) +) + +var ( + emptySpecManifest = DockerRegistrySpecManifest{} +) + +type ManifestCache interface { + Set(context.Context, client.ObjectKey, DockerRegistrySpecManifest) error + Get(context.Context, client.ObjectKey) (DockerRegistrySpecManifest, error) + Delete(context.Context, client.ObjectKey) error +} + +// inMemoryManifestCache provides an in-memory processor to store dockerregistry Spec and rendered chart manifest. By using sync.Map for caching, +// concurrent operations to the processor from diverse reconciliations are considered safe. 
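+// The controller uses the Secret-backed secretManifestCache (see NewDockerRegistryReconciler); this in-memory variant is mainly used in tests.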
+// +// Inside the processor is stored chart manifest with used custom flags by client.ObjectKey key. +type inMemoryManifestCache struct { + processor sync.Map +} + +// NewInMemoryManifestCache returns a new instance of inMemoryManifestCache. +func NewInMemoryManifestCache() *inMemoryManifestCache { + return &inMemoryManifestCache{ + processor: sync.Map{}, + } +} + +// Get loads the DockerRegistrySpecManifest from inMemoryManifestCache for the passed client.ObjectKey. +func (r *inMemoryManifestCache) Get(_ context.Context, key client.ObjectKey) (DockerRegistrySpecManifest, error) { + value, ok := r.processor.Load(key) + if !ok { + return emptySpecManifest, nil + } + + return *value.(*DockerRegistrySpecManifest), nil +} + +// Set saves the passed flags and manifest into inMemoryManifestCache for the client.ObjectKey. +func (r *inMemoryManifestCache) Set(_ context.Context, key client.ObjectKey, spec DockerRegistrySpecManifest) error { + r.processor.Store(key, &spec) + + return nil +} + +// Delete deletes flags and manifest from inMemoryManifestCache for the passed client.ObjectKey. +func (r *inMemoryManifestCache) Delete(_ context.Context, key client.ObjectKey) error { + r.processor.Delete(key) + return nil +} + +// secretManifestCache - provides a Secret based processor to store dockerregistry Spec and rendered chart manifest. +// +// Inside the secret we store manifest and flags used to render it. +type secretManifestCache struct { + client client.Client +} + +type DockerRegistrySpecManifest struct { + ManagerUID string + CustomFlags map[string]interface{} + Manifest string +} + +// NewSecretManifestCache - returns a new instance of SecretManifestCache. +func NewSecretManifestCache(client client.Client) *secretManifestCache { + return &secretManifestCache{ + client: client, + } +} + +// Delete - removes Secret cache based on the passed client.ObjectKey. +func (m *secretManifestCache) Delete(ctx context.Context, key client.ObjectKey) error { + err := m.client.Delete(ctx, &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + }) + + return client.IgnoreNotFound(err) +} + +// Get - loads the DockerRegistrySpecManifest from SecretManifestCache based on the passed client.ObjectKey. +func (m *secretManifestCache) Get(ctx context.Context, key client.ObjectKey) (DockerRegistrySpecManifest, error) { + secret := corev1.Secret{} + err := m.client.Get(ctx, key, &secret) + if errors.IsNotFound(err) { + return emptySpecManifest, nil + } + if err != nil { + return emptySpecManifest, err + } + + spec := DockerRegistrySpecManifest{} + err = json.Unmarshal(secret.Data["spec"], &spec) + if err != nil { + return emptySpecManifest, err + } + + return spec, nil +} + +// Set - saves the passed flags and manifest into Secret based on the client.ObjectKey. 
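+// If the Secret does not exist yet, it is created; otherwise it is updated in place.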
+func (m *secretManifestCache) Set(ctx context.Context, key client.ObjectKey, spec DockerRegistrySpecManifest) error { + byteSpec, err := json.Marshal(&spec) + if err != nil { + return err + } + + secret := corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Data: map[string][]byte{ + "spec": byteSpec, + }, + } + + err = m.client.Update(ctx, &secret) + if !errors.IsNotFound(err) { + return err + } + + return m.client.Create(ctx, &secret) +} diff --git a/components/operator/internal/chart/cache_test.go b/components/operator/internal/chart/cache_test.go new file mode 100644 index 00000000..7b182399 --- /dev/null +++ b/components/operator/internal/chart/cache_test.go @@ -0,0 +1,262 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const testSecretNamespace = "kyma-system" + +func TestManifestCache_Delete(t *testing.T) { + t.Run("delete secret", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + fixSecretCache(t, key, emptySpecManifest), + ).Build() + + cache := NewSecretManifestCache(client) + + err := cache.Delete(ctx, key) + require.NoError(t, err) + + var secret corev1.Secret + err = client.Get(ctx, key, &secret) + require.True(t, errors.IsNotFound(err), fmt.Sprintf("got error: %v", err)) + }) + + t.Run("delete error", func(t *testing.T) { + scheme := runtime.NewScheme() + // apiextensionscheme does not contains v1.Secret scheme + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + cache := NewSecretManifestCache(client) + + err := cache.Delete(ctx, key) + require.Error(t, err) + }) + + t.Run("do nothing when cache is not found", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + + cache := NewSecretManifestCache(client) + + err := cache.Delete(ctx, key) + require.NoError(t, err) + }) +} + +func TestManifestCache_Get(t *testing.T) { + t.Run("get secret value", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + fixSecretCache(t, key, DockerRegistrySpecManifest{ + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + Manifest: "schmetterling", + }), + ).Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.NoError(t, err) + + expectedResult := DockerRegistrySpecManifest{ + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + Manifest: "schmetterling", + } + require.Equal(t, expectedResult, result) + }) + + t.Run("client error", func(t *testing.T) { + scheme := runtime.NewScheme() + // apiextensionscheme does not contains v1.Secret 
scheme + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.Error(t, err) + require.Equal(t, emptySpecManifest, result) + }) + + t.Run("secret not found", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.NoError(t, err) + require.Equal(t, emptySpecManifest, result) + }) + + t.Run("conversion error", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Data: map[string][]byte{ + "spec": []byte("{UNEXPECTED}"), + }, + }).Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.Error(t, err) + require.Equal(t, emptySpecManifest, result) + }) +} + +func TestManifestCache_Set(t *testing.T) { + t.Run("create secret", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + + cache := NewSecretManifestCache(client) + expectedSpec := DockerRegistrySpecManifest{ + Manifest: "schmetterling", + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + } + + err := cache.Set(ctx, key, expectedSpec) + require.NoError(t, err) + + var secret corev1.Secret + require.NoError(t, client.Get(ctx, key, &secret)) + + actualSpec := DockerRegistrySpecManifest{} + err = json.Unmarshal(secret.Data["spec"], &actualSpec) + require.NoError(t, err) + + require.Equal(t, expectedSpec, actualSpec) + }) + + t.Run("update secret", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + fixSecretCache(t, key, emptySpecManifest), + ).Build() + + cache := NewSecretManifestCache(client) + expectedSpec := DockerRegistrySpecManifest{ + Manifest: "schmetterling", + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + } + err := cache.Set(ctx, key, expectedSpec) + require.NoError(t, err) + + var secret corev1.Secret + require.NoError(t, client.Get(ctx, key, &secret)) + + actualSpec := DockerRegistrySpecManifest{} + err = json.Unmarshal(secret.Data["spec"], &actualSpec) + require.NoError(t, err) + + require.Equal(t, expectedSpec, actualSpec) + }) + + t.Run("marshal error", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + wrongFlags := map[string]interface{}{ + "flag1": func() {}, + } + + cache := NewSecretManifestCache(client) + + err := cache.Set(ctx, key, DockerRegistrySpecManifest{ + Manifest: "", + CustomFlags: wrongFlags, + }) + require.Error(t, err) + }) +} + +func fixSecretCache(t *testing.T, key types.NamespacedName, spec DockerRegistrySpecManifest) *corev1.Secret { + byteSpec, err := json.Marshal(&spec) + 
require.NoError(t, err) + + return &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Data: map[string][]byte{ + "spec": byteSpec, + }, + } +} diff --git a/components/operator/internal/chart/chart.go b/components/operator/internal/chart/chart.go new file mode 100644 index 00000000..330d4d92 --- /dev/null +++ b/components/operator/internal/chart/chart.go @@ -0,0 +1,143 @@ +package chart + +import ( + "context" + "fmt" + "io" + "reflect" + "strings" + + "go.uber.org/zap" + "gopkg.in/yaml.v3" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Config struct { + Ctx context.Context + Log *zap.SugaredLogger + Cache ManifestCache + CacheKey types.NamespacedName + ManagerUID string + Cluster Cluster + Release Release +} + +type Release struct { + ChartPath string + Name string + Namespace string +} + +type Cluster struct { + Client client.Client + Config *rest.Config +} + +func parseManifest(manifest string) ([]unstructured.Unstructured, error) { + results := make([]unstructured.Unstructured, 0) + decoder := yaml.NewDecoder(strings.NewReader(manifest)) + + for { + var obj map[string]interface{} + err := decoder.Decode(&obj) + + if err == io.EOF { + break + } + + if err != nil { + return nil, err + } + + // no obj between separators + if len(obj) == 0 { + continue + } + + u := unstructured.Unstructured{Object: obj} + // some resources need to be applied first (before workloads) + // if this statement gets bigger then extract it to the separated place + if u.GetObjectKind().GroupVersionKind().Kind == "CustomResourceDefinition" || + u.GetObjectKind().GroupVersionKind().Kind == "PriorityClass" { + results = append([]unstructured.Unstructured{u}, results...) 
+ continue + } + results = append(results, u) + } + + return results, nil +} + +func getCachedAndCurrentManifest(config *Config, customFlags map[string]interface{}, renderChartFunc func(config *Config, customFlags map[string]interface{}) (*release.Release, error)) (string, string, error) { + cachedSpecManifest, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return "", "", fmt.Errorf("could not get manifest from cache : %s", err.Error()) + } + + if !shouldRenderAgain(cachedSpecManifest, config, customFlags) { + return cachedSpecManifest.Manifest, cachedSpecManifest.Manifest, nil + } + + currentRelease, err := renderChartFunc(config, customFlags) + if err != nil { + return cachedSpecManifest.Manifest, "", fmt.Errorf("could not render manifest : %s", err.Error()) + } + + return cachedSpecManifest.Manifest, currentRelease.Manifest, nil +} + +func shouldRenderAgain(cachedSpec DockerRegistrySpecManifest, config *Config, customFlags map[string]interface{}) bool { + // cachedSpec is up-to-date only if flags used to render and manager is the same one who rendered it before + equalFlags := reflect.DeepEqual(cachedSpec.CustomFlags, customFlags) + return !(cachedSpec.ManagerUID == config.ManagerUID && equalFlags) +} + +func renderChart(config *Config, customFlags map[string]interface{}) (*release.Release, error) { + chart, err := loader.Load(config.Release.ChartPath) + if err != nil { + return nil, fmt.Errorf("while loading chart from path '%s': %s", config.Release.ChartPath, err.Error()) + } + + installAction := newInstallAction(config) + + rel, err := installAction.Run(chart, customFlags) + if err != nil { + return nil, fmt.Errorf("while templating chart: %s", err.Error()) + } + + return rel, nil +} + +func newInstallAction(config *Config) *action.Install { + helmRESTGetter := &clientGetter{ + config: config.Cluster.Config, + } + + helmClient := kube.New(helmRESTGetter) + helmClient.Log = config.Log.Debugf + + actionConfig := new(action.Configuration) + actionConfig.KubeClient = helmClient + actionConfig.Log = helmClient.Log + + actionConfig.Releases = storage.Init(driver.NewMemory()) + actionConfig.RESTClientGetter = helmRESTGetter + + action := action.NewInstall(actionConfig) + action.ReleaseName = config.Release.Name + action.Namespace = config.Release.Namespace + action.Replace = true + action.IsUpgrade = true + action.DryRun = true + + return action +} diff --git a/components/operator/internal/chart/chart_test.go b/components/operator/internal/chart/chart_test.go new file mode 100644 index 00000000..0d155da6 --- /dev/null +++ b/components/operator/internal/chart/chart_test.go @@ -0,0 +1,94 @@ +package chart + +import ( + "context" + "testing" + + "helm.sh/helm/v3/pkg/release" + "k8s.io/apimachinery/pkg/types" +) + +func Test_getOrRenderManifestWithRenderer(t *testing.T) { + noCRDManifestKey := types.NamespacedName{ + Name: "no", Namespace: "crd", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), noCRDManifestKey, + DockerRegistrySpecManifest{Manifest: testDeploy}) + + type args struct { + config *Config + customFlags map[string]interface{} + renderChartFunc func(config *Config, customFlags map[string]interface{}) (*release.Release, error) + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "return manifest when flags and managerUID are not changed", + args: args{ + config: &Config{ + Ctx: context.Background(), + Cache: cache, + CacheKey: noCRDManifestKey, + }, + }, + want: testDeploy, + 
wantErr: false, + }, + { + name: "render manifest when flags are changed", + args: args{ + renderChartFunc: fixManifestRenderFunc("test-new-manifest"), + customFlags: map[string]interface{}{ + "flag1": "val1", + }, + config: &Config{ + Ctx: context.Background(), + Cache: cache, + CacheKey: noCRDManifestKey, + }, + }, + want: "test-new-manifest", + wantErr: false, + }, + { + name: "render manifest when managerUID is changed", + args: args{ + renderChartFunc: fixManifestRenderFunc("test-new-manifest-2"), + config: &Config{ + Ctx: context.Background(), + Cache: cache, + CacheKey: noCRDManifestKey, + ManagerUID: "new-UID", + }, + }, + want: "test-new-manifest-2", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, gotCurrent, err := getCachedAndCurrentManifest(tt.args.config, tt.args.customFlags, tt.args.renderChartFunc) + if (err != nil) != tt.wantErr { + t.Errorf("getCachedAndCurrentManifest() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotCurrent != tt.want { + t.Errorf("getCachedAndCurrentManifest() = %v, want %v", gotCurrent, tt.want) + } + }) + } +} + +func fixManifestRenderFunc(manifest string) func(config *Config, customFlags map[string]interface{}) (*release.Release, error) { + return func(config *Config, customFlags map[string]interface{}) (*release.Release, error) { + return &release.Release{ + Manifest: manifest, + }, nil + } +} diff --git a/components/operator/internal/chart/check.go b/components/operator/internal/chart/check.go new file mode 100644 index 00000000..77436174 --- /dev/null +++ b/components/operator/internal/chart/check.go @@ -0,0 +1,94 @@ +package chart + +import ( + "fmt" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func CheckCRDOrphanResources(config *Config) error { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return fmt.Errorf("could not render manifest from chart: %s", err.Error()) + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + for _, obj := range objs { + // continue if obj is not crd + if !isCRD(obj) { + continue + } + + // check if crd exist on the cluster + objCopy := unstructured.Unstructured{Object: obj.Object} + err := config.Cluster.Client.Get(config.Ctx, types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + }, &objCopy) + if errors.IsNotFound(err) { + continue + } + if err != nil { + return err + } + + // check if CRs exist on the cluster + crList, err := buildResourceListFromCRD(obj) + if err != nil { + return err + } + + err = config.Cluster.Client.List(config.Ctx, &crList) + if client.IgnoreNotFound(err) != nil { + return err + } + + if len(crList.Items) > 0 { + return fmt.Errorf("found %d items with VersionKind %s", len(crList.Items), crList.GetAPIVersion()) + } + } + + return nil +} + +func isCRD(u unstructured.Unstructured) bool { + return u.GroupVersionKind().GroupKind() == apiextensionsv1.Kind("CustomResourceDefinition") +} + +func buildResourceListFromCRD(u unstructured.Unstructured) (unstructured.UnstructuredList, error) { + crd := apiextensionsv1.CustomResourceDefinition{} + crdList := unstructured.UnstructuredList{} + + err := 
runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &crd) + if err != nil { + return crdList, err + } + + crdList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: crd.Spec.Group, + Version: getCRDStoredVersion(crd), + Kind: crd.Spec.Names.Kind, + }) + + return crdList, nil +} + +func getCRDStoredVersion(crd apiextensionsv1.CustomResourceDefinition) string { + for _, version := range crd.Spec.Versions { + if version.Storage { + return version.Name + } + } + + return "" +} diff --git a/components/operator/internal/chart/check_test.go b/components/operator/internal/chart/check_test.go new file mode 100644 index 00000000..6b0e10d9 --- /dev/null +++ b/components/operator/internal/chart/check_test.go @@ -0,0 +1,179 @@ +package chart + +import ( + "context" + "fmt" + "github.com/stretchr/testify/require" + "testing" + + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + testOrphanCR = ` +apiVersion: test.group/v1alpha2 +kind: TestKind +metadata: + name: test-deploy + namespace: default +` +) + +var ( + testOrphanObj = unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "test.group/v1alpha2", + "kind": "TestKind", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "namespace", + }, + }, + } +) + +func TestCheckCRDOrphanResources(t *testing.T) { + noCRDManifestKey := types.NamespacedName{ + Name: "no", Namespace: "crd", + } + noOrphanManifestKey := types.NamespacedName{ + Name: "no", Namespace: "orphan", + } + oneOrphanManifestKey := types.NamespacedName{ + Name: "one", Namespace: "orphan", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), noCRDManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testDeploy)}) + _ = cache.Set(context.Background(), noOrphanManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), oneOrphanManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testOrphanCR)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: ""}) + _ = cache.Set(context.Background(), wrongManifestKey, + DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + type args struct { + config *Config + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + }, + }, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + wantErr: true, + }, + { + name: "no CRDs in manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: noCRDManifestKey, + }, + }, + wantErr: false, + }, + { + name: "no orphan for CRD", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: noOrphanManifestKey, + Ctx: context.Background(), + Cluster: Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(apiextensionsscheme.Scheme). 
+ WithObjects(testCRDObj). + Build(), + }, + }, + }, + wantErr: false, + }, + { + name: "one orphan for CRD", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: oneOrphanManifestKey, + Ctx: context.Background(), + Cluster: Cluster{ + Client: func() client.Client { + scheme := runtime.NewScheme() + scheme.AddKnownTypes(schema.GroupVersion{ + Group: "test.group", + Version: "v1alpha2", + }, &testOrphanObj) + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + c := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(&testOrphanObj). + WithObjects(testCRDObj). + Build() + return c + }(), + }, + }, + }, + wantErr: true, + }, + { + name: "missing CRD on cluster", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: oneOrphanManifestKey, + Ctx: context.Background(), + Cluster: Cluster{ + Client: func() client.Client { + scheme := runtime.NewScheme() + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + return c + }(), + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := CheckCRDOrphanResources(tt.args.config); (err != nil) != tt.wantErr { + t.Errorf("CheckCRDOrphanResources() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/components/operator/internal/chart/client_getter.go b/components/operator/internal/chart/client_getter.go new file mode 100644 index 00000000..969ae132 --- /dev/null +++ b/components/operator/internal/chart/client_getter.go @@ -0,0 +1,44 @@ +package chart + +import ( + "helm.sh/helm/v3/pkg/action" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" +) + +var _ action.RESTClientGetter = &clientGetter{} + +type clientGetter struct { + config *rest.Config +} + +func (cg *clientGetter) ToRESTConfig() (*rest.Config, error) { + return cg.config, nil +} + +func (cg *clientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + discoveryClient, _ := discovery.NewDiscoveryClientForConfig(cg.config) + return memory.NewMemCacheClient(discoveryClient), nil +} + +func (cg *clientGetter) ToRESTMapper() (meta.RESTMapper, error) { + discoveryClient, err := cg.ToDiscoveryClient() + if err != nil { + return nil, err + } + + mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient, nil) + return expander, nil +} + +func (cg *clientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) +} diff --git a/components/operator/internal/chart/flags.go b/components/operator/internal/chart/flags.go new file mode 100644 index 00000000..02b6d9bc --- /dev/null +++ b/components/operator/internal/chart/flags.go @@ -0,0 +1,94 @@ +package chart + +import ( + "fmt" + "strings" +) + +type FlagsBuilder interface { + Build() map[string]interface{} + WithControllerConfiguration(healthzLivenessTimeout string) *flagsBuilder + WithRegistryCredentials(username string, password string) *flagsBuilder + WithRegistryHttpSecret(httpSecret string) *flagsBuilder + 
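// CheckCRDOrphanResources above can serve as a guard in front of uninstallation: if a
// CRD shipped by the chart still has custom resources on the cluster, it returns an
// error so those CRs are not silently orphaned. A usage sketch (deleteModuleSketch is
// a placeholder name, not part of this patch):
//
//	func deleteModuleSketch(cfg *Config) error {
//		if err := CheckCRDOrphanResources(cfg); err != nil {
//			return err // CRs of a chart-owned CRD still exist; keep the module installed
//		}
//		return Uninstall(cfg)
//	}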
WithNodePort(nodePort int64) *flagsBuilder +} + +type flagsBuilder struct { + flags map[string]interface{} +} + +func NewFlagsBuilder() FlagsBuilder { + return &flagsBuilder{ + flags: map[string]interface{}{}, + } +} + +func (fb *flagsBuilder) Build() map[string]interface{} { + flags := map[string]interface{}{} + for key, value := range fb.flags { + flagPath := strings.Split(key, ".") + appendFlag(flags, flagPath, value) + } + return flags +} + +func appendFlag(flags map[string]interface{}, flagPath []string, value interface{}) { + currentFlag := flags + for i, pathPart := range flagPath { + createIfEmpty(currentFlag, pathPart) + if lastElement(flagPath, i) { + currentFlag[pathPart] = value + } else { + currentFlag = nextDeeperFlag(currentFlag, pathPart) + } + } +} + +func createIfEmpty(flags map[string]interface{}, key string) { + if _, ok := flags[key]; !ok { + flags[key] = map[string]interface{}{} + } +} + +func lastElement(values []string, i int) bool { + return i == len(values)-1 +} + +func nextDeeperFlag(currentFlag map[string]interface{}, path string) map[string]interface{} { + return currentFlag[path].(map[string]interface{}) +} + +func (fb *flagsBuilder) WithControllerConfiguration(healthzLivenessTimeout string) *flagsBuilder { + optionalFlags := []struct { + key string + value string + }{ + {"healthzLivenessTimeout", healthzLivenessTimeout}, + } + + for _, flag := range optionalFlags { + if flag.value != "" { + fullPath := fmt.Sprintf("containers.manager.configuration.data.%s", flag.key) + fb.flags[fullPath] = flag.value + } + } + + return fb +} + +func (fb *flagsBuilder) WithRegistryCredentials(username, password string) *flagsBuilder { + fb.flags["dockerRegistry.username"] = username + fb.flags["dockerRegistry.password"] = password + return fb +} + +func (fb *flagsBuilder) WithRegistryHttpSecret(httpSecret string) *flagsBuilder { + fb.flags["docker-registry.rollme"] = "dontrollplease" + fb.flags["docker-registry.registryHTTPSecret"] = httpSecret + return fb +} + +func (fb *flagsBuilder) WithNodePort(nodePort int64) *flagsBuilder { + fb.flags["global.registryNodePort"] = nodePort + return fb +} diff --git a/components/operator/internal/chart/flags_test.go b/components/operator/internal/chart/flags_test.go new file mode 100644 index 00000000..bb8f5599 --- /dev/null +++ b/components/operator/internal/chart/flags_test.go @@ -0,0 +1,85 @@ +package chart + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_flagsBuilder_Build(t *testing.T) { + t.Run("build empty flags", func(t *testing.T) { + flags := NewFlagsBuilder().Build() + require.Equal(t, map[string]interface{}{}, flags) + }) + + t.Run("build flags", func(t *testing.T) { + expectedFlags := map[string]interface{}{ + "containers": map[string]interface{}{ + "manager": map[string]interface{}{ + "configuration": map[string]interface{}{ + "data": map[string]interface{}{ + "healthzLivenessTimeout": "testHealthzLivenessTimeout", + }, + }, + }, + }, + "docker-registry": map[string]interface{}{ + "registryHTTPSecret": "testHttpSecret", + "rollme": "dontrollplease", + }, + "dockerRegistry": map[string]interface{}{ + "password": "testPassword", + "username": "testUsername", + }, + "global": map[string]interface{}{ + "registryNodePort": int64(1234), + }, + } + + flags := NewFlagsBuilder(). + WithNodePort(1234). + WithRegistryCredentials("testUsername", "testPassword"). + WithRegistryHttpSecret("testHttpSecret"). 
+ WithControllerConfiguration( + "testHealthzLivenessTimeout", + ).Build() + + require.Equal(t, expectedFlags, flags) + }) + + t.Run("build registry flags only", func(t *testing.T) { + expectedFlags := map[string]interface{}{ + "dockerRegistry": map[string]interface{}{ + "password": "testPassword", + "username": "testUsername", + }, + } + + flags := NewFlagsBuilder(). + WithRegistryCredentials("testUsername", "testPassword"). + Build() + + require.Equal(t, expectedFlags, flags) + }) + + t.Run("build not empty controller configuration flags only", func(t *testing.T) { + expectedFlags := map[string]interface{}{ + "containers": map[string]interface{}{ + "manager": map[string]interface{}{ + "configuration": map[string]interface{}{ + "data": map[string]interface{}{ + "healthzLivenessTimeout": "testHealthzLivenessTimeout", + }, + }, + }, + }, + } + + flags := NewFlagsBuilder(). + WithControllerConfiguration( + "testHealthzLivenessTimeout", + ).Build() + + require.Equal(t, expectedFlags, flags) + }) +} diff --git a/components/operator/internal/chart/install.go b/components/operator/internal/chart/install.go new file mode 100644 index 00000000..ee396dd4 --- /dev/null +++ b/components/operator/internal/chart/install.go @@ -0,0 +1,102 @@ +package chart + +import ( + "fmt" + + "github.com/kyma-project/docker-registry/components/operator/internal/annotation" + "github.com/pkg/errors" + "helm.sh/helm/v3/pkg/release" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func Install(config *Config, customFlags map[string]interface{}) error { + return install(config, customFlags, renderChart) +} + +func install(config *Config, customFlags map[string]interface{}, renderChartFunc func(config *Config, customFlags map[string]interface{}) (*release.Release, error)) error { + cachedManifest, currentManifest, err := getCachedAndCurrentManifest(config, customFlags, renderChartFunc) + if err != nil { + return err + } + + objs, unusedObjs, err := getObjectsToInstallAndRemove(cachedManifest, currentManifest) + if err != nil { + return err + } + + err = updateObjects(config, objs) + if err != nil { + return err + } + + err = uninstallObjects(config, unusedObjs) + if err != nil { + return err + } + + return config.Cache.Set(config.Ctx, config.CacheKey, DockerRegistrySpecManifest{ + ManagerUID: config.ManagerUID, + CustomFlags: customFlags, + Manifest: currentManifest, + }) +} + +func getObjectsToInstallAndRemove(cachedManifest string, currentManifest string) ([]unstructured.Unstructured, []unstructured.Unstructured, error) { + objs, err := parseManifest(currentManifest) + if err != nil { + return nil, nil, fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + oldObjs, err := parseManifest(cachedManifest) + if err != nil { + return nil, nil, fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + unusedObjs := unusedOldObjects(oldObjs, objs) + return objs, unusedObjs, nil +} + +func updateObjects(config *Config, objs []unstructured.Unstructured) error { + for i := range objs { + u := objs[i] + config.Log.Debugf("creating %s %s/%s", u.GetKind(), u.GetNamespace(), u.GetName()) + + u = annotation.AddDoNotEditDisclaimer(u) + if IsPVC(u.GroupVersionKind()) { + modifiedObj, err := AdjustDockerRegToClusterPVCSize(config.Ctx, config.Cluster.Client, u) + if err != nil { + return errors.Wrap(err, "while adjusting pvc size") + } + u = modifiedObj + } + + // TODO: what if Path returns error in the middle of manifest? 
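// The Patch call below is server-side apply: client.Apply together with Force and a
// dedicated FieldManager means every reconciliation submits the full desired object
// and lets the API server merge fields and resolve ownership, so no separate
// create-vs-update branch is needed. Roughly the CLI equivalent would be
// (rendered-manifest.yaml is a placeholder file name):
//
//	kubectl apply --server-side --force-conflicts \
//		--field-manager=dockerregistry-operator -f rendered-manifest.yaml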
+ // maybe we should in this case translate applied objs into manifest and set it into cache? + err := config.Cluster.Client.Patch(config.Ctx, &u, client.Apply, &client.PatchOptions{ + Force: ptr.To[bool](true), + FieldManager: "dockerregistry-operator", + }) + if err != nil { + return fmt.Errorf("could not install object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()) + } + } + return nil +} + +func unusedOldObjects(previousObjs []unstructured.Unstructured, currentObjs []unstructured.Unstructured) []unstructured.Unstructured { + currentNames := make(map[string]struct{}, len(currentObjs)) + for _, obj := range currentObjs { + objFullName := fmt.Sprintf("%s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) + currentNames[objFullName] = struct{}{} + } + result := []unstructured.Unstructured{} + for _, obj := range previousObjs { + objFullName := fmt.Sprintf("%s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) + if _, found := currentNames[objFullName]; !found { + result = append(result, obj) + } + } + return result +} diff --git a/components/operator/internal/chart/install_test.go b/components/operator/internal/chart/install_test.go new file mode 100644 index 00000000..377def8a --- /dev/null +++ b/components/operator/internal/chart/install_test.go @@ -0,0 +1,247 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + separator = `---` + testCRD = ` +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: test-crd +spec: + group: test.group + names: + kind: TestKind + versions: + - storage: false + name: v1alpha1 + - storage: true + name: v1alpha2 +` + testDeploy = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deploy + namespace: default +` + testServiceAccount = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-service-account + namespace: test-namespace + labels: + label-key: 'label-val' +` +) + +var ( + testDeployCR = &appsv1.Deployment{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-deploy", + Namespace: "default", + }, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionStatus(v1.ConditionTrue), + }, + }, + }, + } + testCRDObj = &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-crd", + }, + } +) + +func Test_install_delete(t *testing.T) { + t.Run("should delete all unused resources", func(t *testing.T) { + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + client := fake.NewClientBuilder().WithObjects(testDeployCR).WithObjects(testCRDObj).Build() + customFlags := map[string]interface{}{ + "flag1": "val1", + } + config := &Config{ + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: client, + }, + Log: zap.NewNop().Sugar(), + } + err := install(config, 
customFlags, fixManifestRenderFunc("")) + require.NoError(t, err) + + deploymentList := appsv1.DeploymentList{} + err = client.List(context.Background(), &deploymentList) + require.NoError(t, err) + require.Empty(t, deploymentList.Items) + + crdList := apiextensionsv1.CustomResourceDefinitionList{} + err = client.List(context.Background(), &crdList) + require.NoError(t, err) + require.Empty(t, crdList.Items) + }) +} + +func Test_install(t *testing.T) { + log := zap.NewNop().Sugar() + + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: ""}) + _ = cache.Set(context.Background(), wrongManifestKey, + DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + type args struct { + config *Config + customFlags map[string]interface{} + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + }, + }, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + wantErr: true, + }, + { + name: "installation error", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().WithScheme(apiextensionsscheme.Scheme).Build(), + }, + }, + }, + wantErr: true, + }, + // we can't simply test succeded installation here because it uses + // tha Patch method which is not fully supported by the fake client. 
This case is tested in controllers pkg + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Install(tt.args.config, tt.args.customFlags); (err != nil) != tt.wantErr { + t.Errorf("install() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_unusedOldObjects(t *testing.T) { + firstManifest := fmt.Sprint(testCRD, separator, testDeploy) + firstObjs, _ := parseManifest(firstManifest) + differentManifest := fmt.Sprint(testServiceAccount) + differentObjs, _ := parseManifest(differentManifest) + withCommonPartManifest := fmt.Sprint(testServiceAccount, separator, testDeploy) + withCommonPartObjs, _ := parseManifest(withCommonPartManifest) + firstWithoutCommonPartManifest := fmt.Sprint(testCRD) + firstWithoutCommonPartObjs, _ := parseManifest(firstWithoutCommonPartManifest) + + type args struct { + old []unstructured.Unstructured + new []unstructured.Unstructured + } + tests := []struct { + name string + args args + want []unstructured.Unstructured + }{ + { + name: "empty minus empty should be empty", + args: args{ + old: []unstructured.Unstructured{}, + new: []unstructured.Unstructured{}, + }, + want: []unstructured.Unstructured{}, + }, + { + name: "list minus empty should return the same list", + args: args{ + old: firstObjs, + new: []unstructured.Unstructured{}, + }, + want: firstObjs, + }, + { + name: "list minus list with different elements should return first list", + args: args{ + old: firstObjs, + new: differentObjs, + }, + want: firstObjs, + }, + { + name: "list minus list with common part should return first list without common part", + args: args{ + old: firstObjs, + new: withCommonPartObjs, + }, + want: firstWithoutCommonPartObjs, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := unusedOldObjects(tt.args.old, tt.args.new) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/components/operator/internal/chart/pvc.go b/components/operator/internal/chart/pvc.go new file mode 100644 index 00000000..f178c9d8 --- /dev/null +++ b/components/operator/internal/chart/pvc.go @@ -0,0 +1,64 @@ +package chart + +import ( + "context" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + dockerRegistryPVCName = "internal-docker-registry" + pvcKind = "PersistentVolumeClaim" + pvcVersion = "v1" + pvcGroup = "" +) + +func AdjustDockerRegToClusterPVCSize(ctx context.Context, c client.Client, obj unstructured.Unstructured) (unstructured.Unstructured, error) { + if obj.GetName() != dockerRegistryPVCName { + return obj, nil + } + clusterPVC := corev1.PersistentVolumeClaim{} + objKey := client.ObjectKey{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } + if err := c.Get(ctx, objKey, &clusterPVC); err != nil { + if k8serrors.IsNotFound(err) { + return obj, nil + } + return obj, errors.Wrap(err, "while getting pvc from cluster") + } + objPVC := corev1.PersistentVolumeClaim{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &objPVC); err != nil { + return obj, errors.Wrap(err, "while converting unstructured to pvc") + } + storage := clusterPVC.Spec.Resources.Requests.Storage() + if storage.Equal(*objPVC.Spec.Resources.Requests.Storage()) { + return obj, nil + } + objPVCcopy := objPVC.DeepCopy() + 
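// Kubernetes does not allow shrinking a PersistentVolumeClaim's storage request, so
// applying the chart's default PVC verbatim would fail whenever the PVC already on
// the cluster is larger. The assignment below therefore carries the cluster's current
// request over into the rendered object, so the operator never resizes an existing
// volume, e.g.:
//
//	cluster PVC requests 30Gi, rendered chart PVC requests 20Gi
//	-> the object is applied with 30Gi, keeping the existing size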
objPVCcopy.Spec.Resources.Requests[corev1.ResourceStorage] = *clusterPVC.Spec.Resources.Requests.Storage() + + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(objPVCcopy) + if err != nil { + return obj, errors.Wrap(err, "while converting copied pvc object to unstructured") + } + + return unstructured.Unstructured{Object: out}, nil +} + +func IsPVC(objKind schema.GroupVersionKind) bool { + expected := schema.GroupVersionKind{ + Group: pvcGroup, + Version: pvcVersion, + Kind: pvcKind, + } + + return expected.Group == objKind.Group && expected.Kind == objKind.Kind && expected.Version == objKind.Version +} diff --git a/components/operator/internal/chart/pvc_test.go b/components/operator/internal/chart/pvc_test.go new file mode 100644 index 00000000..526d5a53 --- /dev/null +++ b/components/operator/internal/chart/pvc_test.go @@ -0,0 +1,133 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestAdjustToClusterSize(t *testing.T) { + testCases := map[string]struct { + rawPVCToInstall *corev1.PersistentVolumeClaim + clusterPVC []client.Object + expectedPVC *corev1.PersistentVolumeClaim + }{ + "pvc not exists in cluster": { + rawPVCToInstall: fixPVC(dockerRegistryPVCName, 20), + expectedPVC: fixPVC(dockerRegistryPVCName, 20), + }, + "pvc is not docker registry": { + rawPVCToInstall: fixPVC("random-pvc", 20), + expectedPVC: fixPVC("random-pvc", 20), + }, + "pvc exists with the same size": { + rawPVCToInstall: fixPVC(dockerRegistryPVCName, 20), + clusterPVC: []client.Object{fixPVC(dockerRegistryPVCName, 20)}, + expectedPVC: fixPVC(dockerRegistryPVCName, 20), + }, + "pvc exists with bigger size": { + rawPVCToInstall: fixPVC(dockerRegistryPVCName, 20), + clusterPVC: []client.Object{fixPVC(dockerRegistryPVCName, 30)}, + expectedPVC: fixPVC(dockerRegistryPVCName, 30), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + //GIVEN + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(testCase.rawPVCToInstall) + require.NoError(t, err) + obj := unstructured.Unstructured{Object: out} + + c := fake.NewClientBuilder().WithObjects(testCase.clusterPVC...).Build() + + //WHEN + finalObj, err := AdjustDockerRegToClusterPVCSize(context.TODO(), c, obj) + + //THEN + require.NoError(t, err) + + expected, err := runtime.DefaultUnstructuredConverter.ToUnstructured(testCase.expectedPVC) + + require.NoError(t, err) + require.EqualValues(t, expected, finalObj.Object) + }) + } +} + +func fixPVC(name string, size int) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "kyma-system", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(fmt.Sprintf("%dGi", size)), + }, + }, + }, + } +} + +func TestIsPVC(t *testing.T) { + testCases := map[string]struct { + input schema.GroupVersionKind + expected bool + }{ + "Equal": { + input: schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PersistentVolumeClaim", + }, + expected: true, + }, + "Different kind": { + 
input: schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }, + expected: false, + }, + "Different version": { + input: schema.GroupVersionKind{ + Group: "", + Version: "v2alpha1", + Kind: "PersistentVolumeClaim", + }, + expected: false, + }, + "Different group": { + input: schema.GroupVersionKind{ + Group: "networking", + Version: "v1", + Kind: "NetworkPolicy", + }, + expected: false, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + //GIVEN + + //WHEN + equal := IsPVC(testCase.input) + //THEN + require.Equal(t, testCase.expected, equal) + }) + } +} diff --git a/components/operator/internal/chart/uninstall.go b/components/operator/internal/chart/uninstall.go new file mode 100644 index 00000000..270694df --- /dev/null +++ b/components/operator/internal/chart/uninstall.go @@ -0,0 +1,160 @@ +package chart + +import ( + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type FilterFunc func(unstructured.Unstructured) bool + +func Uninstall(config *Config, filterFunc ...FilterFunc) error { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return fmt.Errorf("could not render manifest from chart: %s", err.Error()) + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + err2 := uninstallObjects(config, objs, filterFunc...) + if err2 != nil { + return err2 + } + + err3 := uninstallOrphanedResources(config) + if err3 != nil { + return err3 + } + + return config.Cache.Delete(config.Ctx, config.CacheKey) +} + +func uninstallObjects(config *Config, objs []unstructured.Unstructured, filterFunc ...FilterFunc) error { + for i := range objs { + u := objs[i] + if !fitToFilters(u, filterFunc...) { + continue + } + + config.Log.Debugf("deleting %s %s", u.GetKind(), u.GetName()) + err := config.Cluster.Client.Delete(config.Ctx, &u) + if k8serrors.IsNotFound(err) { + config.Log.Debugf("deletion skipped for %s %s", u.GetKind(), u.GetName()) + continue + } + if err != nil { + return fmt.Errorf("could not uninstall object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()) + } + } + return nil +} + +func UninstallSecrets(config *Config, filterFunc ...FilterFunc) (error, bool) { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return fmt.Errorf("could not render manifest from chart: %s", err.Error()), false + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return fmt.Errorf("could not parse chart manifest: %s", err.Error()), false + } + + err2, done := uninstallSecrets(config, objs, filterFunc...) + if err2 != nil { + return err2, false + } + + return nil, done +} + +func uninstallSecrets(config *Config, objs []unstructured.Unstructured, filterFunc ...FilterFunc) (error, bool) { + done := true + for i := range objs { + u := objs[i] + if !fitToFilters(u, filterFunc...) 
{ + continue + } + if u.GetKind() != "Secret" { + continue + } + + config.Log.Debugf("deleting %s %s", u.GetKind(), u.GetName()) + err := config.Cluster.Client.Delete(config.Ctx, &u) + if k8serrors.IsNotFound(err) { + config.Log.Debugf("deletion skipped for %s %s", u.GetKind(), u.GetName()) + continue + } + if err != nil { + return fmt.Errorf("could not uninstall object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()), false + } + done = false + } + return nil, done +} + +func WithoutCRDFilter(u unstructured.Unstructured) bool { + return !isCRD(u) +} + +func fitToFilters(u unstructured.Unstructured, filterFunc ...FilterFunc) bool { + for _, fn := range filterFunc { + if !fn(u) { + return false + } + } + + return true +} + +func uninstallOrphanedResources(config *Config) error { + //TODO: move this to finalizers logic in controller + var namespaces corev1.NamespaceList + if err := config.Cluster.Client.List(config.Ctx, &namespaces); err != nil { + return errors.Wrap(err, "couldn't get namespaces during Docker Registry uninstallation") + } + + if err := uninstallOrphanedConfigMaps(config, namespaces); err != nil { + return err + } + if err := uninstallOrphanedServiceAccounts(config, namespaces); err != nil { + return err + } + + return nil +} + +func uninstallOrphanedServiceAccounts(config *Config, namespaces corev1.NamespaceList) error { + for _, namespace := range namespaces.Items { + err := config.Cluster.Client.DeleteAllOf(config.Ctx, &corev1.ServiceAccount{}, + client.InNamespace(namespace.GetName()), + client.MatchingLabels{"dockerregistry.kyma-project.io/config": "service-account"}) + if err != nil { + return errors.Wrapf(err, + "couldn't delete ServiceAccount from namespace \"%s\" during DockerRegistry uninstallation", + namespace.GetName()) + } + } + return nil +} + +func uninstallOrphanedConfigMaps(config *Config, namespaces corev1.NamespaceList) error { + for _, namespace := range namespaces.Items { + err := config.Cluster.Client.DeleteAllOf(config.Ctx, &corev1.ConfigMap{}, + client.InNamespace(namespace.GetName()), + client.MatchingLabels{"dockerregistry.kyma-project.io/config": "runtime"}) + if err != nil { + return errors.Wrapf(err, + "couldn't delete ConfigMap from namespace \"%s\" during Docker Registry uninstallation", + namespace.GetName()) + } + } + return nil +} diff --git a/components/operator/internal/chart/uninstall_test.go b/components/operator/internal/chart/uninstall_test.go new file mode 100644 index 00000000..17b3c827 --- /dev/null +++ b/components/operator/internal/chart/uninstall_test.go @@ -0,0 +1,99 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_Uninstall(t *testing.T) { + log := zap.NewNop().Sugar() + + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: ""}) + _ = cache.Set(context.Background(), wrongManifestKey, + 
DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}} + + type args struct { + config *Config + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(&ns). + Build(), + }, + }, + }, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + wantErr: true, + }, + { + name: "installation error", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(&ns). + Build(), + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Uninstall(tt.args.config); (err != nil) != tt.wantErr { + t.Errorf("uninstall() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/components/operator/internal/chart/verify.go b/components/operator/internal/chart/verify.go new file mode 100644 index 00000000..bf92f7c4 --- /dev/null +++ b/components/operator/internal/chart/verify.go @@ -0,0 +1,72 @@ +package chart + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" +) + +func Verify(config *Config) (bool, error) { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return false, fmt.Errorf("could not render manifest from chart: %s", err.Error()) + } + // sometimes cache is not created yet + if len(spec.Manifest) == 0 { + return false, nil + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return false, fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + for i := range objs { + u := objs[i] + + var verifyFunc verifyFunc + switch u.GetKind() { + case "Deployment": + verifyFunc = verifyDeployment + case "DaemonSet": + // TODO: right now we don't support internal docker registry + default: + continue + } + + ready, err := verifyFunc(config, u) + if err != nil { + return false, fmt.Errorf("could not verify object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()) + } + + if !ready { + return false, nil + } + } + + return true, nil +} + +type verifyFunc func(*Config, unstructured.Unstructured) (bool, error) + +func verifyDeployment(config *Config, u unstructured.Unstructured) (bool, error) { + var deployment appsv1.Deployment + err := config.Cluster.Client.Get(config.Ctx, types.NamespacedName{ + Name: u.GetName(), + Namespace: u.GetNamespace(), + }, &deployment) + if err != nil { + return false, err + } + + for _, cond := range deployment.Status.Conditions { + if cond.Type == appsv1.DeploymentAvailable && cond.Status == v1.ConditionTrue { + return true, nil + } + } + + return false, nil +} diff --git a/components/operator/internal/chart/verify_test.go b/components/operator/internal/chart/verify_test.go new file mode 100644 index 00000000..bd54fd37 --- /dev/null +++ b/components/operator/internal/chart/verify_test.go @@ -0,0 +1,146 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testDeployNotReadyCR = &appsv1.Deployment{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-deploy", + Namespace: "default", + }, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionStatus(v1.ConditionFalse), + }, + }, + }, + } +) + +func Test_verify(t *testing.T) { + log := zap.NewNop().Sugar() + + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: "---"}) + _ = cache.Set(context.Background(), wrongManifestKey, + DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + type args struct { + config *Config + } + tests := []struct { + name string + args args + want bool + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + }, + }, + want: true, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + want: false, + wantErr: true, + }, + { + name: "verify", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().WithObjects(testDeployCR).Build(), + }, + }, + }, + want: true, + wantErr: false, + }, + { + name: "obj not ready", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().WithObjects(testDeployNotReadyCR).Build(), + }, + }, + }, + want: false, + wantErr: false, + }, + { + name: "obj not found", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().Build(), + }, + }, + }, + want: false, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := Verify(tt.args.config) + if (err != nil) != tt.wantErr { + t.Errorf("verify() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("verify() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/components/operator/internal/config/config.go b/components/operator/internal/config/config.go new file mode 100644 index 00000000..f298d373 --- /dev/null +++ b/components/operator/internal/config/config.go @@ -0,0 +1,14 @@ +package config + +import "github.com/vrischmann/envconfig" + +type Config struct { + ChartPath string `envconfig:"default=/module-chart"` +} + +func GetConfig(prefix string) (Config, error) { + cfg := Config{} + err := envconfig.InitWithPrefix(&cfg, prefix) + return cfg, err + +} diff --git a/components/operator/internal/controllers/kubernetes/configmap_service.go b/components/operator/internal/controllers/kubernetes/configmap_service.go new file mode 100644 index 00000000..36acf569 --- /dev/null +++ 
b/components/operator/internal/controllers/kubernetes/configmap_service.go @@ -0,0 +1,98 @@ +package kubernetes + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kyma-project/docker-registry/components/operator/internal/resource" +) + +type ConfigMapService interface { + IsBase(configMap *corev1.ConfigMap) bool + ListBase(ctx context.Context) ([]corev1.ConfigMap, error) + UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ConfigMap) error +} + +var _ ConfigMapService = &configMapService{} + +type configMapService struct { + client resource.Client + config Config +} + +func NewConfigMapService(client resource.Client, config Config) ConfigMapService { + return &configMapService{ + client: client, + config: config, + } +} + +func (r *configMapService) ListBase(ctx context.Context) ([]corev1.ConfigMap, error) { + configMaps := corev1.ConfigMapList{} + if err := r.client.ListByLabel(ctx, r.config.BaseNamespace, map[string]string{ConfigLabel: RuntimeLabelValue}, &configMaps); err != nil { + return nil, err + } + + return configMaps.Items, nil +} + +func (r *configMapService) IsBase(configMap *corev1.ConfigMap) bool { + return configMap.Namespace == r.config.BaseNamespace && configMap.Labels[ConfigLabel] == RuntimeLabelValue +} + +func (r *configMapService) UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ConfigMap) error { + logger.Debug(fmt.Sprintf("Updating ConfigMap '%s/%s'", namespace, baseInstance.GetName())) + instance := &corev1.ConfigMap{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstance.GetName()}, instance); err != nil { + if errors.IsNotFound(err) { + return r.createConfigMap(ctx, logger, namespace, baseInstance) + } + logger.Error(err, fmt.Sprintf("Gathering existing ConfigMap '%s/%s' failed", namespace, baseInstance.GetName())) + return err + } + + return r.updateConfigMap(ctx, logger, instance, baseInstance) +} + +func (r *configMapService) createConfigMap(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ConfigMap) error { + configMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseInstance.GetName(), + Namespace: namespace, + Labels: baseInstance.Labels, + Annotations: baseInstance.Annotations, + }, + Data: baseInstance.Data, + BinaryData: baseInstance.BinaryData, + } + + logger.Debug(fmt.Sprintf("Creating ConfigMap '%s/%s'", configMap.GetNamespace(), configMap.GetName())) + if err := r.client.Create(ctx, &configMap); err != nil { + logger.Error(err, fmt.Sprintf("Creating ConfigMap '%s/%s' failed", configMap.GetNamespace(), configMap.GetName())) + return err + } + + return nil +} + +func (r *configMapService) updateConfigMap(ctx context.Context, logger *zap.SugaredLogger, instance, baseInstance *corev1.ConfigMap) error { + copy := instance.DeepCopy() + copy.Annotations = baseInstance.GetAnnotations() + copy.Labels = baseInstance.GetLabels() + copy.Data = baseInstance.Data + copy.BinaryData = baseInstance.BinaryData + + if err := r.client.Update(ctx, copy); err != nil { + logger.Error(err, fmt.Sprintf("Updating ConfigMap '%s/%s' failed", copy.GetNamespace(), copy.GetName())) + return err + } + + return nil +} diff --git a/components/operator/internal/controllers/kubernetes/namespace_controller.go 
b/components/operator/internal/controllers/kubernetes/namespace_controller.go new file mode 100644 index 00000000..2a53941f --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/namespace_controller.go @@ -0,0 +1,116 @@ +package kubernetes + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +type NamespaceReconciler struct { + Log *zap.SugaredLogger + client client.Client + config Config + configMapSvc ConfigMapService + secretSvc SecretService + serviceAccountSvc ServiceAccountService +} + +func NewNamespace(client client.Client, log *zap.SugaredLogger, config Config, + configMapSvc ConfigMapService, secretSvc SecretService, serviceAccountSvc ServiceAccountService) *NamespaceReconciler { + return &NamespaceReconciler{ + client: client, + Log: log, + config: config, + configMapSvc: configMapSvc, + secretSvc: secretSvc, + serviceAccountSvc: serviceAccountSvc, + } +} + +func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("namespace-controller"). + For(&corev1.Namespace{}). + WithEventFilter(r.predicate()). + Complete(r) +} + +func (r *NamespaceReconciler) predicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + namespace, ok := e.Object.(*corev1.Namespace) + if !ok { + return false + } + return !isExcludedNamespace(namespace.Name, r.config.BaseNamespace, r.config.ExcludedNamespaces) + }, + GenericFunc: func(genericEvent event.GenericEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } +} + +// Reconcile reads that state of the cluster for a Namespace object and updates other resources based on it +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=configmaps;secrets;serviceaccounts,verbs=get;list;watch;create;update;patch;delete + +func (r *NamespaceReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { + instance := &corev1.Namespace{} + if err := r.client.Get(ctx, request.NamespacedName, instance); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + logger := r.Log.With("name", instance.GetName()) + + logger.Debug(fmt.Sprintf("Updating ConfigMaps in namespace '%s'", instance.GetName())) + configMaps, err := r.configMapSvc.ListBase(ctx) + if err != nil { + logger.Error(err, "Listing base ConfigMaps failed") + return ctrl.Result{}, err + } + for _, configMap := range configMaps { + c := configMap + if err := r.configMapSvc.UpdateNamespace(ctx, logger, instance.GetName(), &c); err != nil { + return ctrl.Result{}, err + } + } + + logger.Debug(fmt.Sprintf("Updating Secret in namespace '%s'", instance.GetName())) + secret, err := r.secretSvc.GetBase(ctx) + if err != nil { + logger.Error(err, "Listing base Secrets failed") + return ctrl.Result{}, err + } + + if err := r.secretSvc.UpdateNamespace(ctx, logger, instance.GetName(), secret); err != nil { + return ctrl.Result{}, err + } + + logger.Debug(fmt.Sprintf("Updating ServiceAccounts in namespace '%s'", instance.GetName())) + serviceAccounts, err := r.serviceAccountSvc.ListBase(ctx) + if err != nil { + logger.Error(err, "Listing base ServiceAccounts failed") + 
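// This controller reacts only to namespace creation (see the predicate above) and
// copies the operator's base resources into the new namespace: runtime ConfigMaps,
// the registry credentials Secret, and the labelled ServiceAccounts, so a newly
// created namespace gets the registry configuration without extra setup. A wiring
// sketch, with names assumed rather than taken from this hunk:
//
//	r := kubernetes.NewNamespace(mgr.GetClient(), logger, cfg,
//		configMapSvc, secretSvc, serviceAccountSvc)
//	if err := r.SetupWithManager(mgr); err != nil {
//		return err
//	}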
return ctrl.Result{}, err + } + for _, serviceAccount := range serviceAccounts { + sa := serviceAccount + if err := r.serviceAccountSvc.UpdateNamespace(ctx, logger, instance.GetName(), &sa); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} diff --git a/components/operator/internal/controllers/kubernetes/secret_controller.go b/components/operator/internal/controllers/kubernetes/secret_controller.go new file mode 100644 index 00000000..006eddfd --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/secret_controller.go @@ -0,0 +1,103 @@ +package kubernetes + +import ( + "context" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +type SecretReconciler struct { + Log *zap.SugaredLogger + client client.Client + config Config + svc SecretService +} + +func NewSecret(client client.Client, log *zap.SugaredLogger, config Config, secretSvc SecretService) *SecretReconciler { + return &SecretReconciler{ + client: client, + Log: log, + config: config, + svc: secretSvc, + } +} + +func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("secret-controller"). + For(&corev1.Secret{}). + WithEventFilter(r.predicate()). + Complete(r) +} + +func (r *SecretReconciler) predicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + runtime, ok := e.Object.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + runtime, ok := e.ObjectNew.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + GenericFunc: func(e event.GenericEvent) bool { + runtime, ok := e.Object.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + runtime, ok := e.Object.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + } +} + +// Reconcile reads that state of the cluster for a Secret object and makes changes based +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch + +func (r *SecretReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { + instance := &corev1.Secret{} + if err := r.client.Get(ctx, request.NamespacedName, instance); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + logger := r.Log.With("namespace", instance.GetNamespace(), "name", instance.GetName()) + + namespaces, err := getNamespaces(ctx, r.client, r.config.BaseNamespace, r.config.ExcludedNamespaces) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.svc.HandleFinalizer(ctx, logger, instance, namespaces); err != nil { + return ctrl.Result{}, err + } + if !instance.ObjectMeta.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + for _, namespace := range namespaces { + if err = r.svc.UpdateNamespace(ctx, logger, namespace, instance); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{RequeueAfter: r.config.SecretRequeueDuration}, nil +} diff --git a/components/operator/internal/controllers/kubernetes/secret_service.go b/components/operator/internal/controllers/kubernetes/secret_service.go new file 
mode 100644 index 00000000..af4e347d --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/secret_service.go @@ -0,0 +1,175 @@ +package kubernetes + +import ( + "context" + "fmt" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kyma-project/docker-registry/components/operator/internal/resource" +) + +const ( + FunctionManagedByLabel = "dockerregistry.kyma-project.io/managed-by" + cfgSecretFinalizerName = "dockerregistry.kyma-project.io/finalizer-registry-config" + FunctionResourceLabelUserValue = "user" +) + +type SecretService interface { + IsBase(secret *corev1.Secret) bool + GetBase(ctx context.Context) (*corev1.Secret, error) + UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.Secret) error + HandleFinalizer(ctx context.Context, logger *zap.SugaredLogger, secret *corev1.Secret, namespaces []string) error +} + +var _ SecretService = &secretService{} + +type secretService struct { + client resource.Client + config Config +} + +func NewSecretService(client resource.Client, config Config) SecretService { + return &secretService{ + client: client, + config: config, + } +} + +func (r *secretService) GetBase(ctx context.Context) (*corev1.Secret, error) { + secret := &corev1.Secret{} + err := r.client.Get(ctx, types.NamespacedName{ + Namespace: r.config.BaseNamespace, + Name: r.config.BaseDefaultSecretName, + }, secret) + + return secret, err +} + +func (r *secretService) IsBase(secret *corev1.Secret) bool { + return secret.Namespace == r.config.BaseNamespace && + secret.Name == r.config.BaseDefaultSecretName && + secret.Labels[ConfigLabel] == CredentialsLabelValue +} + +func (r *secretService) UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.Secret) error { + logger.Debug(fmt.Sprintf("Updating Secret '%s/%s'", namespace, baseInstance.GetName())) + instance := &corev1.Secret{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstance.GetName()}, instance); err != nil { + if errors.IsNotFound(err) { + return r.createSecret(ctx, logger, namespace, baseInstance) + } + logger.Error(err, fmt.Sprintf("Gathering existing Secret '%s/%s' failed", namespace, baseInstance.GetName())) + return err + } + if instance.Labels[FunctionManagedByLabel] == FunctionResourceLabelUserValue { + return nil + } + return r.updateSecret(ctx, logger, instance, baseInstance) +} + +func (r *secretService) HandleFinalizer(ctx context.Context, logger *zap.SugaredLogger, instance *corev1.Secret, namespaces []string) error { + if instance.ObjectMeta.DeletionTimestamp.IsZero() { + if containsString(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) { + return nil + } + instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) + if err := r.client.Update(context.Background(), instance); err != nil { + return err + } + } else { + if !containsString(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) { + return nil + } + for _, namespace := range namespaces { + logger.Debug(fmt.Sprintf("Deleting Secret '%s/%s'", namespace, instance.Name)) + if err := r.deleteSecret(ctx, logger, namespace, instance.Name); err != nil { + return err + } + } + instance.ObjectMeta.Finalizers = removeString(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) + if err := 
r.client.Update(context.Background(), instance); err != nil { + return err + } + } + return nil +} + +func (r *secretService) createSecret(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.Secret) error { + secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseInstance.GetName(), + Namespace: namespace, + Labels: baseInstance.Labels, + Annotations: baseInstance.Annotations, + }, + Data: baseInstance.Data, + StringData: baseInstance.StringData, + Type: baseInstance.Type, + } + + logger.Debug(fmt.Sprintf("Creating Secret '%s/%s'", secret.GetNamespace(), secret.GetName())) + if err := r.client.Create(ctx, &secret); err != nil { + logger.Error(err, fmt.Sprintf("Creating Secret '%s/%s' failed", secret.GetNamespace(), secret.GetName())) + return err + } + + return nil +} + +func (r *secretService) updateSecret(ctx context.Context, logger *zap.SugaredLogger, instance, baseInstance *corev1.Secret) error { + copy := instance.DeepCopy() + copy.Annotations = baseInstance.GetAnnotations() + copy.Labels = baseInstance.GetLabels() + copy.Data = baseInstance.Data + copy.StringData = baseInstance.StringData + copy.Type = baseInstance.Type + + if err := r.client.Update(ctx, copy); err != nil { + logger.Error(err, fmt.Sprintf("Updating Secret '%s/%s' failed", copy.GetNamespace(), copy.GetName())) + return err + } + + return nil +} + +func (r *secretService) deleteSecret(ctx context.Context, logger *zap.SugaredLogger, namespace, baseInstanceName string) error { + instance := &corev1.Secret{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstanceName}, instance); err != nil { + return client.IgnoreNotFound(err) + } + if instance.Labels[FunctionManagedByLabel] == FunctionResourceLabelUserValue { + return nil + } + if err := r.client.Delete(ctx, instance); err != nil { + logger.Error(err, fmt.Sprintf("Deleting Secret '%s/%s' failed", namespace, baseInstanceName)) + return err + } + + return nil +} + +// Helper functions to check and remove string from a slice of strings. 
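+// containsString and removeString are used by HandleFinalizer above to manage the
+// cfgSecretFinalizerName entry in the config Secret's finalizer list.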
+func containsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false +} + +func removeString(slice []string, s string) (result []string) { + for _, item := range slice { + if item == s { + continue + } + result = append(result, item) + } + return +} diff --git a/components/operator/internal/controllers/kubernetes/serviceaccount_service.go b/components/operator/internal/controllers/kubernetes/serviceaccount_service.go new file mode 100644 index 00000000..de7e143f --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/serviceaccount_service.go @@ -0,0 +1,130 @@ +package kubernetes + +import ( + "context" + "fmt" + "strings" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kyma-project/docker-registry/components/operator/internal/resource" +) + +type ServiceAccountService interface { + IsBase(serviceAccount *corev1.ServiceAccount) bool + ListBase(ctx context.Context) ([]corev1.ServiceAccount, error) + UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ServiceAccount) error +} + +type serviceAccountService struct { + client resource.Client + config Config +} + +func NewServiceAccountService(client resource.Client, config Config) ServiceAccountService { + return &serviceAccountService{ + client: client, + config: config, + } +} + +func (r *serviceAccountService) ListBase(ctx context.Context) ([]corev1.ServiceAccount, error) { + serviceAccounts := &corev1.ServiceAccountList{} + if err := r.client.ListByLabel(ctx, r.config.BaseNamespace, map[string]string{ConfigLabel: ServiceAccountLabelValue}, serviceAccounts); err != nil { + return nil, err + } + + return serviceAccounts.Items, nil +} + +func (r *serviceAccountService) IsBase(serviceAccount *corev1.ServiceAccount) bool { + return serviceAccount.Namespace == r.config.BaseNamespace && serviceAccount.Labels[ConfigLabel] == ServiceAccountLabelValue +} + +func (r *serviceAccountService) UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ServiceAccount) error { + logger.Debug(fmt.Sprintf("Updating ServiceAccount '%s/%s'", namespace, baseInstance.GetName())) + serviceAccount := &corev1.ServiceAccount{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstance.GetName()}, serviceAccount); err != nil { + if errors.IsNotFound(err) { + return r.createServiceAccount(ctx, logger, namespace, baseInstance) + } + logger.Error(err, fmt.Sprintf("Gathering existing ServiceAccount '%s/%s' failed", namespace, baseInstance.GetName())) + return err + } + + return r.updateServiceAccount(ctx, logger, serviceAccount, baseInstance) +} + +func (r *serviceAccountService) createServiceAccount(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ServiceAccount) error { + secrets := r.shiftSecretTokens(baseInstance) + serviceAccount := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseInstance.GetName(), + Namespace: namespace, + Labels: baseInstance.Labels, + Annotations: baseInstance.Annotations, + }, + Secrets: secrets, + ImagePullSecrets: baseInstance.ImagePullSecrets, + AutomountServiceAccountToken: baseInstance.AutomountServiceAccountToken, + } + + logger.Debug(fmt.Sprintf("Creating ServiceAccount '%s/%s'", serviceAccount.GetNamespace(), 
serviceAccount.GetName())) + if err := r.client.Create(ctx, &serviceAccount); err != nil { + logger.Error(err, fmt.Sprintf("Creating ServiceAccount '%s/%s'", serviceAccount.GetNamespace(), serviceAccount.GetName())) + return err + } + + return nil +} + +func (r *serviceAccountService) updateServiceAccount(ctx context.Context, logger *zap.SugaredLogger, instance, baseInstance *corev1.ServiceAccount) error { + tokens := r.extractSecretTokens(instance) + secrets := r.shiftSecretTokens(baseInstance) + secrets = append(secrets, tokens...) + + copy := instance.DeepCopy() + copy.Annotations = baseInstance.GetAnnotations() + copy.Labels = baseInstance.GetLabels() + copy.ImagePullSecrets = baseInstance.ImagePullSecrets + copy.AutomountServiceAccountToken = baseInstance.AutomountServiceAccountToken + copy.Secrets = secrets + + if err := r.client.Update(ctx, copy); err != nil { + logger.Error(err, fmt.Sprintf("Updating ServiceAccount '%s/%s' failed", copy.GetNamespace(), copy.GetName())) + return err + } + + return nil +} + +func (*serviceAccountService) shiftSecretTokens(baseInstance *corev1.ServiceAccount) []corev1.ObjectReference { + prefix := fmt.Sprintf("%s-token", baseInstance.Name) + + secrets := make([]corev1.ObjectReference, 0) + for _, secret := range baseInstance.Secrets { + if !strings.HasPrefix(secret.Name, prefix) { + secrets = append(secrets, secret) + } + } + + return secrets +} + +func (*serviceAccountService) extractSecretTokens(serviceAccount *corev1.ServiceAccount) []corev1.ObjectReference { + prefix := fmt.Sprintf("%s-token", serviceAccount.Name) + + secrets := make([]corev1.ObjectReference, 0) + for _, secret := range serviceAccount.Secrets { + if strings.HasPrefix(secret.Name, prefix) { + secrets = append(secrets, secret) + } + } + + return secrets +} diff --git a/components/operator/internal/controllers/kubernetes/shared.go b/components/operator/internal/controllers/kubernetes/shared.go new file mode 100644 index 00000000..10eb59ad --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/shared.go @@ -0,0 +1,55 @@ +package kubernetes + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + ConfigLabel = "dockerregistry.kyma-project.io/config" + CredentialsLabelValue = "credentials" + ServiceAccountLabelValue = "service-account" + RuntimeLabelValue = "runtime" +) + +type Config struct { + BaseNamespace string `envconfig:"default=kyma-system"` + BaseDefaultSecretName string `envconfig:"default=internal-dockerregistry-config"` + ExcludedNamespaces []string `envconfig:"default=kyma-system"` + ConfigMapRequeueDuration time.Duration `envconfig:"default=1m"` + SecretRequeueDuration time.Duration `envconfig:"default=1m"` + ServiceAccountRequeueDuration time.Duration `envconfig:"default=1m"` +} + +func getNamespaces(ctx context.Context, client client.Client, base string, excluded []string) ([]string, error) { + var namespaces corev1.NamespaceList + if err := client.List(ctx, &namespaces); err != nil { + return nil, err + } + + names := make([]string, 0) + for _, namespace := range namespaces.Items { + if !isExcludedNamespace(namespace.GetName(), base, excluded) && namespace.Status.Phase != corev1.NamespaceTerminating { + names = append(names, namespace.GetName()) + } + } + + return names, nil +} + +func isExcludedNamespace(name, base string, excluded []string) bool { + if name == base { + return true + } + + for _, namespace := range excluded { + if name == namespace { + return true + } + } + + return 
false +} diff --git a/components/operator/internal/gitrepository/cleanup.go b/components/operator/internal/gitrepository/cleanup.go new file mode 100644 index 00000000..6a41405a --- /dev/null +++ b/components/operator/internal/gitrepository/cleanup.go @@ -0,0 +1,31 @@ +package gitrepository + +import ( + "context" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + gitRepoCRDName = "gitrepositories.dockerregistry.kyma-project.io" +) + +// Cleanup removes gitrepository CRD and its resources +func Cleanup(ctx context.Context, c client.Client) error { + crd, err := getCRD(ctx, c) + if err != nil { + return client.IgnoreNotFound(err) + } + + return c.Delete(ctx, crd, &client.DeleteOptions{}) +} + +func getCRD(ctx context.Context, client client.Client) (*apiextensionsv1.CustomResourceDefinition, error) { + var crd apiextensionsv1.CustomResourceDefinition + err := client.Get(ctx, types.NamespacedName{ + Name: gitRepoCRDName, + }, &crd) + return &crd, err +} diff --git a/components/operator/internal/gitrepository/cleanup_test.go b/components/operator/internal/gitrepository/cleanup_test.go new file mode 100644 index 00000000..3fa1ab7d --- /dev/null +++ b/components/operator/internal/gitrepository/cleanup_test.go @@ -0,0 +1,61 @@ +package gitrepository + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestCleanup(t *testing.T) { + t.Run("remove crd", func(t *testing.T) { + ctx := context.Background() + c := fake.NewClientBuilder(). + WithScheme(apiextensionsscheme.Scheme). + WithObjects(fixGitRepoCRD()). + Build() + + err := Cleanup(ctx, c) + + require.NoError(t, err) + + err = c.Get(ctx, types.NamespacedName{ + Name: gitRepoCRDName, + }, fixGitRepoCRD()) + require.True(t, errors.IsNotFound(err)) + }) + + t.Run("crd not found", func(t *testing.T) { + ctx := context.Background() + c := fake.NewClientBuilder(). + WithScheme(apiextensionsscheme.Scheme). 
+ Build() + + err := Cleanup(ctx, c) + + require.NoError(t, err) + }) + + t.Run("client get error", func(t *testing.T) { + ctx := context.Background() + c := fake.NewClientBuilder().Build() + + err := Cleanup(ctx, c) + + require.Error(t, err) + }) +} + +func fixGitRepoCRD() *apiextensionsv1.CustomResourceDefinition { + return &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{ + Name: gitRepoCRDName, + }, + } +} diff --git a/components/operator/internal/predicate/predicate.go b/components/operator/internal/predicate/predicate.go new file mode 100644 index 00000000..21b0d09c --- /dev/null +++ b/components/operator/internal/predicate/predicate.go @@ -0,0 +1,33 @@ +package predicate + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// this predicate allows not reacting on status changes +type NoStatusChangePredicate struct { + predicate.Funcs +} + +func (p NoStatusChangePredicate) Update(e event.UpdateEvent) bool { + if e.ObjectNew == nil || e.ObjectOld == nil { + return false + } + + // first resource version (after apply) + if e.ObjectOld.GetResourceVersion() == e.ObjectNew.GetResourceVersion() { + return true + } + + return !isStatusUpdate(e) +} + +func isStatusUpdate(e event.UpdateEvent) bool { + if e.ObjectOld.GetGeneration() == e.ObjectNew.GetGeneration() && + e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() { + return true + } + + return false +} diff --git a/components/operator/internal/predicate/predicate_test.go b/components/operator/internal/predicate/predicate_test.go new file mode 100644 index 00000000..e9119b88 --- /dev/null +++ b/components/operator/internal/predicate/predicate_test.go @@ -0,0 +1,98 @@ +package predicate + +import ( + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestNoStatusChangePredicate_Update(t *testing.T) { + type args struct { + e event.UpdateEvent + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "nil objs", + args: args{ + e: event.UpdateEvent{ + ObjectOld: nil, + ObjectNew: nil, + }, + }, + want: false, + }, + { + name: "first obj iteration", + args: args{ + e: event.UpdateEvent{ + ObjectOld: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + ObjectNew: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + }, + }, + want: true, + }, + { + name: "status update", + args: args{ + e: event.UpdateEvent{ + ObjectOld: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + ObjectNew: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("600") + return u + }(), + }, + }, + want: false, + }, + { + name: "spec update", + args: args{ + e: event.UpdateEvent{ + ObjectOld: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + ObjectNew: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(2) + u.SetResourceVersion("600") + return u + }(), + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := NoStatusChangePredicate{} + if got := p.Update(tt.args.e); got != 
tt.want { + t.Errorf("NoStatusChangePredicate.Update() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/components/operator/internal/registry/node_port.go b/components/operator/internal/registry/node_port.go new file mode 100644 index 00000000..f7c84e4e --- /dev/null +++ b/components/operator/internal/registry/node_port.go @@ -0,0 +1,158 @@ +package registry + +import ( + "context" + "fmt" + "math/rand" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + dockerRegistryNodePort = 32_137 + + //Available ports according to documentation https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + maxNodePort = 32_767 + minNodePort = 30_000 +) + +const ( + dockerRegistryService = "internal-docker-registry" + dockerRegistryPortName = "http-registry" + + allNamespaces = "" +) + +type nodePortFinder func() int32 + +type NodePortResolver struct { + nodePortFinder +} + +func NewNodePortResolver(finder nodePortFinder) *NodePortResolver { + return &NodePortResolver{nodePortFinder: finder} +} + +func (npr *NodePortResolver) ResolveDockerRegistryNodePortFn(ctx context.Context, k8sClient client.Client, namespace string) (int32, error) { + svc, err := getService(ctx, k8sClient, namespace, dockerRegistryService) + if err != nil { + return 0, errors.Wrap(err, fmt.Sprintf("while checking if %s service is installed on cluster", dockerRegistryService)) + } + + if svc != nil && svc.Spec.Type == corev1.ServiceTypeNodePort { + if isDefaultNodePortValue(svc) { + return dockerRegistryNodePort, nil + } + currentNodePort := getNodePort(svc) + return currentNodePort, nil + } + + svcs, err := getAllNodePortServices(ctx, k8sClient) + if err != nil { + return 0, errors.Wrap(err, "while fetching all services from cluster") + } + + if possibleConflict(svcs) { + newPort, err := npr.drawEmptyPortNumber(svcs) + if err != nil { + return 0, errors.Wrap(err, "while drawing available port number") + } + return newPort, nil + } + return dockerRegistryNodePort, nil +} + +func (npr *NodePortResolver) drawEmptyPortNumber(svcs *corev1.ServiceList) (int32, error) { + nodePorts := map[int32]struct{}{} + for _, svc := range svcs.Items { + for _, port := range svc.Spec.Ports { + nodePorts[port.NodePort] = struct{}{} + } + } + + retries := 100 + var emptyPort int32 + for i := 0; i < retries; i++ { + possibleEmptyPort := npr.nodePortFinder() + if _, ok := nodePorts[possibleEmptyPort]; !ok { + emptyPort = possibleEmptyPort + break + } + } + if emptyPort == 0 { + return 0, errors.New("couldn't draw available port number, try again") + } + return emptyPort, nil +} + +func getNodePort(svc *corev1.Service) int32 { + for _, port := range svc.Spec.Ports { + if port.Name == dockerRegistryPortName { + return port.NodePort + } + } + return dockerRegistryNodePort +} + +func getService(ctx context.Context, k8sClient client.Client, namespace, name string) (*corev1.Service, error) { + svc := corev1.Service{} + err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &svc) + if client.IgnoreNotFound(err) != nil { + return nil, errors.Wrap(err, fmt.Sprintf("while getting %s servicce", name)) + } + return &svc, nil +} + +func isDefaultNodePortValue(svc *corev1.Service) bool { + ports := svc.Spec.Ports + for _, port := range ports { + if port.NodePort == dockerRegistryNodePort { + return true + } + } + return false +} + +func getAllNodePortServices(ctx context.Context, k8sClient client.Client) (*corev1.ServiceList, error) { + svcs := 
corev1.ServiceList{} + err := k8sClient.List(ctx, &svcs, &client.ListOptions{Namespace: allNamespaces}) + if err != nil { + return nil, errors.Wrap(err, "while getting list of all services") + } + nodePortSvcs := &corev1.ServiceList{} + for _, svc := range svcs.Items { + if svc.Spec.Type == corev1.ServiceTypeNodePort { + nodePortSvcs.Items = append(nodePortSvcs.Items, svc) + } + if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { + for _, port := range svc.Spec.Ports { + if port.NodePort != 0 { + nodePortSvcs.Items = append(nodePortSvcs.Items, svc) + break + } + } + } + } + return nodePortSvcs, nil +} + +func possibleConflict(svcs *corev1.ServiceList) bool { + for _, svc := range svcs.Items { + ports := svc.Spec.Ports + for _, port := range ports { + if port.NodePort == dockerRegistryNodePort { + return true + } + } + } + return false +} + +var _ nodePortFinder = RandomNodePort + +func RandomNodePort() int32 { + number := rand.Int31n(maxNodePort - minNodePort) + return minNodePort + number +} diff --git a/components/operator/internal/registry/node_port_test.go b/components/operator/internal/registry/node_port_test.go new file mode 100644 index 00000000..9965b8d3 --- /dev/null +++ b/components/operator/internal/registry/node_port_test.go @@ -0,0 +1,144 @@ +package registry + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const nonConflictPort int32 = 32238 + +const kymaNamespace = "kyma-system" + +type assertFn func(t *testing.T, overrides map[string]interface{}) + +func TestNodePortAction(t *testing.T) { + testCases := map[string]struct { + givenService *corev1.Service + expectedPort int32 + assertFn assertFn + }{ + "Return default port new port when nodePort installed on default port": { + givenService: fixtureServiceNodePort(dockerRegistryService, kymaNamespace, dockerRegistryNodePort), + expectedPort: dockerRegistryNodePort, + }, + "Generate new port when nodePort service installed on different port": { + givenService: fixtureServiceNodePort(dockerRegistryService, kymaNamespace, nonConflictPort), + expectedPort: nonConflictPort, + }, + "Return default port new port when nodePort not installed, without port conflict": { + expectedPort: dockerRegistryNodePort, + }, + "Generate new port when nodePort not installed, with port conflict": { + givenService: fixtureServiceNodePort("conflicting-svc", kymaNamespace, dockerRegistryNodePort), + expectedPort: nonConflictPort, + }, + "Return default port new port when service is ClusterIP before upgrade without port conflict": { + givenService: fixtureServiceClusterIP(dockerRegistryService, kymaNamespace), + expectedPort: dockerRegistryNodePort, + }, + "Generate new port when cluster has NodePort service in different namespace with port conflict": { + givenService: fixtureServiceNodePort(dockerRegistryService, "different-ns", dockerRegistryNodePort), + expectedPort: nonConflictPort, + }, + "Generate new port when cluster has LoadBalancer service in different namespace with port conflict": { + givenService: fixtureLoadBalancer(), + expectedPort: nonConflictPort, + }, + } + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + //GIVEN + ctx := context.TODO() + k8sClient := fake.NewClientBuilder(). + WithRuntimeObjects(fixtureServices()...). 
+ Build() + resolver := NewNodePortResolver(fixedNodePort(nonConflictPort)) + if testCase.givenService != nil { + err := k8sClient.Create(ctx, testCase.givenService, &client.CreateOptions{}) + require.NoError(t, err) + } + + //WHEN + port, err := resolver.ResolveDockerRegistryNodePortFn(ctx, k8sClient, kymaNamespace) + + //THEN + require.NoError(t, err) + require.Equal(t, testCase.expectedPort, port) + }) + } +} + +func fixtureServiceNodePort(name, namespace string, nodePort int32) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + {Name: dockerRegistryPortName, NodePort: nodePort}}, + }, + } +} + +func fixtureServiceClusterIP(name, namespace string) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + {Name: dockerRegistryPortName, Port: 5000}}, + }, + } +} + +func fixtureServices() []runtime.Object { + l := []runtime.Object{ + fixtureServiceNodePort("other-node-port", kymaNamespace, dockerRegistryNodePort-1), + fixtureServiceNodePort("many-ports", kymaNamespace, dockerRegistryNodePort+2), + } + return l +} + +func fixedNodePort(expectedPort int32) func() int32 { + return func() int32 { + return expectedPort + } +} + +func fixtureLoadBalancer() *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "istio-ingressgateway", + Namespace: "istio-system", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Ports: []corev1.ServicePort{ + { + NodePort: dockerRegistryNodePort, + Name: "http2", + }, + { + NodePort: 30857, + Name: "https", + }, + }, + }, + Status: corev1.ServiceStatus{}, + } +} diff --git a/components/operator/internal/registry/secret.go b/components/operator/internal/registry/secret.go new file mode 100644 index 00000000..63b44319 --- /dev/null +++ b/components/operator/internal/registry/secret.go @@ -0,0 +1,61 @@ +package registry + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + SecretName = "internal-dockerregistry-config" + LabelConfigKey = "dockerregistry.kyma-project.io/config" + LabelConfigVal = "credentials" + IsInternalKey = "isInternal" + DeploymentName = "internal-docker-registry" + HttpEnvKey = "REGISTRY_HTTP_SECRET" +) + +func GetDockerRegistryInternalRegistrySecret(ctx context.Context, c client.Client, namespace string) (*corev1.Secret, error) { + secret := corev1.Secret{} + key := client.ObjectKey{ + Namespace: namespace, + Name: SecretName, + } + err := c.Get(ctx, key, &secret) + if err != nil { + return nil, client.IgnoreNotFound(err) + } + + if val, ok := secret.GetLabels()[LabelConfigKey]; !ok || val != LabelConfigVal { + return nil, nil + } + + if val := string(secret.Data[IsInternalKey]); val != "true" { + return nil, nil + } + + return &secret, nil +} + +func GetRegistryHTTPSecretEnvValue(ctx context.Context, c client.Client, namespace string) (string, error) { + deployment := appsv1.Deployment{} + key := client.ObjectKey{ + Namespace: namespace, + Name: DeploymentName, + } + err := c.Get(ctx, key, &deployment) + if err != nil { + return "", client.IgnoreNotFound(err) + } + + envs := 
deployment.Spec.Template.Spec.Containers[0].Env + for _, v := range envs { + if v.Name == HttpEnvKey && v.Value != "" { + return v.Value, nil + } + } + + return "", nil +} diff --git a/components/operator/internal/resource/resource.go b/components/operator/internal/resource/resource.go new file mode 100644 index 00000000..afb88f91 --- /dev/null +++ b/components/operator/internal/resource/resource.go @@ -0,0 +1,107 @@ +package resource + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apilabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +//go:generate mockery --name=Client --output=automock --outpkg=automock --case=underscore +type Client interface { + Create(ctx context.Context, object Object) error + CreateWithReference(ctx context.Context, parent Object, object Object) error + Update(ctx context.Context, object Object) error + Get(ctx context.Context, key ctrlclient.ObjectKey, object Object) error + ListByLabel(ctx context.Context, namespace string, labels map[string]string, object ctrlclient.ObjectList) error + DeleteAllBySelector(ctx context.Context, resourceType Object, namespace string, selector apilabels.Selector) error + Delete(ctx context.Context, resourceType Object) error + Status() ctrlclient.StatusWriter +} + +//go:generate mockery --name=K8sClient --output=automock --outpkg=automock --case=underscore +type K8sClient interface { + Create(context.Context, ctrlclient.Object, ...ctrlclient.CreateOption) error + Update(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.UpdateOption) error + Get(ctx context.Context, key ctrlclient.ObjectKey, obj ctrlclient.Object, opts ...ctrlclient.GetOption) error + List(context.Context, ctrlclient.ObjectList, ...ctrlclient.ListOption) error + DeleteAllOf(context.Context, ctrlclient.Object, ...ctrlclient.DeleteAllOfOption) error + Status() ctrlclient.StatusWriter + Delete(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.DeleteOption) error +} + +type Object interface { + runtime.Object + metav1.Object +} + +var _ Client = &client{} + +type client struct { + k8sClient K8sClient + schema *runtime.Scheme +} + +func (c *client) Delete(ctx context.Context, obj Object) error { + propagationPolicy := metav1.DeletePropagationBackground + return c.k8sClient.Delete(ctx, obj, &ctrlclient.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }) +} + +func New(k8sClient K8sClient, schema *runtime.Scheme) Client { + return &client{ + k8sClient: k8sClient, + schema: schema, + } +} + +func (c *client) Create(ctx context.Context, object Object) error { + return c.CreateWithReference(ctx, nil, object) +} + +func (c *client) CreateWithReference(ctx context.Context, parent, object Object) error { + if parent != nil { + if err := controllerutil.SetControllerReference(parent, object, c.schema); err != nil { + return err + } + } + + return c.k8sClient.Create(ctx, object) +} + +func (c *client) Update(ctx context.Context, object Object) error { + return c.k8sClient.Update(ctx, object) +} + +func (c *client) Get(ctx context.Context, key ctrlclient.ObjectKey, object Object) error { + return c.k8sClient.Get(ctx, key, object) +} + +func (c *client) ListByLabel(ctx context.Context, namespace string, labels map[string]string, list ctrlclient.ObjectList) error { + return c.k8sClient.List(ctx, list, &ctrlclient.ListOptions{ + LabelSelector: 
apilabels.SelectorFromSet(labels), + Namespace: namespace, + }) +} + +func (c *client) DeleteAllBySelector(ctx context.Context, resourceType Object, namespace string, selector apilabels.Selector) error { + propagationPolicy := metav1.DeletePropagationBackground + + return c.k8sClient.DeleteAllOf(ctx, resourceType, &ctrlclient.DeleteAllOfOptions{ + ListOptions: ctrlclient.ListOptions{ + LabelSelector: selector, + Namespace: namespace, + }, + DeleteOptions: ctrlclient.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }, + }) +} + +func (c *client) Status() ctrlclient.StatusWriter { + return c.k8sClient.Status() +} diff --git a/components/operator/internal/state/add_finalizer.go b/components/operator/internal/state/add_finalizer.go new file mode 100644 index 00000000..9ed07295 --- /dev/null +++ b/components/operator/internal/state/add_finalizer.go @@ -0,0 +1,32 @@ +package state + +import ( + "context" + + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func sFnAddFinalizer(ctx context.Context, r *reconciler, s *systemState) (stateFn, *controllerruntime.Result, error) { + instanceIsBeingDeleted := !s.instance.GetDeletionTimestamp().IsZero() + instanceHasFinalizer := controllerutil.ContainsFinalizer(&s.instance, r.finalizer) + if !instanceHasFinalizer { + // in case instance has no finalizer and instance is being deleted - end reconciliation + if instanceIsBeingDeleted { + // stop state machine + return stop() + } + + if err := addFinalizer(ctx, r, s); err != nil { + // stop state machine with potential error + return stopWithEventualError(err) + } + } + return nextState(sFnInitialize) +} + +func addFinalizer(ctx context.Context, r *reconciler, s *systemState) error { + // in case instance does not have finalizer - add it and update instance + controllerutil.AddFinalizer(&s.instance, r.finalizer) + return updateDockerRegistryWithoutStatus(ctx, r, s) +} diff --git a/components/operator/internal/state/add_finalizer_test.go b/components/operator/internal/state/add_finalizer_test.go new file mode 100644 index 00000000..9771e856 --- /dev/null +++ b/components/operator/internal/state/add_finalizer_test.go @@ -0,0 +1,84 @@ +package state + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "testing" +) + +func Test_sFnAddFinalizer(t *testing.T) { + t.Run("set finalizer", func(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, v1alpha1.AddToScheme(scheme)) + + dockerRegistry := v1alpha1.DockerRegistry{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-name", + Namespace: "test-namespace", + ResourceVersion: "123", + }, + } + s := &systemState{ + instance: dockerRegistry, + } + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(&dockerRegistry). 
+ Build(), + }, + } + + // set finalizer + next, result, err := sFnAddFinalizer(context.Background(), r, s) + require.NoError(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnInitialize, next) + + // check finalizer in systemState + require.Contains(t, s.instance.GetFinalizers(), r.cfg.finalizer) + + // check finalizer in k8s + obj := v1alpha1.DockerRegistry{} + err = r.k8s.client.Get(context.Background(), + client.ObjectKey{ + Namespace: dockerRegistry.Namespace, + Name: dockerRegistry.Name, + }, + &obj) + require.NoError(t, err) + require.Contains(t, obj.GetFinalizers(), r.cfg.finalizer) + }) + + t.Run("stop when no finalizer and instance is being deleted", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + } + + metaTimeNow := v1.Now() + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: v1.ObjectMeta{ + DeletionTimestamp: &metaTimeNow, + }, + }, + } + + // stop + next, result, err := sFnAddFinalizer(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + }) +} diff --git a/components/operator/internal/state/apply.go b/components/operator/internal/state/apply.go new file mode 100644 index 00000000..12a5fb96 --- /dev/null +++ b/components/operator/internal/state/apply.go @@ -0,0 +1,37 @@ +package state + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// run dockerregistry chart installation +func sFnApplyResources(_ context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + // set condition Installed if it does not exist + if !s.instance.IsCondition(v1alpha1.ConditionTypeInstalled) { + s.setState(v1alpha1.StateProcessing) + s.instance.UpdateConditionUnknown(v1alpha1.ConditionTypeInstalled, v1alpha1.ConditionReasonInstallation, + "Installing for configuration") + } + + // install component + err := chart.Install(s.chartConfig, s.flagsBuilder.Build()) + if err != nil { + r.log.Warnf("error while installing resource %s: %s", + client.ObjectKeyFromObject(&s.instance), err.Error()) + s.setState(v1alpha1.StateError) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstallationErr, + err, + ) + return stopWithEventualError(err) + } + + // switch state verify + return nextState(sFnVerifyResources) +} diff --git a/components/operator/internal/state/apply_test.go b/components/operator/internal/state/apply_test.go new file mode 100644 index 00000000..016e35b3 --- /dev/null +++ b/components/operator/internal/state/apply_test.go @@ -0,0 +1,104 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func Test_buildSFnApplyResources(t *testing.T) { + t.Run("switch state and add condition when condition is missing", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{}, + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + 
Release: chart.Release{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + flagsBuilder: chart.NewFlagsBuilder(), + } + + next, result, err := sFnApplyResources(context.Background(), nil, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnVerifyResources, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateProcessing, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionUnknown, + v1alpha1.ConditionReasonInstallation, + "Installing for configuration", + ) + }) + + t.Run("apply resources", func(t *testing.T) { + s := &systemState{ + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + Release: chart.Release{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + flagsBuilder: chart.NewFlagsBuilder(), + } + r := &reconciler{} + + // run installation process and return verificating state + next, result, err := sFnApplyResources(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnVerifyResources, next) + }) + + t.Run("install chart error", func(t *testing.T) { + s := &systemState{ + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixManifestCache("\t"), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + flagsBuilder: chart.NewFlagsBuilder(), + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + // handle error and return update condition state + next, result, err := sFnApplyResources(context.Background(), r, s) + require.EqualError(t, err, "could not parse chart manifest: yaml: found character that cannot start any token") + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateError, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionFalse, + v1alpha1.ConditionReasonInstallationErr, + "could not parse chart manifest: yaml: found character that cannot start any token", + ) + }) +} diff --git a/components/operator/internal/state/controller_configuration.go b/components/operator/internal/state/controller_configuration.go new file mode 100644 index 00000000..b6170ede --- /dev/null +++ b/components/operator/internal/state/controller_configuration.go @@ -0,0 +1,45 @@ +package state + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + controllerruntime "sigs.k8s.io/controller-runtime" +) + +func sFnControllerConfiguration(_ context.Context, r *reconciler, s *systemState) (stateFn, *controllerruntime.Result, error) { + err := updateControllerConfigurationStatus(r, &s.instance) + if err != nil { + return stopWithEventualError(err) + } + + configureControllerConfigurationFlags(s) + + s.setState(v1alpha1.StateProcessing) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeConfigured, + v1alpha1.ConditionReasonConfigured, + "Configuration ready", + ) + + return nextState(sFnApplyResources) +} + +func updateControllerConfigurationStatus(r 
*reconciler, instance *v1alpha1.DockerRegistry) error { + spec := instance.Spec + fields := fieldsToUpdate{ + {spec.HealthzLivenessTimeout, &instance.Status.HealthzLivenessTimeout, "Duration of health check", ""}, + {registry.SecretName, &instance.Status.SecretName, "Name of secret with registry access data", ""}, + } + + updateStatusFields(r.k8s, instance, fields) + return nil +} + +func configureControllerConfigurationFlags(s *systemState) { + s.flagsBuilder. + WithControllerConfiguration( + s.instance.Status.HealthzLivenessTimeout, + ) +} diff --git a/components/operator/internal/state/controller_configuration_test.go b/components/operator/internal/state/controller_configuration_test.go new file mode 100644 index 00000000..431231b2 --- /dev/null +++ b/components/operator/internal/state/controller_configuration_test.go @@ -0,0 +1,110 @@ +package state + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + healthzLivenessTimeoutTest = "test-healthz-liveness-timeout" +) + +func Test_sFnControllerConfiguration(t *testing.T) { + configurationReadyMsg := "Configuration ready" + + t.Run("update status additional configuration overrides", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Spec: v1alpha1.DockerRegistrySpec{ + HealthzLivenessTimeout: healthzLivenessTimeoutTest, + }, + }, + flagsBuilder: chart.NewFlagsBuilder(), + } + + c := fake.NewClientBuilder().Build() + eventRecorder := record.NewFakeRecorder(10) + r := &reconciler{log: zap.NewNop().Sugar(), k8s: k8s{client: c, EventRecorder: eventRecorder}} + next, result, err := sFnControllerConfiguration(context.TODO(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnApplyResources, next) + + status := s.instance.Status + require.Equal(t, healthzLivenessTimeoutTest, status.HealthzLivenessTimeout) + require.Equal(t, registry.SecretName, status.SecretName) + + require.Equal(t, v1alpha1.StateProcessing, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeConfigured, + metav1.ConditionTrue, + v1alpha1.ConditionReasonConfigured, + configurationReadyMsg, + ) + + expectedEvents := []string{ + "Normal Configuration Duration of health check set from '' to 'test-healthz-liveness-timeout'", + } + + for _, expectedEvent := range expectedEvents { + require.Equal(t, expectedEvent, <-eventRecorder.Events) + } + }) + + t.Run("reconcile from configurationError", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Type: string(v1alpha1.ConditionTypeConfigured), + Status: metav1.ConditionFalse, + Reason: string(v1alpha1.ConditionReasonConfigurationErr), + }, + { + Type: string(v1alpha1.ConditionTypeInstalled), + Status: metav1.ConditionTrue, + Reason: string(v1alpha1.ConditionReasonInstallation), + }, + }, + State: v1alpha1.StateError, + }, + }, + statusSnapshot: v1alpha1.DockerRegistryStatus{}, + flagsBuilder: chart.NewFlagsBuilder(), + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "boo", + }, 
+ } + r := &reconciler{ + log: zap.NewNop().Sugar(), + k8s: k8s{ + client: fake.NewClientBuilder().WithObjects(secret).Build(), + EventRecorder: record.NewFakeRecorder(2), + }, + } + + next, result, err := sFnControllerConfiguration(context.Background(), r, s) + require.NoError(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnApplyResources, next) + requireContainsCondition(t, s.instance.Status, + v1alpha1.ConditionTypeConfigured, + metav1.ConditionTrue, + v1alpha1.ConditionReasonConfigured, + configurationReadyMsg) + require.Equal(t, v1alpha1.StateProcessing, s.instance.Status.State) + }) +} diff --git a/components/operator/internal/state/delete.go b/components/operator/internal/state/delete.go new file mode 100644 index 00000000..09332081 --- /dev/null +++ b/components/operator/internal/state/delete.go @@ -0,0 +1,99 @@ +package state + +import ( + "context" + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// delete dockerregistry based on previously installed resources +func sFnDeleteResources(_ context.Context, _ *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + s.setState(v1alpha1.StateDeleting) + s.instance.UpdateConditionUnknown( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletion, + "Uninstalling", + ) + + return nextState(sFnSafeDeletionState) +} + +func sFnSafeDeletionState(_ context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + if err := chart.CheckCRDOrphanResources(s.chartConfig); err != nil { + // stop state machine with a warning and requeue reconciliation in 1min + // warning state indicates that user intervention would fix it. It's not reconciliation error. + s.setState(v1alpha1.StateWarning) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletionErr, + err, + ) + return stopWithEventualError(err) + } + + return deleteResourcesWithFilter(r, s) +} + +func deleteResourcesWithFilter(r *reconciler, s *systemState, filterFuncs ...chart.FilterFunc) (stateFn, *ctrl.Result, error) { + err, done := chart.UninstallSecrets(s.chartConfig, filterFuncs...) 
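+	// Deletion is two-phased: first the registry Secrets are uninstalled and the
+	// reconciliation requeues until they are gone, then the remaining chart
+	// resources are removed and the DockerRegistry finalizer can be dropped.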
+ if err != nil { + return uninstallSecretsError(r, s, err) + } + if !done { + return awaitingSecretsRemoval(s) + } + + if err := chart.Uninstall(s.chartConfig, filterFuncs...); err != nil { + return uninstallResourcesError(r, s, err) + } + + s.setState(v1alpha1.StateDeleting) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeleted, + "DockerRegistry module deleted", + ) + + // if resources are ready to be deleted, remove finalizer + return nextState(sFnRemoveFinalizer) +} + +func uninstallResourcesError(r *reconciler, s *systemState, err error) (stateFn, *ctrl.Result, error) { + r.log.Warnf("error while uninstalling resource %s: %s", + client.ObjectKeyFromObject(&s.instance), err.Error()) + s.setState(v1alpha1.StateError) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletionErr, + err, + ) + return stopWithEventualError(err) +} + +func awaitingSecretsRemoval(s *systemState) (stateFn, *ctrl.Result, error) { + s.setState(v1alpha1.StateDeleting) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletion, + "Deleting secrets", + ) + + // wait one sec until ctrl-mngr remove finalizers from secrets + return requeueAfter(time.Second) +} + +func uninstallSecretsError(r *reconciler, s *systemState, err error) (stateFn, *ctrl.Result, error) { + r.log.Warnf("error while uninstalling secrets %s: %s", + client.ObjectKeyFromObject(&s.instance), err.Error()) + s.setState(v1alpha1.StateError) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletionErr, + err, + ) + return stopWithEventualError(err) +} diff --git a/components/operator/internal/state/delete_test.go b/components/operator/internal/state/delete_test.go new file mode 100644 index 00000000..8f0fb16d --- /dev/null +++ b/components/operator/internal/state/delete_test.go @@ -0,0 +1,121 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testDeletingDockerRegistry = func() v1alpha1.DockerRegistry { + dockerRegistry := testInstalledDockerRegistry + dockerRegistry.Status.State = v1alpha1.StateDeleting + dockerRegistry.Status.Conditions = []metav1.Condition{ + { + Type: string(v1alpha1.ConditionTypeDeleted), + Reason: string(v1alpha1.ConditionReasonDeletion), + Status: metav1.ConditionUnknown, + }, + } + return dockerRegistry + }() +) + +func Test_sFnDeleteResources(t *testing.T) { + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}} + + t.Run("update condition", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{}, + } + + next, result, err := sFnDeleteResources(context.Background(), nil, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnSafeDeletionState, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateDeleting, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeDeleted, + metav1.ConditionUnknown, + v1alpha1.ConditionReasonDeletion, + "Uninstalling", + ) + }) + + t.Run("deletion error while checking orphan resources", 
func(t *testing.T) { + s := &systemState{ + instance: *testDeletingDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixManifestCache("\t"), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + next, result, err := sFnSafeDeletionState(context.TODO(), r, s) + require.EqualError(t, err, "could not parse chart manifest: yaml: found character that cannot start any token") + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateWarning, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeDeleted, + metav1.ConditionFalse, + v1alpha1.ConditionReasonDeletionErr, + "could not parse chart manifest: yaml: found character that cannot start any token", + ) + }) + + t.Run("deletion", func(t *testing.T) { + s := &systemState{ + instance: *testDeletingDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testDeletingDockerRegistry.GetName(), + Namespace: testDeletingDockerRegistry.GetNamespace(), + }, + Cluster: chart.Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(&ns). + Build(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + next, result, err := sFnSafeDeletionState(context.TODO(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnRemoveFinalizer, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateDeleting, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeDeleted, + metav1.ConditionTrue, + v1alpha1.ConditionReasonDeleted, + "DockerRegistry module deleted", + ) + }) +} diff --git a/components/operator/internal/state/emit_event.go b/components/operator/internal/state/emit_event.go new file mode 100644 index 00000000..65a1febb --- /dev/null +++ b/components/operator/internal/state/emit_event.go @@ -0,0 +1,41 @@ +package state + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + warningMessagePrefix = "Warning" +) + +func emitEvent(m *reconciler, s *systemState) { + // compare if any condition change + for _, condition := range s.instance.Status.Conditions { + // check if condition exists in memento status + memorizedCondition := meta.FindStatusCondition(s.statusSnapshot.Conditions, condition.Type) + // ignore unchanged conditions + if memorizedCondition != nil && + memorizedCondition.Status == condition.Status && + memorizedCondition.Reason == condition.Reason && + memorizedCondition.Message == condition.Message { + continue + } + m.Event( + &s.instance, + eventType(condition, condition.Message), + condition.Reason, + condition.Message, + ) + } +} + +func eventType(condition metav1.Condition, message string) string { + eventType := "Normal" + if condition.Status == metav1.ConditionFalse || strings.HasPrefix(message, warningMessagePrefix) { + eventType = "Warning" + } + return eventType +} diff --git a/components/operator/internal/state/emit_event_test.go b/components/operator/internal/state/emit_event_test.go new file mode 100644 index 00000000..23b9e3a9 --- /dev/null +++ b/components/operator/internal/state/emit_event_test.go @@ -0,0 +1,94 @@ +package state + +import ( + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + 
"github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" +) + +var ( + testDockerRegistryConditions1 = v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Reason: "test-reason", + Message: "test message 1", + Type: "test-type-1", + }, + { + Status: metav1.ConditionUnknown, + Reason: "test-reason", + Message: "test message 1", + Type: "test-type-2", + }, + }, + }, + } + testDockerRegistryConditions2 = v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Reason: "test-reason", + Message: "test message 2", + Type: "test-type-1", + }, + { + Status: metav1.ConditionTrue, + Reason: "test-reason", + Message: "test message 2", + Type: "test-type-2", + }, + }, + }, + } +) + +func Test_emitEvent(t *testing.T) { + t.Run("don't emit event", func(t *testing.T) { + eventRecorder := record.NewFakeRecorder(5) + s := &systemState{ + instance: *testDockerRegistryConditions1.DeepCopy(), + statusSnapshot: *testDockerRegistryConditions1.Status.DeepCopy(), + } + r := &reconciler{ + k8s: k8s{ + EventRecorder: eventRecorder, + }, + } + + emitEvent(r, s) + + // check conditions, don't emit event + require.Len(t, eventRecorder.Events, 0) + }) + + t.Run("emit events", func(t *testing.T) { + eventRecorder := record.NewFakeRecorder(5) + s := &systemState{ + instance: *testDockerRegistryConditions2.DeepCopy(), + statusSnapshot: *testDockerRegistryConditions1.Status.DeepCopy(), + } + r := &reconciler{ + k8s: k8s{ + EventRecorder: eventRecorder, + }, + } + + // build emitEventFunc + emitEvent(r, s) + + // check conditions, don't emit event + require.Len(t, eventRecorder.Events, 2) + + expectedEvents := []string{"Warning test-reason test message 2", "Normal test-reason test message 2"} + close(eventRecorder.Events) + for v := range eventRecorder.Events { + require.Contains(t, expectedEvents, v) + } + }) +} diff --git a/components/operator/internal/state/fsm.go b/components/operator/internal/state/fsm.go new file mode 100644 index 00000000..a5b636ba --- /dev/null +++ b/components/operator/internal/state/fsm.go @@ -0,0 +1,143 @@ +package state + +import ( + "context" + "fmt" + "reflect" + "runtime" + "strings" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/kyma-project/docker-registry/components/operator/internal/warning" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + defaultResult = ctrl.Result{} + secretCacheKey = types.NamespacedName{ + Name: "dockerregistry-manifest-cache", + Namespace: "kyma-system", + } +) + +type stateFn func(context.Context, *reconciler, *systemState) (stateFn, *ctrl.Result, error) + +type cfg struct { + finalizer string + chartPath string + managerPodUID string +} + +type systemState struct { + instance v1alpha1.DockerRegistry + statusSnapshot v1alpha1.DockerRegistryStatus + chartConfig *chart.Config + warningBuilder *warning.Builder + flagsBuilder chart.FlagsBuilder +} + +func (s *systemState) saveStatusSnapshot() { + result := s.instance.Status.DeepCopy() + if result == nil { + result = &v1alpha1.DockerRegistryStatus{} + } + s.statusSnapshot = *result +} + +func (s 
*systemState) setState(state v1alpha1.State) { + s.instance.Status.State = state +} + +func (s *systemState) setServed(served v1alpha1.Served) { + s.instance.Status.Served = served +} + +func chartConfig(ctx context.Context, r *reconciler, namespace string) *chart.Config { + return &chart.Config{ + Ctx: ctx, + Log: r.log, + Cache: r.cache, + CacheKey: secretCacheKey, + ManagerUID: r.managerPodUID, + Cluster: chart.Cluster{ + Client: r.client, + Config: r.config, + }, + Release: chart.Release{ + ChartPath: r.chartPath, + Namespace: namespace, + Name: "dockerregistry", + }, + } +} + +type k8s struct { + client client.Client + config *rest.Config + record.EventRecorder +} + +type reconciler struct { + fn stateFn + log *zap.SugaredLogger + cache chart.ManifestCache + result ctrl.Result + k8s + cfg +} + +func (m *reconciler) stateFnName() string { + fullName := runtime.FuncForPC(reflect.ValueOf(m.fn).Pointer()).Name() + splitFullName := strings.Split(fullName, ".") + + if len(splitFullName) < 3 { + return fullName + } + + shortName := splitFullName[2] + return shortName +} + +func (m *reconciler) Reconcile(ctx context.Context, v v1alpha1.DockerRegistry) (ctrl.Result, error) { + state := systemState{ + instance: v, + warningBuilder: warning.NewBuilder(), + flagsBuilder: chart.NewFlagsBuilder(), + chartConfig: chartConfig(ctx, m, v.Namespace), + } + state.saveStatusSnapshot() + var err error + var result *ctrl.Result +loop: + for m.fn != nil && err == nil { + select { + case <-ctx.Done(): + err = ctx.Err() + break loop + + default: + m.log.Info(fmt.Sprintf("switching state: %s", m.stateFnName())) + m.fn, result, err = m.fn(ctx, m, &state) + if updateErr := updateDockerRegistryStatus(ctx, m, &state); updateErr != nil { + err = updateErr + } + } + } + + if result == nil { + result = &defaultResult + } + + m.log. + With("error", err). + With("result", result). 
+ Info("reconciliation done") + + return *result, err +} diff --git a/components/operator/internal/state/fsm_test.go b/components/operator/internal/state/fsm_test.go new file mode 100644 index 00000000..b05f42fe --- /dev/null +++ b/components/operator/internal/state/fsm_test.go @@ -0,0 +1,145 @@ +package state + +import ( + "context" + "reflect" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testStateFn = func(ctx context.Context, r *reconciler, ss *systemState) (stateFn, *ctrl.Result, error) { + return nil, &testResult, nil + } + + testWrappedStateFn = func(ctx context.Context, r *reconciler, ss *systemState) (stateFn, *ctrl.Result, error) { + return testStateFn, nil, nil + } + + testResult = ctrl.Result{ + Requeue: true, + } + + canceledCtx = func() context.Context { + ctx, done := context.WithCancel(context.Background()) + done() + return ctx + }() +) + +func Test_reconciler_Reconcile(t *testing.T) { + type fields struct { + fn stateFn + log *zap.SugaredLogger + cache chart.ManifestCache + result ctrl.Result + k8s k8s + cfg cfg + } + type args struct { + ctx context.Context + v v1alpha1.DockerRegistry + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + }{ + { + name: "empty fn", + fields: fields{ + log: zap.NewNop().Sugar(), + }, + want: defaultResult, + wantErr: false, + }, + { + name: "with ctx done", + fields: fields{ + log: zap.NewNop().Sugar(), + fn: testStateFn, + }, + args: args{ + ctx: canceledCtx, + }, + want: defaultResult, + wantErr: true, + }, + { + name: "with many fns", + fields: fields{ + log: zap.NewNop().Sugar(), + fn: testWrappedStateFn, + }, + args: args{ + ctx: context.Background(), + }, + want: testResult, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &reconciler{ + fn: tt.fields.fn, + log: tt.fields.log, + cache: tt.fields.cache, + result: tt.fields.result, + k8s: tt.fields.k8s, + cfg: tt.fields.cfg, + } + got, err := m.Reconcile(tt.args.ctx, tt.args.v) + if (err != nil) != tt.wantErr { + t.Errorf("reconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("reconciler.Reconcile() = %v, want %v", got, tt.want) + } + }) + } + + t.Run("take status snapshot", func(t *testing.T) { + fn := func(_ context.Context, _ *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + // check status + require.Equal(t, s.instance.Status, s.statusSnapshot) + return nil, nil, nil + } + r := &reconciler{ + fn: fn, + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + log: zap.NewNop().Sugar(), + } + dockerRegistry := v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Type: "test-type", + Status: "test-status", + Reason: "test-reason", + Message: "test-message", + ObservedGeneration: 1, + LastTransitionTime: metav1.Now(), + }, + }, + State: v1alpha1.StateError, + }, + } + _, err := r.Reconcile(context.Background(), dockerRegistry) + require.NoError(t, err) + }) +} diff --git a/components/operator/internal/state/initialize.go 
b/components/operator/internal/state/initialize.go new file mode 100644 index 00000000..afd12a61 --- /dev/null +++ b/components/operator/internal/state/initialize.go @@ -0,0 +1,18 @@ +package state + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" +) + +// choose right scenario to start (installation/deletion) +func sFnInitialize(_ context.Context, _ *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + // in case instance is being deleted and has finalizer - delete all resources + instanceIsBeingDeleted := !s.instance.GetDeletionTimestamp().IsZero() + if instanceIsBeingDeleted { + return nextState(sFnDeleteResources) + } + + return nextState(sFnRegistryConfiguration) +} diff --git a/components/operator/internal/state/initialize_test.go b/components/operator/internal/state/initialize_test.go new file mode 100644 index 00000000..e6fd0e25 --- /dev/null +++ b/components/operator/internal/state/initialize_test.go @@ -0,0 +1,67 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnInitialize(t *testing.T) { + t.Run("setup and return next step sFnRegistryConfiguration", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + } + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + r.cfg.finalizer, + }, + }, + }, + } + + // setup and return buildSFnPrerequisites + next, result, err := sFnInitialize(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnRegistryConfiguration, next) + }) + + t.Run("setup and return next step sFnDeleteResources", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + } + metaTime := metav1.Now() + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + r.cfg.finalizer, + }, + DeletionTimestamp: &metaTime, + }, + }, + } + + // setup and return buildSFnDeleteResources + next, result, err := sFnInitialize(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnDeleteResources, next) + }) +} diff --git a/components/operator/internal/state/new.go b/components/operator/internal/state/new.go new file mode 100644 index 00000000..54e00edf --- /dev/null +++ b/components/operator/internal/state/new.go @@ -0,0 +1,36 @@ +package state + +import ( + "context" + "os" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "go.uber.org/zap" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type StateReconciler interface { + Reconcile(ctx context.Context, v v1alpha1.DockerRegistry) (ctrl.Result, error) +} + +func NewMachine(client client.Client, config *rest.Config, recorder record.EventRecorder, log *zap.SugaredLogger, cache chart.ManifestCache, chartPath string) StateReconciler { + return &reconciler{ + fn: sFnServedFilter, + cache: cache, + log: log, + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + chartPath: chartPath, + 
managerPodUID: os.Getenv("DOCKERREGISTRY_MANAGER_UID"),
+		},
+		k8s: k8s{
+			client:        client,
+			config:        config,
+			EventRecorder: recorder,
+		},
+	}
+}
diff --git a/components/operator/internal/state/registry.go b/components/operator/internal/state/registry.go
new file mode 100644
index 00000000..c39d56c6
--- /dev/null
+++ b/components/operator/internal/state/registry.go
@@ -0,0 +1,66 @@
+package state
+
+import (
+	"context"
+	"github.com/kyma-project/docker-registry/components/operator/api/v1alpha1"
+	"github.com/kyma-project/docker-registry/components/operator/internal/registry"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+func sFnRegistryConfiguration(ctx context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) {
+	s.setState(v1alpha1.StateProcessing)
+	// set up the registry configuration and set possible warnings
+	err := configureRegistry(ctx, r, s)
+	if err != nil {
+		s.setState(v1alpha1.StateError)
+		s.instance.UpdateConditionFalse(
+			v1alpha1.ConditionTypeConfigured,
+			v1alpha1.ConditionReasonConfigurationErr,
+			err,
+		)
+		return stopWithEventualError(err)
+	}
+
+	return nextState(sFnControllerConfiguration)
+}
+
+func configureRegistry(ctx context.Context, r *reconciler, s *systemState) error {
+	err := setInternalRegistryConfig(ctx, r, s)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func setInternalRegistryConfig(ctx context.Context, r *reconciler, s *systemState) error {
+	existingIntRegSecret, err := registry.GetDockerRegistryInternalRegistrySecret(ctx, r.client, s.instance.Namespace)
+	if err != nil {
+		return errors.Wrap(err, "while fetching existing internal docker registry secret")
+	}
+	if existingIntRegSecret != nil {
+		r.log.Debugf("reusing existing credentials for internal docker registry to avoid a docker registry rollout")
+		registryHttpSecretEnvValue, getErr := registry.GetRegistryHTTPSecretEnvValue(ctx, r.client, s.instance.Namespace)
+		if getErr != nil {
+			return errors.Wrap(getErr, "while reading env value registryHttpSecret from internal docker registry deployment")
+		}
+		s.flagsBuilder.
+			WithRegistryCredentials(
+				string(existingIntRegSecret.Data["username"]),
+				string(existingIntRegSecret.Data["password"]),
+			).
+ WithRegistryHttpSecret( + registryHttpSecretEnvValue, + ) + } + + resolver := registry.NewNodePortResolver(registry.RandomNodePort) + nodePort, err := resolver.ResolveDockerRegistryNodePortFn(ctx, r.client, s.instance.Namespace) + if err != nil { + return errors.Wrap(err, "while resolving registry node port") + } + r.log.Debugf("docker registry node port: %d", nodePort) + s.flagsBuilder.WithNodePort(int64(nodePort)) + return nil +} diff --git a/components/operator/internal/state/registry_test.go b/components/operator/internal/state/registry_test.go new file mode 100644 index 00000000..514dae5f --- /dev/null +++ b/components/operator/internal/state/registry_test.go @@ -0,0 +1,39 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnRegistryConfiguration(t *testing.T) { + t.Run("internal registry and update", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{}, + statusSnapshot: v1alpha1.DockerRegistryStatus{}, + flagsBuilder: chart.NewFlagsBuilder(), + } + r := &reconciler{ + k8s: k8s{client: fake.NewClientBuilder().Build()}, + log: zap.NewNop().Sugar(), + } + expectedFlags := map[string]interface{}{ + "global": map[string]interface{}{ + "registryNodePort": int64(32_137), + }, + } + + next, result, err := sFnRegistryConfiguration(context.Background(), r, s) + require.NoError(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnControllerConfiguration, next) + + require.EqualValues(t, expectedFlags, s.flagsBuilder.Build()) + require.Equal(t, v1alpha1.StateProcessing, s.instance.Status.State) + }) +} diff --git a/components/operator/internal/state/remove_finalizer.go b/components/operator/internal/state/remove_finalizer.go new file mode 100644 index 00000000..e5e9c406 --- /dev/null +++ b/components/operator/internal/state/remove_finalizer.go @@ -0,0 +1,17 @@ +package state + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func sFnRemoveFinalizer(ctx context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + if !controllerutil.RemoveFinalizer(&s.instance, r.finalizer) { + return requeue() + } + + err := updateDockerRegistryWithoutStatus(ctx, r, s) + return stopWithEventualError(err) +} diff --git a/components/operator/internal/state/remove_finalizer_test.go b/components/operator/internal/state/remove_finalizer_test.go new file mode 100644 index 00000000..e2da8273 --- /dev/null +++ b/components/operator/internal/state/remove_finalizer_test.go @@ -0,0 +1,68 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnRemoveFinalizer(t *testing.T) { + t.Run("remove finalizer", func(t *testing.T) { + scheme := scheme.Scheme + require.NoError(t, v1alpha1.AddToScheme(scheme)) + instance := v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + Finalizers: []string{ + v1alpha1.Finalizer, + }, + }, + } + r := &reconciler{ + cfg: 
cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(&instance). + Build(), + }, + } + s := &systemState{ + instance: instance, + } + + // remove finalizer + next, result, err := sFnRemoveFinalizer(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + }) + + t.Run("requeue when is no finalizer", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + } + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{}, + }, + } + + // remove finalizer + next, result, err := sFnRemoveFinalizer(context.Background(), r, s) + require.Nil(t, err) + require.Equal(t, &ctrl.Result{Requeue: true}, result) + require.Nil(t, next) + }) +} diff --git a/components/operator/internal/state/served_filter.go b/components/operator/internal/state/served_filter.go new file mode 100644 index 00000000..6b74e7fa --- /dev/null +++ b/components/operator/internal/state/served_filter.go @@ -0,0 +1,50 @@ +package state + +import ( + "context" + "fmt" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" +) + +func sFnServedFilter(ctx context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + if s.instance.IsServedEmpty() { + if err := setInitialServed(ctx, r, s); err != nil { + return stopWithEventualError(err) + } + } + + if s.instance.Status.Served == v1alpha1.ServedFalse { + return stop() + } + return nextState(sFnAddFinalizer) +} + +func setInitialServed(ctx context.Context, r *reconciler, s *systemState) error { + servedDockerRegistry, err := GetServedDockerRegistry(ctx, r.k8s.client) + if err != nil { + return err + } + + return setServed(servedDockerRegistry, s) +} + +func setServed(servedDockerRegistry *v1alpha1.DockerRegistry, s *systemState) error { + if servedDockerRegistry == nil { + s.setServed(v1alpha1.ServedTrue) + return nil + } + + s.setServed(v1alpha1.ServedFalse) + s.setState(v1alpha1.StateWarning) + err := fmt.Errorf( + "Only one instance of DockerRegistry is allowed (current served instance: %s/%s). This DockerRegistry CR is redundant. 
Remove it to fix the problem.", + servedDockerRegistry.GetNamespace(), servedDockerRegistry.GetName()) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeConfigured, + v1alpha1.ConditionReasonDuplicated, + err, + ) + return err +} diff --git a/components/operator/internal/state/served_filter_test.go b/components/operator/internal/state/served_filter_test.go new file mode 100644 index 00000000..7b9585de --- /dev/null +++ b/components/operator/internal/state/served_filter_test.go @@ -0,0 +1,136 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiruntime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnServedFilter(t *testing.T) { + t.Run("skip processing when served is false", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Served: v1alpha1.ServedFalse, + }, + }, + } + + nextFn, result, err := sFnServedFilter(context.TODO(), nil, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, nextFn) + }) + + t.Run("do next step when served is true", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Served: v1alpha1.ServedTrue, + }, + }, + } + + nextFn, result, err := sFnServedFilter(context.TODO(), nil, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnAddFinalizer, nextFn) + }) + + t.Run("set served value from nil to true when there is no served dockerregistry on cluster", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{}, + }, + } + + r := &reconciler{ + k8s: k8s{ + client: func() client.Client { + scheme := apiruntime.NewScheme() + require.NoError(t, v1alpha1.AddToScheme(scheme)) + + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects( + fixServedDockerRegistry("test-1", "default", ""), + fixServedDockerRegistry("test-2", "dockerregistry-test", v1alpha1.ServedFalse), + fixServedDockerRegistry("test-3", "dockerregistry-test-2", ""), + fixServedDockerRegistry("test-4", "default", v1alpha1.ServedFalse), + ).Build() + + return client + }(), + }, + } + + nextFn, result, err := sFnServedFilter(context.TODO(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnAddFinalizer, nextFn) + require.Equal(t, v1alpha1.ServedTrue, s.instance.Status.Served) + }) + + t.Run("set served value from nil to false and set condition to error when there is at lease one served dockerregistry on cluster", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{}, + }, + } + + r := &reconciler{ + k8s: k8s{ + client: func() client.Client { + scheme := apiruntime.NewScheme() + require.NoError(t, v1alpha1.AddToScheme(scheme)) + + client := fake.NewClientBuilder(). + WithScheme(scheme). 
+ WithObjects( + fixServedDockerRegistry("test-1", "default", v1alpha1.ServedFalse), + fixServedDockerRegistry("test-2", "dockerregistry-test", v1alpha1.ServedTrue), + fixServedDockerRegistry("test-3", "dockerregistry-test-2", ""), + fixServedDockerRegistry("test-4", "default", v1alpha1.ServedFalse), + ).Build() + + return client + }(), + }, + } + + nextFn, result, err := sFnServedFilter(context.TODO(), r, s) + + expectedErrorMessage := "Only one instance of DockerRegistry is allowed (current served instance: dockerregistry-test/test-2). This DockerRegistry CR is redundant. Remove it to fix the problem." + require.EqualError(t, err, expectedErrorMessage) + require.Nil(t, result) + require.Nil(t, nextFn) + require.Equal(t, v1alpha1.ServedFalse, s.instance.Status.Served) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateWarning, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeConfigured, + metav1.ConditionFalse, + v1alpha1.ConditionReasonDuplicated, + expectedErrorMessage, + ) + }) +} + +func fixServedDockerRegistry(name, namespace string, served v1alpha1.Served) *v1alpha1.DockerRegistry { + return &v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: v1alpha1.DockerRegistryStatus{ + Served: served, + }, + } +} diff --git a/components/operator/internal/state/state.go b/components/operator/internal/state/state.go new file mode 100644 index 00000000..fdbf8a99 --- /dev/null +++ b/components/operator/internal/state/state.go @@ -0,0 +1,65 @@ +package state + +import ( + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" +) + +var requeueResult = &ctrl.Result{ + Requeue: true, +} + +func nextState(next stateFn) (stateFn, *ctrl.Result, error) { + return next, nil, nil +} + +func stopWithEventualError(err error) (stateFn, *ctrl.Result, error) { + return nil, nil, err +} + +func stop() (stateFn, *ctrl.Result, error) { + return nil, nil, nil +} + +func requeue() (stateFn, *ctrl.Result, error) { + return nil, requeueResult, nil +} + +func requeueAfter(duration time.Duration) (stateFn, *ctrl.Result, error) { + return nil, &ctrl.Result{ + RequeueAfter: duration, + }, nil +} + +type fieldsToUpdate []struct { + specField string + statusField *string + fieldName string + defaultValue string +} + +func updateStatusFields(eventRecorder record.EventRecorder, instance *v1alpha1.DockerRegistry, fields fieldsToUpdate) { + for _, field := range fields { + // set default value if spec field is empty + if field.specField == "" { + field.specField = field.defaultValue + } + + if field.specField != *field.statusField { + oldStatusValue := *field.statusField + *field.statusField = field.specField + eventRecorder.Eventf( + instance, + "Normal", + string(v1alpha1.ConditionReasonConfiguration), + "%s set from '%s' to '%s'", + field.fieldName, + oldStatusValue, + field.specField, + ) + } + } +} diff --git a/components/operator/internal/state/state_test.go b/components/operator/internal/state/state_test.go new file mode 100644 index 00000000..b957ac67 --- /dev/null +++ b/components/operator/internal/state/state_test.go @@ -0,0 +1,110 @@ +package state + +import ( + "context" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var ( + testInstalledDockerRegistry = v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Type: string(v1alpha1.ConditionTypeConfigured), + Status: metav1.ConditionTrue, + Reason: string(v1alpha1.ConditionReasonConfiguration), + }, + { + Type: string(v1alpha1.ConditionTypeInstalled), + Status: metav1.ConditionTrue, + Reason: string(v1alpha1.ConditionReasonInstallation), + }, + }, + State: v1alpha1.StateReady, + }, + } +) + +func fixEmptyManifestCache() chart.ManifestCache { + return fixManifestCache("---") +} + +func fixManifestCache(manifest string) chart.ManifestCache { + cache := chart.NewInMemoryManifestCache() + _ = cache.Set(context.Background(), types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, chart.DockerRegistrySpecManifest{Manifest: manifest, CustomFlags: map[string]interface{}{}}) + + return cache +} + +func requireEqualFunc(t *testing.T, expected, actual stateFn) { + require.NotNil(t, actual) + + expectedFnName := getFnName(expected) + actualFnName := getFnName(actual) + + if expectedFnName == actualFnName { + // return if functions are simply same + return + } + + expectedElems := strings.Split(expectedFnName, "/") + actualElems := strings.Split(actualFnName, "/") + + // check package paths (prefix) + require.Equal(t, + strings.Join(expectedElems[0:len(expectedElems)-2], "/"), + strings.Join(actualElems[0:len(actualElems)-2], "/"), + ) + + // check direct fn names (suffix) + require.Equal(t, + getDirectFnName(expectedElems[len(expectedElems)-1]), + getDirectFnName(actualElems[len(actualElems)-1]), + ) +} + +func getDirectFnName(nameSuffix string) string { + elements := strings.Split(nameSuffix, ".") + for i := range elements { + elemI := len(elements) - i - 1 + if !strings.HasPrefix(elements[elemI], "func") { + return elements[elemI] + } + } + + return "" +} + +func getFnName(fn stateFn) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +func requireContainsCondition(t *testing.T, status v1alpha1.DockerRegistryStatus, + conditionType v1alpha1.ConditionType, conditionStatus metav1.ConditionStatus, conditionReason v1alpha1.ConditionReason, conditionMessage string) { + hasExpectedCondition := false + for _, condition := range status.Conditions { + if condition.Type == string(conditionType) { + require.Equal(t, string(conditionReason), condition.Reason) + require.Equal(t, conditionStatus, condition.Status) + require.Equal(t, conditionMessage, condition.Message) + hasExpectedCondition = true + } + } + require.True(t, hasExpectedCondition) +} diff --git a/components/operator/internal/state/update_status.go b/components/operator/internal/state/update_status.go new file mode 100644 index 00000000..dffea574 --- /dev/null +++ b/components/operator/internal/state/update_status.go @@ -0,0 +1,25 @@ +package state + +import ( + "context" + "reflect" + "time" +) + +var ( + requeueDuration = time.Second * 3 +) + +func updateDockerRegistryWithoutStatus(ctx context.Context, r *reconciler, s *systemState) error { + return r.client.Update(ctx, &s.instance) +} + +func updateDockerRegistryStatus(ctx context.Context, r *reconciler, s *systemState) error { + if !reflect.DeepEqual(s.instance.Status, s.statusSnapshot) { + err := r.client.Status().Update(ctx, &s.instance) + 
emitEvent(r, s) + s.saveStatusSnapshot() + return err + } + return nil +} diff --git a/components/operator/internal/state/utils.go b/components/operator/internal/state/utils.go new file mode 100644 index 00000000..ee04ab2e --- /dev/null +++ b/components/operator/internal/state/utils.go @@ -0,0 +1,46 @@ +package state + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/pkg/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func GetDockerRegistryOrServed(ctx context.Context, req ctrl.Request, c client.Client) (*v1alpha1.DockerRegistry, error) { + instance := &v1alpha1.DockerRegistry{} + err := c.Get(ctx, req.NamespacedName, instance) + if err == nil { + return instance, nil + } + if !k8serrors.IsNotFound(err) { + return nil, errors.Wrap(err, "while fetching dockerregistry instance") + } + + instance, err = GetServedDockerRegistry(ctx, c) + if err != nil { + return nil, errors.Wrap(err, "while fetching served dockerregistry instance") + } + return instance, nil +} + +func GetServedDockerRegistry(ctx context.Context, c client.Client) (*v1alpha1.DockerRegistry, error) { + var dockerRegistryList v1alpha1.DockerRegistryList + + err := c.List(ctx, &dockerRegistryList) + + if err != nil { + return nil, err + } + + for _, item := range dockerRegistryList.Items { + if !item.IsServedEmpty() && item.Status.Served == v1alpha1.ServedTrue { + return &item, nil + } + } + + return nil, nil +} diff --git a/components/operator/internal/state/verify.go b/components/operator/internal/state/verify.go new file mode 100644 index 00000000..d326657e --- /dev/null +++ b/components/operator/internal/state/verify.go @@ -0,0 +1,49 @@ +package state + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// verify if all workloads are in ready state +func sFnVerifyResources(_ context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + ready, err := chart.Verify(s.chartConfig) + if err != nil { + r.log.Warnf("error while verifying resource %s: %s", + client.ObjectKeyFromObject(&s.instance), err.Error()) + s.setState(v1alpha1.StateError) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstallationErr, + err, + ) + return stopWithEventualError(err) + } + + if !ready { + return requeueAfter(requeueDuration) + } + + warning := s.warningBuilder.Build() + if warning != "" { + s.setState(v1alpha1.StateWarning) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstalled, + warning, + ) + return stop() + } + + s.setState(v1alpha1.StateReady) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstalled, + "DockerRegistry installed", + ) + return stop() +} diff --git a/components/operator/internal/state/verify_test.go b/components/operator/internal/state/verify_test.go new file mode 100644 index 00000000..f7782896 --- /dev/null +++ b/components/operator/internal/state/verify_test.go @@ -0,0 +1,178 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + 
"github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/kyma-project/docker-registry/components/operator/internal/warning" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testDeployCR = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deploy", + Namespace: "default", + }, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionUnknown, + }, + }, + }, + } +) + +const ( + testDeployManifest = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deploy + namespace: default +` +) + +func Test_sFnVerifyResources(t *testing.T) { + t.Run("ready", func(t *testing.T) { + s := &systemState{ + warningBuilder: warning.NewBuilder(), + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + } + + // verify and return update condition state + next, result, err := sFnVerifyResources(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateReady, status.State) + require.Len(t, status.Conditions, 2) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionTrue, + v1alpha1.ConditionReasonInstalled, + "DockerRegistry installed", + ) + }) + + t.Run("warning", func(t *testing.T) { + s := &systemState{ + warningBuilder: warning.NewBuilder().With("test warning"), + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + // verify and return update condition state + next, result, err := sFnVerifyResources(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateWarning, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionTrue, + v1alpha1.ConditionReasonInstalled, + s.warningBuilder.Build(), + ) + }) + + t.Run("verify error", func(t *testing.T) { + s := &systemState{ + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixManifestCache("\t"), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + // handle verify err and update condition with err + next, result, err := sFnVerifyResources(context.Background(), r, s) + require.EqualError(t, err, "could not parse chart manifest: yaml: found character that cannot start any token") + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateError, status.State) + requireContainsCondition(t, 
status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionFalse, + v1alpha1.ConditionReasonInstallationErr, + "could not parse chart manifest: yaml: found character that cannot start any token", + ) + }) + + t.Run("requeue when resources are not ready", func(t *testing.T) { + client := fake.NewClientBuilder().WithObjects(testDeployCR).Build() + s := &systemState{ + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: func() chart.ManifestCache { + cache := chart.NewInMemoryManifestCache() + _ = cache.Set(context.Background(), types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, chart.DockerRegistrySpecManifest{Manifest: testDeployManifest}) + return cache + }(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + Cluster: chart.Cluster{ + Client: client, + }, + }, + } + r := &reconciler{} + + // return requeue on verification failed + next, result, err := sFnVerifyResources(context.Background(), r, s) + + _, expectedResult, _ := requeueAfter(requeueDuration) + require.NoError(t, err) + require.Equal(t, expectedResult, result) + require.Nil(t, next) + }) +} diff --git a/components/operator/internal/tracing/watcher.go b/components/operator/internal/tracing/watcher.go new file mode 100644 index 00000000..15397c69 --- /dev/null +++ b/components/operator/internal/tracing/watcher.go @@ -0,0 +1,57 @@ +package tracing + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + tracingOTLPService = "telemetry-otlp-traces" +) + +type eventHandler struct{} + +func (e eventHandler) Create(_ context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) { + if event.Object == nil { + return + } + svcName := event.Object.GetName() + if svcName != tracingOTLPService { + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: event.Object.GetName(), + Namespace: event.Object.GetNamespace(), + }}) +} + +func (e eventHandler) Update(_ context.Context, _ event.UpdateEvent, _ workqueue.RateLimitingInterface) { +} + +func (e eventHandler) Delete(_ context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) { + if event.Object == nil { + return + } + svcName := event.Object.GetName() + if svcName != tracingOTLPService { + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: event.Object.GetName(), + Namespace: event.Object.GetNamespace(), + }}) +} + +func (e eventHandler) Generic(_ context.Context, _ event.GenericEvent, _ workqueue.RateLimitingInterface) { +} + +var _ handler.EventHandler = eventHandler{} + +func ServiceCollectorWatcher() handler.EventHandler { + return &eventHandler{} +} diff --git a/components/operator/internal/warning/warning.go b/components/operator/internal/warning/warning.go new file mode 100644 index 00000000..44a32f76 --- /dev/null +++ b/components/operator/internal/warning/warning.go @@ -0,0 +1,27 @@ +package warning + +import ( + "fmt" + "strings" +) + +type Builder struct { + warnings []string +} + +func NewBuilder() *Builder { + return &Builder{} +} + +func (w *Builder) With(warning string) *Builder { + w.warnings = append(w.warnings, warning) + return w +} + +func (w *Builder) Build() string { + msg 
:= "" + if len(w.warnings) > 0 { + msg = fmt.Sprintf("Warning: %s", strings.Join(w.warnings, "; ")) + } + return msg +} diff --git a/components/operator/internal/warning/warning_test.go b/components/operator/internal/warning/warning_test.go new file mode 100644 index 00000000..c34d0bfa --- /dev/null +++ b/components/operator/internal/warning/warning_test.go @@ -0,0 +1,22 @@ +package warning + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuilder_Build(t *testing.T) { + t.Run("build multiple warnings", func(t *testing.T) { + warning := NewBuilder(). + With("warn 1"). + With("warn 2"). + Build() + + require.Equal(t, "Warning: warn 1; warn 2", warning) + }) + t.Run("build empty warning", func(t *testing.T) { + warning := NewBuilder().Build() + require.Equal(t, "", warning) + }) +} diff --git a/components/operator/main.go b/components/operator/main.go new file mode 100644 index 00000000..67b8d80f --- /dev/null +++ b/components/operator/main.go @@ -0,0 +1,218 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + "os" + "time" + + "github.com/kyma-project/docker-registry/components/operator/internal/config" + "github.com/kyma-project/docker-registry/components/operator/internal/gitrepository" + "github.com/pkg/errors" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ uberzap "go.uber.org/zap" + uberzapcore "go.uber.org/zap/zapcore" + _ "k8s.io/client-go/plugin/pkg/client/auth" + + k8s "github.com/kyma-project/docker-registry/components/operator/internal/controllers/kubernetes" + internalresource "github.com/kyma-project/docker-registry/components/operator/internal/resource" + corev1 "k8s.io/api/core/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + operatorv1alpha1 "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/controllers" + //+kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + syncPeriod = time.Minute * 30 + cleanupTimeout = time.Second * 10 +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(operatorv1alpha1.AddToScheme(scheme)) + + utilruntime.Must(apiextensionsscheme.AddToScheme(scheme)) + + //+kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + + opts := zap.Options{ + Development: true, + TimeEncoder: uberzapcore.TimeEncoderOfLayout("Jan 02 15:04:05.000000000"), + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + cfg, err := config.GetConfig("") + if err != nil { + setupLog.Error(err, "while getting config") + os.Exit(1) + } + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + ctx, cancel := context.WithTimeout(context.Background(), cleanupTimeout) + defer cancel() + + setupLog.Info("cleaning orphan deprecated resources") + err = cleanupOrphanDeprecatedResources(ctx) + if err != nil { + setupLog.Error(err, "while removing orphan resources") + os.Exit(1) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: ctrlmetrics.Options{ + BindAddress: metricsAddr, + }, + HealthProbeBindAddress: probeAddr, + Cache: ctrlcache.Options{ + SyncPeriod: &syncPeriod, + }, + Client: ctrlclient.Options{ + Cache: &ctrlclient.CacheOptions{ + DisableFor: []ctrlclient.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + }, + }, + }, + // TODO: use our own logger - now eventing use logger with different message format + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + config := uberzap.NewDevelopmentConfig() + config.EncoderConfig.TimeKey = "timestamp" + config.EncoderConfig.EncodeTime = opts.TimeEncoder + config.DisableCaller = true + + reconcilerLogger, err := config.Build() + if err != nil { + setupLog.Error(err, "unable to setup logger") + os.Exit(1) + } + + reconciler := controllers.NewDockerRegistryReconciler( + mgr.GetClient(), mgr.GetConfig(), + mgr.GetEventRecorderFor("dockerregistry-operator"), + reconcilerLogger.Sugar(), + cfg.ChartPath) + + //TODO: get it from some configuration + configKubernetes := 
k8s.Config{ + BaseNamespace: "kyma-system", + BaseDefaultSecretName: registry.SecretName, + ExcludedNamespaces: []string{"kyma-system"}, + ConfigMapRequeueDuration: time.Minute, + SecretRequeueDuration: time.Minute, + ServiceAccountRequeueDuration: time.Minute, + } + + resourceClient := internalresource.New(mgr.GetClient(), scheme) + secretSvc := k8s.NewSecretService(resourceClient, configKubernetes) + configMapSvc := k8s.NewConfigMapService(resourceClient, configKubernetes) + serviceAccountSvc := k8s.NewServiceAccountService(resourceClient, configKubernetes) + + if err = reconciler.SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DockerRegistry") + os.Exit(1) + } + + namespaceLogger, err := config.Build() + if err != nil { + setupLog.Error(err, "unable to setup logger") + os.Exit(1) + } + + if err := k8s.NewNamespace(mgr.GetClient(), namespaceLogger.Sugar(), configKubernetes, configMapSvc, secretSvc, serviceAccountSvc). + SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create Namespace controller") + os.Exit(1) + } + + secretLogger, err := config.Build() + if err != nil { + setupLog.Error(err, "unable to setup logger") + os.Exit(1) + } + + if err := k8s.NewSecret(mgr.GetClient(), secretLogger.Sugar(), configKubernetes, secretSvc). + SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create Secret controller") + os.Exit(1) + } + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +func cleanupOrphanDeprecatedResources(ctx context.Context) error { + // We are going to talk to the API server _before_ we start the manager. + // Since the default manager client reads from cache, we will get an error. + // So, we create a "serverClient" that would read from the API directly. + // We only use it here, this only runs at start up, so it shouldn't be to much for the API + serverClient, err := ctrlclient.New(ctrl.GetConfigOrDie(), ctrlclient.Options{ + Scheme: scheme, + }) + if err != nil { + return errors.Wrap(err, "failed to create a server client") + } + + return gitrepository.Cleanup(ctx, serverClient) +} diff --git a/config.yaml b/config.yaml new file mode 100644 index 00000000..423459f8 --- /dev/null +++ b/config.yaml @@ -0,0 +1,7 @@ +# Samples Config +configs: +# TODO: Add optional manifest installation chart flags and value overrides +# The format below should be followed +# - name: nginx-ingress +# clientConfig: "CreateNamespace=true,Namespace=jakobs-new" +# overrides: "x=4" \ No newline at end of file diff --git a/config/docker-registry/.helmignore b/config/docker-registry/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/config/docker-registry/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/config/docker-registry/Chart.yaml b/config/docker-registry/Chart.yaml
new file mode 100644
index 00000000..8988fbf1
--- /dev/null
+++ b/config/docker-registry/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Kyma component 'docker registry'
+name: docker-registry
+version: 1.0.0
+home: https://kyma-project.io
+icon: https://github.com/kyma-project/kyma/blob/main/logo.png?raw=true
+dependencies:
+  - name: docker-registry
\ No newline at end of file
diff --git a/config/docker-registry/charts/docker-registry/.helmignore b/config/docker-registry/charts/docker-registry/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/config/docker-registry/charts/docker-registry/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/config/docker-registry/charts/docker-registry/Chart.yaml b/config/docker-registry/charts/docker-registry/Chart.yaml
new file mode 100644
index 00000000..c8e41309
--- /dev/null
+++ b/config/docker-registry/charts/docker-registry/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+description: A Helm chart for Docker Registry
+name: docker-registry
+version: 1.9.1
+appVersion: 2.7.1
+home: https://hub.docker.com/_/registry/
+icon: https://hub.docker.com/public/images/logos/mini-logo.svg
+sources:
+  - https://github.com/docker/distribution-library-image
+maintainers:
+  - name: jpds
+    email: jpds@protonmail.com
+  - name: rendhalver
+    email: pete.brown@powerhrg.com
diff --git a/config/docker-registry/charts/docker-registry/README.md b/config/docker-registry/charts/docker-registry/README.md
new file mode 100644
index 00000000..b9ff4612
--- /dev/null
+++ b/config/docker-registry/charts/docker-registry/README.md
@@ -0,0 +1,79 @@
+# Docker Registry Helm Chart
+
+This directory contains a Kubernetes chart to deploy a private Docker Registry.
+
+## Prerequisites
+
+* Persistent Volume (PV) support on underlying infrastructure (if persistence is required)
+
+## Chart Details
+
+This chart implements the Docker Registry deployment.
+
+## Installing the Chart
+
+To install the chart, use the following command:
+
+```bash
+helm install stable/docker-registry
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the `docker-registry` chart and
+their default values.
+ +| Parameter | Description | Default | +|:----------------------------|:-------------------------------------------------------------------------------------------|:----------------| +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.repository` | Container image to use | `registry` | +| `image.tag` | Container image tag to deploy | `2.7.1` | +| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` | +| `persistence.enabled` | Whether to use a PVC for the Docker storage | `false` | +| `persistence.deleteEnabled` | Enable the deletion of image blobs and manifests by digest | `nil` | +| `persistence.size` | Amount of space to claim for PVC | `10Gi` | +| `persistence.storageClass` | Storage Class to use for PVC | `-` | +| `persistence.existingClaim` | Name of an existing PVC to use for config | `nil` | +| `service.port` | TCP port on which the service is exposed | `5000` | +| `service.type` | Service type | `ClusterIP` | +| `service.clusterIP` | If `service.type` is `ClusterIP` and this is non-empty, sets the cluster IP of the service | `nil` | +| `service.nodePort` | If `service.type` is `NodePort` and this is non-empty, sets the node port of the service | `nil` | +| `replicaCount` | Kubernetes replicas | `1` | +| `updateStrategy` | update strategy for deployment | `{}` | +| `podAnnotations` | Annotations for Pod | `{}` | +| `podLabels` | Labels for Pod | `{}` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `resources.limits.cpu` | Container requested CPU | `nil` | +| `resources.limits.memory` | Container requested memory | `nil` | +| `storage` | Storage system to use | `filesystem` | +| `tlsSecretName` | Name of Secret for TLS certs | `nil` | +| `secrets.htpasswd` | Htpasswd authentication | `nil` | +| `secrets.s3.accessKey` | Access Key for S3 configuration | `nil` | +| `secrets.s3.secretKey` | Secret Key for S3 configuration | `nil` | +| `secrets.swift.username` | Username for Swift configuration | `nil` | +| `secrets.swift.password` | Password for Swift configuration | `nil` | +| `haSharedSecret` | Shared Secret for Registry | `nil` | +| `configData` | Configuration hash for Docker | `nil` | +| `s3.region` | S3 region | `nil` | +| `s3.regionEndpoint` | S3 region endpoint | `nil` | +| `s3.bucket` | S3 bucket name | `nil` | +| `s3.encrypt` | Store images in encrypted format | `nil` | +| `s3.secure` | Use HTTPS | `nil` | +| `swift.authurl` | Swift authurl | `nil` | +| `swift.container` | Swift container | `nil` | +| `nodeSelector` | node labels for Pod assignment | `{}` | +| `tolerations` | Pod tolerations | `[]` | +| `ingress.enabled` | If true, Ingress will be created | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Ingress labels | `{}` | +| `ingress.path` | Ingress service path | `/` | +| `ingress.hosts` | Ingress hostnames | `[]` | +| `ingress.tls` | Ingress TLS configuration (YAML) | `[]` | +| `extraVolumeMounts` | Additional volumeMounts to the registry container | `[]` | +| `extraVolumes` | Additional volumes to the pod | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument with +`helm install`. + +To generate htpasswd file, run this Docker command: +`docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd`. 
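+
+For example, a minimal sketch of overriding selected parameters from the table above (the command mirrors the install example earlier in this README; the values shown are illustrative, not defaults):
+
+```bash
+# enable a PVC for image storage and expose the registry on a node port
+helm install stable/docker-registry \
+  --set persistence.enabled=true \
+  --set persistence.size=20Gi \
+  --set service.type=NodePort
+```
+
+The same overrides can also be collected in a values file and passed with `-f` instead of repeating `--set` flags.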
diff --git a/config/docker-registry/charts/docker-registry/templates/_helpers.tpl b/config/docker-registry/charts/docker-registry/templates/_helpers.tpl new file mode 100644 index 00000000..a91077ef --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/_helpers.tpl @@ -0,0 +1,24 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "docker-registry.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "docker-registry.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/configmap.yaml b/config/docker-registry/charts/docker-registry/templates/configmap.yaml new file mode 100644 index 00000000..08ee9011 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/configmap.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "docker-registry.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + config.yml: |- +{{ toYaml .Values.configData | indent 4 }} diff --git a/config/docker-registry/charts/docker-registry/templates/deployment.yaml b/config/docker-registry/charts/docker-registry/templates/deployment.yaml new file mode 100644 index 00000000..338ca576 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/deployment.yaml @@ -0,0 +1,231 @@ +{{- $rollme := include "tplValue" ( dict "value" .Values.rollme "context" . ) -}} +{{- $registryHTTPSecret := include "tplValue" ( dict "value" .Values.registryHTTPSecret "context" . ) -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicaCount }} + strategy: + type: Recreate + rollingUpdate: null + minReadySeconds: 5 + template: + metadata: + labels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: + rollme: {{ $rollme | quote }} +{{- if $.Values.podAnnotations }} +{{ toYaml $.Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + priorityClassName: "{{ .Values.global.dockerregistryPriorityClassName }}" +{{- if .Values.pod.securityContext }} + securityContext: + {{- include "tplValue" ( dict "value" .Values.pod.securityContext "context" . 
) | nindent 12 }} +{{- end }} + hostNetwork: false # Optional. The default is false if the entry is not there. + hostPID: false # Optional. The default is false if the entry is not there. + hostIPC: false # Optional. The default is false if the entry is not there. + initContainers: + - name: generate-htpasswd + image: "{{ include "imageurl" (dict "reg" .Values.global.containerRegistry "img" .Values.global.images.registry) }}" +{{- if .Values.initContainers.securityContext }} + securityContext: + {{- include "tplValue" ( dict "value" .Values.initContainers.securityContext "context" . ) | nindent 12 }} +{{- end }} + volumeMounts: + {{- if eq .Values.storage "filesystem" }} + - name: data + mountPath: /var/lib/registry/ + {{- end }} + - name: registry-credentials + mountPath: /regcred + readOnly: true + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + command: + - sh + - -ec + - | + htpasswd -Bbn $(cat /regcred/username.txt) $(cat /regcred/password.txt) > ./data/htpasswd + echo "Generated htpasswd file for docker-registry..." +{{- if eq .Values.storage "filesystem" }} + chown -R 1000:1000 "/var/lib/registry/" +{{- end }} + + containers: + - name: {{ .Chart.Name }} + image: "{{ include "imageurl" (dict "reg" .Values.global.containerRegistry "img" .Values.global.images.registry) }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if .Values.containers.securityContext }} + securityContext: + {{- include "tplValue" ( dict "value" .Values.containers.securityContext "context" . ) | nindent 12 }} +{{- end }} + command: + - /bin/registry + - serve + - /etc/docker/registry/config.yml + ports: + - containerPort: 5000 + livenessProbe: + httpGet: +{{- if .Values.tlsSecretName }} + scheme: HTTPS +{{- end }} + path: / + port: 5000 + readinessProbe: + httpGet: +{{- if .Values.tlsSecretName }} + scheme: HTTPS +{{- end }} + path: / + port: 5000 + resources: +{{ toYaml .Values.resources | indent 12 }} + env: + - name: REGISTRY_AUTH + value: "htpasswd" + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: "Registry Realm" + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: "/data/htpasswd" + - name: REGISTRY_HTTP_SECRET + # https://docs.docker.com/registry/configuration/#http, there's no problem that it is plainly seen + # using kubectl describe + value: {{ $registryHTTPSecret | quote }} +{{- if .Values.tlsSecretName }} + - name: REGISTRY_HTTP_TLS_CERTIFICATE + value: /etc/ssl/docker/tls.crt + - name: REGISTRY_HTTP_TLS_KEY + value: /etc/ssl/docker/tls.key +{{- end }} +{{- if eq .Values.storage "filesystem" }} + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: "/var/lib/registry" +{{- else if eq .Values.storage "azure" }} + - name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureAccountName + - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureAccountKey + - name: REGISTRY_STORAGE_AZURE_CONTAINER + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureContainer +{{- else if eq .Values.storage "s3" }} + {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }} + - name: REGISTRY_STORAGE_S3_ACCESSKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: s3AccessKey + - name: REGISTRY_STORAGE_S3_SECRETKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . 
}}-secret + key: s3SecretKey + {{- end }} + - name: REGISTRY_STORAGE_S3_REGION + value: {{ required ".Values.s3.region is required" .Values.s3.region }} + {{- if .Values.s3.regionEndpoint }} + - name: REGISTRY_STORAGE_S3_REGIONENDPOINT + value: {{ .Values.s3.regionEndpoint }} + {{- end }} + - name: REGISTRY_STORAGE_S3_BUCKET + value: {{ required ".Values.s3.bucket is required" .Values.s3.bucket }} + {{- if .Values.s3.encrypt }} + - name: REGISTRY_STORAGE_S3_ENCRYPT + value: {{ .Values.s3.encrypt | quote }} + {{- end }} + {{- if .Values.s3.secure }} + - name: REGISTRY_STORAGE_S3_SECURE + value: {{ .Values.s3.secure | quote }} + {{- end }} +{{- else if eq .Values.storage "swift" }} + - name: REGISTRY_STORAGE_SWIFT_AUTHURL + value: {{ required ".Values.swift.authurl is required" .Values.swift.authurl }} + - name: REGISTRY_STORAGE_SWIFT_USERNAME + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: swiftUsername + - name: REGISTRY_STORAGE_SWIFT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: swiftPassword + - name: REGISTRY_STORAGE_SWIFT_CONTAINER + value: {{ required ".Values.swift.container is required" .Values.swift.container }} +{{- end }} + volumeMounts: +{{- if eq .Values.storage "filesystem" }} + - name: data + mountPath: /var/lib/registry/ +{{- end }} + - name: "{{ template "docker-registry.fullname" . }}-config" + mountPath: "/etc/docker/registry" +{{- if .Values.tlsSecretName }} + - mountPath: /etc/ssl/docker + name: tls-cert + readOnly: true +{{- end }} +{{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} +{{- end }} + +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} + volumes: +{{- if eq .Values.storage "filesystem" }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "docker-registry.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end }} + - name: {{ template "docker-registry.fullname" . }}-config + configMap: + name: {{ template "docker-registry.fullname" . }}-config +{{- if .Values.tlsSecretName }} + - name: tls-cert + secret: + secretName: {{ .Values.tlsSecretName }} +{{- end }} +{{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} +{{- end }} diff --git a/config/docker-registry/charts/docker-registry/templates/ingress.yaml b/config/docker-registry/charts/docker-registry/templates/ingress.yaml new file mode 100644 index 00000000..3060a29f --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/ingress.yaml @@ -0,0 +1,37 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "docker-registry.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $path := .Values.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: {{ $path }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml b/config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000..fa5f1a7c --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/priorityclass.yaml b/config/docker-registry/charts/docker-registry/templates/priorityclass.yaml new file mode 100644 index 00000000..4717d7d2 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/priorityclass.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ .Values.global.dockerregistryPriorityClassName }} +value: {{ .Values.global.dockerregistryPriorityClassValue }} +globalDefault: false +description: "Scheduling priority of dockerregistry components. By default, dockerregistry components should not be blocked by unschedulable user workloads." \ No newline at end of file diff --git a/config/docker-registry/charts/docker-registry/templates/pvc.yaml b/config/docker-registry/charts/docker-registry/templates/pvc.yaml new file mode 100644 index 00000000..96da5061 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/pvc.yaml @@ -0,0 +1,27 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/secret.yaml b/config/docker-registry/charts/docker-registry/templates/secret.yaml new file mode 100644 index 00000000..c5e04ab3 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/secret.yaml @@ -0,0 +1,31 @@ +{{- if or (eq .Values.storage "azure") (eq .Values.storage "s3") (eq .Values.storage "swift") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "docker-registry.fullname" . }}-secret + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if eq .Values.storage "azure" }} + {{- if and .Values.secrets.azure.accountName .Values.secrets.azure.accountKey .Values.secrets.azure.container }} + azureAccountName: {{ .Values.secrets.azure.accountName | b64enc | quote }} + azureAccountKey: {{ .Values.secrets.azure.accountKey | b64enc | quote }} + azureContainer: {{ .Values.secrets.azure.container | b64enc | quote }} + {{- end }} + {{- else if eq .Values.storage "s3" }} + {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }} + s3AccessKey: {{ .Values.secrets.s3.accessKey | b64enc | quote }} + s3SecretKey: {{ .Values.secrets.s3.secretKey | b64enc | quote }} + {{- end }} + {{- else if eq .Values.storage "swift" }} + {{- if and .Values.secrets.swift.username .Values.secrets.swift.password }} + swiftUsername: {{ .Values.secrets.swift.username | b64enc | quote }} + swiftPassword: {{ .Values.secrets.swift.password | b64enc | quote }} + {{- end }} + {{- end }} + {{- end}} \ No newline at end of file diff --git a/config/docker-registry/charts/docker-registry/templates/service.yaml b/config/docker-registry/charts/docker-registry/templates/service.yaml new file mode 100644 index 00000000..cda1a453 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: NodePort + ports: + - port: {{ .Values.global.registryServicePort }} + protocol: TCP + name: http-{{ .Values.service.name }} + targetPort: {{ .Values.global.registryServicePort }} + nodePort: {{ .Values.global.registryNodePort }} + selector: + app: {{ template "docker-registry.name" . 
}} + release: {{ .Release.Name }} diff --git a/config/docker-registry/charts/docker-registry/values.yaml b/config/docker-registry/charts/docker-registry/values.yaml new file mode 100644 index 00000000..62756567 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/values.yaml @@ -0,0 +1,171 @@ +# Default values for docker-registry. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +replicaCount: 1 + +updateStrategy: + type: Recreate + rollingUpdate: null + # maxSurge: 1 + # maxUnavailable: 0 + +image: + pullPolicy: IfNotPresent +# imagePullSecrets: + # - name: docker +service: + name: registry + port: "{{ .Values.global.registryServicePort }}" # same as configData.http.addr + annotations: {} +ingress: + enabled: false + path: / + # Used to create an Ingress record. + hosts: + - chart-example.local + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 400m + memory: 800Mi + requests: + cpu: 10m + memory: 300Mi + +podAnnotations: + sidecar.istio.io/inject: "false" +podLabels: {} + +persistence: + accessMode: 'ReadWriteOnce' + enabled: true + size: 20Gi + # storageClass: '-' + +# set the type of filesystem to use: filesystem, s3. +# If filesystem is used, you should also add it to configData, below +storage: filesystem + +# Set this to name of secret for tls certs +# tlsSecretName: registry.docker.example.com + +# Secrets for Azure +# azure: +# accountName: "" +# accountKey: "" +# container: "" +# Secrets for S3 access and secret keys +# s3: +# accessKey: "" +# secretKey: "" +# Secrets for Swift username and password +# swift: +# username: "" +# password: "" + +# Options for s3 storage type: +# s3: +# region: us-east-1 +# regionEndpoint: s3.us-east-1.amazonaws.com +# bucket: my-bucket +# encrypt: false +# secure: true + +# Options for swift storage type: +# swift: +# authurl: http://swift.example.com/ +# container: my-container + +# https://docs.docker.com/registry/configuration/ +configData: # example: https://github.com/docker/distribution/blob/master/cmd/registry/config-dev.yml + version: 0.1 + log: + formatter: json + fields: + service: registry + storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry + http: + addr: :5000 # same as .Values.service.port + headers: + X-Content-Type-Options: [nosniff] + debug: + addr: :5001 + prometheus: + enabled: true + path: /metrics + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + +containers: + # the following guidelines should be followed for this https://github.com/kyma-project/community/tree/main/concepts/psp-replacement + securityContext: + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + procMount: default # Optional. The default is false if the entry is not there. 
+ readOnlyRootFilesystem: true # Mandatory + +initContainers: + # the following guidelines should be followed for this https://github.com/kyma-project/community/tree/main/concepts/psp-replacement + securityContext: + # this is required to allow the initContainer to chmod the volumemount for the registry storage volume. This is incompatible with the security requirements above and should be fixed in the future. + runAsUser: 0 + runAsGroup: 0 + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + add: ["CHOWN"] + procMount: default # Optional. The default is false if the entry is not there. + readOnlyRootFilesystem: true # Mandatory + +pod: + # the following guidelines should be followed for this https://github.com/kyma-project/community/tree/main/concepts/psp-replacement + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + seccompProfile: # Optional. This option can also be set on container level but it is recommended to set it on Pod level and leave it undefined on container level. + type: RuntimeDefault + + +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 2 + +nodeSelector: {} + +tolerations: [] + +extraVolumeMounts: [] + +extraVolumes: [] + +nameOverride: +fullnameOverride: + +destinationRule: + enabled: true + +rollme: "" +registryHTTPSecret: "" diff --git a/config/docker-registry/templates/_helpers.tpl b/config/docker-registry/templates/_helpers.tpl new file mode 100644 index 00000000..8fc39071 --- /dev/null +++ b/config/docker-registry/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{- include "tplValue" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +*/}} +{{- define "tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
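Note: unlike "fullname" above, this helper does not derive anything from the release name;
it always renders the fixed string "internal-docker-registry". This matches the
fullnameOverride set for the docker-registry subchart in values.yaml and is what
registry-config.yaml uses to build the in-cluster push address
(internal-docker-registry.<namespace>.svc.cluster.local:<registryServicePort>).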
+*/}} +{{- define "registry-fullname" -}} +{{- "internal-docker-registry" -}} +{{- end -}} + + +{{/* +Create a URL for container images +*/}} +{{- define "imageurl" -}} +{{- $registry := default $.reg.path $.img.containerRegistryPath -}} +{{- $path := ternary (print $registry) (print $registry "/" $.img.directory) (empty $.img.directory) -}} +{{- $version := ternary (print ":" $.img.version) (print "@sha256:" $.img.sha) (empty $.img.sha) -}} +{{- print $path "/" $.img.name $version -}} +{{- end -}} diff --git a/config/docker-registry/templates/registry-config.yaml b/config/docker-registry/templates/registry-config.yaml new file mode 100644 index 00000000..b099644f --- /dev/null +++ b/config/docker-registry/templates/registry-config.yaml @@ -0,0 +1,21 @@ +{{- $username := include "tplValue" ( dict "value" .Values.dockerRegistry.username "context" . ) -}} +{{- $password := include "tplValue" ( dict "value" .Values.dockerRegistry.password "context" . ) -}} +{{- $encodedUsernamePassword := printf "%s:%s" $username $password | b64enc }} +{{- $internalRegPullAddr := printf "localhost:%d" (int .Values.global.registryNodePort) }} +{{- $internalRegPushAddr := printf "%s.%s.svc.cluster.local:%d" (include "registry-fullname" . ) .Release.Namespace ( int .Values.global.registryServicePort) }} + +apiVersion: v1 +kind: Secret +type: kubernetes.io/dockerconfigjson +metadata: + name: internal-dockerregistry-config + namespace: {{ .Release.Namespace }} + labels: + dockerregistry.kyma-project.io/config: credentials +data: + username: "{{ $username | b64enc }}" + password: "{{ $password | b64enc }}" + isInternal: {{ "true" | b64enc }} + pullRegAddr: {{ $internalRegPullAddr | b64enc }} + pushRegAddr: "{{ $internalRegPushAddr | b64enc }}" + .dockerconfigjson: "{{- (printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}, \"%s\": {\"auth\": \"%s\"}}}" $internalRegPushAddr $encodedUsernamePassword $internalRegPullAddr $encodedUsernamePassword) | b64enc }}" diff --git a/config/docker-registry/values.yaml b/config/docker-registry/values.yaml new file mode 100644 index 00000000..ffc9f205 --- /dev/null +++ b/config/docker-registry/values.yaml @@ -0,0 +1,47 @@ +# Default values for dockerregistry. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +fullnameOverride: "dockerregistry" +global: + registryServicePort: 5000 + registryNodePort: 32137 + containerRegistry: + path: europe-docker.pkg.dev/kyma-project + images: + registry: + name: "tpi/registry" + version: "2.8.1-1ae4c190" + directory: "prod" + dockerregistryPriorityClassValue: 2000000 + dockerregistryPriorityClassName: "dockerregistry-priority" +dockerRegistry: + username: "{{ randAlphaNum 20 | b64enc }}" # for gcr "_json_key" + password: "{{ randAlphaNum 40 | b64enc }}" # for gcr data from json key + # This is the registry address, for dockerhub it's username, for other it's url. + registryAddress: "" + # This is the server address of the registry which will be used to create docker configuration. 
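# The username and password above are rendered through "tplValue", so by default each
# installation gets fresh random credentials; templates/registry-config.yaml then stores them,
# together with the pull and push addresses, in the internal-dockerregistry-config Secret.
# A quick way to read them back after installation (sketch; assumes the release namespace is
# kyma-system):
#   kubectl get secret internal-dockerregistry-config -n kyma-system -o jsonpath='{.data.username}' | base64 -d
#   kubectl get secret internal-dockerregistry-config -n kyma-system -o jsonpath='{.data.password}' | base64 -d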
+ serverAddress: "" +docker-registry: + fullnameOverride: "internal-docker-registry" + destinationRule: + enabled: true + secrets: + haSharedSecret: "secret" + htpasswd: "generated-in-init-container" + extraVolumeMounts: + - name: htpasswd-data + mountPath: /data + extraVolumes: + - name: registry-credentials + secret: + secretName: internal-dockerregistry-config + items: + - key: username + path: username.txt + - key: password + path: password.txt + - name: htpasswd-data + emptyDir: {} + rollme: "{{ randAlphaNum 5}}" + registryHTTPSecret: "{{ randAlphaNum 16 | b64enc }}" diff --git a/config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml b/config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml new file mode 100644 index 00000000..088e8e70 --- /dev/null +++ b/config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: dockerregistries.operator.kyma-project.io +spec: + group: operator.kyma-project.io + names: + kind: DockerRegistry + listKind: DockerRegistryList + plural: dockerregistries + singular: dockerregistry + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Configured')].status + name: Configured + type: string + - jsonPath: .status.conditions[?(@.type=='Installed')].status + name: Installed + type: string + - jsonPath: .metadata.generation + name: generation + type: integer + - jsonPath: .metadata.creationTimestamp + name: age + type: date + - jsonPath: .status.state + name: state + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: DockerRegistry is the Schema for the dockerregistry API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DockerRegistrySpec defines the desired state of DockerRegistry + properties: + healthzLivenessTimeout: + description: Sets the timeout for the Function health check. The default + value in seconds is `10` + type: string + type: object + status: + properties: + conditions: + description: Conditions associated with CustomStatus. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + healthzLivenessTimeout: + type: string + secretName: + type: string + served: + description: |- + Served signifies that current DockerRegistry is managed. + Value can be one of ("True", "False"). + enum: + - "True" + - "False" + type: string + state: + description: |- + State signifies current state of DockerRegistry. + Value can be one of ("Ready", "Processing", "Error", "Deleting"). 
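# Once the operator is running, the module can be enabled with the bundled sample CR and this
# state field observed on the resource (sketch; see config/samples):
#   kubectl apply -f config/samples/default-dockerregistry-cr.yaml
#   kubectl get dockerregistries.operator.kyma-project.io -n kyma-system -w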
+ enum: + - Processing + - Deleting + - Ready + - Error + - Warning + type: string + required: + - served + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/operator/base/crd/kustomization.yaml b/config/operator/base/crd/kustomization.yaml new file mode 100644 index 00000000..db1457e0 --- /dev/null +++ b/config/operator/base/crd/kustomization.yaml @@ -0,0 +1,10 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/operator.kyma-project.io_dockerregistries.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/config/operator/base/crd/kustomizeconfig.yaml b/config/operator/base/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/config/operator/base/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/operator/base/deployment/deployment.yaml b/config/operator/base/deployment/deployment.yaml new file mode 100644 index 00000000..33436299 --- /dev/null +++ b/config/operator/base/deployment/deployment.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operator + namespace: system + labels: + control-plane: operator + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: dockerregistry-operator + app.kubernetes.io/component: operator + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: operator + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: operator + sidecar.istio.io/inject: "false" + spec: + securityContext: + runAsNonRoot: true + containers: + - command: + - /operator + image: controller:latest + name: manager + env: + - name: DOCKERREGISTRY_MANAGER_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 1000m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: operator + terminationGracePeriodSeconds: 10 diff --git a/config/operator/base/deployment/kustomization.yaml b/config/operator/base/deployment/kustomization.yaml new file mode 100644 index 00000000..2d833f82 --- /dev/null +++ b/config/operator/base/deployment/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- deployment.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: 
dockerregistry-operator + newTag: 2d332b272e2a diff --git a/config/operator/base/kustomization.yaml b/config/operator/base/kustomization.yaml new file mode 100644 index 00000000..95f2c701 --- /dev/null +++ b/config/operator/base/kustomization.yaml @@ -0,0 +1,20 @@ +# Adds namespace to all resources. +namespace: kyma-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: dockerregistry- + +# Labels to add to all resources and selectors. +commonLabels: + app.kubernetes.io/component: dockerregistry-operator.kyma-project.io + + +resources: +- ./crd +- ./deployment +- ./rbac +- ./ui-extensions diff --git a/config/operator/base/rbac/editor_role.yaml b/config/operator/base/rbac/editor_role.yaml new file mode 100644 index 00000000..00c0c261 --- /dev/null +++ b/config/operator/base/rbac/editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit dockerregistry. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dockerregistry-operator-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator-editor-role +rules: +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/status + verbs: + - get diff --git a/config/operator/base/rbac/kustomization.yaml b/config/operator/base/rbac/kustomization.yaml new file mode 100644 index 00000000..b508482b --- /dev/null +++ b/config/operator/base/rbac/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your operator will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. 
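# When this package is built through config/operator/base, the kyma-system namespace and the
# dockerregistry- name prefix from the base kustomization are applied on top, so a rendered
# installation can be sanity-checked with, for example:
#   kubectl apply -k config/operator/base
#   kubectl get serviceaccount dockerregistry-operator -n kyma-system
#   kubectl get clusterrolebinding dockerregistry-operator-rolebinding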
+- service_account.yaml +- role.yaml +- role_binding.yaml + diff --git a/config/operator/base/rbac/role.yaml b/config/operator/base/rbac/role.yaml new file mode 100644 index 00000000..45f4f444 --- /dev/null +++ b/config/operator/base/rbac/role.yaml @@ -0,0 +1,288 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments/status + verbs: + - get +- apiGroups: + - apps + resources: + - replicasets + verbs: + - list +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs/status + verbs: + - get +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/status + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + verbs: + - create + - delete + - deletecollection + - get + - list + - 
patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - scheduling.k8s.io + resources: + - priorityclasses + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/operator/base/rbac/role_binding.yaml b/config/operator/base/rbac/role_binding.yaml new file mode 100644 index 00000000..3a699c7d --- /dev/null +++ b/config/operator/base/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: dockerregistry-operator-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-role +subjects: +- kind: ServiceAccount + name: operator + namespace: system diff --git a/config/operator/base/rbac/service_account.yaml b/config/operator/base/rbac/service_account.yaml new file mode 100644 index 00000000..c60ca4da --- /dev/null +++ b/config/operator/base/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: dockerregistry-operator-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator + namespace: system diff --git a/config/operator/base/rbac/viewer_role.yaml b/config/operator/base/rbac/viewer_role.yaml new file mode 100644 index 00000000..61a95ad7 --- /dev/null +++ b/config/operator/base/rbac/viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view dockerregistry. 
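# Note that this role (like editor_role.yaml) is not listed in rbac/kustomization.yaml, so it
# is not rendered by default. If it is added there, the base namePrefix turns it into
# dockerregistry-operator-viewer-role, and read-only access could then be granted with, for
# example:
#   kubectl create clusterrolebinding dockerregistry-viewer --clusterrole=dockerregistry-operator-viewer-role --user=<user>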
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dockerregistry-operator-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator-viewer-role +rules: +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries + verbs: + - get + - list + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/status + verbs: + - get diff --git a/config/operator/base/ui-extensions/dockerregistry/details b/config/operator/base/ui-extensions/dockerregistry/details new file mode 100644 index 00000000..d5cee36c --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/details @@ -0,0 +1,91 @@ +header: + - name: Ready + source: status.state + widget: Badge + highlights: + positive: + - 'Ready' +body: + - name: Desired Specification + widget: Panel + children: + - name: Docker Registry + visibility: $root.spec.dockerRegistry.enableInternal = true + source: spec.dockerRegistry.enableInternal?"INTERNAL":"" + - name: Docker Registry + visibility: '$exists($value)' + source: spec.dockerRegistry.secretName + widget: ResourceLink + resource: + name: spec.dockerRegistry.secretName + namespace: $root.metadata.namespace + kind: "'Secret'" + - name: Eventing Endpoint + source: spec.eventing.endpoint + visibility: '$exists($value)' + - name: OTLP Trace Endpoint + source: spec.tracing.endpoint + visibility: '$exists($value)' + - name: Default Resources Preset (Build-time) + source: spec.defaultBuildJobPreset + visibility: '$exists($value)' + - name: Default Resources Preset (Runtime) + source: spec.defaultRuntimePodPreset + visibility: '$exists($value)' + - name: Custom Build Execution Args + source: spec.functionBuildExecutorArgs + visibility: '$exists($value)' + - name: Max Simultaneous Builds + source: spec.functionBuildMaxSimultaneousJobs + visibility: '$exists($value)' + - name: Function Request Body Limit [Mb] + source: spec.functionRequestBodyLimitMb + visibility: '$exists($value)' + - name: Function Timeout [Sec] + source: spec.functionTimeoutSec + visibility: '$exists($value)' + - name: Function Requeue Duration + source: spec.functionRequeueDuration + visibility: '$exists($value)' + - name: Controller Liveness Timeout + source: spec.healthzLivenessTimeout + visibility: '$exists($value)' + - name: Target CPU utilisation for HPA + source: spec.targetCPUUtilizationPercentage + visibility: '$exists($value)' + - name: Status + widget: Panel + children: + - name: Docker Registry + source: status.dockerRegistry + - name: Eventing Endpoint + source: status.eventingEndpoint + - name: OTLP Trace Endpoint + source: status.tracingEndpoint + + - source: status.conditions + widget: Table + name: Reconciliation Conditions + children: + - source: type + name: Type + - source: status + name: Status + widget: Badge + highlights: + positive: + - 'True' + negative: + - 'False' + - source: reason + name: Reason + - source: message + name: Message + - source: '$readableTimestamp(lastTransitionTime)' + name: Last transition + sort: true + + - widget: EventList + filter: '$matchEvents($$, $root.kind, $root.metadata.name)' + name: events + defaultType: information diff --git a/config/operator/base/ui-extensions/dockerregistry/form b/config/operator/base/ui-extensions/dockerregistry/form new file mode 100644 index 
00000000..90e23e78 --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/form @@ -0,0 +1,25 @@ +- path: spec.dockerRegistry.enableInternal + simple: true + name: Enable Internal Docker Registry +- simple: true + widget: Alert + severity: warning + alert: "'Internal Docker Registry is not recommended for production grade installations'" + visibility: "$root.spec.dockerRegistry.enableInternal = true" +- path: spec.dockerRegistry.secretName + visibility: $root.spec.dockerRegistry.enableInternal != true + simple: true + widget: Resource + name: External Docker Registry Configuration + resource: + kind: Secret + version: v1 + scope: namespace +- path: spec.tracing.endpoint + name: OTLP Trace Endpoint + simple: true + required: false +- path: spec.eventing.endpoint + name: Eventing Endpoint + simple: true + required: false \ No newline at end of file diff --git a/config/operator/base/ui-extensions/dockerregistry/general b/config/operator/base/ui-extensions/dockerregistry/general new file mode 100644 index 00000000..a02076a6 --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/general @@ -0,0 +1,15 @@ +resource: + kind: DockerRegistry + group: operator.kyma-project.io + version: v1alpha1 +urlPath: dockerregistries +category: Kyma +name: DockerRegistry +scope: namespace +features: + actions: + disableCreate: true + disableDelete: true +description: >- + {{[DockerRegistry CR](https://github.com/kyma-project/docker-registry/blob/main/config/samples/default-dockerregistry-cr.yaml)}} + specifies dockerregistry module. diff --git a/config/operator/base/ui-extensions/dockerregistry/kustomization.yaml b/config/operator/base/ui-extensions/dockerregistry/kustomization.yaml new file mode 100644 index 00000000..91a95ffb --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/kustomization.yaml @@ -0,0 +1,14 @@ +configMapGenerator: +- name: operator.kyma-project.io + namespace: kube-public + files: + - general + - form + - list + - details + options: + disableNameSuffixHash: true + labels: + app.kubernetes.io/name: dockerregistries.operator.kyma-project.io + busola.io/extension: resource + busola.io/extension-version: "0.5" \ No newline at end of file diff --git a/config/operator/base/ui-extensions/dockerregistry/list b/config/operator/base/ui-extensions/dockerregistry/list new file mode 100644 index 00000000..3c25b7c5 --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/list @@ -0,0 +1,6 @@ +- name: Ready + source: status.state + widget: Badge + highlights: + positive: + - 'Ready' \ No newline at end of file diff --git a/config/operator/base/ui-extensions/kustomization.yaml b/config/operator/base/ui-extensions/kustomization.yaml new file mode 100644 index 00000000..83221f54 --- /dev/null +++ b/config/operator/base/ui-extensions/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- dockerregistry \ No newline at end of file diff --git a/config/operator/dev/.gitignore b/config/operator/dev/.gitignore new file mode 100644 index 00000000..738a410d --- /dev/null +++ b/config/operator/dev/.gitignore @@ -0,0 +1 @@ +kustomization.yaml \ No newline at end of file diff --git a/config/operator/dev/kustomization.yaml.tpl b/config/operator/dev/kustomization.yaml.tpl new file mode 100644 index 00000000..412e9c94 --- /dev/null +++ b/config/operator/dev/kustomization.yaml.tpl @@ -0,0 +1,9 @@ +resources: +- ../base +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +# To overwrite image in base it has to point to the image in base kustomization.yaml 
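# One possible local workflow (sketch): copy this template to kustomization.yaml (ignored by
# the .gitignore in this directory), point newName/newTag at a locally built operator image,
# and apply the overlay:
#   cp config/operator/dev/kustomization.yaml.tpl config/operator/dev/kustomization.yaml
#   kubectl apply -k config/operator/dev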
+images: +- name: europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator + newName: local-registry + newTag: local diff --git a/config/samples/default-dockerregistry-cr.yaml b/config/samples/default-dockerregistry-cr.yaml new file mode 100644 index 00000000..9f9f1a1c --- /dev/null +++ b/config/samples/default-dockerregistry-cr.yaml @@ -0,0 +1,6 @@ +apiVersion: operator.kyma-project.io/v1alpha1 +kind: DockerRegistry +metadata: + name: default + namespace: kyma-system +spec: {} diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 84aca7bc..00000000 --- a/docs/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Docs - -## Overview - -The `docs` folder contains two subfolders - `user` and `contributor`. - -The `user` subfolder contains the end-user documentation, which is displayed on the [Kyma website](https://kyma-project.io/#/). Depending on your module needs, the subfolder must include overview, usage, or technical reference documents. To display the content on the website properly, create a `_sidebar.md` file in the `user` subfolder and list the documents it contains there. For more information on how to publish user documentation, follow [this guide](https://github.com/kyma-project/community/blob/main/docs/guidelines/content-guidelines/01-user-docs.md). - -The `contributor` subfolder includes any developer-related documentation to help them manually install, develop, and operate a module. - -To have a common structure across all modules, all documents must be properly numbered according to the following structure: - -> **NOTE:** It is suggested to use the following titles if you have the content that matches them; otherwise use your own, more suitable titles, or simply skip the ones you find irrelevant. - - - 00-xx-overview - - 01-xx-tutorial/configuration - - 02-xx-usage - - 03-xx-troubleshooting - -where `xx` is the number of the given document. For example: - - ```bash - 00-00-overview-telemetry-manager - 00-10-overview-logs - 00-20-overview-traces - 00-30-overview-metrics - 01-10-configure-logs - 01-20-configure-traces - 01-30-configure-metrics - 02-10-use-logs - 02-20-use-traces - 02-30-use-metrics - (...) - ``` -> **NOTE:** Before introducing [docsify](https://docsify.js.org/#/?id=docsify), we agreed to use the `10`, `20`, `30` numbering. It was to help maintain the proper order of docs if they were rendered automatically on the website. With docsify, you manually add the content to the `_sidebar.md` file, and docs are displayed in the order you add them. However, this numbering is still recommended to have the unified structure of the docs in the module repositories. - -If you have other content that does not fit into the above topics, create your own 04-10-module-specific document(s). - -You can divide your documentation into subfolders to avoid having too many documents in one `docs/user` or `docs/contributor` folder. For example, if you have many technical reference documents, you can create a `technical reference` subfolder in `docs/user` and keep relevant documentation there. Each subfolder in the `user` folder must have its own `_sidebar.md` file with the links to the main module page and the list of docs it contains. 
\ No newline at end of file diff --git a/docs/contributor/README.md b/docs/contributor/README.md deleted file mode 100644 index 9a22d6f9..00000000 --- a/docs/contributor/README.md +++ /dev/null @@ -1 +0,0 @@ -In this folder, you can add any developer-related documentation, for example, advanced installation options, testing strategy, governance, etc. \ No newline at end of file diff --git a/docs/user/README.md b/docs/user/README.md deleted file mode 100644 index 1c6854d6..00000000 --- a/docs/user/README.md +++ /dev/null @@ -1,14 +0,0 @@ -> **TIP:** Apart from the {Module Name} heading, you can use your own titles for the remaining sections. You can also add more module-specific sections. - -# {Module Name} -> Modify the title and insert the name of your module. Use Heading 1 (H1). - -## Overview -> Provide a description of your module and its components. Describe its features and functionalities. Mention the scope and add information on the CustomResourceDefinitions (CRDs). -> You can divide this section to the relevant subsections. - -## Useful Links (Optional) -> Provide links to the most relevant module documentation (tutorials, technical references, resources, etc.). - -## Feedback (Optional) -> Describe how users can provide feedback. \ No newline at end of file diff --git a/docs/user/_sidebar.md b/docs/user/_sidebar.md deleted file mode 100644 index 467416ed..00000000 --- a/docs/user/_sidebar.md +++ /dev/null @@ -1 +0,0 @@ -Use this file to create an unordered list of documents you want to display on the [Kyma website](https://kyma-project.io). The list serves to navigate through the user documentation. For more information, visit the [User documentation](https://github.com/kyma-project/community/blob/main/docs/guidelines/content-guidelines/01-user-docs.md) guide. 
\ No newline at end of file diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..5d6848ec --- /dev/null +++ b/go.mod @@ -0,0 +1,164 @@ +module github.com/kyma-project/docker-registry + +go 1.21 + +toolchain go1.21.3 + +require ( + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 + github.com/pkg/errors v0.9.1 + github.com/stretchr/testify v1.9.0 + github.com/vrischmann/envconfig v1.3.0 + go.uber.org/zap v1.27.0 + gopkg.in/yaml.v3 v3.0.1 + helm.sh/helm/v3 v3.14.3 + k8s.io/api v0.29.4 + k8s.io/apiextensions-apiserver v0.29.4 + k8s.io/apimachinery v0.29.4 + k8s.io/client-go v0.29.4 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime v0.17.3 +) + +require ( + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/cli v24.0.6+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.7+incompatible // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + 
github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rubenv/sql-migrate v1.5.2 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + 
golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.17.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/apiserver v0.29.4 // indirect + k8s.io/cli-runtime v0.29.4 // indirect + k8s.io/component-base v0.29.4 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.29.0 // indirect + oras.land/oras-go v1.2.4 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..a6ad8721 --- /dev/null +++ b/go.sum @@ -0,0 +1,602 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 
h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= 
+github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= +github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= +github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= +github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= 
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 
h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter 
v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= +github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= +github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= +github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo 
v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= +github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/vrischmann/envconfig v1.3.0 h1:4XIvQTXznxmWMnjouj0ST5lFo/WAYf5Exgl3x82crEk= +github.com/vrischmann/envconfig v1.3.0/go.mod h1:bbvxFYJdRSpXrhS63mBFtKJzkDiNkyArOLXtY6q0kuI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod 
h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +helm.sh/helm/v3 v3.14.3 h1:HmvRJlwyyt9HjgmAuxHbHv3PhMz9ir/XNWHyXfmnOP4= +helm.sh/helm/v3 v3.14.3/go.mod h1:v6myVbyseSBJTzhmeE39UcPLNv6cQK6qss3dvgAySaE= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= +k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= +k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= +k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= +k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= +k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= +k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= +k8s.io/cli-runtime v0.29.4 h1:QvUrddBxVX6XFJ6z64cGpEk7e4bQduKweqbqq+qBd9g= +k8s.io/cli-runtime v0.29.4/go.mod h1:NmklYuZ4DLfOr2XEIT8Nzl883KMZUCv7KMj3wMHayCA= +k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= +k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/component-base v0.29.4 
h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= +k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= +k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= +oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= +sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= +sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kyaml v0.16.0 h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0= +sigs.k8s.io/kustomize/kyaml v0.16.0/go.mod h1:xOK/7i+vmE14N2FdFyugIshB8eF6ALpy7jI87Q2nRh4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/Makefile b/hack/Makefile new file mode 100644 index 00000000..078eba60 --- /dev/null +++ b/hack/Makefile @@ -0,0 +1,7 @@ +# This Makefile is used to add context to the `gardener.mk` file by adding the PROJECT_ROOT variable +# this is needed to use targets in the gardener.mk file from a prompt (e.g. `make -C hack provision-gardener`) + +PROJECT_ROOT=.. + +include ${PROJECT_ROOT}/hack/help.mk +include ${PROJECT_ROOT}/hack/gardener.mk diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100755 index 00000000..29c55ecd --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ \ No newline at end of file diff --git a/hack/gardener.mk b/hack/gardener.mk new file mode 100644 index 00000000..54522978 --- /dev/null +++ b/hack/gardener.mk @@ -0,0 +1,27 @@ +ifndef PROJECT_ROOT +$(error PROJECT_ROOT is undefined) +endif +include ${PROJECT_ROOT}/hack/tools.mk + +##@ Gardener + +GARDENER_INFRASTRUCTURE = az +HIBERNATION_HOUR=$(shell echo $$(( ( $(shell date +%H | sed s/^0//g) + 5 ) % 24 ))) +GIT_COMMIT_SHA=$(shell git rev-parse --short=8 HEAD) +ifneq (,$(GARDENER_SA_PATH)) +GARDENER_K8S_VERSION=$(shell kubectl --kubeconfig=${GARDENER_SA_PATH} get cloudprofiles.core.gardener.cloud ${GARDENER_INFRASTRUCTURE} -o=jsonpath='{.spec.kubernetes.versions[0].version}') +else +GARDENER_K8S_VERSION=1.27.4 +endif +#Overwrite default kyma cli gardenlinux version because it's not supported. +GARDENER_LINUX_VERSION=1312.3.0 + +.PHONY: provision-gardener +provision-gardener: kyma ## Provision gardener cluster with latest k8s version + ${KYMA} provision gardener ${GARDENER_INFRASTRUCTURE} -c ${GARDENER_SA_PATH} -n test-${GIT_COMMIT_SHA} -p ${GARDENER_PROJECT} -s ${GARDENER_SECRET_NAME} -k ${GARDENER_K8S_VERSION}\ + --gardenlinux-version=$(GARDENER_LINUX_VERSION) --hibernation-start="00 ${HIBERNATION_HOUR} * * ?" + +.PHONY: deprovision-gardener +deprovision-gardener: kyma ## Deprovision gardener cluster + kubectl --kubeconfig=${GARDENER_SA_PATH} annotate shoot test-${GIT_COMMIT_SHA} confirmation.gardener.cloud/deletion=true + kubectl --kubeconfig=${GARDENER_SA_PATH} delete shoot test-${GIT_COMMIT_SHA} --wait=false diff --git a/hack/get_kyma_file_name.sh b/hack/get_kyma_file_name.sh new file mode 100755 index 00000000..f6d792c8 --- /dev/null +++ b/hack/get_kyma_file_name.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +function get_kyma_file_name () { + + local _OS_TYPE=$1 + local _OS_ARCH=$2 + + [ "$_OS_TYPE" == "Linux" ] && [ "$_OS_ARCH" == "x86_64" ] && echo "kyma-linux" || + [ "$_OS_TYPE" == "Linux" ] && [ "$_OS_ARCH" == "arm64" ] && echo "kyma-linux-arm" || + [ "$_OS_TYPE" == "Windows" ] && [ "$_OS_ARCH" == "x86_64" ] && echo "kyma.exe" || + [ "$_OS_TYPE" == "Windows" ] && [ "$_OS_ARCH" == "arm64" ] && echo "kyma-arm.exe" || + [ "$_OS_TYPE" == "Darwin" ] && [ "$_OS_ARCH" == "x86_64" ] && echo "kyma-darwin" || + [ "$_OS_TYPE" == "Darwin" ] && [ "$_OS_ARCH" == "arm64" ] && echo "kyma-darwin-arm" +} + +get_kyma_file_name "$@" diff --git a/hack/help.mk b/hack/help.mk new file mode 100644 index 00000000..a7a9d8f5 --- /dev/null +++ b/hack/help.mk @@ -0,0 +1,5 @@ +##@ General +.DEFAULT_GOAL=help +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/hack/k3d.mk b/hack/k3d.mk new file mode 100644 index 00000000..7eab3ad0 --- /dev/null +++ b/hack/k3d.mk @@ -0,0 +1,26 @@ +CLUSTER_NAME ?= kyma +REGISTRY_PORT ?= 5001 +REGISTRY_NAME ?= ${CLUSTER_NAME}-registry + +ifndef PROJECT_ROOT +$(error PROJECT_ROOT is undefined) +endif +include $(PROJECT_ROOT)/hack/tools.mk + +##@ K3D + +.PHONY: create-k3d +create-k3d: kyma ## Create k3d with kyma CRDs. + ${KYMA} provision k3d --registry-port ${REGISTRY_PORT} --name ${CLUSTER_NAME} --ci -p 6080:8080@loadbalancer -p 6433:8433@loadbalancer + kubectl create namespace kyma-system + +.PHONY: delete-k3d +delete-k3d: delete-k3d-cluster delete-k3d-registry ## Delete k3d registry & cluster. 
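+# Usage sketch: like the other *.mk helpers, this file is meant to be included by a makefile that
+# sets PROJECT_ROOT rather than run directly, for example:
+#   include ${PROJECT_ROOT}/hack/k3d.mk   # then: make create-k3d / make delete-k3d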
+
+.PHONY: delete-k3d-registry
+delete-k3d-registry: ## Delete k3d kyma registry.
+	-k3d registry delete ${REGISTRY_NAME}
+
+.PHONY: delete-k3d-cluster
+delete-k3d-cluster: ## Delete k3d kyma cluster.
+	-k3d cluster delete ${CLUSTER_NAME}
diff --git a/hack/makefile-strategy.md b/hack/makefile-strategy.md
new file mode 100644
index 00000000..1b2530f7
--- /dev/null
+++ b/hack/makefile-strategy.md
@@ -0,0 +1,39 @@
+# Makefile Architecture
+
+The goal is to provide a way to extend Makefile targets in the most readable way, without keeping all targets in one file.
+
+Pros of the architecture:
+
+* targets are well organized
+* single responsibility
+* extensibility
+
+## Dependencies Description
+* `Makefile` - The main makefile that allows for installing and running the Docker Registry Operator. It provides high-level targets to run the module without knowing its internals. It's the first contact point for Docker Registry module users.
+* `hack/Makefile` - High-level API that contains all targets that may be used by any CI/CD system. It has dependencies on the `hack/*.mk` makefiles.
+* `hack/*.mk` - Contains common targets that may be used by other makefiles (they are included and shouldn't be run directly). Targets are grouped by functionality. They should contain helper targets.
+* `components/operator/Makefile` - Contains all basic operations on the Docker Registry Operator, such as builds and tests, used during development. It's also used by `Makefile`.
+
+## Good Practices
+
+Every makefile (`Makefile` and `*.mk`) must contain a few elements that make the file more useful and human-readable:
+
+* Include `hack/help.mk` - this file provides the `help` target, which describes what is inside the makefile and what you can do with it.
+* Before the `include`, you must define the `PROJECT_ROOT` variable pointing to the project root directory.
+
+Additionally, `Makefile` (but not `*.mk`) can also contain the following:
+
+* Description - helps you understand what the target does; it is shown in the help (`## description` after the target name).
+* Sections - allow for separating targets based on their purpose (`##@`).
+
+Example of a target that follows all good practices:
+
+```Makefile
+PROJECT_ROOT=.
+include ${PROJECT_ROOT}/hack/help.mk
+
+##@ General
+
+.PHONY: run
+run: create-k3d install-dockerregistry-main ## Create k3d cluster and install dockerregistry from main
+```
\ No newline at end of file
diff --git a/hack/tools.mk b/hack/tools.mk
new file mode 100644
index 00000000..751d843f
--- /dev/null
+++ b/hack/tools.mk
@@ -0,0 +1,77 @@
+## Location to install dependencies to
+ifndef PROJECT_ROOT
+$(error PROJECT_ROOT is undefined)
+endif
+LOCALBIN ?= $(realpath $(PROJECT_ROOT))/bin
+$(LOCALBIN):
+	mkdir -p $(LOCALBIN)
+
+# Operating system architecture
+OS_ARCH=$(shell uname -m)
+# Operating system type
+OS_TYPE=$(shell uname)
+
+##@ Tools
+
+########## Kyma CLI ###########
+KYMA_STABILITY ?= unstable
+
+define os_error
+$(error Error: unsupported platform OS_TYPE:$1, OS_ARCH:$2; to mitigate this problem set the KYMA variable to the absolute path of a kyma-cli binary compatible with your operating system and architecture)
+endef
+
+KYMA ?= $(LOCALBIN)/kyma-$(KYMA_STABILITY)
+kyma: $(LOCALBIN) $(KYMA) ## Download kyma locally if necessary.
+$(KYMA):
+	$(eval KYMA_FILE_NAME=$(shell ${PROJECT_ROOT}/hack/get_kyma_file_name.sh ${OS_TYPE} ${OS_ARCH}))
+	## Fail if the operating system or architecture is not supported
+	$(if $(KYMA_FILE_NAME),,$(call os_error, ${OS_TYPE}, ${OS_ARCH}))
+	test -f $@ || curl -s -Lo $(KYMA) https://storage.googleapis.com/kyma-cli-$(KYMA_STABILITY)/$(KYMA_FILE_NAME)
+	chmod +x $(KYMA)
+
+########## Kustomize ###########
+KUSTOMIZE ?= $(LOCALBIN)/kustomize
+KUSTOMIZE_VERSION ?= v4.5.5
+KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
+
+.PHONY: kustomize
+kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
+$(KUSTOMIZE): $(LOCALBIN)
+	test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); }
+
+########## Controller-Gen ###########
+CONTROLLER_TOOLS_VERSION ?= v0.14.0
+CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
+
+.PHONY: controller-gen
+controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
+$(CONTROLLER_GEN): $(LOCALBIN)
+	test "$$(${LOCALBIN}/controller-gen --version)" = "Version: ${CONTROLLER_TOOLS_VERSION}" || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
+
+########## Envtest ###########
+ENVTEST ?= $(LOCALBIN)/setup-envtest
+KUBEBUILDER_ASSETS=$(LOCALBIN)/k8s/kubebuilder_assets
+
+define path_error
+$(error Error: path is empty: $1)
+endef
+
+# ENVTEST_K8S_VERSION refers to the version of the kubebuilder assets downloaded by the envtest binary.
+ENVTEST_K8S_VERSION = 1.27.1
+
+.PHONY: envtest
+envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
+$(ENVTEST): $(LOCALBIN)
+	test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+
+# Envtest downloads binaries to a k8s/(K8S_Version)-(arch)-(os) directory, which is different on every machine.
+# To use the same `envtest` binaries on CI and during local development, this target moves them to a directory known up front.
+# Additionally, `OS_ARCH` returns x86_64, but envtest uses the `amd64` name.
+.PHONY: kubebuilder-assets
+kubebuilder-assets: envtest
+	$(eval DOWNLOADED_ASSETS=$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path))
+	$(if $(DOWNLOADED_ASSETS),,$(call path_error, ${DOWNLOADED_ASSETS}))
+	chmod -R 755 $(DOWNLOADED_ASSETS)
+	mkdir -p $(LOCALBIN)/k8s/kubebuilder_assets/
+	mv $(DOWNLOADED_ASSETS)/* $(LOCALBIN)/k8s/kubebuilder_assets/
+	rm -d $(DOWNLOADED_ASSETS)
diff --git a/markdown_heading_capitalization.js b/markdown_heading_capitalization.js
new file mode 100644
index 00000000..7077d5c1
--- /dev/null
+++ b/markdown_heading_capitalization.js
@@ -0,0 +1,31 @@
+// This file triggers the custom rule that checks that all Markdown heading words longer than 4 characters are written in title case. To run this check, you must include the rule in the markdownlint command.
+// For example, if you want to run the check on the `docs` folder, run the following command: `markdownlint -r ./markdown_heading_capitalization.js docs/`.
+module.exports = [{ + "names": [ "custom/capitalize-headings" ], + "description": "Heading words longer than 4 characters should be capitalized", + "tags": [ "formatting" ], + "function": function rule(params, onError) { + params.tokens.filter(function filterToken(token) { + return token.type === "heading_open"; + }).forEach(function forToken(heading) { + var headingTokenContent = heading.line.trim(); + var wordsInHeading = headingTokenContent.split(' '); + + for (var i = 0; i < wordsInHeading.length; i++) { + if (wordsInHeading[i].length > 4 && wordsInHeading[i] && + wordsInHeading[i].charAt(0) !== wordsInHeading[i].charAt(0).toUpperCase()) { + var capitalizedWord = wordsInHeading[i].charAt(0).toUpperCase() + wordsInHeading[i].slice(1); + var detailMessage = "Change " + "'" + wordsInHeading[i] + "'" + " to " + "'" + capitalizedWord + "'"; + + onError({ + "lineNumber": heading.lineNumber, + "detail": detailMessage, + "context": headingTokenContent, // Show the whole heading as context + "range": [headingTokenContent.indexOf(wordsInHeading[i]), wordsInHeading[i].length] // Underline the word which needs a change + }); + } + } + }); + } + }]; + \ No newline at end of file diff --git a/module-config-template.yaml b/module-config-template.yaml new file mode 100644 index 00000000..96e034e0 --- /dev/null +++ b/module-config-template.yaml @@ -0,0 +1,8 @@ +name: {{.Name}} +channel: {{.Channel}} +version: {{.Version}} +defaultCR: config/samples/default-dockerregistry-cr.yaml +manifest: dockerregistry-operator.yaml +annotations: + "operator.kyma-project.io/doc-url": "https://kyma-project.io/#/docker-registry/user/README" +moduleRepo: https://github.com/kyma-project/docker-registry.git diff --git a/sec-scanners-config.yaml b/sec-scanners-config.yaml new file mode 100644 index 00000000..13036cc1 --- /dev/null +++ b/sec-scanners-config.yaml @@ -0,0 +1,11 @@ +module-name: docker-registry +rc-tag: 1.4.0 +protecode: + - europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:main + - europe-docker.pkg.dev/kyma-project/prod/tpi/registry:2.8.1-1ae4c190 +whitesource: + language: golang-mod + subprojects: false + exclude: + - "**/test/**" + - "**/*_test.go"
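A minimal shell sketch of how the Gardener targets added in `hack/gardener.mk` might be driven locally through `hack/Makefile`. Only the `make` targets and variable names come from this patch; the exported values are hypothetical placeholders.

```bash
# Placeholders for the variables that hack/gardener.mk expects the caller to provide.
export GARDENER_SA_PATH=$HOME/gardener/kubeconfig.yaml   # hypothetical service-account kubeconfig
export GARDENER_PROJECT=my-project                       # hypothetical Gardener project
export GARDENER_SECRET_NAME=my-infra-secret              # hypothetical infrastructure secret

# Provision a shoot named test-<short git SHA>; the kyma CLI is downloaded into bin/ first if missing.
make -C hack provision-gardener

# Annotate the shoot for deletion and remove it without waiting.
make -C hack deprovision-gardener
```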
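The `kubebuilder-assets` target in `hack/tools.mk` stages the envtest binaries under a fixed path so CI and local runs behave the same. A sketch of how that staged path might be consumed; the `go test` line is illustrative only, since the operator's test wiring is outside this part of the patch.

```bash
# Download the envtest binaries and move them to <repo root>/bin/k8s/kubebuilder_assets,
# as done by the kubebuilder-assets target (tools.mk is included via hack/gardener.mk).
make -C hack kubebuilder-assets

# Hypothetical consumption: point envtest-based tests at the staged binaries.
export KUBEBUILDER_ASSETS="$(pwd)/bin/k8s/kubebuilder_assets"
go test ./components/operator/...
```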
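The custom rule in `markdown_heading_capitalization.js` flags heading words longer than 4 characters that start with a lowercase letter. A sketch of its expected output on a hypothetical heading; the file name and heading are invented for illustration.

```bash
# Hypothetical input: docs/example.md containing the heading
#   ## configure the docker registry
markdownlint -r ./markdown_heading_capitalization.js docs/
# Expected findings, one per lowercase word longer than 4 characters:
#   Change 'configure' to 'Configure'
#   Change 'docker' to 'Docker'
#   Change 'registry' to 'Registry'
# "the" is not flagged because it is only 3 characters long.
```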