diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md
index dd24ed32aee77..b43b91a0e05ce 100644
--- a/.github/ISSUE_TEMPLATE/release.md
+++ b/.github/ISSUE_TEMPLATE/release.md
@@ -9,12 +9,6 @@ assignees: ''
Target RC1 date: ___. __, ____
Target GA date: ___. __, ____
- - [ ] Create new section in the [Release Planning doc](https://docs.google.com/document/d/1trJIomcgXcfvLw0aYnERrFWfPjQOfYMDJOCh1S8nMBc/edit?usp=sharing)
- - [ ] Schedule a Release Planning meeting roughly two weeks before the scheduled Release freeze date by adding it to the community calendar (or delegate this task to someone with write access to the community calendar)
- - [ ] Include Zoom link in the invite
- - [ ] Post in #argo-cd and #argo-contributors one week before the meeting
- - [ ] Post again one hour before the meeting
- - [ ] At the meeting, remove issues/PRs from the project's column for that release which have not been “claimed” by at least one Approver (add it to the next column if Approver requests that)
- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 406306bbeca2e..c1a3f42508aaa 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -13,11 +13,12 @@ Checklist:
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
* [ ] Does this PR require documentation updates?
* [ ] I've updated documentation as required by this PR.
-* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/blob/master/community/CONTRIBUTING.md#legal)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).
* [ ] My new feature complies with the [feature status](https://github.com/argoproj/argoproj/blob/master/community/feature-status.md) guidelines.
* [ ] I have added a brief description of why this PR is necessary and/or what this PR solves.
+* [ ] Optional. My organization is added to USERS.md.
+* [ ] Optional. For bug fixes, I've indicated what older releases this fix should be cherry-picked into (this may or may not happen depending on risk/complexity).
diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml
index adffe526da728..c8a522fbf7198 100644
--- a/.github/workflows/ci-build.yaml
+++ b/.github/workflows/ci-build.yaml
@@ -1,5 +1,5 @@
name: Integration tests
-on:
+on:
push:
branches:
- 'master'
@@ -23,9 +23,28 @@ permissions:
contents: read
jobs:
+ changes:
+ runs-on: ubuntu-latest
+ outputs:
+ backend: ${{ steps.filter.outputs.backend }}
+ frontend: ${{ steps.filter.outputs.frontend }}
+ steps:
+ - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+ - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2
+ id: filter
+ with:
+          # Any file that is not under ui/ or docs/ and is not a markdown file counts as a backend file
+ filters: |
+ backend:
+ - '!(ui/**|docs/**|**.md|**/*.md)'
+ frontend:
+ - 'ui/**'
check-go:
name: Ensure Go modules synchronicity
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
+ needs:
+ - changes
steps:
- name: Checkout code
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
@@ -43,7 +62,10 @@ jobs:
build-go:
name: Build & cache Go code
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
+ needs:
+ - changes
steps:
- name: Checkout code
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
@@ -67,7 +89,10 @@ jobs:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
+ needs:
+ - changes
steps:
- name: Checkout code
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
@@ -83,12 +108,14 @@ jobs:
test-go:
name: Run unit tests for Go packages
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
needs:
- build-go
+ - changes
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
- GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
+ GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
@@ -150,12 +177,14 @@ jobs:
test-go-race:
name: Run unit tests with -race for Go packages
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
needs:
- build-go
+ - changes
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
- GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
+ GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
@@ -212,7 +241,10 @@ jobs:
codegen:
name: Check changes to generated code
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
+ needs:
+ - changes
steps:
- name: Checkout code
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
@@ -260,14 +292,17 @@ jobs:
build-ui:
name: Build, test & lint UI code
+ if: ${{ needs.changes.outputs.frontend == 'true' }}
runs-on: ubuntu-22.04
+ needs:
+ - changes
steps:
- name: Checkout code
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup NodeJS
uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
with:
- node-version: '20.7.0'
+ node-version: '21.6.1'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
@@ -292,10 +327,12 @@ jobs:
analyze:
name: Process & analyze test artifacts
+ if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
runs-on: ubuntu-22.04
needs:
- test-go
- build-ui
+ - changes
env:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
@@ -315,7 +352,7 @@ jobs:
- name: Create test-results directory
run: |
mkdir -p test-results
- - name: Get code coverage artifiact
+ - name: Get code coverage artifact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: code-coverage
@@ -336,35 +373,37 @@ jobs:
SCANNER_PATH: /tmp/cache/scanner
OS: linux
run: |
- # We do not use the provided action, because it does contain an old
- # version of the scanner, and also takes time to build.
- set -e
- mkdir -p ${SCANNER_PATH}
- export SONAR_USER_HOME=${SCANNER_PATH}/.sonar
- if [[ ! -x "${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner" ]]; then
- curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip
- unzip -qq -o sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip -d ${SCANNER_PATH}
- fi
-
- chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
- chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/jre/bin/java
-
- # Explicitly set NODE_MODULES
- export NODE_MODULES=${PWD}/ui/node_modules
- export NODE_PATH=${PWD}/ui/node_modules
-
- ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
+ # We do not use the provided action, because it does contain an old
+ # version of the scanner, and also takes time to build.
+ set -e
+ mkdir -p ${SCANNER_PATH}
+ export SONAR_USER_HOME=${SCANNER_PATH}/.sonar
+ if [[ ! -x "${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner" ]]; then
+ curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip
+ unzip -qq -o sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip -d ${SCANNER_PATH}
+ fi
+
+ chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
+ chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/jre/bin/java
+
+ # Explicitly set NODE_MODULES
+ export NODE_MODULES=${PWD}/ui/node_modules
+ export NODE_PATH=${PWD}/ui/node_modules
+
+ ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
+ if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
- k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
- needs:
+ k3s-version: [v1.29.1, v1.28.6, v1.27.10, v1.26.13, v1.25.16]
+ needs:
- build-go
+ - changes
env:
GOPATH: /home/runner/go
ARGOCD_FAKE_IN_CLUSTER: "true"
@@ -374,10 +413,10 @@ jobs:
ARGOCD_E2E_K3S: "true"
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
- ARGOCD_APPLICATION_NAMESPACES: "argocd-e2e-external"
+ ARGOCD_APPLICATION_NAMESPACES: "argocd-e2e-external,argocd-e2e-external-2"
ARGOCD_SERVER: "127.0.0.1:8088"
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
- GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
+ GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
@@ -427,9 +466,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
- docker pull ghcr.io/dexidp/dex:v2.37.0
+ docker pull ghcr.io/dexidp/dex:v2.38.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
- docker pull redis:7.0.11-alpine
+ docker pull redis:7.0.14-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
@@ -462,3 +501,26 @@ jobs:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
if: ${{ failure() }}
+
+ # workaround for status checks -- check this one job instead of each individual E2E job in the matrix
+ # this allows us to skip the entire matrix when it doesn't need to run while still having accurate status checks
+ # see:
+ # https://github.com/argoproj/argo-workflows/pull/12006
+ # https://github.com/orgs/community/discussions/9141#discussioncomment-2296809
+ # https://github.com/orgs/community/discussions/26822#discussioncomment-3305794
+ test-e2e-composite-result:
+ name: E2E Tests - Composite result
+ if: ${{ always() }}
+ needs:
+ - test-e2e
+ - changes
+ runs-on: ubuntu-22.04
+ steps:
+ - run: |
+ result="${{ needs.test-e2e.result }}"
+ # mark as successful even if skipped
+ if [[ $result == "success" || $result == "skipped" ]]; then
+ exit 0
+ else
+ exit 1
+ fi
\ No newline at end of file
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 58426890abcbf..2311d43925bb7 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -27,10 +27,15 @@ jobs:
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-22.04
-
steps:
- name: Checkout repository
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+
+ # Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
+ - name: Setup Golang
+ uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
+ with:
+ go-version-file: go.mod
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
diff --git a/.github/workflows/image-reuse.yaml b/.github/workflows/image-reuse.yaml
index 55d3bc309294a..0838f38e4230d 100644
--- a/.github/workflows/image-reuse.yaml
+++ b/.github/workflows/image-reuse.yaml
@@ -74,9 +74,9 @@ jobs:
go-version: ${{ inputs.go-version }}
- name: Install cosign
- uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
+ uses: sigstore/cosign-installer@1fc5bd396d372bee37d608f955b336615edf79c8 # v3.2.0
with:
- cosign-release: 'v2.0.0'
+ cosign-release: 'v2.2.1'
- uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
@@ -145,7 +145,7 @@ jobs:
- name: Build and push container image
id: image
- uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1
+ uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 #v5.1.0
with:
context: .
platforms: ${{ inputs.platforms }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 7e9303f288ae4..ae5174659cf40 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -38,7 +38,7 @@ jobs:
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
if: github.repository == 'argoproj/argo-cd'
- uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
with:
image: quay.io/argoproj/argocd
digest: ${{ needs.argocd-image.outputs.image-digest }}
@@ -120,7 +120,7 @@ jobs:
contents: write # Needed for release uploads
if: github.repository == 'argoproj/argo-cd'
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
- uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
with:
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
provenance-name: "argocd-cli.intoto.jsonl"
@@ -204,7 +204,7 @@ jobs:
contents: write # Needed for release uploads
if: github.repository == 'argoproj/argo-cd'
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
- uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
with:
base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
provenance-name: "argocd-sbom.intoto.jsonl"
@@ -265,11 +265,13 @@ jobs:
set -xue
SOURCE_TAG=${{ github.ref_name }}
VERSION_REF="${SOURCE_TAG#*v}"
+ COMMIT_HASH=$(git rev-parse HEAD)
if echo "$VERSION_REF" | grep -E -- '^[0-9]+\.[0-9]+\.0-rc1';then
VERSION=$(awk 'BEGIN {FS=OFS="."} {$2++; print}' <<< "${VERSION_REF%-rc1}")
echo "Updating VERSION to: $VERSION"
echo "UPDATE_VERSION=true" >> $GITHUB_ENV
echo "NEW_VERSION=$VERSION" >> $GITHUB_ENV
+ echo "COMMIT_HASH=$COMMIT_HASH" >> $GITHUB_ENV
else
echo "Not updating VERSION"
echo "UPDATE_VERSION=false" >> $GITHUB_ENV
@@ -278,6 +280,10 @@ jobs:
- name: Update VERSION on master branch
run: |
echo ${{ env.NEW_VERSION }} > VERSION
+ # Replace the 'project-release: vX.X.X-rcX' line in SECURITY-INSIGHTS.yml
+ sed -i "s/project-release: v.*$/project-release: v${{ env.NEW_VERSION }}/" SECURITY-INSIGHTS.yml
+ # Update the 'commit-hash: XXXXXXX' line in SECURITY-INSIGHTS.yml
+          sed -i "s/commit-hash: .*/commit-hash: ${{ env.COMMIT_HASH }}/" SECURITY-INSIGHTS.yml
if: ${{ env.UPDATE_VERSION == 'true' }}
- name: Create PR to update VERSION on master branch
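For reference, the SECURITY-INSIGHTS.yml bump performed by the "Update VERSION on master branch" step above can be reproduced locally with plain sed. A minimal sketch, assuming the file carries the project-release and commit-hash fields added by the new SECURITY-INSIGHTS.yml later in this diff (the version value is a placeholder):

  NEW_VERSION=2.11.0                 # placeholder; the workflow derives this from the rc1 tag
  COMMIT_HASH=$(git rev-parse HEAD)  # same command the workflow runs
  sed -i "s/project-release: v.*$/project-release: v${NEW_VERSION}/" SECURITY-INSIGHTS.yml
  sed -i "s/commit-hash: .*/commit-hash: ${COMMIT_HASH}/" SECURITY-INSIGHTS.yml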
diff --git a/CODEOWNERS b/CODEOWNERS
index 507193dad5611..83bb38871d96d 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -2,8 +2,10 @@
** @argoproj/argocd-approvers
# Docs
-/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
-/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/README.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
# CI
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
diff --git a/Dockerfile b/Dockerfile
index 2c31b5077f67e..511fa7cceef96 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -51,7 +51,7 @@ RUN groupadd -g $ARGOCD_USER_ID argocd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y \
- git git-lfs tini gpg tzdata && \
+ git git-lfs tini gpg tzdata connect-proxy && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/node:20.6.1@sha256:14bd39208dbc0eb171cbfb26ccb9ac09fa1b2eba04ccd528ab5d12983fd9ee24 AS argocd-ui
+FROM --platform=$BUILDPLATFORM docker.io/library/node:21.6.1@sha256:abc4a25c8b5a2b460f3144aabfc8941ecd7e4fb721e0b14b635e70394c1899fb AS argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
diff --git a/Makefile b/Makefile
index 4d245b9bf15b5..a4d6bd5264624 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=
-ARGOCD_E2E_TEST_TIMEOUT?=60m
+ARGOCD_E2E_TEST_TIMEOUT?=90m
ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
@@ -175,29 +175,21 @@ endif
.PHONY: all
all: cli image
-# We have some legacy requirements for being checked out within $GOPATH.
-# The ensure-gopath target can be used as dependency to ensure we are running
-# within these boundaries.
-.PHONY: ensure-gopath
-ensure-gopath:
-ifneq ("$(PWD)","$(LEGACY_PATH)")
- @echo "Due to legacy requirements for codegen, repository needs to be checked out within \$$GOPATH"
- @echo "Location of this repo should be '$(LEGACY_PATH)' but is '$(PWD)'"
- @exit 1
-endif
-
.PHONY: gogen
-gogen: ensure-gopath
+gogen:
export GO111MODULE=off
go generate ./util/argo/...
.PHONY: protogen
-protogen: ensure-gopath mod-vendor-local
+protogen: mod-vendor-local protogen-fast
+
+.PHONY: protogen-fast
+protogen-fast:
export GO111MODULE=off
./hack/generate-proto.sh
.PHONY: openapigen
-openapigen: ensure-gopath
+openapigen:
export GO111MODULE=off
./hack/update-openapi.sh
@@ -212,19 +204,22 @@ notification-docs:
.PHONY: clientgen
-clientgen: ensure-gopath
+clientgen:
export GO111MODULE=off
./hack/update-codegen.sh
.PHONY: clidocsgen
-clidocsgen: ensure-gopath
+clidocsgen:
go run tools/cmd-docs/main.go
.PHONY: codegen-local
-codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
+codegen-local: mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
rm -rf vendor/
+.PHONY: codegen-local-fast
+codegen-local-fast: gogen protogen-fast clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
+
.PHONY: codegen
codegen: test-tools-image
$(call run-in-test-client,make codegen-local)
@@ -438,6 +433,7 @@ start-e2e: test-tools-image
start-e2e-local: mod-vendor-local dep-ui-local cli-local
kubectl create ns argocd-e2e || true
kubectl create ns argocd-e2e-external || true
+ kubectl create ns argocd-e2e-external-2 || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
@@ -458,8 +454,8 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
BIN_MODE=$(ARGOCD_BIN_MODE) \
- ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external \
- ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external \
+ ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
+ ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
@@ -491,6 +487,7 @@ start-local: mod-vendor-local dep-ui-local cli-local
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=false \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
+ BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_E2E_TEST=false \
ARGOCD_APPLICATION_NAMESPACES=$(ARGOCD_APPLICATION_NAMESPACES) \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
diff --git a/OWNERS b/OWNERS
index d8532c550005a..56e037e282a0a 100644
--- a/OWNERS
+++ b/OWNERS
@@ -5,6 +5,7 @@ owners:
approvers:
- alexec
- alexmt
+- gdsoumya
- jannfis
- jessesuen
- jgwest
@@ -30,4 +31,3 @@ reviewers:
- zachaller
- 34fathombelow
- alexef
-- gdsoumya
diff --git a/Procfile b/Procfile
index 2bb26a086fb1d..4862b0230062f 100644
--- a/Procfile
+++ b/Procfile
@@ -1,4 +1,4 @@
-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
+controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
@@ -9,4 +9,5 @@ git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
-notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
+notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
+
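Both new controller flags are wired through environment variables that default to 'false' in the Procfile, so existing local setups keep their current behavior. A minimal sketch of opting in for a local run, assuming the variables are simply exported before launching the processes via the Makefile's start-local target shown above:

  ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF=true \
  ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED=true \
  make start-local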
diff --git a/README.md b/README.md
index ef5664de5b5b7..707848191c830 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,7 @@
**Social:**
[![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)
[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
+[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/)
# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -85,4 +86,5 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
1. [How to create Argo CD Applications Automatically using ApplicationSet? "Automation of GitOps"](https://amralaayassen.medium.com/how-to-create-argocd-applications-automatically-using-applicationset-automation-of-the-gitops-59455eaf4f72)
+1. [Progressive Delivery with Service Mesh – Argo Rollouts with Istio](https://www.cncf.io/blog/2022/12/16/progressive-delivery-with-service-mesh-argo-rollouts-with-istio/)
diff --git a/SECURITY-INSIGHTS.yml b/SECURITY-INSIGHTS.yml
new file mode 100644
index 0000000000000..8ac4bc36b04ae
--- /dev/null
+++ b/SECURITY-INSIGHTS.yml
@@ -0,0 +1,128 @@
+header:
+ schema-version: 1.0.0
+ expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
+ last-updated: '2023-10-27'
+ last-reviewed: '2023-10-27'
+ commit-hash: b71277c6beb949d0199d647a582bc25822b88838
+ project-url: https://github.com/argoproj/argo-cd
+ project-release: v2.9.0-rc3
+ changelog: https://github.com/argoproj/argo-cd/releases
+ license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
+project-lifecycle:
+ status: active
+ roadmap: https://github.com/orgs/argoproj/projects/25
+ bug-fixes-only: false
+ core-maintainers:
+ - https://github.com/argoproj/argoproj/blob/master/MAINTAINERS.md
+ release-cycle: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/
+ release-process: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#release-process
+contribution-policy:
+ accepts-pull-requests: true
+ accepts-automated-pull-requests: true
+ automated-tools-list:
+ - automated-tool: dependabot
+ action: allowed
+ path:
+ - /
+ - automated-tool: snyk-report
+ action: allowed
+ path:
+ - docs/snyk
+ comment: |
+ This tool runs Snyk and generates a report of vulnerabilities in the project's dependencies. The report is
+ placed in the project's documentation. The workflow is defined here:
+ https://github.com/argoproj/argo-cd/blob/master/.github/workflows/update-snyk.yaml
+ contributing-policy: https://argo-cd.readthedocs.io/en/stable/developer-guide/code-contributions/
+ code-of-conduct: https://github.com/cncf/foundation/blob/master/code-of-conduct.md
+documentation:
+ - https://argo-cd.readthedocs.io/
+distribution-points:
+ - https://github.com/argoproj/argo-cd/releases
+ - https://quay.io/repository/argoproj/argocd
+security-artifacts:
+ threat-model:
+ threat-model-created: true
+ evidence-url:
+ - https://github.com/argoproj/argoproj/blob/master/docs/argo_threat_model.pdf
+ - https://github.com/argoproj/argoproj/blob/master/docs/end_user_threat_model.pdf
+ self-assessment:
+ self-assessment-created: false
+ comment: |
+ An extensive self-assessment was performed for CNCF graduation. Because the self-assessment process was evolving
+ at the time, no standardized document has been published.
+security-testing:
+ - tool-type: sca
+ tool-name: Dependabot
+ tool-version: "2"
+ tool-url: https://github.com/dependabot
+ integration:
+ ad-hoc: false
+ ci: false
+ before-release: false
+ tool-rulesets:
+ - https://github.com/argoproj/argo-cd/blob/master/.github/dependabot.yml
+ - tool-type: sca
+ tool-name: Snyk
+ tool-version: latest
+ tool-url: https://snyk.io/
+ integration:
+ ad-hoc: true
+ ci: true
+ before-release: false
+ - tool-type: sast
+ tool-name: CodeQL
+ tool-version: latest
+ tool-url: https://codeql.github.com/
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: false
+ comment: |
+ We use the default configuration with the latest version.
+security-assessments:
+ - auditor-name: Trail of Bits
+ auditor-url: https://trailofbits.com
+ auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/argo_security_final_report.pdf
+ report-year: 2021
+ - auditor-name: Ada Logics
+ auditor-url: https://adalogics.com
+ auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/argo_security_audit_2022.pdf
+ report-year: 2022
+ - auditor-name: Ada Logics
+ auditor-url: https://adalogics.com
+ auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/audit_fuzzer_adalogics_2022.pdf
+ report-year: 2022
+ comment: |
+      This part of the audit was performed by Ada Logics and focused on fuzzing.
+ - auditor-name: Chainguard
+ auditor-url: https://chainguard.dev
+ auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/software_supply_chain_slsa_assessment_chainguard_2023.pdf
+ report-year: 2023
+ comment: |
+ Confirmed the project's release process as achieving SLSA (v0.1) level 3.
+security-contacts:
+ - type: email
+ value: cncf-argo-security@lists.cncf.io
+ primary: true
+vulnerability-reporting:
+ accepts-vulnerability-reports: true
+ email-contact: cncf-argo-security@lists.cncf.io
+ security-policy: https://github.com/argoproj/argo-cd/security/policy
+ bug-bounty-available: true
+ bug-bounty-url: https://hackerone.com/ibb/policy_scopes
+ out-scope:
+ - vulnerable and outdated components # See https://github.com/argoproj/argo-cd/blob/master/SECURITY.md#a-word-about-security-scanners
+ - security logging and monitoring failures
+dependencies:
+ third-party-packages: true
+ dependencies-lists:
+ - https://github.com/argoproj/argo-cd/blob/master/go.mod
+ - https://github.com/argoproj/argo-cd/blob/master/Dockerfile
+ - https://github.com/argoproj/argo-cd/blob/master/ui/package.json
+ sbom:
+ - sbom-file: https://github.com/argoproj/argo-cd/releases # Every release's assets include SBOMs.
+ sbom-format: SPDX
+ dependencies-lifecycle:
+ policy-url: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#dependencies-lifecycle-policy
+ env-dependencies-policy:
+ policy-url: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#dependencies-lifecycle-policy
diff --git a/USERS.md b/USERS.md
index 652a68c6e679f..3f164796d099f 100644
--- a/USERS.md
+++ b/USERS.md
@@ -25,7 +25,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
-2. [Autodesk](https://www.autodesk.com)
+1. [Autodesk](https://www.autodesk.com)
+1. [Axians ACSP](https://www.axians.fr)
1. [Axual B.V.](https://axual.com)
1. [Back Market](https://www.backmarket.com)
1. [Baloise](https://www.baloise.com)
@@ -39,6 +40,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Bulder Bank](https://bulderbank.no)
+1. [CAM](https://cam-inc.co.jp)
1. [Camptocamp](https://camptocamp.com)
1. [Candis](https://www.candis.io)
1. [Capital One](https://www.capitalone.com)
@@ -92,7 +94,9 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Fave](https://myfave.com)
1. [Flexport](https://www.flexport.com/)
1. [Flip](https://flip.id)
+1. [Fly Security](https://www.flysecurity.com.br/)
1. [Fonoa](https://www.fonoa.com/)
+1. [Fortra](https://www.fortra.com)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Freshop, Inc](https://www.freshop.com/)
1. [Future PLC](https://www.futureplc.com/)
@@ -126,6 +130,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [IBM](https://www.ibm.com/)
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
+1. [IllumiDesk](https://www.illumidesk.com)
1. [imaware](https://imaware.health)
1. [Indeed](https://indeed.com)
1. [Index Exchange](https://www.indexexchange.com/)
@@ -146,6 +151,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Kinguin](https://www.kinguin.net/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
+1. [Kong Inc.](https://konghq.com/)
1. [KPMG](https://kpmg.com/uk)
1. [KubeSphere](https://github.com/kubesphere)
1. [Kurly](https://www.kurly.com/)
@@ -210,10 +216,12 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Patreon](https://www.patreon.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
+1. [Percona](https://percona.com/)
1. [PGS](https://www.pgs.com)
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Pismo](https://pismo.io/)
+1. [PITS Globale Datenrettungsdienste](https://www.pitsdatenrettung.de/)
1. [Platform9 Systems](https://platform9.com/)
1. [Polarpoint.io](https://polarpoint.io)
1. [PostFinance](https://github.com/postfinance)
@@ -240,12 +248,14 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Robotinfra](https://www.robotinfra.com)
1. [Rubin Observatory](https://www.lsst.org)
1. [Saildrone](https://www.saildrone.com/)
+1. [Salad Technologies](https://salad.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [Sauce Labs](https://saucelabs.com/)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [SCRM Lidl International Hub](https://scrm.lidl)
1. [SEEK](https://seek.com.au)
+1. [Semgrep](https://semgrep.com)
1. [SI Analytics](https://si-analytics.ai)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)
@@ -260,6 +270,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Spendesk](https://spendesk.com/)
1. [Splunk](https://splunk.com/)
1. [Spores Labs](https://spores.app)
+1. [Statsig](https://statsig.com)
1. [StreamNative](https://streamnative.io)
1. [Stuart](https://stuart.com/)
1. [Sumo Logic](https://sumologic.com/)
@@ -273,6 +284,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [Techcombank](https://www.techcombank.com.vn/trang-chu)
1. [Technacy](https://www.technacy.it/)
+1. [Telavita](https://www.telavita.com.br/)
1. [Tesla](https://tesla.com/)
1. [The Scale Factory](https://www.scalefactory.com/)
1. [ThousandEyes](https://www.thousandeyes.com/)
diff --git a/applicationset/controllers/applicationset_controller.go b/applicationset/controllers/applicationset_controller.go
index 60bab2564d92c..4f5ac66fc016d 100644
--- a/applicationset/controllers/applicationset_controller.go
+++ b/applicationset/controllers/applicationset_controller.go
@@ -16,7 +16,6 @@ package controllers
import (
"context"
- "encoding/json"
"fmt"
"reflect"
"time"
@@ -25,7 +24,6 @@ import (
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -46,7 +44,6 @@ import (
"github.com/argoproj/argo-cd/v2/applicationset/generators"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
- argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/glob"
@@ -111,13 +108,23 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
+ deleteAllowed := utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete()
+ if !deleteAllowed {
+ if err := r.removeOwnerReferencesOnDeleteAppSet(ctx, applicationSetInfo); err != nil {
+ return ctrl.Result{}, err
+ }
+ controllerutil.RemoveFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName)
+ if err := r.Update(ctx, &applicationSetInfo); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
return ctrl.Result{}, nil
}
// Log a warning if there are unrecognized generators
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
- desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
+ desiredApplications, applicationSetReason, err := r.generateApplications(logCtx, applicationSetInfo)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -163,13 +170,15 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if r.EnableProgressiveSyncs {
if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
- log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
+ // If appset used progressive sync but stopped, clean up the progressive sync application statuses
+ logCtx.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
- err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
+ err := r.setAppSetApplicationStatus(ctx, logCtx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
}
- } else {
+ } else if applicationSetInfo.Spec.Strategy != nil {
+ // appset uses progressive sync
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
@@ -179,7 +188,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
appMap[app.Name] = app
}
- appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
+ appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, applications, desiredApplications, appMap)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
}
@@ -217,7 +226,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if r.EnableProgressiveSyncs {
// trigger appropriate application syncs if RollingSync strategy is enabled
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
- validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)
+ validApps, err = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
@@ -235,7 +244,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
- err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
+ err = r.createOrUpdateInCluster(ctx, logCtx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -249,7 +258,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}
} else {
- err = r.createInCluster(ctx, applicationSetInfo, validApps)
+ err = r.createInCluster(ctx, logCtx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -265,7 +274,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
- err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
+ err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, desiredApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -490,7 +499,7 @@ func getTempApplication(applicationSetTemplate argov1alpha1.ApplicationSetTempla
return &tmplApplication
}
-func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) {
+func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) {
var res []argov1alpha1.Application
var firstError error
@@ -499,7 +508,7 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo, map[string]interface{}{})
if err != nil {
- log.WithError(err).WithField("generator", requestedGenerator).
+ logCtx.WithError(err).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err
@@ -513,8 +522,9 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
for _, p := range a.Params {
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
+
if err != nil {
- log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
+ logCtx.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
@@ -523,17 +533,45 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
}
continue
}
+
+ if applicationSetInfo.Spec.TemplatePatch != nil {
+ patchedApplication, err := r.applyTemplatePatch(app, applicationSetInfo, p)
+
+ if err != nil {
+					logCtx.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
+ Error("error generating application from params")
+
+ if firstError == nil {
+ firstError = err
+ applicationSetReason = argov1alpha1.ApplicationSetReasonRenderTemplateParamsError
+ }
+ continue
+ }
+
+ app = patchedApplication
+ }
+
res = append(res, *app)
}
}
- log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
- log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
+ logCtx.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
+ logCtx.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
}
return res, applicationSetReason, firstError
}
+func (r *ApplicationSetReconciler) applyTemplatePatch(app *argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet, params map[string]interface{}) (*argov1alpha1.Application, error) {
+ replacedTemplate, err := r.Renderer.Replace(*applicationSetInfo.Spec.TemplatePatch, params, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
+
+ if err != nil {
+ return nil, fmt.Errorf("error replacing values in templatePatch: %w", err)
+ }
+
+ return applyTemplatePatch(app, replacedTemplate)
+}
+
func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
return predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
@@ -542,22 +580,24 @@ func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
}
}
-func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
- if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
- // grab the job object, extract the owner...
- app := rawObj.(*argov1alpha1.Application)
- owner := metav1.GetControllerOf(app)
- if owner == nil {
- return nil
- }
- // ...make sure it's a application set...
- if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
- return nil
- }
+func appControllerIndexer(rawObj client.Object) []string {
+	// grab the Application object, extract the owner...
+ app := rawObj.(*argov1alpha1.Application)
+ owner := metav1.GetControllerOf(app)
+ if owner == nil {
+ return nil
+ }
+	// ...make sure it's an ApplicationSet...
+ if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
+ return nil
+ }
+
+ // ...and if so, return it
+ return []string{owner.Name}
+}
- // ...and if so, return it
- return []string{owner.Name}
- }); err != nil {
+func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
+ if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", appControllerIndexer); err != nil {
return fmt.Errorf("error setting up with manager: %w", err)
}
@@ -601,15 +641,17 @@ func (r *ApplicationSetReconciler) updateCache(ctx context.Context, obj client.O
// - For new applications, it will call create
// - For existing application, it will call update
// The function also adds owner reference to all applications, and uses it to delete them.
-func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
+func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var firstError error
// Creates or updates the application in appList
for _, generatedApp := range desiredApplications {
-
- appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
+ // The app's namespace must be the same as the AppSet's namespace to preserve the appsets-in-any-namespace
+ // security boundary.
generatedApp.Namespace = applicationSet.Namespace
+ appLog := logCtx.WithFields(log.Fields{"app": generatedApp.QualifiedName()})
+
// Normalize to avoid fighting with the application controller.
generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec)
@@ -624,7 +666,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
},
}
- action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
+ action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, found, func() error {
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
found.Spec = generatedApp.Spec
@@ -677,13 +719,6 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
found.ObjectMeta.Finalizers = generatedApp.Finalizers
found.ObjectMeta.Labels = generatedApp.Labels
- if found != nil && len(found.Spec.IgnoreDifferences) > 0 {
- err := applyIgnoreDifferences(applicationSet.Spec.IgnoreApplicationDifferences, found, generatedApp)
- if err != nil {
- return fmt.Errorf("failed to apply ignore differences: %w", err)
- }
- }
-
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
@@ -709,57 +744,9 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
return firstError
}
-// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the found application in place.
-func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp argov1alpha1.Application) error {
- diffConfig, err := argodiff.NewDiffConfigBuilder().
- WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
- WithNoCache().
- Build()
- if err != nil {
- return fmt.Errorf("failed to build diff config: %w", err)
- }
- unstructuredFound, err := appToUnstructured(found)
- if err != nil {
- return fmt.Errorf("failed to convert found application to unstructured: %w", err)
- }
- unstructuredGenerated, err := appToUnstructured(&generatedApp)
- if err != nil {
- return fmt.Errorf("failed to convert found application to unstructured: %w", err)
- }
- result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
- if err != nil {
- return fmt.Errorf("failed to normalize application spec: %w", err)
- }
- if len(result.Targets) != 1 {
- return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
- }
- jsonNormalized, err := json.Marshal(result.Targets[0].Object)
- if err != nil {
- return fmt.Errorf("failed to marshal normalized app to json: %w", err)
- }
- err = json.Unmarshal(jsonNormalized, &found)
- if err != nil {
- return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
- }
- // Prohibit jq queries from mutating silly things.
- found.TypeMeta = generatedApp.TypeMeta
- found.Name = generatedApp.Name
- found.Namespace = generatedApp.Namespace
- found.Operation = generatedApp.Operation
- return nil
-}
-
-func appToUnstructured(app *argov1alpha1.Application) (*unstructured.Unstructured, error) {
- u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
- if err != nil {
- return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
- }
- return &unstructured.Unstructured{Object: u}, nil
-}
-
// createInCluster will filter from the desiredApplications only the application that needs to be created
// Then it will call createOrUpdateInCluster to do the actual create
-func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
+func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var createApps []argov1alpha1.Application
current, err := r.getCurrentApplications(ctx, applicationSet)
@@ -782,13 +769,12 @@ func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicat
}
}
- return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
+ return r.createOrUpdateInCluster(ctx, logCtx, applicationSet, createApps)
}
-func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
- // TODO: Should this use the context param?
+func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, applicationSet argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
var current argov1alpha1.ApplicationList
-	err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name})
+	err := r.Client.List(ctx, &current, client.MatchingFields{".metadata.controller": applicationSet.Name}, client.InNamespace(applicationSet.Namespace))
if err != nil {
return nil, fmt.Errorf("error retrieving applications: %w", err)
@@ -799,7 +785,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, app
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators had been called and generated applications
-func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
+func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
// clusterList, err := argoDB.ListClusters(ctx)
@@ -823,15 +809,15 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicat
// Delete apps that are not in m[string]bool
var firstError error
for _, app := range current {
- appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
+ logCtx = logCtx.WithField("app", app.QualifiedName())
_, exists := m[app.Name]
if !exists {
// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
- err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
+ err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, logCtx)
if err != nil {
- appLog.WithError(err).Error("failed to update Application")
+ logCtx.WithError(err).Error("failed to update Application")
if firstError != nil {
firstError = err
}
@@ -840,14 +826,14 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicat
err = r.Client.Delete(ctx, &app)
if err != nil {
- appLog.WithError(err).Error("failed to delete Application")
+ logCtx.WithError(err).Error("failed to delete Application")
if firstError != nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
- appLog.Log(log.InfoLevel, "Deleted application")
+ logCtx.Log(log.InfoLevel, "Deleted application")
}
}
return firstError
@@ -910,7 +896,11 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
if len(newFinalizers) != len(app.Finalizers) {
updated := app.DeepCopy()
updated.Finalizers = newFinalizers
- if err := r.Client.Patch(ctx, updated, client.MergeFrom(app)); err != nil {
+ patch := client.MergeFrom(app)
+ if log.IsLevelEnabled(log.DebugLevel) {
+ utils.LogPatch(appLog, patch, updated)
+ }
+ if err := r.Client.Patch(ctx, updated, patch); err != nil {
return fmt.Errorf("error updating finalizers: %w", err)
}
r.updateCache(ctx, updated, appLog)
@@ -925,21 +915,38 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
return nil
}
-func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
+func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx context.Context, applicationSet argov1alpha1.ApplicationSet) error {
+ applications, err := r.getCurrentApplications(ctx, applicationSet)
+ if err != nil {
+ return err
+ }
+
+ for _, app := range applications {
+ app.SetOwnerReferences([]metav1.OwnerReference{})
+ err := r.Client.Update(ctx, &app)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
- appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
+ appDependencyList, appStepMap, err := r.buildAppDependencyList(logCtx, appset, desiredApplications)
if err != nil {
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
}
- _, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
+ _, err = r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
}
- log.Infof("ApplicationSet %v step list:", appset.Name)
+ logCtx.Infof("ApplicationSet %v step list:", appset.Name)
for i, step := range appDependencyList {
- log.Infof("step %v: %+v", i+1, step)
+ logCtx.Infof("step %v: %+v", i+1, step)
}
appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
@@ -947,9 +954,9 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
return nil, fmt.Errorf("failed to build app sync map: %w", err)
}
- log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
+ logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
- _, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
+ _, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap, appMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
}
@@ -963,7 +970,7 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
}
// this list tracks which Applications belong to each RollingUpdate step
-func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
+func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
return [][]string{}, map[string]int{}, nil
@@ -990,9 +997,9 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, a
for _, matchExpression := range step.MatchExpressions {
if val, ok := app.Labels[matchExpression.Key]; ok {
- valueMatched := labelMatchedExpression(val, matchExpression)
+ valueMatched := labelMatchedExpression(logCtx, val, matchExpression)
- if !valueMatched { // none of the matchExpression values was a match with the Application'ss labels
+ if !valueMatched { // none of the matchExpression values was a match with the Application's labels
selected = false
break
}
@@ -1005,7 +1012,7 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, a
if selected {
appDependencyList[i] = append(appDependencyList[i], app.Name)
if val, ok := appStepMap[app.Name]; ok {
- log.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
+ logCtx.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
} else {
appStepMap[app.Name] = i
}
@@ -1016,9 +1023,9 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, a
return appDependencyList, appStepMap, nil
}
-func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
+func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
if matchExpression.Operator != "In" && matchExpression.Operator != "NotIn" {
- log.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
+ logCtx.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
return false
}
@@ -1122,7 +1129,7 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
}
// check each Application's status and promote Applications to the next status if needed
-func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
+func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
@@ -1155,7 +1162,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
- log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
+ logCtx.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
@@ -1167,15 +1174,15 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
// this covers race conditions where syncs initiated by RollingSync miraculously have a sync time before the transition to Pending state occurred (could be a few seconds)
if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.Add(time.Duration(10)*time.Second).After(currentAppStatus.LastTransitionTime.Time) {
if !app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
- log.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
+ logCtx.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
}
- log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
+ logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
- log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
+ logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
@@ -1184,7 +1191,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
- log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
+ logCtx.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
@@ -1192,7 +1199,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
- log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
+ logCtx.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
@@ -1202,7 +1209,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
appStatuses = append(appStatuses, currentAppStatus)
}
- err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
+ err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
if err != nil {
return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err)
}
@@ -1211,7 +1218,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
// check Applications that are in Waiting status and promote them to Pending if needed
-func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
+func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
@@ -1253,7 +1260,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if maxUpdate != nil {
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
if err != nil {
- log.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
+ logCtx.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
}
// ensure that percentage values greater than 0% always result in at least 1 Application being selected
@@ -1263,13 +1270,13 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
- log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
+ logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
}
}
if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
- log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
+ logCtx.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
appStatus.LastTransitionTime = &now
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
@@ -1282,7 +1289,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
}
}
- err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
+ err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
if err != nil {
return nil, fmt.Errorf("failed to set AppSet app status: %w", err)
}
@@ -1344,7 +1351,7 @@ func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplica
// setApplicationSetApplicationStatus updates the ApplicationSet's status field
// with any new/changed Application statuses.
-func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
+func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
needToUpdateStatus := false
if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) {
@@ -1378,7 +1385,7 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil {
- log.Errorf("unable to set application set status: %v", err)
+ logCtx.Errorf("unable to set application set status: %v", err)
return fmt.Errorf("unable to set application set status: %v", err)
}
@@ -1393,7 +1400,7 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
return nil
}
-func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
+func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
rolloutApps := []argov1alpha1.Application{}
for i := range validApps {
pruneEnabled := false
@@ -1413,7 +1420,7 @@ func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, ap
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
- log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
+ logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
}
rolloutApps = append(rolloutApps, validApps[i])
@@ -1457,29 +1464,51 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
CreateFunc: func(e event.CreateEvent) bool {
// if we are the owner and there is a create event, we most likely created it and do not need to
// re-reconcile
- log.Debugln("received create event from owning an application")
+ if log.IsLevelEnabled(log.DebugLevel) {
+ var appName string
+ app, isApp := e.Object.(*argov1alpha1.Application)
+ if isApp {
+ appName = app.QualifiedName()
+ }
+ log.WithField("app", appName).Debugln("received create event from owning an application")
+ }
return false
},
DeleteFunc: func(e event.DeleteEvent) bool {
- log.Debugln("received delete event from owning an application")
+ if log.IsLevelEnabled(log.DebugLevel) {
+ var appName string
+ app, isApp := e.Object.(*argov1alpha1.Application)
+ if isApp {
+ appName = app.QualifiedName()
+ }
+ log.WithField("app", appName).Debugln("received delete event from owning an application")
+ }
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
- log.Debugln("received update event from owning an application")
appOld, isApp := e.ObjectOld.(*argov1alpha1.Application)
if !isApp {
return false
}
+ logCtx := log.WithField("app", appOld.QualifiedName())
+ logCtx.Debugln("received update event from owning an application")
appNew, isApp := e.ObjectNew.(*argov1alpha1.Application)
if !isApp {
return false
}
requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
- log.Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
+ logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
return requeue
},
GenericFunc: func(e event.GenericEvent) bool {
- log.Debugln("received generic event from owning an application")
+ if log.IsLevelEnabled(log.DebugLevel) {
+ var appName string
+ app, isApp := e.Object.(*argov1alpha1.Application)
+ if isApp {
+ appName = app.QualifiedName()
+ }
+ log.WithField("app", appName).Debugln("received generic event from owning an application")
+ }
return true
},
}
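The controller changes above replace package-level logging with a logrus `*log.Entry` (`logCtx`) that is threaded through the reconciliation helpers, and they guard debug-only field construction behind `log.IsLevelEnabled`. Below is a minimal, standalone sketch of that pattern; it is not taken from the controller, and the `reconcileOne` helper and `appName` parameter are illustrative only.

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

// reconcileOne is a stand-in for a reconciler helper that receives a
// pre-built contextual logger instead of using the package-level logger.
func reconcileOne(logCtx *log.Entry, appName string) {
	// Derive a narrower logger for this Application; every message below
	// automatically carries the "applicationset" and "app" fields.
	appLog := logCtx.WithField("app", appName)
	appLog.Info("reconciling application")

	// Only pay for building expensive debug fields when debug logging is on,
	// mirroring the IsLevelEnabled guards added to the owns-handler predicates.
	if log.IsLevelEnabled(log.DebugLevel) {
		appLog.WithField("detail", "expensive-to-compute").Debug("extra diagnostics")
	}
}

func main() {
	logCtx := log.WithField("applicationset", "guestbook-set")
	reconcileOne(logCtx, "guestbook-dev")
}
```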
diff --git a/applicationset/controllers/applicationset_controller_test.go b/applicationset/controllers/applicationset_controller_test.go
index 7c3721e2ee6ed..81fbad95ac50b 100644
--- a/applicationset/controllers/applicationset_controller_test.go
+++ b/applicationset/controllers/applicationset_controller_test.go
@@ -12,8 +12,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/require"
- "gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -88,6 +86,12 @@ func (g *generatorMock) GenerateParams(appSetGenerator *v1alpha1.ApplicationSetG
return args.Get(0).([]map[string]interface{}), args.Error(1)
}
+func (g *generatorMock) Replace(tmpl string, replaceMap map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (string, error) {
+ args := g.Called(tmpl, replaceMap, useGoTemplate, goTemplateOptions)
+
+ return args.Get(0).(string), args.Error(1)
+}
+
type rendererMock struct {
mock.Mock
}
@@ -109,6 +113,12 @@ func (r *rendererMock) RenderTemplateParams(tmpl *v1alpha1.Application, syncPoli
}
+func (r *rendererMock) Replace(tmpl string, replaceMap map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (string, error) {
+ args := r.Called(tmpl, replaceMap, useGoTemplate, goTemplateOptions)
+
+ return args.Get(0).(string), args.Error(1)
+}
+
func TestExtractApplications(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
@@ -220,7 +230,7 @@ func TestExtractApplications(t *testing.T) {
Cache: &fakeCache{},
}
- got, reason, err := r.generateApplications(v1alpha1.ApplicationSet{
+ got, reason, err := r.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
@@ -333,7 +343,7 @@ func TestMergeTemplateApplications(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}
- got, _, _ := r.generateApplications(v1alpha1.ApplicationSet{
+ got, _, _ := r.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
@@ -981,6 +991,296 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
},
},
},
+ }, {
+ // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
+ name: "Ensure that ignored targetRevision difference doesn't cause an update, even if another field changes",
+ appSet: v1alpha1.ApplicationSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "name",
+ Namespace: "namespace",
+ },
+ Spec: v1alpha1.ApplicationSetSpec{
+ IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{".spec.source.targetRevision"}},
+ },
+ Template: v1alpha1.ApplicationSetTemplate{
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Source: &v1alpha1.ApplicationSource{
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ TargetRevision: "foo",
+ },
+ },
+ },
+ },
+ },
+ existingApps: []v1alpha1.Application{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ ResourceVersion: "2",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Source: &v1alpha1.ApplicationSource{
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ TargetRevision: "bar",
+ },
+ },
+ },
+ },
+ desiredApps: []v1alpha1.Application{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Source: &v1alpha1.ApplicationSource{
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ // The targetRevision is ignored, so this should not be updated.
+ TargetRevision: "foo",
+ // This should be updated.
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Parameters: []v1alpha1.HelmParameter{
+ {Name: "hi", Value: "there"},
+ },
+ },
+ },
+ },
+ },
+ },
+ expected: []v1alpha1.Application{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ ResourceVersion: "3",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Source: &v1alpha1.ApplicationSource{
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ // This is the existing value from the cluster, which should not be updated because the field is ignored.
+ TargetRevision: "bar",
+ // This was missing on the cluster, so it should be added.
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Parameters: []v1alpha1.HelmParameter{
+ {Name: "hi", Value: "there"},
+ },
+ },
+ },
+ },
+ },
+ },
+ }, {
+ // For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
+ name: "ignore parameters added to a multi-source app in the cluster",
+ appSet: v1alpha1.ApplicationSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "name",
+ Namespace: "namespace",
+ },
+ Spec: v1alpha1.ApplicationSetSpec{
+ IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
+ },
+ Template: v1alpha1.ApplicationSetTemplate{
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "foo: bar",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ existingApps: []v1alpha1.Application{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ ResourceVersion: "2",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "foo: bar",
+ Parameters: []v1alpha1.HelmParameter{
+ {Name: "hi", Value: "there"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ desiredApps: []v1alpha1.Application{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "foo: bar",
+ },
+ },
+ },
+ },
+ },
+ },
+ expected: []v1alpha1.Application{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ // This should not be updated, because reconciliation shouldn't modify the App.
+ ResourceVersion: "2",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "foo: bar",
+ Parameters: []v1alpha1.HelmParameter{
+ // This existed only in the cluster, but it shouldn't be removed, because the field is ignored.
+ {Name: "hi", Value: "there"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }, {
+ name: "Demonstrate limitation of MergePatch", // Maybe we can fix this in Argo CD 3.0: https://github.com/argoproj/argo-cd/issues/15975
+ appSet: v1alpha1.ApplicationSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "name",
+ Namespace: "namespace",
+ },
+ Spec: v1alpha1.ApplicationSetSpec{
+ IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
+ },
+ Template: v1alpha1.ApplicationSetTemplate{
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "new: values",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ existingApps: []v1alpha1.Application{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ ResourceVersion: "2",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "foo: bar",
+ Parameters: []v1alpha1.HelmParameter{
+ {Name: "hi", Value: "there"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ desiredApps: []v1alpha1.Application{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "new: values",
+ },
+ },
+ },
+ },
+ },
+ },
+ expected: []v1alpha1.Application{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ ResourceVersion: "3",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Sources: []v1alpha1.ApplicationSource{
+ {
+ RepoURL: "https://git.example.com/test-org/test-repo.git",
+ Helm: &v1alpha1.ApplicationSourceHelm{
+ Values: "new: values",
+ // The Parameters field got blown away, because the values field changed. MergePatch
+ // doesn't merge list items, it replaces the whole list if an item changes.
+ // If we eventually add a `name` field to Sources, we can use StrategicMergePatch.
+ },
+ },
+ },
+ },
+ },
+ },
},
} {
@@ -994,7 +1294,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
initObjs = append(initObjs, &a)
}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -1003,8 +1303,8 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
Cache: &fakeCache{},
}
- err = r.createOrUpdateInCluster(context.TODO(), c.appSet, c.desiredApps)
- assert.Nil(t, err)
+ err = r.createOrUpdateInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
+ assert.NoError(t, err)
for _, obj := range c.expected {
got := &v1alpha1.Application{}
@@ -1014,7 +1314,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
}, got)
err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
- assert.Nil(t, err)
assert.Equal(t, obj, *got)
}
})
@@ -1088,7 +1387,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
initObjs := []crtclient.Object{&app, &appSet}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1250,7 +1549,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
initObjs := []crtclient.Object{&app, &appSet}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1306,6 +1605,81 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
}
}
+func TestRemoveOwnerReferencesOnDeleteAppSet(t *testing.T) {
+ scheme := runtime.NewScheme()
+ err := v1alpha1.AddToScheme(scheme)
+ assert.Nil(t, err)
+
+ err = v1alpha1.AddToScheme(scheme)
+ assert.Nil(t, err)
+
+ for _, c := range []struct {
+ // name is human-readable test name
+ name string
+ }{
+ {
+ name: "ownerReferences cleared",
+ },
+ } {
+ t.Run(c.name, func(t *testing.T) {
+ appSet := v1alpha1.ApplicationSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "name",
+ Namespace: "namespace",
+ Finalizers: []string{v1alpha1.ResourcesFinalizerName},
+ },
+ Spec: v1alpha1.ApplicationSetSpec{
+ Template: v1alpha1.ApplicationSetTemplate{
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ },
+ },
+ },
+ }
+
+ app := v1alpha1.Application{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "app1",
+ Namespace: "namespace",
+ },
+ Spec: v1alpha1.ApplicationSpec{
+ Project: "project",
+ Source: &v1alpha1.ApplicationSource{Path: "path", TargetRevision: "revision", RepoURL: "repoURL"},
+ Destination: v1alpha1.ApplicationDestination{
+ Namespace: "namespace",
+ Server: "https://kubernetes.default.svc",
+ },
+ },
+ }
+
+ err := controllerutil.SetControllerReference(&appSet, &app, scheme)
+ assert.NoError(t, err, "Unexpected error")
+
+ initObjs := []crtclient.Object{&app, &appSet}
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
+
+ r := ApplicationSetReconciler{
+ Client: client,
+ Scheme: scheme,
+ Recorder: record.NewFakeRecorder(10),
+ KubeClientset: nil,
+ Cache: &fakeCache{},
+ }
+
+ err = r.removeOwnerReferencesOnDeleteAppSet(context.Background(), appSet)
+ assert.NoError(t, err, "Unexpected error")
+
+ retrievedApp := v1alpha1.Application{}
+ err = client.Get(context.Background(), crtclient.ObjectKeyFromObject(&app), &retrievedApp)
+ assert.NoError(t, err, "Unexpected error")
+
+ ownerReferencesRemoved := len(retrievedApp.OwnerReferences) == 0
+ assert.True(t, ownerReferencesRemoved)
+ })
+ }
+}
+
func TestCreateApplications(t *testing.T) {
scheme := runtime.NewScheme()
@@ -1482,7 +1856,7 @@ func TestCreateApplications(t *testing.T) {
initObjs = append(initObjs, &a)
}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -1491,7 +1865,7 @@ func TestCreateApplications(t *testing.T) {
Cache: &fakeCache{},
}
- err = r.createInCluster(context.TODO(), c.appSet, c.apps)
+ err = r.createInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.apps)
assert.Nil(t, err)
for _, obj := range c.expected {
@@ -1626,7 +2000,7 @@ func TestDeleteInCluster(t *testing.T) {
initObjs = append(initObjs, &temp)
}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -1635,7 +2009,7 @@ func TestDeleteInCluster(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}
- err = r.deleteInCluster(context.TODO(), c.appSet, c.desiredApps)
+ err = r.deleteInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
assert.Nil(t, err)
// For each of the expected objects, verify they exist on the cluster
@@ -2000,7 +2374,15 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&project}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
+ goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
+ badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
+ argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
+ argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
+ argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
+ goodCluster,
+ }}, nil)
+
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
@@ -2076,7 +2458,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -2146,7 +2528,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2316,7 +2698,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2445,17 +2827,24 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
{
name: "Generate an application from a go template application set manifest using a pull request generator",
params: []map[string]interface{}{{
- "number": "1",
- "branch": "branch1",
- "branch_slug": "branchSlug1",
- "head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
- "head_short_sha": "089d92cb",
- "labels": []string{"label1"}}},
+ "number": "1",
+ "branch": "branch1",
+ "branch_slug": "branchSlug1",
+ "head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
+ "head_short_sha": "089d92cb",
+ "branch_slugify_default": "feat/a_really+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
+ "branch_slugify_smarttruncate_disabled": "feat/areallylongpullrequestnametotestargoslugificationandbranchnameshorteningfeature",
+ "branch_slugify_smarttruncate_enabled": "feat/testwithsmarttruncateenabledramdomlonglistofcharacters",
+ "labels": []string{"label1"}},
+ },
template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
Name: "AppSet-{{.branch}}-{{.number}}",
Labels: map[string]string{
- "app1": "{{index .labels 0}}",
+ "app1": "{{index .labels 0}}",
+ "branch-test1": "AppSet-{{.branch_slugify_default | slugify }}",
+ "branch-test2": "AppSet-{{.branch_slugify_smarttruncate_disabled | slugify 49 false }}",
+ "branch-test3": "AppSet-{{.branch_slugify_smarttruncate_enabled | slugify 50 true }}",
},
},
Spec: v1alpha1.ApplicationSpec{
@@ -2474,7 +2863,10 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "AppSet-branch1-1",
Labels: map[string]string{
- "app1": "label1",
+ "app1": "label1",
+ "branch-test1": "AppSet-feat-a-really-long-pull-request-name-to-test-argo",
+ "branch-test2": "AppSet-feat-areallylongpullrequestnametotestargoslugific",
+ "branch-test3": "AppSet-feat",
},
},
Spec: v1alpha1.ApplicationSpec{
@@ -2517,7 +2909,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}
- gotApp, _, _ := appSetReconciler.generateApplications(v1alpha1.ApplicationSet{
+ gotApp, _, _ := appSetReconciler.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Generators: []v1alpha1.ApplicationSetGenerator{{
@@ -2627,7 +3019,7 @@ func TestPolicies(t *testing.T) {
},
}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -2806,7 +3198,7 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
KubeClientset: kubeclientset,
}
- err = r.setAppSetApplicationStatus(context.TODO(), &cc.appSet, cc.appStatuses)
+ err = r.setAppSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appStatuses)
assert.Nil(t, err)
assert.Equal(t, cc.expectedAppStatuses, cc.appSet.Status.ApplicationStatus)
@@ -3569,7 +3961,7 @@ func TestBuildAppDependencyList(t *testing.T) {
KubeClientset: kubeclientset,
}
- appDependencyList, appStepMap, err := r.buildAppDependencyList(context.TODO(), cc.appSet, cc.apps)
+ appDependencyList, appStepMap, err := r.buildAppDependencyList(log.NewEntry(log.StandardLogger()), cc.appSet, cc.apps)
assert.Equal(t, err, nil, "expected no errors, but errors occurred")
assert.Equal(t, cc.expectedList, appDependencyList, "expected appDependencyList did not match actual")
assert.Equal(t, cc.expectedStepMap, appStepMap, "expected appStepMap did not match actual")
@@ -4823,7 +5215,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
KubeClientset: kubeclientset,
}
- appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), &cc.appSet, cc.apps, cc.appStepMap)
+ appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, cc.appStepMap)
// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {
@@ -5577,7 +5969,7 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
KubeClientset: kubeclientset,
}
- appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap)
+ appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap)
// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {
@@ -5719,173 +6111,3 @@ func TestOwnsHandler(t *testing.T) {
})
}
}
-
-func Test_applyIgnoreDifferences(t *testing.T) {
- appMeta := metav1.TypeMeta{
- APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
- Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
- }
- testCases := []struct {
- name string
- ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
- foundApp string
- generatedApp string
- expectedApp string
- }{
- {
- name: "empty ignoreDifferences",
- foundApp: `
-spec: {}`,
- generatedApp: `
-spec: {}`,
- expectedApp: `
-spec: {}`,
- },
- {
- // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
- name: "ignore target revision with jq",
- ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
- {JQPathExpressions: []string{".spec.source.targetRevision"}},
- },
- foundApp: `
-spec:
- source:
- targetRevision: foo`,
- generatedApp: `
-spec:
- source:
- targetRevision: bar`,
- expectedApp: `
-spec:
- source:
- targetRevision: foo`,
- },
- {
- // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
- name: "ignore helm parameter with jq",
- ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
- {JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
- },
- foundApp: `
-spec:
- source:
- helm:
- parameters:
- - name: image.tag
- value: test
- - name: another
- value: value`,
- generatedApp: `
-spec:
- source:
- helm:
- parameters:
- - name: image.tag
- value: v1.0.0
- - name: another
- value: value`,
- expectedApp: `
-spec:
- source:
- helm:
- parameters:
- - name: image.tag
- value: test
- - name: another
- value: value`,
- },
- {
- // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
- name: "ignore auto-sync with jq",
- ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
- {JQPathExpressions: []string{".spec.syncPolicy.automated"}},
- },
- foundApp: `
-spec:
- syncPolicy:
- retry:
- limit: 5`,
- generatedApp: `
-spec:
- syncPolicy:
- automated:
- selfHeal: true
- retry:
- limit: 5`,
- expectedApp: `
-spec:
- syncPolicy:
- retry:
- limit: 5`,
- },
- {
- // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
- name: "ignore a one-off annotation with jq",
- ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
- {JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
- },
- foundApp: `
-metadata:
- annotations:
- foo.bar: baz
- some.other: annotation`,
- generatedApp: `
-metadata:
- annotations:
- some.other: annotation`,
- expectedApp: `
-metadata:
- annotations:
- foo.bar: baz
- some.other: annotation`,
- },
- {
- // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
- name: "ignore the source.plugin field with a json pointer",
- ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
- {JSONPointers: []string{"/spec/source/plugin"}},
- },
- foundApp: `
-spec:
- source:
- plugin:
- parameters:
- - name: url
- string: https://example.com`,
- generatedApp: `
-spec:
- source:
- plugin:
- parameters:
- - name: url
- string: https://example.com/wrong`,
- expectedApp: `
-spec:
- source:
- plugin:
- parameters:
- - name: url
- string: https://example.com`,
- },
- }
-
- for _, tc := range testCases {
- tc := tc
- t.Run(tc.name, func(t *testing.T) {
- t.Parallel()
- foundApp := v1alpha1.Application{TypeMeta: appMeta}
- err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
- require.NoError(t, err, tc.foundApp)
- generatedApp := v1alpha1.Application{TypeMeta: appMeta}
- err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
- require.NoError(t, err, tc.generatedApp)
- err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, generatedApp)
- require.NoError(t, err)
- jsonFound, err := json.Marshal(tc.foundApp)
- require.NoError(t, err)
- jsonExpected, err := json.Marshal(tc.expectedApp)
- require.NoError(t, err)
- assert.Equal(t, string(jsonExpected), string(jsonFound))
- })
- }
-}
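The "Demonstrate limitation of MergePatch" case above hinges on RFC 7386 merge-patch semantics: an array in the patch replaces the target array wholesale, so Helm parameters that exist only on the cluster are dropped once the `values` field changes. The sketch below illustrates that behavior using the `github.com/evanphx/json-patch` module; that library choice is an assumption for illustration, and the controller's internal merge implementation may differ.

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Existing Application source on the cluster: values plus an extra Helm parameter.
	original := []byte(`{"sources":[{"repoURL":"https://git.example.com/test-org/test-repo.git","helm":{"values":"foo: bar","parameters":[{"name":"hi","value":"there"}]}}]}`)

	// Desired source generated from the template: only the values changed.
	patch := []byte(`{"sources":[{"repoURL":"https://git.example.com/test-org/test-repo.git","helm":{"values":"new: values"}}]}`)

	// RFC 7386 merge patch replaces arrays wholesale, so the "parameters"
	// entry that existed only on the cluster is dropped from the result.
	merged, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}
```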
diff --git a/applicationset/controllers/templatePatch.go b/applicationset/controllers/templatePatch.go
new file mode 100644
index 0000000000000..f8efd9f376996
--- /dev/null
+++ b/applicationset/controllers/templatePatch.go
@@ -0,0 +1,46 @@
+package controllers
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+
+ "github.com/argoproj/argo-cd/v2/applicationset/utils"
+ appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+)
+
+func applyTemplatePatch(app *appv1.Application, templatePatch string) (*appv1.Application, error) {
+
+ appString, err := json.Marshal(app)
+ if err != nil {
+ return nil, fmt.Errorf("error while marhsalling Application %w", err)
+ }
+
+ convertedTemplatePatch, err := utils.ConvertYAMLToJSON(templatePatch)
+
+ if err != nil {
+ return nil, fmt.Errorf("error while converting template to json %q: %w", convertedTemplatePatch, err)
+ }
+
+ if err := json.Unmarshal([]byte(convertedTemplatePatch), &appv1.Application{}); err != nil {
+ return nil, fmt.Errorf("invalid templatePatch %q: %w", convertedTemplatePatch, err)
+ }
+
+ data, err := strategicpatch.StrategicMergePatch(appString, []byte(convertedTemplatePatch), appv1.Application{})
+
+ if err != nil {
+ return nil, fmt.Errorf("error while applying templatePatch template to json %q: %w", convertedTemplatePatch, err)
+ }
+
+ finalApp := appv1.Application{}
+ err = json.Unmarshal(data, &finalApp)
+ if err != nil {
+ return nil, fmt.Errorf("error while unmarhsalling patched application: %w", err)
+ }
+
+ // Prevent changes to the `project` field. This helps prevent malicious template patches
+ finalApp.Spec.Project = app.Spec.Project
+
+ return &finalApp, nil
+}
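A hedged usage sketch of `applyTemplatePatch` as exercised by the tests below: the rendered patch adds an annotation, while the `spec.project` override is discarded because the function pins the project back to the template's value. The test name and fixture values here are illustrative only and are not part of the real suite.

```go
package controllers

import (
	"testing"

	"github.com/stretchr/testify/require"

	appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// A sketch (not part of the real suite) showing the intended call pattern:
// the rendered templatePatch is merged onto the generated Application, and
// spec.project is pinned back to the template's value afterwards.
func TestApplyTemplatePatch_sketch(t *testing.T) {
	app := &appv1.Application{
		Spec: appv1.ApplicationSpec{Project: "default"},
	}

	patched, err := applyTemplatePatch(app, `
metadata:
  annotations:
    team: platform
spec:
  project: attacker-project`)
	require.NoError(t, err)

	// The annotation from the patch is applied...
	require.Equal(t, "platform", patched.Annotations["team"])
	// ...but the project override is discarded by design.
	require.Equal(t, "default", patched.Spec.Project)
}
```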
diff --git a/applicationset/controllers/templatePatch_test.go b/applicationset/controllers/templatePatch_test.go
new file mode 100644
index 0000000000000..c1a794077c8ee
--- /dev/null
+++ b/applicationset/controllers/templatePatch_test.go
@@ -0,0 +1,249 @@
+package controllers
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+)
+
+func Test_ApplyTemplatePatch(t *testing.T) {
+ testCases := []struct {
+ name string
+ appTemplate *appv1.Application
+ templatePatch string
+ expectedApp *appv1.Application
+ }{
+ {
+ name: "patch with JSON",
+ appTemplate: &appv1.Application{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-cluster-guestbook",
+ Namespace: "namespace",
+ Finalizers: []string{"resources-finalizer.argocd.argoproj.io"},
+ },
+ Spec: appv1.ApplicationSpec{
+ Project: "default",
+ Source: &appv1.ApplicationSource{
+ RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
+ TargetRevision: "HEAD",
+ Path: "guestbook",
+ },
+ Destination: appv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "guestbook",
+ },
+ },
+ },
+ templatePatch: `{
+ "metadata": {
+ "annotations": {
+ "annotation-some-key": "annotation-some-value"
+ }
+ },
+ "spec": {
+ "source": {
+ "helm": {
+ "valueFiles": [
+ "values.test.yaml",
+ "values.big.yaml"
+ ]
+ }
+ },
+ "syncPolicy": {
+ "automated": {
+ "prune": true
+ }
+ }
+ }
+ }`,
+ expectedApp: &appv1.Application{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-cluster-guestbook",
+ Namespace: "namespace",
+ Finalizers: []string{"resources-finalizer.argocd.argoproj.io"},
+ Annotations: map[string]string{
+ "annotation-some-key": "annotation-some-value",
+ },
+ },
+ Spec: appv1.ApplicationSpec{
+ Project: "default",
+ Source: &appv1.ApplicationSource{
+ RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
+ TargetRevision: "HEAD",
+ Path: "guestbook",
+ Helm: &appv1.ApplicationSourceHelm{
+ ValueFiles: []string{
+ "values.test.yaml",
+ "values.big.yaml",
+ },
+ },
+ },
+ Destination: appv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "guestbook",
+ },
+ SyncPolicy: &appv1.SyncPolicy{
+ Automated: &appv1.SyncPolicyAutomated{
+ Prune: true,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "patch with YAML",
+ appTemplate: &appv1.Application{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-cluster-guestbook",
+ Namespace: "namespace",
+ Finalizers: []string{"resources-finalizer.argocd.argoproj.io"},
+ },
+ Spec: appv1.ApplicationSpec{
+ Project: "default",
+ Source: &appv1.ApplicationSource{
+ RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
+ TargetRevision: "HEAD",
+ Path: "guestbook",
+ },
+ Destination: appv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "guestbook",
+ },
+ },
+ },
+ templatePatch: `
+metadata:
+ annotations:
+ annotation-some-key: annotation-some-value
+spec:
+ source:
+ helm:
+ valueFiles:
+ - values.test.yaml
+ - values.big.yaml
+ syncPolicy:
+ automated:
+ prune: true`,
+ expectedApp: &appv1.Application{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-cluster-guestbook",
+ Namespace: "namespace",
+ Finalizers: []string{"resources-finalizer.argocd.argoproj.io"},
+ Annotations: map[string]string{
+ "annotation-some-key": "annotation-some-value",
+ },
+ },
+ Spec: appv1.ApplicationSpec{
+ Project: "default",
+ Source: &appv1.ApplicationSource{
+ RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
+ TargetRevision: "HEAD",
+ Path: "guestbook",
+ Helm: &appv1.ApplicationSourceHelm{
+ ValueFiles: []string{
+ "values.test.yaml",
+ "values.big.yaml",
+ },
+ },
+ },
+ Destination: appv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "guestbook",
+ },
+ SyncPolicy: &appv1.SyncPolicy{
+ Automated: &appv1.SyncPolicyAutomated{
+ Prune: true,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "project field isn't overwritten",
+ appTemplate: &appv1.Application{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-cluster-guestbook",
+ Namespace: "namespace",
+ },
+ Spec: appv1.ApplicationSpec{
+ Project: "default",
+ Source: &appv1.ApplicationSource{
+ RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
+ TargetRevision: "HEAD",
+ Path: "guestbook",
+ },
+ Destination: appv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "guestbook",
+ },
+ },
+ },
+ templatePatch: `
+spec:
+ project: my-project`,
+ expectedApp: &appv1.Application{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Application",
+ APIVersion: "argoproj.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-cluster-guestbook",
+ Namespace: "namespace",
+ },
+ Spec: appv1.ApplicationSpec{
+ Project: "default",
+ Source: &appv1.ApplicationSource{
+ RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
+ TargetRevision: "HEAD",
+ Path: "guestbook",
+ },
+ Destination: appv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "guestbook",
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tcc := tc
+ t.Run(tcc.name, func(t *testing.T) {
+ result, err := applyTemplatePatch(tcc.appTemplate, tcc.templatePatch)
+ require.NoError(t, err)
+ assert.Equal(t, *tcc.expectedApp, *result)
+ })
+ }
+}
+
+func TestError(t *testing.T) {
+ app := &appv1.Application{}
+
+ result, err := applyTemplatePatch(app, "hello world")
+ require.Error(t, err)
+ require.Nil(t, result)
+}
diff --git a/applicationset/generators/git.go b/applicationset/generators/git.go
index 07c1b11849cd0..57fe2835b8df0 100644
--- a/applicationset/generators/git.go
+++ b/applicationset/generators/git.go
@@ -56,12 +56,14 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
return nil, EmptyAppSetGeneratorError
}
+ noRevisionCache := appSet.RefreshRequired()
+
var err error
var res []map[string]interface{}
if len(appSetGenerator.Git.Directories) != 0 {
- res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
+ res, err = g.generateParamsForGitDirectories(appSetGenerator, noRevisionCache, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
} else if len(appSetGenerator.Git.Files) != 0 {
- res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
+ res, err = g.generateParamsForGitFiles(appSetGenerator, noRevisionCache, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
} else {
return nil, EmptyAppSetGeneratorError
}
@@ -72,10 +74,10 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
return res, nil
}
-func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
+func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, noRevisionCache bool, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
// Directories, not files
- allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
+ allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, noRevisionCache)
if err != nil {
return nil, fmt.Errorf("error getting directories from repo: %w", err)
}
@@ -98,12 +100,12 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
return res, nil
}
-func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
+func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, noRevisionCache bool, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
// Get all files that match the requested path string, removing duplicates
allFiles := make(map[string][]byte)
for _, requestedPath := range appSetGenerator.Git.Files {
- files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path)
+ files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path, noRevisionCache)
if err != nil {
return nil, err
}
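The git generator now derives `noRevisionCache` from `appSet.RefreshRequired()` and forwards it to the repository service, so a requested refresh bypasses the cached revision. Below is a rough sketch of the call shape implied by the updated mocks; the `Repos` interface and `listApps` helper are assumptions for illustration, not the actual service definition.

```go
package main

import (
	"context"
	"fmt"
)

// Repos mirrors the shape implied by the updated mocks: both lookups now take
// a noRevisionCache flag so callers can force a fresh resolution of the revision.
type Repos interface {
	GetDirectories(ctx context.Context, repoURL, revision string, noRevisionCache bool) ([]string, error)
	GetFiles(ctx context.Context, repoURL, revision, pattern string, noRevisionCache bool) (map[string][]byte, error)
}

// listApps shows the intended call pattern: the flag comes from the
// ApplicationSet (e.g. a refresh annotation) and is simply threaded through.
func listApps(ctx context.Context, repos Repos, refreshRequested bool) error {
	dirs, err := repos.GetDirectories(ctx, "https://git.example.com/org/repo.git", "HEAD", refreshRequested)
	if err != nil {
		return fmt.Errorf("error getting directories from repo: %w", err)
	}
	fmt.Println(dirs)
	return nil
}

func main() {
	// Wiring a concrete implementation is out of scope for this sketch.
	_ = listApps
}
```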
diff --git a/applicationset/generators/git_test.go b/applicationset/generators/git_test.go
index f0d1d29bca6ec..d3fd4965057f8 100644
--- a/applicationset/generators/git_test.go
+++ b/applicationset/generators/git_test.go
@@ -317,7 +317,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
argoCDServiceMock := mocks.Repos{}
- argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
+ argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
@@ -613,7 +613,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
argoCDServiceMock := mocks.Repos{}
- argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
+ argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
@@ -972,7 +972,7 @@ cluster:
t.Parallel()
argoCDServiceMock := mocks.Repos{}
- argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+ argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
@@ -1322,7 +1322,7 @@ cluster:
t.Parallel()
argoCDServiceMock := mocks.Repos{}
- argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+ argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
diff --git a/applicationset/generators/matrix_test.go b/applicationset/generators/matrix_test.go
index 35748b98bcf19..21e88710ae618 100644
--- a/applicationset/generators/matrix_test.go
+++ b/applicationset/generators/matrix_test.go
@@ -1108,7 +1108,7 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
}
repoServiceMock := &mocks.Repos{}
- repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
+ repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
"some/path.json": []byte("test: content"),
}, nil)
gitGenerator := NewGitGenerator(repoServiceMock)
diff --git a/applicationset/services/mocks/Repos.go b/applicationset/services/mocks/Repos.go
index 776b104cae284..b7620b22f08bb 100644
--- a/applicationset/services/mocks/Repos.go
+++ b/applicationset/services/mocks/Repos.go
@@ -13,25 +13,25 @@ type Repos struct {
mock.Mock
}
-// GetDirectories provides a mock function with given fields: ctx, repoURL, revision
-func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
- ret := _m.Called(ctx, repoURL, revision)
+// GetDirectories provides a mock function with given fields: ctx, repoURL, revision, noRevisionCache
+func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error) {
+ ret := _m.Called(ctx, repoURL, revision, noRevisionCache)
var r0 []string
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok {
- return rf(ctx, repoURL, revision)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) ([]string, error)); ok {
+ return rf(ctx, repoURL, revision, noRevisionCache)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok {
- r0 = rf(ctx, repoURL, revision)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) []string); ok {
+ r0 = rf(ctx, repoURL, revision, noRevisionCache)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, repoURL, revision)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, bool) error); ok {
+ r1 = rf(ctx, repoURL, revision, noRevisionCache)
} else {
r1 = ret.Error(1)
}
@@ -39,25 +39,25 @@ func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision st
return r0, r1
}
-// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern
-func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
- ret := _m.Called(ctx, repoURL, revision, pattern)
+// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern, noRevisionCache
+func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error) {
+ ret := _m.Called(ctx, repoURL, revision, pattern, noRevisionCache)
var r0 map[string][]byte
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (map[string][]byte, error)); ok {
- return rf(ctx, repoURL, revision, pattern)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) (map[string][]byte, error)); ok {
+ return rf(ctx, repoURL, revision, pattern, noRevisionCache)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) map[string][]byte); ok {
- r0 = rf(ctx, repoURL, revision, pattern)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) map[string][]byte); ok {
+ r0 = rf(ctx, repoURL, revision, pattern, noRevisionCache)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string][]byte)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
- r1 = rf(ctx, repoURL, revision, pattern)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, string, bool) error); ok {
+ r1 = rf(ctx, repoURL, revision, pattern, noRevisionCache)
} else {
r1 = ret.Error(1)
}
diff --git a/applicationset/services/pull_request/azure_devops_test.go b/applicationset/services/pull_request/azure_devops_test.go
index 15ac1c8233d89..5ed8f4de78b9d 100644
--- a/applicationset/services/pull_request/azure_devops_test.go
+++ b/applicationset/services/pull_request/azure_devops_test.go
@@ -206,9 +206,9 @@ func TestBuildURL(t *testing.T) {
},
{
name: "Provided custom URL and organization",
- url: "https://azuredevops.mycompany.com/",
+ url: "https://azuredevops.example.com/",
organization: "myorganization",
- expected: "https://azuredevops.mycompany.com/myorganization",
+ expected: "https://azuredevops.example.com/myorganization",
},
}
diff --git a/applicationset/services/repo_service.go b/applicationset/services/repo_service.go
index 8ad261fda11cd..64fedc34390b8 100644
--- a/applicationset/services/repo_service.go
+++ b/applicationset/services/repo_service.go
@@ -11,6 +11,8 @@ import (
"github.com/argoproj/argo-cd/v2/util/io"
)
+//go:generate go run github.com/vektra/mockery/v2@v2.25.1 --name=RepositoryDB
+
// RepositoryDB Is a lean facade for ArgoDB,
// Using a lean interface makes it easier to test the functionality of the git generator
type RepositoryDB interface {
@@ -25,13 +27,15 @@ type argoCDService struct {
newFileGlobbingEnabled bool
}
+//go:generate go run github.com/vektra/mockery/v2@v2.25.1 --name=Repos
+
type Repos interface {
// GetFiles returns content of files (not directories) within the target repo
- GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error)
+ GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error)
// GetDirectories returns a list of directories (not files) within the target repo
- GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error)
+ GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error)
}
func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclient.Clientset, newFileGlobbingEnabled bool) (Repos, error) {
@@ -43,7 +47,7 @@ func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclie
}, nil
}
-func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
+func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error) {
repo, err := a.repositoriesDB.GetRepository(ctx, repoURL)
if err != nil {
return nil, fmt.Errorf("error in GetRepository: %w", err)
@@ -55,6 +59,7 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
Revision: revision,
Path: pattern,
NewGitFileGlobbingEnabled: a.newFileGlobbingEnabled,
+ NoRevisionCache: noRevisionCache,
}
closer, client, err := a.repoServerClientSet.NewRepoServerClient()
if err != nil {
@@ -69,7 +74,7 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
return fileResponse.GetMap(), nil
}
-func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
+func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error) {
repo, err := a.repositoriesDB.GetRepository(ctx, repoURL)
if err != nil {
return nil, fmt.Errorf("error in GetRepository: %w", err)
@@ -79,6 +84,7 @@ func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revi
Repo: repo,
SubmoduleEnabled: a.submoduleEnabled,
Revision: revision,
+ NoRevisionCache: noRevisionCache,
}
closer, client, err := a.repoServerClientSet.NewRepoServerClient()
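
A minimal sketch of calling the widened `Repos` interface, assuming the package is imported as `services` and `svc` is any implementation (for example the one returned by `NewArgoCDService`); the helper name, repo URL, and revision are placeholders. Passing `true` for the trailing `noRevisionCache` argument asks the repo-server to resolve the revision afresh rather than reuse its cached resolution.

```go
package example

import (
	"context"
	"fmt"

	"github.com/argoproj/argo-cd/v2/applicationset/services"
)

// listDirsFresh is a hypothetical helper: the trailing bool is the new
// noRevisionCache flag threaded through GetDirectories/GetFiles.
func listDirsFresh(ctx context.Context, svc services.Repos, repoURL, revision string) ([]string, error) {
	dirs, err := svc.GetDirectories(ctx, repoURL, revision, true)
	if err != nil {
		return nil, fmt.Errorf("listing directories without revision cache: %w", err)
	}
	return dirs, nil
}
```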
diff --git a/applicationset/services/repo_service_test.go b/applicationset/services/repo_service_test.go
index 62f8c11c172d0..040fe57f96958 100644
--- a/applicationset/services/repo_service_test.go
+++ b/applicationset/services/repo_service_test.go
@@ -25,9 +25,10 @@ func TestGetDirectories(t *testing.T) {
repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient)
}
type args struct {
- ctx context.Context
- repoURL string
- revision string
+ ctx context.Context
+ repoURL string
+ revision string
+ noRevisionCache bool
}
tests := []struct {
name string
@@ -88,11 +89,11 @@ func TestGetDirectories(t *testing.T) {
submoduleEnabled: tt.fields.submoduleEnabled,
repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient},
}
- got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision)
- if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)) {
+ got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)
+ if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)) {
return
}
- assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)
+ assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)
})
}
}
@@ -105,10 +106,11 @@ func TestGetFiles(t *testing.T) {
repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient)
}
type args struct {
- ctx context.Context
- repoURL string
- revision string
- pattern string
+ ctx context.Context
+ repoURL string
+ revision string
+ pattern string
+ noRevisionCache bool
}
tests := []struct {
name string
@@ -175,11 +177,11 @@ func TestGetFiles(t *testing.T) {
submoduleEnabled: tt.fields.submoduleEnabled,
repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient},
}
- got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)
- if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)) {
+ got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)
+ if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)) {
return
}
- assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)
+ assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)
})
}
}
diff --git a/applicationset/services/scm_provider/gitlab.go b/applicationset/services/scm_provider/gitlab.go
index f4b92b3ed9e5f..ca174de540887 100644
--- a/applicationset/services/scm_provider/gitlab.go
+++ b/applicationset/services/scm_provider/gitlab.go
@@ -100,12 +100,20 @@ func (g *GitlabProvider) ListRepos(ctx context.Context, cloneProtocol string) ([
return nil, fmt.Errorf("unknown clone protocol for Gitlab %v", cloneProtocol)
}
+ var repoLabels []string
+ if len(gitlabRepo.Topics) == 0 {
+ // fall back to tag_list for GitLab versions prior to 14.5, which do not return topics
+ repoLabels = gitlabRepo.TagList
+ } else {
+ repoLabels = gitlabRepo.Topics
+ }
+
repos = append(repos, &Repository{
Organization: gitlabRepo.Namespace.FullPath,
Repository: gitlabRepo.Path,
URL: url,
Branch: gitlabRepo.DefaultBranch,
- Labels: gitlabRepo.TagList,
+ Labels: repoLabels,
RepositoryId: gitlabRepo.ID,
})
}
diff --git a/applicationset/services/scm_provider/gitlab_test.go b/applicationset/services/scm_provider/gitlab_test.go
index 11b21cb6da6d4..b93616fa8367f 100644
--- a/applicationset/services/scm_provider/gitlab_test.go
+++ b/applicationset/services/scm_provider/gitlab_test.go
@@ -1063,6 +1063,16 @@ func TestGitlabListRepos(t *testing.T) {
proto: "ssh",
url: "git@gitlab.com:test-argocd-proton/argocd.git",
},
+ {
+ name: "labelmatch",
+ proto: "ssh",
+ url: "git@gitlab.com:test-argocd-proton/argocd.git",
+ filters: []v1alpha1.SCMProviderGeneratorFilter{
+ {
+ LabelMatch: strp("test-topic"),
+ },
+ },
+ },
{
name: "https protocol",
proto: "https",
diff --git a/applicationset/utils/clusterUtils.go b/applicationset/utils/clusterUtils.go
index ee9832f533e5e..3b34a5a863dbd 100644
--- a/applicationset/utils/clusterUtils.go
+++ b/applicationset/utils/clusterUtils.go
@@ -180,7 +180,7 @@ func secretToCluster(s *corev1.Secret) (*appv1.Cluster, error) {
if val, err := strconv.Atoi(string(shardStr)); err != nil {
log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err)
} else {
- shard = pointer.Int64Ptr(int64(val))
+ shard = pointer.Int64(int64(val))
}
}
cluster := appv1.Cluster{
diff --git a/applicationset/utils/createOrUpdate.go b/applicationset/utils/createOrUpdate.go
index 096be5a9a97d3..1f2a8a9c4a54c 100644
--- a/applicationset/utils/createOrUpdate.go
+++ b/applicationset/utils/createOrUpdate.go
@@ -2,18 +2,24 @@ package utils
import (
"context"
+ "encoding/json"
"fmt"
+ log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+ "github.com/argoproj/argo-cd/v2/util/argo"
+ argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
)
// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
@@ -29,7 +35,7 @@ import (
// The MutateFn is called regardless of creating or updating an object.
//
// It returns the executed operation and an error.
-func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
+func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
key := client.ObjectKeyFromObject(obj)
if err := c.Get(ctx, key, obj); err != nil {
@@ -45,15 +51,24 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
return controllerutil.OperationResultCreated, nil
}
- existingObj := obj.DeepCopyObject()
- existing, ok := existingObj.(client.Object)
- if !ok {
- panic(fmt.Errorf("existing object is not a client.Object"))
- }
+ normalizedLive := obj.DeepCopy()
+
+ // Mutate the live object to match the desired state.
if err := mutate(f, key, obj); err != nil {
return controllerutil.OperationResultNone, err
}
+ // Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
+ // prevents those differences from appearing in the diff and therefore in the patch.
+ err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj)
+ if err != nil {
+ return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
+ }
+
+ // Normalize to avoid diffing on unimportant differences.
+ normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
+ obj.Spec = *argo.NormalizeApplicationSpec(&obj.Spec)
+
equality := conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
@@ -79,16 +94,34 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
},
)
- if equality.DeepEqual(existing, obj) {
+ if equality.DeepEqual(normalizedLive, obj) {
return controllerutil.OperationResultNone, nil
}
- if err := c.Patch(ctx, obj, client.MergeFrom(existing)); err != nil {
+ patch := client.MergeFrom(normalizedLive)
+ if log.IsLevelEnabled(log.DebugLevel) {
+ LogPatch(logCtx, patch, obj)
+ }
+ if err := c.Patch(ctx, obj, patch); err != nil {
return controllerutil.OperationResultNone, err
}
return controllerutil.OperationResultUpdated, nil
}
+func LogPatch(logCtx *log.Entry, patch client.Patch, obj *argov1alpha1.Application) {
+ patchBytes, err := patch.Data(obj)
+ if err != nil {
+ logCtx.Errorf("failed to generate patch: %v", err)
+ }
+ // Get the patch as a plain object so it is easier to work with in json logs.
+ var patchObj map[string]interface{}
+ err = json.Unmarshal(patchBytes, &patchObj)
+ if err != nil {
+ logCtx.Errorf("failed to unmarshal patch: %v", err)
+ }
+ logCtx.WithField("patch", patchObj).Debug("patching application")
+}
+
// mutate wraps a MutateFn and applies validation to its result
func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object) error {
if err := f(); err != nil {
@@ -99,3 +132,71 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
}
return nil
}
+
+// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
+func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application) error {
+ if len(applicationSetIgnoreDifferences) == 0 {
+ return nil
+ }
+
+ generatedAppCopy := generatedApp.DeepCopy()
+ diffConfig, err := argodiff.NewDiffConfigBuilder().
+ WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
+ WithNoCache().
+ Build()
+ if err != nil {
+ return fmt.Errorf("failed to build diff config: %w", err)
+ }
+ unstructuredFound, err := appToUnstructured(found)
+ if err != nil {
+ return fmt.Errorf("failed to convert found application to unstructured: %w", err)
+ }
+ unstructuredGenerated, err := appToUnstructured(generatedApp)
+ if err != nil {
+ return fmt.Errorf("failed to convert found application to unstructured: %w", err)
+ }
+ result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
+ if err != nil {
+ return fmt.Errorf("failed to normalize application spec: %w", err)
+ }
+ if len(result.Lives) != 1 {
+ return fmt.Errorf("expected 1 normalized application, got %d", len(result.Lives))
+ }
+ foundJsonNormalized, err := json.Marshal(result.Lives[0].Object)
+ if err != nil {
+ return fmt.Errorf("failed to marshal normalized app to json: %w", err)
+ }
+ foundNormalized := &argov1alpha1.Application{}
+ err = json.Unmarshal(foundJsonNormalized, &foundNormalized)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal normalized app to json: %w", err)
+ }
+ if len(result.Targets) != 1 {
+ return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
+ }
+ foundNormalized.DeepCopyInto(found)
+ generatedJsonNormalized, err := json.Marshal(result.Targets[0].Object)
+ if err != nil {
+ return fmt.Errorf("failed to marshal normalized app to json: %w", err)
+ }
+ generatedAppNormalized := &argov1alpha1.Application{}
+ err = json.Unmarshal(generatedJsonNormalized, &generatedAppNormalized)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
+ }
+ generatedAppNormalized.DeepCopyInto(generatedApp)
+ // Restore fields that ignore rules must never change: type metadata, name, namespace, and any in-flight operation.
+ generatedApp.TypeMeta = generatedAppCopy.TypeMeta
+ generatedApp.Name = generatedAppCopy.Name
+ generatedApp.Namespace = generatedAppCopy.Namespace
+ generatedApp.Operation = generatedAppCopy.Operation
+ return nil
+}
+
+func appToUnstructured(app client.Object) (*unstructured.Unstructured, error) {
+ u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
+ }
+ return &unstructured.Unstructured{Object: u}, nil
+}
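
For orientation, a sketch of how a caller might pass the new arguments to the updated `CreateOrUpdate`, assuming `logCtx`, a controller-runtime `client.Client`, and a desired `Application` already exist; the helper name is illustrative. The ignore rule mirrors the auto-sync case exercised in the tests below.

```go
package utils

import (
	"context"

	log "github.com/sirupsen/logrus"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// upsertIgnoringAutoSync is a hypothetical caller: differences under
// .spec.syncPolicy.automated are ignored, so enabling auto-sync directly on
// the live Application is not reverted by the generated patch.
func upsertIgnoringAutoSync(ctx context.Context, logCtx *log.Entry, c client.Client, app *argov1alpha1.Application, mutate controllerutil.MutateFn) (controllerutil.OperationResult, error) {
	ignore := argov1alpha1.ApplicationSetIgnoreDifferences{
		{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
	}
	return CreateOrUpdate(ctx, logCtx, c, ignore, app, mutate)
}
```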
diff --git a/applicationset/utils/createOrUpdate_test.go b/applicationset/utils/createOrUpdate_test.go
new file mode 100644
index 0000000000000..a294e89281974
--- /dev/null
+++ b/applicationset/utils/createOrUpdate_test.go
@@ -0,0 +1,234 @@
+package utils
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+)
+
+func Test_applyIgnoreDifferences(t *testing.T) {
+ appMeta := metav1.TypeMeta{
+ APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
+ Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
+ }
+ testCases := []struct {
+ name string
+ ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
+ foundApp string
+ generatedApp string
+ expectedApp string
+ }{
+ {
+ name: "empty ignoreDifferences",
+ foundApp: `
+spec: {}`,
+ generatedApp: `
+spec: {}`,
+ expectedApp: `
+spec: {}`,
+ },
+ {
+ // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
+ name: "ignore target revision with jq",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{".spec.source.targetRevision"}},
+ },
+ foundApp: `
+spec:
+ source:
+ targetRevision: foo`,
+ generatedApp: `
+spec:
+ source:
+ targetRevision: bar`,
+ expectedApp: `
+spec:
+ source:
+ targetRevision: foo`,
+ },
+ {
+ // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
+ name: "ignore helm parameter with jq",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
+ },
+ foundApp: `
+spec:
+ source:
+ helm:
+ parameters:
+ - name: image.tag
+ value: test
+ - name: another
+ value: value`,
+ generatedApp: `
+spec:
+ source:
+ helm:
+ parameters:
+ - name: image.tag
+ value: v1.0.0
+ - name: another
+ value: value`,
+ expectedApp: `
+spec:
+ source:
+ helm:
+ parameters:
+ - name: image.tag
+ value: test
+ - name: another
+ value: value`,
+ },
+ {
+ // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
+ name: "ignore auto-sync in appset when it's not in the cluster with jq",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{".spec.syncPolicy.automated"}},
+ },
+ foundApp: `
+spec:
+ syncPolicy:
+ retry:
+ limit: 5`,
+ generatedApp: `
+spec:
+ syncPolicy:
+ automated:
+ selfHeal: true
+ retry:
+ limit: 5`,
+ expectedApp: `
+spec:
+ syncPolicy:
+ retry:
+ limit: 5`,
+ },
+ {
+ name: "ignore auto-sync in the cluster when it's not in the appset with jq",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{".spec.syncPolicy.automated"}},
+ },
+ foundApp: `
+spec:
+ syncPolicy:
+ automated:
+ selfHeal: true
+ retry:
+ limit: 5`,
+ generatedApp: `
+spec:
+ syncPolicy:
+ retry:
+ limit: 5`,
+ expectedApp: `
+spec:
+ syncPolicy:
+ automated:
+ selfHeal: true
+ retry:
+ limit: 5`,
+ },
+ {
+ // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
+ name: "ignore a one-off annotation with jq",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
+ },
+ foundApp: `
+metadata:
+ annotations:
+ foo.bar: baz
+ some.other: annotation`,
+ generatedApp: `
+metadata:
+ annotations:
+ some.other: annotation`,
+ expectedApp: `
+metadata:
+ annotations:
+ foo.bar: baz
+ some.other: annotation`,
+ },
+ {
+ // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
+ name: "ignore the source.plugin field with a json pointer",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JSONPointers: []string{"/spec/source/plugin"}},
+ },
+ foundApp: `
+spec:
+ source:
+ plugin:
+ parameters:
+ - name: url
+ string: https://example.com`,
+ generatedApp: `
+spec:
+ source:
+ plugin:
+ parameters:
+ - name: url
+ string: https://example.com/wrong`,
+ expectedApp: `
+spec:
+ source:
+ plugin:
+ parameters:
+ - name: url
+ string: https://example.com`,
+ },
+ {
+ // For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
+ name: "ignore parameters added to a multi-source app in the cluster",
+ ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
+ {JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
+ },
+ foundApp: `
+spec:
+ sources:
+ - repoURL: https://git.example.com/test-org/test-repo
+ helm:
+ parameters:
+ - name: test
+ value: hi`,
+ generatedApp: `
+spec:
+ sources:
+ - repoURL: https://git.example.com/test-org/test-repo`,
+ expectedApp: `
+spec:
+ sources:
+ - repoURL: https://git.example.com/test-org/test-repo
+ helm:
+ parameters:
+ - name: test
+ value: hi`,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ foundApp := v1alpha1.Application{TypeMeta: appMeta}
+ err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
+ require.NoError(t, err, tc.foundApp)
+ generatedApp := v1alpha1.Application{TypeMeta: appMeta}
+ err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
+ require.NoError(t, err, tc.generatedApp)
+ err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp)
+ require.NoError(t, err)
+ yamlFound, err := yaml.Marshal(tc.foundApp)
+ require.NoError(t, err)
+ yamlExpected, err := yaml.Marshal(tc.expectedApp)
+ require.NoError(t, err)
+ assert.Equal(t, string(yamlExpected), string(yamlFound))
+ })
+ }
+}
diff --git a/applicationset/utils/utils.go b/applicationset/utils/utils.go
index 089a6ff103100..2d128eb81a16c 100644
--- a/applicationset/utils/utils.go
+++ b/applicationset/utils/utils.go
@@ -16,6 +16,7 @@ import (
"unsafe"
"github.com/Masterminds/sprig/v3"
+ "github.com/gosimple/slug"
"github.com/valyala/fasttemplate"
"sigs.k8s.io/yaml"
@@ -32,6 +33,7 @@ func init() {
delete(sprigFuncMap, "expandenv")
delete(sprigFuncMap, "getHostByName")
sprigFuncMap["normalize"] = SanitizeName
+ sprigFuncMap["slugify"] = SlugifyName
sprigFuncMap["toYaml"] = toYAML
sprigFuncMap["fromYaml"] = fromYAML
sprigFuncMap["fromYamlArray"] = fromYAMLArray
@@ -39,6 +41,7 @@ func init() {
type Renderer interface {
RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *argoappsv1.ApplicationSetSyncPolicy, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (*argoappsv1.Application, error)
+ Replace(tmpl string, replaceMap map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (string, error)
}
type Render struct {
@@ -434,6 +437,54 @@ func NormalizeBitbucketBasePath(basePath string) string {
return basePath
}
+// SlugifyName generates a URL-friendly slug from the provided name and additional options.
+// The slug is generated in accordance with the following rules:
+// 1. The generated slug will be URL-safe and suitable for use in URLs.
+// 2. The maximum length of the slug can be specified using the `maxSize` argument.
+// 3. Smart truncation can be enabled or disabled using the `EnableSmartTruncate` argument.
+// 4. The input name can be any string value that needs to be converted into a slug.
+//
+// Args:
+// - args: A variadic number of arguments where:
+// - The first argument (if provided) is an integer specifying the maximum length of the slug.
+// - The second argument (if provided) is a boolean indicating whether smart truncation is enabled.
+// - The last argument (if provided) is the input name that needs to be slugified.
+// If no name is provided, an empty string will be used.
+//
+// Returns:
+// - string: The generated URL-friendly slug based on the input name and options.
+func SlugifyName(args ...interface{}) string {
+ // Default values for arguments
+ maxSize := 50
+ EnableSmartTruncate := true
+ name := ""
+
+ // Process the arguments
+ for idx, arg := range args {
+ switch idx {
+ case len(args) - 1:
+ name = arg.(string)
+ case 0:
+ maxSize = arg.(int)
+ case 1:
+ EnableSmartTruncate = arg.(bool)
+ default:
+ log.Errorf("Bad 'slugify' arguments.")
+ }
+ }
+
+ sanitizedName := SanitizeName(name)
+
+ // Configure slug generation options
+ slug.EnableSmartTruncate = EnableSmartTruncate
+ slug.MaxLength = maxSize
+
+ // Generate the slug from the input name
+ urlSlug := slug.Make(sanitizedName)
+
+ return urlSlug
+}
+
func getTlsConfigWithCACert(scmRootCAPath string) *tls.Config {
tlsConfig := &tls.Config{}
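
A small sketch of how the registered `slugify` function is reachable from a Go template; wiring it into a plain `text/template` here is illustrative (the generator registers it via the sprig func map), and the branch value and expected output are taken from the test case below.

```go
package main

import (
	"os"
	"text/template"

	"github.com/argoproj/argo-cd/v2/applicationset/utils"
)

func main() {
	// slugify takes optional maxSize and smart-truncate arguments followed by
	// the name to slugify, mirroring SlugifyName's variadic signature.
	tmpl := template.Must(template.New("name").
		Funcs(template.FuncMap{"slugify": utils.SlugifyName}).
		Parse(`{{ slugify 50 false .branch }}`))
	// Prints: feat-a-really-long-pull-request-name-to-test-argo
	_ = tmpl.Execute(os.Stdout, map[string]interface{}{
		"branch": "feat/a_really+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
	})
}
```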
diff --git a/applicationset/utils/utils_test.go b/applicationset/utils/utils_test.go
index a1c58769160cc..3b4702bc35c3f 100644
--- a/applicationset/utils/utils_test.go
+++ b/applicationset/utils/utils_test.go
@@ -1243,6 +1243,43 @@ func TestNormalizeBitbucketBasePath(t *testing.T) {
}
}
+func TestSlugify(t *testing.T) {
+ for _, c := range []struct {
+ branch string
+ smartTruncate bool
+ length int
+ expectedBasePath string
+ }{
+ {
+ branch: "feat/a_really+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
+ smartTruncate: false,
+ length: 50,
+ expectedBasePath: "feat-a-really-long-pull-request-name-to-test-argo",
+ },
+ {
+ branch: "feat/a_really+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
+ smartTruncate: true,
+ length: 53,
+ expectedBasePath: "feat-a-really-long-pull-request-name-to-test-argo",
+ },
+ {
+ branch: "feat/areallylongpullrequestnametotestargoslugificationandbranchnameshorteningfeature",
+ smartTruncate: true,
+ length: 50,
+ expectedBasePath: "feat",
+ },
+ {
+ branch: "feat/areallylongpullrequestnametotestargoslugificationandbranchnameshorteningfeature",
+ smartTruncate: false,
+ length: 50,
+ expectedBasePath: "feat-areallylongpullrequestnametotestargoslugifica",
+ },
+ } {
+ result := SlugifyName(c.length, c.smartTruncate, c.branch)
+ assert.Equal(t, c.expectedBasePath, result, c.branch)
+ }
+}
+
func TestGetTLSConfig(t *testing.T) {
// certParsed, err := tls.X509KeyPair(test.Cert, test.PrivateKey)
// require.NoError(t, err)
diff --git a/applicationset/webhook/testdata/github-pull-request-labeled-event.json b/applicationset/webhook/testdata/github-pull-request-labeled-event.json
new file mode 100644
index 0000000000000..f912a2fdb4a97
--- /dev/null
+++ b/applicationset/webhook/testdata/github-pull-request-labeled-event.json
@@ -0,0 +1,473 @@
+{
+ "action": "labeled",
+ "number": 2,
+ "label": {
+ "id": 6129306173,
+ "node_id": "LA_kwDOIqudU88AAAABbVXKPQ",
+ "url": "https://api.github.com/repos/SG60/backstage/labels/deploy-preview",
+ "name": "deploy-preview",
+ "color": "bfd4f2",
+ "default": false,
+ "description": ""
+ },
+ "pull_request": {
+ "url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2",
+ "id": 279147437,
+ "node_id": "MDExOlB1bGxSZXF1ZXN0Mjc5MTQ3NDM3",
+ "html_url": "https://github.com/Codertocat/Hello-World/pull/2",
+ "diff_url": "https://github.com/Codertocat/Hello-World/pull/2.diff",
+ "patch_url": "https://github.com/Codertocat/Hello-World/pull/2.patch",
+ "issue_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/2",
+ "number": 2,
+ "state": "open",
+ "locked": false,
+ "title": "Update the README with new information.",
+ "user": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ },
+ "body": "This is a pretty simple change that we need to pull into master.",
+ "created_at": "2019-05-15T15:20:33Z",
+ "updated_at": "2019-05-15T15:20:33Z",
+ "closed_at": null,
+ "merged_at": null,
+ "merge_commit_sha": null,
+ "assignee": null,
+ "assignees": [],
+ "requested_reviewers": [],
+ "requested_teams": [],
+ "labels": [
+ {
+ "id": 6129306173,
+ "node_id": "LA_kwDOIqudU88AAAABbVXKPQ",
+ "url": "https://api.github.com/repos/Codertocat/Hello-World/labels/deploy-preview",
+ "name": "deploy-preview",
+ "color": "bfd4f2",
+ "default": false,
+ "description": ""
+ }
+ ],
+ "milestone": null,
+ "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/commits",
+ "review_comments_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/comments",
+ "review_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/comments{/number}",
+ "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/2/comments",
+ "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/ec26c3e57ca3a959ca5aad62de7213c562f8c821",
+ "head": {
+ "label": "Codertocat:changes",
+ "ref": "changes",
+ "sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821",
+ "user": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ },
+ "repo": {
+ "id": 186853002,
+ "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=",
+ "name": "Hello-World",
+ "full_name": "Codertocat/Hello-World",
+ "private": false,
+ "owner": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ },
+ "html_url": "https://github.com/Codertocat/Hello-World",
+ "description": null,
+ "fork": false,
+ "url": "https://api.github.com/repos/Codertocat/Hello-World",
+ "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks",
+ "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams",
+ "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks",
+ "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events",
+ "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags",
+ "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages",
+ "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers",
+ "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors",
+ "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers",
+ "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription",
+ "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges",
+ "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads",
+ "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}",
+ "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}",
+ "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments",
+ "created_at": "2019-05-15T15:19:25Z",
+ "updated_at": "2019-05-15T15:19:27Z",
+ "pushed_at": "2019-05-15T15:20:32Z",
+ "git_url": "git://github.com/Codertocat/Hello-World.git",
+ "ssh_url": "git@github.com:Codertocat/Hello-World.git",
+ "clone_url": "https://github.com/Codertocat/Hello-World.git",
+ "svn_url": "https://github.com/Codertocat/Hello-World",
+ "homepage": null,
+ "size": 0,
+ "stargazers_count": 0,
+ "watchers_count": 0,
+ "language": null,
+ "has_issues": true,
+ "has_projects": true,
+ "has_downloads": true,
+ "has_wiki": true,
+ "has_pages": true,
+ "forks_count": 0,
+ "mirror_url": null,
+ "archived": false,
+ "disabled": false,
+ "open_issues_count": 2,
+ "license": null,
+ "forks": 0,
+ "open_issues": 2,
+ "watchers": 0,
+ "default_branch": "master",
+ "allow_squash_merge": true,
+ "allow_merge_commit": true,
+ "allow_rebase_merge": true,
+ "delete_branch_on_merge": false
+ }
+ },
+ "base": {
+ "label": "Codertocat:master",
+ "ref": "master",
+ "sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e",
+ "user": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ },
+ "repo": {
+ "id": 186853002,
+ "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=",
+ "name": "Hello-World",
+ "full_name": "Codertocat/Hello-World",
+ "private": false,
+ "owner": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ },
+ "html_url": "https://github.com/Codertocat/Hello-World",
+ "description": null,
+ "fork": false,
+ "url": "https://api.github.com/repos/Codertocat/Hello-World",
+ "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks",
+ "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams",
+ "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks",
+ "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events",
+ "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags",
+ "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages",
+ "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers",
+ "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors",
+ "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers",
+ "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription",
+ "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges",
+ "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads",
+ "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}",
+ "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}",
+ "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments",
+ "created_at": "2019-05-15T15:19:25Z",
+ "updated_at": "2019-05-15T15:19:27Z",
+ "pushed_at": "2019-05-15T15:20:32Z",
+ "git_url": "git://github.com/Codertocat/Hello-World.git",
+ "ssh_url": "git@github.com:Codertocat/Hello-World.git",
+ "clone_url": "https://github.com/Codertocat/Hello-World.git",
+ "svn_url": "https://github.com/Codertocat/Hello-World",
+ "homepage": null,
+ "size": 0,
+ "stargazers_count": 0,
+ "watchers_count": 0,
+ "language": null,
+ "has_issues": true,
+ "has_projects": true,
+ "has_downloads": true,
+ "has_wiki": true,
+ "has_pages": true,
+ "forks_count": 0,
+ "mirror_url": null,
+ "archived": false,
+ "disabled": false,
+ "open_issues_count": 2,
+ "license": null,
+ "forks": 0,
+ "open_issues": 2,
+ "watchers": 0,
+ "default_branch": "master",
+ "allow_squash_merge": true,
+ "allow_merge_commit": true,
+ "allow_rebase_merge": true,
+ "delete_branch_on_merge": false
+ }
+ },
+ "_links": {
+ "self": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2"
+ },
+ "html": {
+ "href": "https://github.com/Codertocat/Hello-World/pull/2"
+ },
+ "issue": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/issues/2"
+ },
+ "comments": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/issues/2/comments"
+ },
+ "review_comments": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/comments"
+ },
+ "review_comment": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/comments{/number}"
+ },
+ "commits": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/commits"
+ },
+ "statuses": {
+ "href": "https://api.github.com/repos/Codertocat/Hello-World/statuses/ec26c3e57ca3a959ca5aad62de7213c562f8c821"
+ }
+ },
+ "author_association": "OWNER",
+ "draft": false,
+ "merged": false,
+ "mergeable": null,
+ "rebaseable": null,
+ "mergeable_state": "unknown",
+ "merged_by": null,
+ "comments": 0,
+ "review_comments": 0,
+ "maintainer_can_modify": false,
+ "commits": 1,
+ "additions": 1,
+ "deletions": 1,
+ "changed_files": 1
+ },
+ "repository": {
+ "id": 186853002,
+ "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=",
+ "name": "Hello-World",
+ "full_name": "Codertocat/Hello-World",
+ "private": false,
+ "owner": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ },
+ "html_url": "https://github.com/Codertocat/Hello-World",
+ "description": null,
+ "fork": false,
+ "url": "https://api.github.com/repos/Codertocat/Hello-World",
+ "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks",
+ "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams",
+ "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks",
+ "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events",
+ "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags",
+ "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages",
+ "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers",
+ "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors",
+ "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers",
+ "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription",
+ "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges",
+ "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads",
+ "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}",
+ "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}",
+ "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments",
+ "created_at": "2019-05-15T15:19:25Z",
+ "updated_at": "2019-05-15T15:19:27Z",
+ "pushed_at": "2019-05-15T15:20:32Z",
+ "git_url": "git://github.com/Codertocat/Hello-World.git",
+ "ssh_url": "git@github.com:Codertocat/Hello-World.git",
+ "clone_url": "https://github.com/Codertocat/Hello-World.git",
+ "svn_url": "https://github.com/Codertocat/Hello-World",
+ "homepage": null,
+ "size": 0,
+ "stargazers_count": 0,
+ "watchers_count": 0,
+ "language": null,
+ "has_issues": true,
+ "has_projects": true,
+ "has_downloads": true,
+ "has_wiki": true,
+ "has_pages": true,
+ "forks_count": 0,
+ "mirror_url": null,
+ "archived": false,
+ "disabled": false,
+ "open_issues_count": 2,
+ "license": null,
+ "forks": 0,
+ "open_issues": 2,
+ "watchers": 0,
+ "default_branch": "master"
+ },
+ "sender": {
+ "login": "Codertocat",
+ "id": 21031067,
+ "node_id": "MDQ6VXNlcjIxMDMxMDY3",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/Codertocat",
+ "html_url": "https://github.com/Codertocat",
+ "followers_url": "https://api.github.com/users/Codertocat/followers",
+ "following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
+ "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
+ "organizations_url": "https://api.github.com/users/Codertocat/orgs",
+ "repos_url": "https://api.github.com/users/Codertocat/repos",
+ "events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/Codertocat/received_events",
+ "type": "User",
+ "site_admin": false
+ }
+}
diff --git a/applicationset/webhook/webhook.go b/applicationset/webhook/webhook.go
index ce099df35ea35..d55e63e064f5a 100644
--- a/applicationset/webhook/webhook.go
+++ b/applicationset/webhook/webhook.go
@@ -412,10 +412,12 @@ func shouldRefreshPRGenerator(gen *v1alpha1.PullRequestGenerator, info *prGenera
}
if gen.Github != nil && info.Github != nil {
- if gen.Github.Owner != info.Github.Owner {
+ // repository owner and name are case-insensitive
+ // See https://docs.github.com/en/rest/pulls/pulls?apiVersion=2022-11-28#list-pull-requests
+ if !strings.EqualFold(gen.Github.Owner, info.Github.Owner) {
return false
}
- if gen.Github.Repo != info.Github.Repo {
+ if !strings.EqualFold(gen.Github.Repo, info.Github.Repo) {
return false
}
api := gen.Github.API
diff --git a/applicationset/webhook/webhook_test.go b/applicationset/webhook/webhook_test.go
index 349d275948aee..d22b1a07ca6f2 100644
--- a/applicationset/webhook/webhook_test.go
+++ b/applicationset/webhook/webhook_test.go
@@ -111,7 +111,7 @@ func TestWebhookHandler(t *testing.T) {
expectedRefresh: false,
},
{
- desc: "WebHook from a GitHub repository via pull_reqeuest opened event",
+ desc: "WebHook from a GitHub repository via pull_request opened event",
headerKey: "X-GitHub-Event",
headerValue: "pull_request",
payloadFile: "github-pull-request-opened-event.json",
@@ -120,7 +120,7 @@ func TestWebhookHandler(t *testing.T) {
expectedRefresh: true,
},
{
- desc: "WebHook from a GitHub repository via pull_reqeuest assigned event",
+ desc: "WebHook from a GitHub repository via pull_request assigned event",
headerKey: "X-GitHub-Event",
headerValue: "pull_request",
payloadFile: "github-pull-request-assigned-event.json",
@@ -128,6 +128,15 @@ func TestWebhookHandler(t *testing.T) {
expectedStatusCode: http.StatusOK,
expectedRefresh: false,
},
+ {
+ desc: "WebHook from a GitHub repository via pull_request labeled event",
+ headerKey: "X-GitHub-Event",
+ headerValue: "pull_request",
+ payloadFile: "github-pull-request-labeled-event.json",
+ effectedAppSets: []string{"pull-request-github", "matrix-pull-request-github", "matrix-scm-pull-request-github", "merge-pull-request-github", "plugin", "matrix-pull-request-github-plugin"},
+ expectedStatusCode: http.StatusOK,
+ expectedRefresh: true,
+ },
{
desc: "WebHook from a GitLab repository via open merge request event",
headerKey: "X-Gitlab-Event",
@@ -180,7 +189,7 @@ func TestWebhookHandler(t *testing.T) {
fakeAppWithGitGenerator("git-github", namespace, "https://github.com/org/repo"),
fakeAppWithGitGenerator("git-gitlab", namespace, "https://gitlab/group/name"),
fakeAppWithGitGenerator("git-azure-devops", namespace, "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"),
- fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "Codertocat", "Hello-World"),
+ fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "CodErTOcat", "Hello-World"),
fakeAppWithGitlabPullRequestGenerator("pull-request-gitlab", namespace, "100500"),
fakeAppWithAzureDevOpsPullRequestGenerator("pull-request-azure-devops", namespace, "DefaultCollection", "Fabrikam"),
fakeAppWithPluginGenerator("plugin", namespace),
@@ -189,7 +198,7 @@ func TestWebhookHandler(t *testing.T) {
fakeAppWithMatrixAndScmWithGitGenerator("matrix-scm-git-github", namespace, "org"),
fakeAppWithMatrixAndScmWithPullRequestGenerator("matrix-scm-pull-request-github", namespace, "Codertocat"),
fakeAppWithMatrixAndNestedGitGenerator("matrix-nested-git-github", namespace, "https://github.com/org/repo"),
- fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator("matrix-pull-request-github-plugin", namespace, "Codertocat", "Hello-World", "plugin-cm"),
+ fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator("matrix-pull-request-github-plugin", namespace, "coDErtoCat", "HeLLO-WorLD", "plugin-cm"),
fakeAppWithMergeAndGitGenerator("merge-git-github", namespace, "https://github.com/org/repo"),
fakeAppWithMergeAndPullRequestGenerator("merge-pull-request-github", namespace, "Codertocat", "Hello-World"),
fakeAppWithMergeAndNestedGitGenerator("merge-nested-git-github", namespace, "https://github.com/org/repo"),
diff --git a/assets/swagger.json b/assets/swagger.json
index c97e0a3c78239..91e815203eee0 100644
--- a/assets/swagger.json
+++ b/assets/swagger.json
@@ -234,7 +234,7 @@
},
{
"type": "string",
- "description": "forces application reconciliation if set to true.",
+ "description": "forces application reconciliation if set to 'hard'.",
"name": "refresh",
"in": "query"
},
@@ -573,7 +573,7 @@
},
{
"type": "string",
- "description": "forces application reconciliation if set to true.",
+ "description": "forces application reconciliation if set to 'hard'.",
"name": "refresh",
"in": "query"
},
@@ -3816,7 +3816,7 @@
},
{
"type": "string",
- "description": "forces application reconciliation if set to true.",
+ "description": "forces application reconciliation if set to 'hard'.",
"name": "refresh",
"in": "query"
},
@@ -4462,6 +4462,9 @@
"clientID": {
"type": "string"
},
+ "enablePKCEAuthentication": {
+ "type": "boolean"
+ },
"idTokenClaims": {
"type": "object",
"additionalProperties": {
@@ -5089,7 +5092,7 @@
}
},
"runtimeRawExtension": {
- "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this:\n{\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
+ "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
"type": "object",
"properties": {
"raw": {
@@ -5496,10 +5499,6 @@
"type": "string"
}
},
- "clusterName": {
- "description": "Deprecated: ClusterName is a legacy field that was always cleared by\nthe system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect\naccidental use.\n\n+optional",
- "type": "string"
- },
"creationTimestamp": {
"$ref": "#/definitions/v1Time"
},
@@ -5571,8 +5570,8 @@
}
},
"v1ObjectReference": {
+ "description": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
"type": "object",
- "title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
"properties": {
"apiVersion": {
"type": "string",
@@ -5665,6 +5664,10 @@
"type": "string",
"title": "ClusterName contains AWS cluster name"
},
+ "profile": {
+ "description": "Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain.",
+ "type": "string"
+ },
"roleARN": {
"description": "RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.",
"type": "string"
@@ -6144,6 +6147,9 @@
},
"template": {
"$ref": "#/definitions/v1alpha1ApplicationSetTemplate"
+ },
+ "templatePatch": {
+ "type": "string"
}
}
},
@@ -6396,6 +6402,13 @@
"type": "string"
}
},
+ "components": {
+ "type": "array",
+ "title": "Components specifies a list of kustomize components to add to the kustomization before building",
+ "items": {
+ "type": "string"
+ }
+ },
"forceCommonAnnotations": {
"type": "boolean",
"title": "ForceCommonAnnotations specifies whether to force applying common annotations to resources for Kustomize apps"
@@ -8490,6 +8503,9 @@
"format": "int64",
"title": "ID is an auto incrementing identifier of the RevisionHistory"
},
+ "initiatedBy": {
+ "$ref": "#/definitions/v1alpha1OperationInitiator"
+ },
"revision": {
"type": "string",
"title": "Revision holds the revision the sync was performed against"
diff --git a/cmd/argocd-application-controller/commands/argocd_application_controller.go b/cmd/argocd-application-controller/commands/argocd_application_controller.go
index a43174633b02a..c38a2113e2b34 100644
--- a/cmd/argocd-application-controller/commands/argocd_application_controller.go
+++ b/cmd/argocd-application-controller/commands/argocd_application_controller.go
@@ -10,6 +10,8 @@ import (
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ kubeerrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -19,6 +21,7 @@ import (
"github.com/argoproj/argo-cd/v2/controller/sharding"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
+ "github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
@@ -30,8 +33,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/settings"
"github.com/argoproj/argo-cd/v2/util/tls"
"github.com/argoproj/argo-cd/v2/util/trace"
- kubeerrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
@@ -45,9 +46,12 @@ const (
func NewCommand() *cobra.Command {
var (
+ workqueueRateLimit ratelimiter.AppControllerRateLimiterConfig
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
appHardResyncPeriod int64
+ appResyncJitter int64
+ repoErrorGracePeriod int64
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
@@ -63,11 +67,14 @@ func NewCommand() *cobra.Command {
repoServerPlaintext bool
repoServerStrictTLS bool
otlpAddress string
+ otlpInsecure bool
+ otlpHeaders map[string]string
otlpAttrs []string
applicationNamespaces []string
persistResourceHealth bool
shardingAlgorithm string
enableDynamicClusterDistribution bool
+ serverSideDiff bool
)
var command = cobra.Command{
Use: cliName,
@@ -140,7 +147,7 @@ func NewCommand() *cobra.Command {
appController.InvalidateProjectsCache()
}))
kubectl := kubeutil.NewKubectl()
- clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+ clusterSharding, err := getClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
appController, err = controller.NewApplicationController(
namespace,
@@ -152,14 +159,19 @@ func NewCommand() *cobra.Command {
kubectl,
resyncDuration,
hardResyncDuration,
+ time.Duration(appResyncJitter)*time.Second,
time.Duration(selfHealTimeoutSeconds)*time.Second,
+ time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
metricsCacheExpiration,
metricsAplicationLabels,
kubectlParallelismLimit,
persistResourceHealth,
- clusterFilter,
+ clusterSharding,
applicationNamespaces,
+ &workqueueRateLimit,
+ serverSideDiff,
+ enableDynamicClusterDistribution,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -169,7 +181,7 @@ func NewCommand() *cobra.Command {
stats.RegisterHeapDumper("memprofile")
if otlpAddress != "" {
- closeTracer, err := trace.InitTracer(ctx, "argocd-controller", otlpAddress, otlpAttrs)
+ closeTracer, err := trace.InitTracer(ctx, "argocd-controller", otlpAddress, otlpInsecure, otlpHeaders, otlpAttrs)
if err != nil {
log.Fatalf("failed to initialize tracing: %v", err)
}
@@ -186,6 +198,8 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
command.Flags().Int64Var(&appHardResyncPeriod, "app-hard-resync", int64(env.ParseDurationFromEnv("ARGOCD_HARD_RECONCILIATION_TIMEOUT", defaultAppHardResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application hard resync.")
+ command.Flags().Int64Var(&appResyncJitter, "app-resync-jitter", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_JITTER", 0*time.Second, 0, math.MaxInt64).Seconds()), "Maximum time period in seconds to add as a delay jitter for application resync.")
+ command.Flags().Int64Var(&repoErrorGracePeriod, "repo-error-grace-period-seconds", int64(env.ParseDurationFromEnv("ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Grace period in seconds for ignoring consecutive errors while communicating with repo server.")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&statusProcessors, "status-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS", 20, 0, math.MaxInt32), "Number of application status processors")
@@ -201,48 +215,68 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric")
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
+ command.Flags().BoolVar(&otlpInsecure, "otlp-insecure", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_INSECURE", true), "OpenTelemetry collector insecure mode")
+ command.Flags().StringToStringVar(&otlpHeaders, "otlp-headers", env.ParseStringToStringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_HEADERS", map[string]string{}, ","), "List of OpenTelemetry collector extra headers sent with traces; headers are comma-separated key-value pairs (e.g. key1=value1,key2=value2)")
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from")
command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD")
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
+ // global queue rate limit config
+ command.Flags().Int64Var(&workqueueRateLimit.BucketSize, "wq-bucket-size", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_SIZE", 500, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket Size, default 500")
+ command.Flags().Int64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_QPS", 50, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket QPS, default 50")
+ // individual item rate limit config
+ // when WORKQUEUE_FAILURE_COOLDOWN is 0, per-item rate limiting is disabled (default)
+ command.Flags().DurationVar(&workqueueRateLimit.FailureCoolDown, "wq-cooldown-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_FAILURE_COOLDOWN_NS", 0, 0, (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Cooldown duration in nanoseconds, default 0 (per item rate limiter disabled)")
+ command.Flags().DurationVar(&workqueueRateLimit.BaseDelay, "wq-basedelay-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_BASE_DELAY_NS", time.Millisecond.Nanoseconds(), time.Nanosecond.Nanoseconds(), (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Base Delay duration in nanoseconds, default 1000000 (1ms)")
+ command.Flags().DurationVar(&workqueueRateLimit.MaxDelay, "wq-maxdelay-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_MAX_DELAY_NS", time.Second.Nanoseconds(), 1*time.Millisecond.Nanoseconds(), (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Max Delay duration in nanoseconds, default 1000000000 (1s)")
+ command.Flags().Float64Var(&workqueueRateLimit.BackoffFactor, "wq-backoff-factor", env.ParseFloat64FromEnv("WORKQUEUE_BACKOFF_FACTOR", 1.5, 0, math.MaxFloat64), "Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5")
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
- cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
- redisClient = client
+ command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
+ cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
+ OnClientCreated: func(client *redis.Client) {
+ redisClient = client
+ },
})
return &command
}
-func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
+func getClusterSharding(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) (sharding.ClusterShardingCache, error) {
+ var (
+ replicasCount int
+ )
+ // StatefulSet mode and Deployment mode use different default values for shard number.
+ defaultShardNumberValue := 0
- var replicas int
- shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
+ if enableDynamicClusterDistribution {
+ applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
+ appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
- applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
- appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
+ // if the app controller deployment is not found while dynamic cluster distribution is enabled, error out
+ if err != nil {
+ return nil, fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment: %v", err)
+ }
- // if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
- if err != nil && kubeerrors.IsNotFound(err) {
- appControllerDeployment = nil
- }
+ if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
+ replicasCount = int(*appControllerDeployment.Spec.Replicas)
+ defaultShardNumberValue = -1
+ } else {
+ return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment replica count")
+ }
- if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
- replicas = int(*appControllerDeployment.Spec.Replicas)
} else {
- replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
+ replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
}
-
- var clusterFilter func(cluster *v1alpha1.Cluster) bool
- if replicas > 1 {
+ shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, defaultShardNumberValue, -math.MaxInt32, math.MaxInt32)
+ if replicasCount > 1 {
// check for shard mapping using configmap if application-controller is a deployment
// else use existing logic to infer shard from pod name if application-controller is a statefulset
- if enableDynamicClusterDistribution && appControllerDeployment != nil {
-
+ if enableDynamicClusterDistribution {
var err error
// retry 3 times if we find a conflict while updating shard mapping configMap.
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
- shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
- if !kubeerrors.IsConflict(err) {
+ shardNumber, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
+ if err != nil && !kubeerrors.IsConflict(err) {
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
break
}
@@ -250,19 +284,19 @@ func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.Se
}
errors.CheckError(err)
} else {
- if shard < 0 {
+ if shardNumber < 0 {
var err error
- shard, err = sharding.InferShard()
+ shardNumber, err = sharding.InferShard()
errors.CheckError(err)
}
+ if shardNumber > replicasCount {
+ log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
+ shardNumber = 0
+ }
}
- log.Infof("Processing clusters from shard %d", shard)
- db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
- log.Infof("Using filter function: %s", shardingAlgorithm)
- distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
- clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
} else {
log.Info("Processing all cluster shards")
}
- return clusterFilter
+ db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
+ return sharding.NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm), nil
}
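
Note on the new workqueue flags above: the bucket size and QPS bound the overall rate at which applications are handed to workers, while the base delay, max delay, backoff factor, and failure cooldown describe per-item backoff for applications that keep failing. A rough sketch of how such a config can be assembled from client-go's stock rate limiters; the config type below is a local stand-in, not the actual ratelimiter.AppControllerRateLimiterConfig, and the real implementation may differ:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/time/rate"
        "k8s.io/client-go/util/workqueue"
    )

    // rateLimiterConfig is a hypothetical mirror of the flag values above.
    type rateLimiterConfig struct {
        BucketSize int64
        BucketQPS  int64
        BaseDelay  time.Duration
        MaxDelay   time.Duration
    }

    func newRateLimiter(cfg rateLimiterConfig) workqueue.RateLimiter {
        // Global token bucket limiting the overall dequeue rate.
        bucket := &workqueue.BucketRateLimiter{
            Limiter: rate.NewLimiter(rate.Limit(cfg.BucketQPS), int(cfg.BucketSize)),
        }
        // Per-item exponential backoff between BaseDelay and MaxDelay for items
        // that keep failing. A custom limiter would be needed to honour the
        // backoff factor and failure cooldown flags.
        perItem := workqueue.NewItemExponentialFailureRateLimiter(cfg.BaseDelay, cfg.MaxDelay)
        return workqueue.NewMaxOfRateLimiter(bucket, perItem)
    }

    func main() {
        rl := newRateLimiter(rateLimiterConfig{BucketSize: 500, BucketQPS: 50, BaseDelay: time.Millisecond, MaxDelay: time.Second})
        q := workqueue.NewRateLimitingQueue(rl)
        defer q.ShutDown()
        fmt.Println("delay for a first-time item:", rl.When("my-app"))
    }

NewMaxOfRateLimiter returns the larger of the delays, so whichever limiter is currently slower wins.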
diff --git a/cmd/argocd-cmp-server/commands/argocd_cmp_server.go b/cmd/argocd-cmp-server/commands/argocd_cmp_server.go
index 62f45b24aedb5..526a199cb5490 100644
--- a/cmd/argocd-cmp-server/commands/argocd_cmp_server.go
+++ b/cmd/argocd-cmp-server/commands/argocd_cmp_server.go
@@ -26,6 +26,8 @@ func NewCommand() *cobra.Command {
var (
configFilePath string
otlpAddress string
+ otlpInsecure bool
+ otlpHeaders map[string]string
otlpAttrs []string
)
var command = cobra.Command{
@@ -56,7 +58,7 @@ func NewCommand() *cobra.Command {
if otlpAddress != "" {
var closer func()
var err error
- closer, err = traceutil.InitTracer(ctx, "argocd-cmp-server", otlpAddress, otlpAttrs)
+ closer, err = traceutil.InitTracer(ctx, "argocd-cmp-server", otlpAddress, otlpInsecure, otlpHeaders, otlpAttrs)
if err != nil {
log.Fatalf("failed to initialize tracing: %v", err)
}
@@ -83,6 +85,8 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().StringVar(&configFilePath, "config-dir-path", common.DefaultPluginConfigFilePath, "Config management plugin configuration file location, Default is '/home/argocd/cmp-server/config/'")
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_CMP_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
+ command.Flags().BoolVar(&otlpInsecure, "otlp-insecure", env.ParseBoolFromEnv("ARGOCD_CMP_SERVER_OTLP_INSECURE", true), "OpenTelemetry collector insecure mode")
+ command.Flags().StringToStringVar(&otlpHeaders, "otlp-headers", env.ParseStringToStringFromEnv("ARGOCD_CMP_SERVER_OTLP_HEADERS", map[string]string{}, ","), "List of OpenTelemetry collector extra headers sent with traces; headers are comma-separated key-value pairs (e.g. key1=value1,key2=value2)")
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_CMP_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
return &command
}
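
Note on the --otlp-headers flag added here (and to the other commands in this patch): the value is a comma-separated list of key=value pairs. The sketch below is a hypothetical parser illustrating the intended format; parseHeaders stands in for the real env.ParseStringToStringFromEnv helper, whose implementation is not shown in this diff:

    package main

    import (
        "fmt"
        "strings"
    )

    func parseHeaders(raw, sep string) map[string]string {
        headers := map[string]string{}
        if raw == "" {
            return headers
        }
        for _, pair := range strings.Split(raw, sep) {
            kv := strings.SplitN(pair, "=", 2)
            if len(kv) != 2 {
                continue // this sketch silently skips malformed entries
            }
            headers[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
        }
        return headers
    }

    func main() {
        fmt.Println(parseHeaders("key1=value1,key2=value2", ",")) // map[key1:value1 key2:value2]
    }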
diff --git a/cmd/argocd-k8s-auth/commands/aws.go b/cmd/argocd-k8s-auth/commands/aws.go
index 79a118d2653a3..9b750ac5f92f8 100644
--- a/cmd/argocd-k8s-auth/commands/aws.go
+++ b/cmd/argocd-k8s-auth/commands/aws.go
@@ -37,13 +37,14 @@ func newAWSCommand() *cobra.Command {
var (
clusterName string
roleARN string
+ profile string
)
var command = &cobra.Command{
Use: "aws",
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
- presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, getSignedRequest)
+ presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, profile, getSignedRequest)
errors.CheckError(err)
token := v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))
// Set token expiration to 1 minute before the presigned URL expires for some cushion
@@ -53,16 +54,17 @@ func newAWSCommand() *cobra.Command {
}
command.Flags().StringVar(&clusterName, "cluster-name", "", "AWS Cluster name")
command.Flags().StringVar(&roleARN, "role-arn", "", "AWS Role ARN")
+ command.Flags().StringVar(&profile, "profile", "", "AWS Profile")
return command
}
-type getSignedRequestFunc func(clusterName, roleARN string) (string, error)
+type getSignedRequestFunc func(clusterName, roleARN string, profile string) (string, error)
-func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, fn getSignedRequestFunc) (string, error) {
+func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, profile string, fn getSignedRequestFunc) (string, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
for {
- signed, err := fn(clusterName, roleARN)
+ signed, err := fn(clusterName, roleARN, profile)
if err == nil {
return signed, nil
}
@@ -74,8 +76,10 @@ func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Durat
}
}
-func getSignedRequest(clusterName, roleARN string) (string, error) {
- sess, err := session.NewSession()
+func getSignedRequest(clusterName, roleARN string, profile string) (string, error) {
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Profile: profile,
+ })
if err != nil {
return "", fmt.Errorf("error creating new AWS session: %s", err)
}
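
Note on the change above: the new --profile flag is forwarded to the AWS SDK session, so credentials can be taken from a named profile instead of the default credential provider chain. A standalone sketch of session.NewSessionWithOptions with a named profile; the profile name and the STS identity check are illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        // When Profile is empty, the SDK falls back to the default credential
        // provider chain, matching the previous behaviour of getSignedRequest.
        sess, err := session.NewSessionWithOptions(session.Options{
            Profile:           "my-profile",
            SharedConfigState: session.SharedConfigEnable,
        })
        if err != nil {
            log.Fatalf("error creating AWS session: %v", err)
        }

        // Verify which identity the profile resolves to.
        out, err := sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
        if err != nil {
            log.Fatalf("error calling sts:GetCallerIdentity: %v", err)
        }
        fmt.Println("authenticated as:", *out.Arn)
    }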
diff --git a/cmd/argocd-k8s-auth/commands/aws_test.go b/cmd/argocd-k8s-auth/commands/aws_test.go
index c22449eba42be..578aae71a2c29 100644
--- a/cmd/argocd-k8s-auth/commands/aws_test.go
+++ b/cmd/argocd-k8s-auth/commands/aws_test.go
@@ -22,7 +22,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
}
// when
- signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
+ signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)
// then
assert.NoError(t, err)
@@ -41,7 +41,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
}
// when
- signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
+ signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)
// then
assert.NoError(t, err)
@@ -57,7 +57,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
}
// when
- signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
+ signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)
// then
assert.Error(t, err)
@@ -70,7 +70,7 @@ type signedRequestMock struct {
returnFunc func(m *signedRequestMock) (string, error)
}
-func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string) (string, error) {
+func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string, profile string) (string, error) {
m.getSignedRequestCalls++
return m.returnFunc(m)
}
diff --git a/cmd/argocd-notification/commands/controller.go b/cmd/argocd-notification/commands/controller.go
index abd9a2e8475f0..cb30fd5277d4b 100644
--- a/cmd/argocd-notification/commands/controller.go
+++ b/cmd/argocd-notification/commands/controller.go
@@ -43,19 +43,20 @@ func addK8SFlagsToCmd(cmd *cobra.Command) clientcmd.ClientConfig {
func NewCommand() *cobra.Command {
var (
- clientConfig clientcmd.ClientConfig
- processorsCount int
- namespace string
- appLabelSelector string
- logLevel string
- logFormat string
- metricsPort int
- argocdRepoServer string
- argocdRepoServerPlaintext bool
- argocdRepoServerStrictTLS bool
- configMapName string
- secretName string
- applicationNamespaces []string
+ clientConfig clientcmd.ClientConfig
+ processorsCount int
+ namespace string
+ appLabelSelector string
+ logLevel string
+ logFormat string
+ metricsPort int
+ argocdRepoServer string
+ argocdRepoServerPlaintext bool
+ argocdRepoServerStrictTLS bool
+ configMapName string
+ secretName string
+ applicationNamespaces []string
+ selfServiceNotificationEnabled bool
)
var command = cobra.Command{
Use: "controller",
@@ -139,7 +140,7 @@ func NewCommand() *cobra.Command {
log.Infof("serving metrics on port %d", metricsPort)
log.Infof("loading configuration %d", metricsPort)
- ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, applicationNamespaces, appLabelSelector, registry, secretName, configMapName)
+ ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, applicationNamespaces, appLabelSelector, registry, secretName, configMapName, selfServiceNotificationEnabled)
err = ctrl.Init(ctx)
if err != nil {
return fmt.Errorf("failed to initialize controller: %w", err)
@@ -163,5 +164,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
command.Flags().StringVar(&secretName, "secret-name", "argocd-notifications-secret", "Set notifications Secret name")
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that this controller should send notifications for")
+ command.Flags().BoolVar(&selfServiceNotificationEnabled, "self-service-notification-enabled", env.ParseBoolFromEnv("ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED", false), "Allows the Argo CD notification controller to pull notification config from the namespace that the resource is in. This is useful for self-service notification.")
return &command
}
diff --git a/cmd/argocd-repo-server/commands/argocd_repo_server.go b/cmd/argocd-repo-server/commands/argocd_repo_server.go
index 69358d2a91efd..84b50e7cd5ab9 100644
--- a/cmd/argocd-repo-server/commands/argocd_repo_server.go
+++ b/cmd/argocd-repo-server/commands/argocd_repo_server.go
@@ -54,6 +54,8 @@ func NewCommand() *cobra.Command {
metricsPort int
metricsHost string
otlpAddress string
+ otlpInsecure bool
+ otlpHeaders map[string]string
otlpAttrs []string
cacheSrc func() (*reposervercache.Cache, error)
tlsConfigCustomizer tls.ConfigCustomizer
@@ -129,7 +131,7 @@ func NewCommand() *cobra.Command {
if otlpAddress != "" {
var closer func()
var err error
- closer, err = traceutil.InitTracer(ctx, "argocd-repo-server", otlpAddress, otlpAttrs)
+ closer, err = traceutil.InitTracer(ctx, "argocd-repo-server", otlpAddress, otlpInsecure, otlpHeaders, otlpAttrs)
if err != nil {
log.Fatalf("failed to initialize tracing: %v", err)
}
@@ -196,6 +198,8 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&metricsHost, "metrics-address", env.StringFromEnv("ARGOCD_REPO_SERVER_METRICS_LISTEN_ADDRESS", common.DefaultAddressRepoServerMetrics), "Listen on given address for metrics")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_REPO_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
+ command.Flags().BoolVar(&otlpInsecure, "otlp-insecure", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_OTLP_INSECURE", true), "OpenTelemetry collector insecure mode")
+ command.Flags().StringToStringVar(&otlpHeaders, "otlp-headers", env.ParseStringToStringFromEnv("ARGOCD_REPO_OTLP_HEADERS", map[string]string{}, ","), "List of OpenTelemetry collector extra headers sent with traces; headers are comma-separated key-value pairs (e.g. key1=value1,key2=value2)")
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_REPO_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_TLS", false), "Disable TLS on the gRPC endpoint")
command.Flags().StringVar(&maxCombinedDirectoryManifestsSize, "max-combined-directory-manifests-size", env.StringFromEnv("ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE", "10M"), "Max combined size of manifest files in a directory-type Application")
@@ -206,8 +210,10 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&helmManifestMaxExtractedSize, "helm-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of helm manifest archives when extracted")
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
- cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
- redisClient = client
+ cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
+ OnClientCreated: func(client *redis.Client) {
+ redisClient = client
+ },
})
return &command
}
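
Note on the signature change above: AddCacheFlagsToCmd now takes a cacheutil.Options struct instead of a bare callback, so additional knobs (such as the FlagPrefix used by the API server below) can be added without touching every caller. A simplified sketch of that pattern; the options type here is a local stand-in modelling only the fields visible in this diff:

    package main

    import "fmt"

    type options struct {
        FlagPrefix      string
        OnClientCreated func(client string) // the real callback receives a *redis.Client
    }

    func addCacheFlags(opts options) {
        flagName := opts.FlagPrefix + "redis"
        fmt.Println("registering flag:", "--"+flagName)
        if opts.OnClientCreated != nil {
            opts.OnClientCreated("client for --" + flagName)
        }
    }

    func main() {
        addCacheFlags(options{OnClientCreated: func(c string) { fmt.Println("created:", c) }})
        addCacheFlags(options{FlagPrefix: "repo-server-"})
    }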
diff --git a/cmd/argocd-server/commands/argocd_server.go b/cmd/argocd-server/commands/argocd_server.go
index eea346eaed03d..646ecd6a2aabe 100644
--- a/cmd/argocd-server/commands/argocd_server.go
+++ b/cmd/argocd-server/commands/argocd_server.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math"
+ "strings"
"time"
"github.com/argoproj/pkg/stats"
@@ -18,13 +19,16 @@ import (
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
+ reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
"github.com/argoproj/argo-cd/v2/server"
servercache "github.com/argoproj/argo-cd/v2/server/cache"
+ cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/dex"
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/kube"
+ "github.com/argoproj/argo-cd/v2/util/templates"
"github.com/argoproj/argo-cd/v2/util/tls"
traceutil "github.com/argoproj/argo-cd/v2/util/trace"
)
@@ -49,6 +53,8 @@ func NewCommand() *cobra.Command {
metricsHost string
metricsPort int
otlpAddress string
+ otlpInsecure bool
+ otlpHeaders map[string]string
otlpAttrs []string
glogLevel int
clientConfig clientcmd.ClientConfig
@@ -58,9 +64,11 @@ func NewCommand() *cobra.Command {
repoServerAddress string
dexServerAddress string
disableAuth bool
+ contentTypes string
enableGZip bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
cacheSrc func() (*servercache.Cache, error)
+ repoServerCacheSrc func() (*reposervercache.Cache, error)
frameOptions string
contentSecurityPolicy string
repoServerPlaintext bool
@@ -102,6 +110,8 @@ func NewCommand() *cobra.Command {
errors.CheckError(err)
cache, err := cacheSrc()
errors.CheckError(err)
+ repoServerCache, err := repoServerCacheSrc()
+ errors.CheckError(err)
kubeclientset := kubernetes.NewForConfigOrDie(config)
@@ -162,6 +172,11 @@ func NewCommand() *cobra.Command {
baseHRef = rootPath
}
+ var contentTypesList []string
+ if contentTypes != "" {
+ contentTypesList = strings.Split(contentTypes, ";")
+ }
+
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
ListenPort: listenPort,
@@ -177,9 +192,11 @@ func NewCommand() *cobra.Command {
DexServerAddr: dexServerAddress,
DexTLSConfig: dexTlsConfig,
DisableAuth: disableAuth,
+ ContentTypes: contentTypesList,
EnableGZip: enableGZip,
TLSConfigCustomizer: tlsConfigCustomizer,
Cache: cache,
+ RepoServerCache: repoServerCache,
XFrameOptions: frameOptions,
ContentSecurityPolicy: contentSecurityPolicy,
RedisClient: redisClient,
@@ -199,7 +216,7 @@ func NewCommand() *cobra.Command {
var closer func()
ctx, cancel := context.WithCancel(ctx)
if otlpAddress != "" {
- closer, err = traceutil.InitTracer(ctx, "argocd-server", otlpAddress, otlpAttrs)
+ closer, err = traceutil.InitTracer(ctx, "argocd-server", otlpAddress, otlpInsecure, otlpHeaders, otlpAttrs)
if err != nil {
log.Fatalf("failed to initialize tracing: %v", err)
}
@@ -211,6 +228,13 @@ func NewCommand() *cobra.Command {
}
}
},
+ Example: templates.Examples(`
+ # Start the Argo CD API server with default settings
+ $ argocd-server
+
+ # Start the Argo CD API server on a custom port and enable tracing
+ $ argocd-server --port 8888 --otlp-address localhost:4317
+ `),
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
@@ -224,6 +248,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
+ command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json"), "Semicolon-separated list of allowed content types for non-GET API requests. Any content type is allowed if empty.")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
@@ -231,6 +256,8 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&metricsHost, env.StringFromEnv("ARGOCD_SERVER_METRICS_LISTEN_ADDRESS", "metrics-address"), common.DefaultAddressAPIServerMetrics, "Listen for metrics on given address")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port")
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
+ command.Flags().BoolVar(&otlpInsecure, "otlp-insecure", env.ParseBoolFromEnv("ARGOCD_SERVER_OTLP_INSECURE", true), "OpenTelemetry collector insecure mode")
+ command.Flags().StringToStringVar(&otlpHeaders, "otlp-headers", env.ParseStringToStringFromEnv("ARGOCD_SERVER_OTLP_HEADERS", map[string]string{}, ","), "List of OpenTelemetry collector extra headers sent with traces; headers are comma-separated key-value pairs (e.g. key1=value1,key2=value2)")
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().StringVar(&frameOptions, "x-frame-options", env.StringFromEnv("ARGOCD_SERVER_X_FRAME_OPTIONS", "sameorigin"), "Set X-Frame-Options header in HTTP responses to `value`. To disable, set to \"\".")
@@ -242,8 +269,11 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
- cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) {
- redisClient = client
+ cacheSrc = servercache.AddCacheFlagsToCmd(command, cacheutil.Options{
+ OnClientCreated: func(client *redis.Client) {
+ redisClient = client
+ },
})
+ repoServerCacheSrc = reposervercache.AddCacheFlagsToCmd(command, cacheutil.Options{FlagPrefix: "repo-server-"})
return command
}
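
Note on the content-type handling above: the --api-content-types value is split on ';' only when it is non-empty, because strings.Split("", ";") returns a one-element slice containing the empty string rather than an empty slice, so an empty flag value would otherwise no longer mean "any content type is allowed". A small sketch of that behaviour:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        fmt.Printf("%q\n", strings.Split("", ";")) // [""]

        contentTypes := "application/json;application/yaml"
        var contentTypesList []string
        if contentTypes != "" {
            contentTypesList = strings.Split(contentTypes, ";")
        }
        fmt.Printf("%q\n", contentTypesList) // ["application/json" "application/yaml"]
    }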
diff --git a/cmd/argocd/commands/admin/admin.go b/cmd/argocd/commands/admin/admin.go
index 92cad10479d68..49c81e4da4bfe 100644
--- a/cmd/argocd/commands/admin/admin.go
+++ b/cmd/argocd/commands/admin/admin.go
@@ -48,6 +48,87 @@ func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
+ Example: `# List all clusters
+$ argocd admin cluster list
+
+# Add a new cluster
+$ argocd admin cluster add my-cluster --name my-cluster --in-cluster-context
+
+# Remove a cluster
+$ argocd admin cluster remove my-cluster
+
+# List all projects
+$ argocd admin project list
+
+# Create a new project
+$ argocd admin project create my-project --src-namespace my-source-namespace --dest-namespace my-dest-namespace
+
+# Update a project
+$ argocd admin project update my-project --src-namespace my-updated-source-namespace --dest-namespace my-updated-dest-namespace
+
+# Delete a project
+$ argocd admin project delete my-project
+
+# List all settings
+$ argocd admin settings list
+
+# Get the current settings
+$ argocd admin settings get
+
+# Update settings
+$ argocd admin settings update --repository.resync --value 15
+
+# List all applications
+$ argocd admin app list
+
+# Get application details
+$ argocd admin app get my-app
+
+# Sync an application
+$ argocd admin app sync my-app
+
+# Pause an application
+$ argocd admin app pause my-app
+
+# Resume an application
+$ argocd admin app resume my-app
+
+# List all repositories
+$ argocd admin repo list
+
+# Add a repository
+$ argocd admin repo add https://github.com/argoproj/my-repo.git
+
+# Remove a repository
+$ argocd admin repo remove https://github.com/argoproj/my-repo.git
+
+# Import an application from a YAML file
+$ argocd admin app import -f my-app.yaml
+
+# Export an application to a YAML file
+$ argocd admin app export my-app -o my-exported-app.yaml
+
+# Access the Argo CD web UI
+$ argocd admin dashboard
+
+# List notifications
+$ argocd admin notification list
+
+# Get notification details
+$ argocd admin notification get my-notification
+
+# Create a new notification
+$ argocd admin notification create my-notification -f notification-config.yaml
+
+# Update a notification
+$ argocd admin notification update my-notification -f updated-notification-config.yaml
+
+# Delete a notification
+$ argocd admin notification delete my-notification
+
+# Reset the initial admin password
+$ argocd admin initial-password reset
+`,
}
command.AddCommand(NewClusterCommand(clientOpts, pathOpts))
@@ -57,7 +138,7 @@ func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
command.AddCommand(NewRepoCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
- command.AddCommand(NewDashboardCommand())
+ command.AddCommand(NewDashboardCommand(clientOpts))
command.AddCommand(NewNotificationsCommand())
command.AddCommand(NewInitialPasswordCommand())
diff --git a/cmd/argocd/commands/admin/app.go b/cmd/argocd/commands/admin/app.go
index fbceb436f8609..096c92f9feb01 100644
--- a/cmd/argocd/commands/admin/app.go
+++ b/cmd/argocd/commands/admin/app.go
@@ -45,6 +45,16 @@ func NewAppCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "app",
Short: "Manage applications configuration",
+ Example: `
+# Compare results of two reconciliations and print diff
+argocd admin app diff-reconcile-results APPNAME [flags]
+
+# Generate declarative config for an application
+argocd admin app generate-spec APPNAME
+
+# Reconcile all applications and store reconciliation summary in the specified file
+argocd admin app get-reconcile-results APPNAME
+`,
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
@@ -233,6 +243,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
repoServerAddress string
outputFormat string
refresh bool
+ serverSideDiff bool
)
var command = &cobra.Command{
@@ -270,7 +281,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
appClientset := appclientset.NewForConfigOrDie(cfg)
kubeClientset := kubernetes.NewForConfigOrDie(cfg)
- result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache)
+ result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache, serverSideDiff)
errors.CheckError(err)
} else {
appClientset := appclientset.NewForConfigOrDie(cfg)
@@ -285,6 +296,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
command.Flags().StringVar(&selector, "l", "", "Label selector")
command.Flags().StringVar(&outputFormat, "o", "yaml", "Output format (yaml|json)")
command.Flags().BoolVar(&refresh, "refresh", false, "If set to true then recalculates apps reconciliation")
+ command.Flags().BoolVar(&serverSideDiff, "server-side-diff", false, "If set to \"true\" will use server-side diff while comparing resources. Default (\"false\")")
return command
}
@@ -334,6 +346,7 @@ func reconcileApplications(
repoServerClient reposerverclient.Clientset,
selector string,
createLiveStateCache func(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache,
+ serverSideDiff bool,
) ([]appReconcileResult, error) {
settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, namespace)
argoDB := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -374,7 +387,7 @@ func reconcileApplications(
)
appStateManager := controller.NewAppStateManager(
- argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false)
+ argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false, 0, serverSideDiff)
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
if err != nil {
@@ -409,7 +422,10 @@ func reconcileApplications(
sources = append(sources, app.Spec.GetSource())
revisions = append(revisions, app.Spec.GetSource().TargetRevision)
- res := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
+ res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
+ if err != nil {
+ return nil, err
+ }
items = append(items, appReconcileResult{
Name: app.Name,
Conditions: app.Status.Conditions,
diff --git a/cmd/argocd/commands/admin/app_test.go b/cmd/argocd/commands/admin/app_test.go
index 0cad2485e6696..a0284fe8ffa09 100644
--- a/cmd/argocd/commands/admin/app_test.go
+++ b/cmd/argocd/commands/admin/app_test.go
@@ -113,6 +113,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {
func(argoDB db.ArgoDB, appInformer cache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) statecache.LiveStateCache {
return &liveStateCache
},
+ false,
)
if !assert.NoError(t, err) {
diff --git a/cmd/argocd/commands/admin/cluster.go b/cmd/argocd/commands/admin/cluster.go
index 1bc1417fead4d..abb055cdfa354 100644
--- a/cmd/argocd/commands/admin/cluster.go
+++ b/cmd/argocd/commands/admin/cluster.go
@@ -25,7 +25,7 @@ import (
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/controller/sharding"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
- argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+ "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/util/argo"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
@@ -44,6 +44,15 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
var command = &cobra.Command{
Use: "cluster",
Short: "Manage clusters configuration",
+ Example: `
+# Generate declarative config for a cluster
+argocd admin cluster generate-spec my-cluster -o yaml
+
+# Generate a kubeconfig for a cluster named "my-cluster" and display it in the console
+argocd admin cluster kubeconfig my-cluster
+
+# Print information about namespaces which Argo CD manages in each cluster
+argocd admin cluster namespaces my-cluster`,
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
@@ -62,14 +71,14 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
}
type ClusterWithInfo struct {
- argoappv1.Cluster
+ v1alpha1.Cluster
// Shard holds controller shard number that handles the cluster
Shard int
// Namespaces holds list of namespaces managed by Argo CD in the cluster
Namespaces []string
}
-func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string) ([]ClusterWithInfo, error) {
+func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, shardingAlgorithm string, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
argoDB := db.NewDB(namespace, settingsMgr, kubeClient)
@@ -77,6 +86,10 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
if err != nil {
return nil, err
}
+ clusterShardingCache := sharding.NewClusterSharding(argoDB, shard, replicas, shardingAlgorithm)
+ clusterShardingCache.Init(clustersList)
+ clusterShards := clusterShardingCache.GetDistribution()
+
var cache *appstatecache.Cache
if portForwardRedis {
overrides := clientcmd.ConfigOverrides{}
@@ -88,7 +101,11 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
return nil, err
}
client := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", port)})
- cache = appstatecache.NewCache(cacheutil.NewCache(cacheutil.NewRedisCache(client, time.Hour, cacheutil.RedisCompressionNone)), time.Hour)
+ compressionType, err := cacheutil.CompressionTypeFromString(redisCompressionStr)
+ if err != nil {
+ return nil, err
+ }
+ cache = appstatecache.NewCache(cacheutil.NewCache(cacheutil.NewRedisCache(client, time.Hour, compressionType)), time.Hour)
} else {
cache, err = cacheSrc()
if err != nil {
@@ -109,8 +126,15 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
apps[i] = app
}
clusters := make([]ClusterWithInfo, len(clustersList.Items))
+
batchSize := 10
batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
+ clusterSharding := &sharding.ClusterSharding{
+ Shard: shard,
+ Replicas: replicas,
+ Shards: make(map[string]int),
+ Clusters: make(map[string]*v1alpha1.Cluster),
+ }
for batchNum := 0; batchNum < batchesCount; batchNum++ {
batchStart := batchSize * batchNum
batchEnd := batchSize * (batchNum + 1)
@@ -122,12 +146,12 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
clusterShard := 0
cluster := batch[i]
if replicas > 0 {
- distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
+ distributionFunction := sharding.GetDistributionFunction(clusterSharding.GetClusterAccessor(), common.DefaultShardingAlgorithm, replicas)
distributionFunction(&cluster)
- cluster.Shard = pointer.Int64Ptr(int64(clusterShard))
+ clusterShard = clusterShards[cluster.Server]
+ cluster.Shard = pointer.Int64(int64(clusterShard))
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
}
-
if shard != -1 && clusterShard != shard {
return nil
}
@@ -161,15 +185,17 @@ func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset
func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
- shard int
- replicas int
- clientConfig clientcmd.ClientConfig
- cacheSrc func() (*appstatecache.Cache, error)
- portForwardRedis bool
+ shard int
+ replicas int
+ shardingAlgorithm string
+ clientConfig clientcmd.ClientConfig
+ cacheSrc func() (*appstatecache.Cache, error)
+ portForwardRedis bool
+ redisCompressionStr string
)
var command = cobra.Command{
Use: "shards",
- Short: "Print information about each controller shard and portion of Kubernetes resources it is responsible for.",
+ Short: "Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.",
Run: func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()
@@ -189,8 +215,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
if replicas == 0 {
return
}
-
- clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName)
+ clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
errors.CheckError(err)
if len(clusters) == 0 {
return
@@ -202,8 +227,16 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
+ command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Default: legacy. Supported sharding methods are: [legacy, round-robin]")
command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
+
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
+
+ // parse all added flags so far to get the redis-compression flag that was added by AddCacheFlagsToCmd() above
+ // we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
+ // nolint:errcheck
+ command.ParseFlags(os.Args[1:])
+ redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
return &command
}
@@ -439,15 +472,26 @@ func NewClusterDisableNamespacedMode() *cobra.Command {
func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
- shard int
- replicas int
- clientConfig clientcmd.ClientConfig
- cacheSrc func() (*appstatecache.Cache, error)
- portForwardRedis bool
+ shard int
+ replicas int
+ shardingAlgorithm string
+ clientConfig clientcmd.ClientConfig
+ cacheSrc func() (*appstatecache.Cache, error)
+ portForwardRedis bool
+ redisCompressionStr string
)
var command = cobra.Command{
Use: "stats",
Short: "Prints information cluster statistics and inferred shard number",
+ Example: `
+#Display stats and shards for clusters
+argocd admin cluster stats
+
+#Display cluster statistics for a specific shard
+argocd admin cluster stats --shard=1
+
+#Print stats for a specific cluster (e.g. target-cluster) in a multi-cluster environment
+argocd admin cluster stats target-cluster`,
Run: func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()
@@ -464,7 +508,7 @@ func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
errors.CheckError(err)
}
- clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName)
+ clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -478,8 +522,15 @@ func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
+ command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Default: legacy. Supported sharding methods are: [legacy, round-robin]")
command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
+
+ // parse all added flags so far to get the redis-compression flag that was added by AddCacheFlagsToCmd() above
+ // we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
+ // nolint:errcheck
+ command.ParseFlags(os.Args[1:])
+ redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
return &command
}
@@ -492,6 +543,18 @@ func NewClusterConfig() *cobra.Command {
Use: "kubeconfig CLUSTER_URL OUTPUT_PATH",
Short: "Generates kubeconfig for the specified cluster",
DisableAutoGenTag: true,
+ Example: `
+#Generate a kubeconfig for a cluster named "my-cluster" and display it in the console
+argocd admin cluster kubeconfig my-cluster
+
+#List available kubeconfigs for clusters managed by Argo CD
+argocd admin cluster kubeconfig
+
+#Remove a specific kubeconfig file
+argocd admin cluster kubeconfig my-cluster --delete
+
+#Generate a kubeconfig for a cluster with TLS verification disabled
+argocd admin cluster kubeconfig https://cluster-api-url:6443 /path/to/output/kubeconfig.yaml --insecure-skip-tls-verify`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -562,15 +625,16 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
errors.CheckError(err)
kubeClientset := fake.NewSimpleClientset()
- var awsAuthConf *argoappv1.AWSAuthConfig
- var execProviderConf *argoappv1.ExecProviderConfig
+ var awsAuthConf *v1alpha1.AWSAuthConfig
+ var execProviderConf *v1alpha1.ExecProviderConfig
if clusterOpts.AwsClusterName != "" {
- awsAuthConf = &argoappv1.AWSAuthConfig{
+ awsAuthConf = &v1alpha1.AWSAuthConfig{
ClusterName: clusterOpts.AwsClusterName,
RoleARN: clusterOpts.AwsRoleArn,
+ Profile: clusterOpts.AwsProfile,
}
} else if clusterOpts.ExecProviderCommand != "" {
- execProviderConf = &argoappv1.ExecProviderConfig{
+ execProviderConf = &v1alpha1.ExecProviderConfig{
Command: clusterOpts.ExecProviderCommand,
Args: clusterOpts.ExecProviderArgs,
Env: clusterOpts.ExecProviderEnv,
@@ -594,7 +658,7 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, bearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap)
if clusterOpts.InClusterEndpoint() {
- clst.Server = argoappv1.KubernetesInternalAPIServerAddr
+ clst.Server = v1alpha1.KubernetesInternalAPIServerAddr
}
if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) {
// Ignore `kube-public` cluster endpoints, since this command is intended to run without invoking any network connections.
diff --git a/cmd/argocd/commands/admin/dashboard.go b/cmd/argocd/commands/admin/dashboard.go
index c75476ea8eb2d..21b621d264022 100644
--- a/cmd/argocd/commands/admin/dashboard.go
+++ b/cmd/argocd/commands/admin/dashboard.go
@@ -3,7 +3,9 @@ package admin
import (
"fmt"
+ "github.com/argoproj/argo-cd/v2/util/cli"
"github.com/spf13/cobra"
+ "k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
@@ -14,11 +16,12 @@ import (
"github.com/argoproj/argo-cd/v2/util/errors"
)
-func NewDashboardCommand() *cobra.Command {
+func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
port int
address string
compressionStr string
+ clientConfig clientcmd.ClientConfig
)
cmd := &cobra.Command{
Use: "dashboard",
@@ -28,12 +31,22 @@ func NewDashboardCommand() *cobra.Command {
compression, err := cache.CompressionTypeFromString(compressionStr)
errors.CheckError(err)
- errors.CheckError(headless.MaybeStartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression))
+ clientOpts.Core = true
+ errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression, clientConfig))
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
<-ctx.Done()
},
+ Example: `# Start the Argo CD Web UI locally on the default port and address
+$ argocd admin dashboard
+
+# Start the Argo CD Web UI locally on a custom port and address
+$ argocd admin dashboard --port 8080 --address 127.0.0.1
+
+# Start the Argo CD Web UI with GZip compression
+$ argocd admin dashboard --redis-compress gzip
+ `,
}
- initialize.InitCommand(cmd)
+ clientConfig = cli.AddKubectlFlagsToSet(cmd.Flags())
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address")
cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
diff --git a/cmd/argocd/commands/admin/notifications.go b/cmd/argocd/commands/admin/notifications.go
index a1234cc53b7fe..3cbac0a53b5c2 100644
--- a/cmd/argocd/commands/admin/notifications.go
+++ b/cmd/argocd/commands/admin/notifications.go
@@ -36,7 +36,7 @@ func NewNotificationsCommand() *cobra.Command {
"notifications",
"argocd admin notifications",
applications,
- settings.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm"), func(clientConfig clientcmd.ClientConfig) {
+ settings.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm", false), func(clientConfig clientcmd.ClientConfig) {
k8sCfg, err := clientConfig.ClientConfig()
if err != nil {
log.Fatalf("Failed to parse k8s config: %v", err)
diff --git a/cmd/argocd/commands/admin/project_allowlist.go b/cmd/argocd/commands/admin/project_allowlist.go
index 57b855251daa9..460ea21d93329 100644
--- a/cmd/argocd/commands/admin/project_allowlist.go
+++ b/cmd/argocd/commands/admin/project_allowlist.go
@@ -41,6 +41,8 @@ func NewProjectAllowListGenCommand() *cobra.Command {
var command = &cobra.Command{
Use: "generate-allow-list CLUSTERROLE_PATH PROJ_NAME",
Short: "Generates project allow list from the specified clusterRole file",
+ Example: `# Generates project allow list from the specified clusterRole file
+argocd admin proj generate-allow-list /path/to/clusterrole.yaml my-project`,
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)
diff --git a/cmd/argocd/commands/admin/settings.go b/cmd/argocd/commands/admin/settings.go
index 281d9875691c4..0274b4a422f09 100644
--- a/cmd/argocd/commands/admin/settings.go
+++ b/cmd/argocd/commands/admin/settings.go
@@ -373,11 +373,7 @@ func executeResourceOverrideCommand(ctx context.Context, cmdCtx commandContext,
if gvk.Group != "" {
key = fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind)
}
- override, hasOverride := overrides[key]
- if !hasOverride {
- _, _ = fmt.Printf("No overrides configured for '%s/%s'\n", gvk.Group, gvk.Kind)
- return
- }
+ override := overrides[key]
callback(res, override, overrides)
}
@@ -519,16 +515,16 @@ argocd admin settings resource-overrides health ./deploy.yaml --argocd-cm-path .
executeResourceOverrideCommand(ctx, cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
gvk := res.GroupVersionKind()
- if override.HealthLua == "" {
- _, _ = fmt.Printf("Health script is not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
- return
- }
-
resHealth, err := healthutil.GetResourceHealth(&res, lua.ResourceHealthOverrides(overrides))
- errors.CheckError(err)
- _, _ = fmt.Printf("STATUS: %s\n", resHealth.Status)
- _, _ = fmt.Printf("MESSAGE: %s\n", resHealth.Message)
+ if err != nil {
+ errors.CheckError(err)
+ } else if resHealth == nil {
+ fmt.Printf("Health script is not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
+ } else {
+ _, _ = fmt.Printf("STATUS: %s\n", resHealth.Status)
+ _, _ = fmt.Printf("MESSAGE: %s\n", resHealth.Message)
+ }
})
},
}
diff --git a/cmd/argocd/commands/admin/settings_rbac.go b/cmd/argocd/commands/admin/settings_rbac.go
index 8d94feeaad466..1c09fa0d1cfe7 100644
--- a/cmd/argocd/commands/admin/settings_rbac.go
+++ b/cmd/argocd/commands/admin/settings_rbac.go
@@ -189,7 +189,6 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
}
},
}
-
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
command.Flags().StringVar(&defaultRole, "default-role", "", "name of the default role to use")
@@ -202,24 +201,55 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
// NewRBACValidateCommand returns a new rbac validate command
func NewRBACValidateCommand() *cobra.Command {
var (
- policyFile string
+ policyFile string
+ namespace string
+ clientConfig clientcmd.ClientConfig
)
var command = &cobra.Command{
- Use: "validate --policy-file=POLICYFILE",
+ Use: "validate [--policy-file POLICYFILE] [--namespace NAMESPACE]",
Short: "Validate RBAC policy",
Long: `
Validates an RBAC policy for being syntactically correct. The policy must be
-a local file, and in either CSV or K8s ConfigMap format.
+a local file or a K8s ConfigMap in the provided namespace, and in either CSV or K8s ConfigMap format.
+`,
+ Example: `
+# Check whether a given policy file is valid using a local policy.csv file.
+argocd admin settings rbac validate --policy-file policy.csv
+
+# The policy file can also be a K8s ConfigMap with data keys like argocd-rbac-cm,
+# i.e. 'policy.csv' and (optionally) 'policy.default'
+argocd admin settings rbac validate --policy-file argocd-rbac-cm.yaml
+
+# If --policy-file is not given and --namespace is given instead, the ConfigMap 'argocd-rbac-cm'
+# from K8s is used.
+argocd admin settings rbac validate --namespace argocd
+
+# Either --policy-file or --namespace must be given.
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
- if policyFile == "" {
+ if len(args) > 0 {
c.HelpFunc()(c, args)
- log.Fatalf("Please specify policy to validate using --policy-file")
+ log.Fatalf("too many arguments")
+ }
+
+ if (namespace == "" && policyFile == "") || (namespace != "" && policyFile != "") {
+ c.HelpFunc()(c, args)
+ log.Fatalf("please provide exactly one of --policy-file or --namespace")
}
- userPolicy, _, _ := getPolicy(ctx, policyFile, nil, "")
+
+ restConfig, err := clientConfig.ClientConfig()
+ if err != nil {
+ log.Fatalf("could not get config to create k8s client: %v", err)
+ }
+ realClientset, err := kubernetes.NewForConfig(restConfig)
+ if err != nil {
+ log.Fatalf("could not create k8s client: %v", err)
+ }
+
+ userPolicy, _, _ := getPolicy(ctx, policyFile, realClientset, namespace)
if userPolicy != "" {
if err := rbac.ValidatePolicy(userPolicy); err == nil {
fmt.Printf("Policy is valid.\n")
@@ -228,11 +258,15 @@ a local file, and in either CSV or K8s ConfigMap format.
fmt.Printf("Policy is invalid: %v\n", err)
os.Exit(1)
}
+ } else {
+ log.Fatalf("Policy is empty or could not be loaded.")
}
},
}
-
+ clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
+ command.Flags().StringVar(&namespace, "namespace", "", "namespace to get argo rbac configmap from")
+
return command
}
diff --git a/cmd/argocd/commands/admin/settings_rbac_test.go b/cmd/argocd/commands/admin/settings_rbac_test.go
index a4b4b437e114c..79835ffd0c14d 100644
--- a/cmd/argocd/commands/admin/settings_rbac_test.go
+++ b/cmd/argocd/commands/admin/settings_rbac_test.go
@@ -5,15 +5,42 @@ import (
"os"
"testing"
+ "github.com/argoproj/argo-cd/v2/util/assets"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
-
- "github.com/argoproj/argo-cd/v2/util/assets"
+ restclient "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
+type FakeClientConfig struct {
+ clientConfig clientcmd.ClientConfig
+}
+
+func NewFakeClientConfig(clientConfig clientcmd.ClientConfig) *FakeClientConfig {
+ return &FakeClientConfig{clientConfig: clientConfig}
+}
+
+func (f *FakeClientConfig) RawConfig() (clientcmdapi.Config, error) {
+ config, err := f.clientConfig.RawConfig()
+ return config, err
+}
+
+func (f *FakeClientConfig) ClientConfig() (*restclient.Config, error) {
+ return f.clientConfig.ClientConfig()
+}
+
+func (f *FakeClientConfig) Namespace() (string, bool, error) {
+ return f.clientConfig.Namespace()
+}
+
+func (f *FakeClientConfig) ConfigAccess() clientcmd.ConfigAccess {
+ return nil
+}
+
func Test_isValidRBACAction(t *testing.T) {
for k := range validRBACActions {
t.Run(k, func(t *testing.T) {
@@ -200,3 +227,19 @@ p, role:, certificates, get, .*, allow`
require.True(t, ok)
})
}
+
+func TestNewRBACCanCommand(t *testing.T) {
+ command := NewRBACCanCommand()
+
+ require.NotNil(t, command)
+ assert.Equal(t, "can", command.Name())
+ assert.Equal(t, "Check RBAC permissions for a role or subject", command.Short)
+}
+
+func TestNewRBACValidateCommand(t *testing.T) {
+ command := NewRBACValidateCommand()
+
+ require.NotNil(t, command)
+ assert.Equal(t, "validate", command.Name())
+ assert.Equal(t, "Validate RBAC policy", command.Short)
+}
diff --git a/cmd/argocd/commands/admin/settings_test.go b/cmd/argocd/commands/admin/settings_test.go
index adb18c80ee84e..ff817017f4be5 100644
--- a/cmd/argocd/commands/admin/settings_test.go
+++ b/cmd/argocd/commands/admin/settings_test.go
@@ -226,6 +226,18 @@ spec:
replicas: 0`
)
+const (
+ testCustomResourceYAML = `apiVersion: example.com/v1alpha1
+kind: ExampleResource
+metadata:
+ name: example-resource
+ labels:
+ app: example
+spec:
+ replicas: 0`
+)
+
const (
testCronJobYAML = `apiVersion: batch/v1
kind: CronJob
@@ -285,7 +297,7 @@ func TestResourceOverrideIgnoreDifferences(t *testing.T) {
assert.NoError(t, err)
})
assert.NoError(t, err)
- assert.Contains(t, out, "No overrides configured")
+ assert.Contains(t, out, "Ignore differences are not configured for 'apps/Deployment'\n")
})
t.Run("DataIgnored", func(t *testing.T) {
@@ -305,7 +317,7 @@ func TestResourceOverrideIgnoreDifferences(t *testing.T) {
}
func TestResourceOverrideHealth(t *testing.T) {
- f, closer, err := tempFile(testDeploymentYAML)
+ f, closer, err := tempFile(testCustomResourceYAML)
if !assert.NoError(t, err) {
return
}
@@ -313,19 +325,34 @@ func TestResourceOverrideHealth(t *testing.T) {
t.Run("NoHealthAssessment", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
- "resource.customizations": `apps/Deployment: {}`}))
+ "resource.customizations": `example.com/ExampleResource: {}`}))
out, err := captureStdout(func() {
cmd.SetArgs([]string{"health", f})
err := cmd.Execute()
assert.NoError(t, err)
})
assert.NoError(t, err)
- assert.Contains(t, out, "Health script is not configured")
+ assert.Contains(t, out, "Health script is not configured for 'example.com/ExampleResource'\n")
})
t.Run("HealthAssessmentConfigured", func(t *testing.T) {
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
- "resource.customizations": `apps/Deployment:
+ "resource.customizations": `example.com/ExampleResource:
+ health.lua: |
+ return { status = "Progressing" }
+`}))
+ out, err := captureStdout(func() {
+ cmd.SetArgs([]string{"health", f})
+ err := cmd.Execute()
+ assert.NoError(t, err)
+ })
+ assert.NoError(t, err)
+ assert.Contains(t, out, "Progressing")
+ })
+
+ t.Run("HealthAssessmentConfiguredWildcard", func(t *testing.T) {
+ cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
+ "resource.customizations": `example.com/*:
health.lua: |
return { status = "Progressing" }
`}))
@@ -412,7 +439,7 @@ resume false
action.lua: |
job1 = {}
job1.apiVersion = "batch/v1"
- job1.kind = "Job"
+ job1.kind = "Job"
job1.metadata = {}
job1.metadata.name = "hello-1"
job1.metadata.namespace = "obj.metadata.namespace"
diff --git a/cmd/argocd/commands/app.go b/cmd/argocd/commands/app.go
index 55ed2ee8790f3..99be7d26b76d3 100644
--- a/cmd/argocd/commands/app.go
+++ b/cmd/argocd/commands/app.go
@@ -318,6 +318,35 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
var command = &cobra.Command{
Use: "get APPNAME",
Short: "Get application details",
+ Example: templates.Examples(`
+ # Get basic details about the application "my-app" in wide format
+ argocd app get my-app -o wide
+
+ # Get detailed information about the application "my-app" in YAML format
+ argocd app get my-app -o yaml
+
+ # Get details of the application "my-app" in JSON format
+ argocd get my-app -o json
+
+ # Get application details and include information about the current operation
+ argocd app get my-app --show-operation
+
+ # Show application parameters and overrides
+ argocd app get my-app --show-params
+
+ # Refresh application data when retrieving
+ argocd app get my-app --refresh
+
+ # Perform a hard refresh, including refreshing application data and target manifests cache
+ argocd app get my-app --hard-refresh
+
+ # Get application details and display them in a tree format
+ argocd app get my-app --output tree
+
+ # Get application details and display them in a detailed tree format
+ argocd app get my-app --output tree=detailed
+ `),
+
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
if len(args) == 0 {
@@ -495,8 +524,8 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
} else {
return
}
- } //Done with receive message
- } //Done with retry
+ } // Done with receive message
+ } // Done with retry
},
}
@@ -860,7 +889,7 @@ func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, n
for i, item := range source.Kustomize.Images {
if argoappv1.KustomizeImage(kustomizeImage).Match(item) {
updated = true
- //remove i
+ // remove i
a := source.Kustomize.Images
copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index.
a[len(a)-1] = "" // Erase last element (write zero value).
@@ -1033,7 +1062,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
var command = &cobra.Command{
Use: "diff APPNAME",
Short: shortDesc,
- Long: shortDesc + "\nUses 'diff' to render the difference. KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff tool.\nReturns the following exit codes: 2 on general errors, 1 when a diff is found, and 0 when no diff is found",
+ Long: shortDesc + "\nUses 'diff' to render the difference. KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff tool.\nReturns the following exit codes: 2 on general errors, 1 when a diff is found, and 0 when no diff is found\nKubernetes Secrets are ignored from this diff.",
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -1087,6 +1116,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
defer argoio.Close(conn)
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
errors.CheckError(err)
+
diffOption.local = local
diffOption.localRepoRoot = localRepoRoot
diffOption.cluster = cluster
@@ -1595,7 +1625,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
list, err := appIf.List(ctx, &application.ApplicationQuery{Selector: pointer.String(selector)})
errors.CheckError(err)
for _, i := range list.Items {
- appNames = append(appNames, i.Name)
+ appNames = append(appNames, i.QualifiedName())
}
}
for _, appName := range appNames {
@@ -1875,7 +1905,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
Backoff: &argoappv1.Backoff{
Duration: retryBackoffDuration.String(),
MaxDuration: retryBackoffMaxDuration.String(),
- Factor: pointer.Int64Ptr(retryBackoffFactor),
+ Factor: pointer.Int64(retryBackoffFactor),
},
}
}
@@ -1966,7 +1996,7 @@ func getAppNamesBySelector(ctx context.Context, appIf application.ApplicationSer
return []string{}, fmt.Errorf("no apps match selector %v", selector)
}
for _, i := range list.Items {
- appNames = append(appNames, i.Name)
+ appNames = append(appNames, i.QualifiedName())
}
}
return appNames, nil
@@ -2114,7 +2144,7 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string
} else if watch.degraded && watch.health {
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
healthStatus == string(health.HealthStatusDegraded)
- //below are good
+ // below are good
} else if watch.suspended && watch.health {
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
healthStatus == string(health.HealthStatusSuspended)
diff --git a/cmd/argocd/commands/app_resources.go b/cmd/argocd/commands/app_resources.go
index e48465c7e4693..4cffb706ff1bc 100644
--- a/cmd/argocd/commands/app_resources.go
+++ b/cmd/argocd/commands/app_resources.go
@@ -3,6 +3,7 @@ package commands
import (
"fmt"
"os"
+ "text/tabwriter"
"github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
@@ -18,8 +19,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
-
- "text/tabwriter"
)
func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
@@ -30,6 +29,7 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
var kind string
var group string
var all bool
+ var project string
command := &cobra.Command{
Use: "patch-resource APPNAME",
Short: "Patch resource in an application",
@@ -46,6 +46,7 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
command.Flags().StringVar(&group, "group", "", "Group")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching of resources")
+ command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
command.Run = func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -77,6 +78,7 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
Kind: pointer.String(gvk.Kind),
Patch: pointer.String(patch),
PatchType: pointer.String(patchType),
+ Project: pointer.String(project),
})
errors.CheckError(err)
log.Infof("Resource '%s' patched", obj.GetName())
@@ -94,6 +96,7 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
var force bool
var orphan bool
var all bool
+ var project string
command := &cobra.Command{
Use: "delete-resource APPNAME",
Short: "Delete resource in an application",
@@ -108,6 +111,7 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
command.Flags().BoolVar(&force, "force", false, "Indicates whether to orphan the dependents of the deleted resource")
command.Flags().BoolVar(&orphan, "orphan", false, "Indicates whether to force delete the resource")
command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching of resources")
+ command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
command.Run = func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -139,6 +143,7 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
Kind: pointer.String(gvk.Kind),
Force: &force,
Orphan: &orphan,
+ Project: pointer.String(project),
})
errors.CheckError(err)
log.Infof("Resource '%s' deleted", obj.GetName())
@@ -250,6 +255,7 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var orphaned bool
var output string
+ var project string
var command = &cobra.Command{
Use: "resources APPNAME",
Short: "List resource of application",
@@ -266,6 +272,7 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
appResourceTree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{
ApplicationName: &appName,
AppNamespace: &appNs,
+ Project: &project,
})
errors.CheckError(err)
printResources(listAll, orphaned, appResourceTree, output)
@@ -273,5 +280,6 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
}
command.Flags().BoolVar(&orphaned, "orphaned", false, "Lists only orphaned resources")
command.Flags().StringVar(&output, "output", "", "Provides the tree view of the resources")
+ command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
return command
}
diff --git a/cmd/argocd/commands/cluster.go b/cmd/argocd/commands/cluster.go
index a1d1589540af0..f203b82ae9ac0 100644
--- a/cmd/argocd/commands/cluster.go
+++ b/cmd/argocd/commands/cluster.go
@@ -111,6 +111,7 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
awsAuthConf = &argoappv1.AWSAuthConfig{
ClusterName: clusterOpts.AwsClusterName,
RoleARN: clusterOpts.AwsRoleArn,
+ Profile: clusterOpts.AwsProfile,
}
} else if clusterOpts.ExecProviderCommand != "" {
execProviderConf = &argoappv1.ExecProviderConfig{
@@ -485,6 +486,23 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
+ Example: `
+# List Clusters in Default "Wide" Format
+argocd cluster list
+
+# List clusters by specifying the Argo CD server
+argocd cluster list --server
+
+# List Clusters in JSON Format
+argocd cluster list -o json --server
+
+# List Clusters in YAML Format
+argocd cluster list -o yaml --server
+
+# List Clusters that have been added to your Argo CD
+argocd cluster list -o server
+
+`,
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|server")
return command
diff --git a/cmd/argocd/commands/gpg.go b/cmd/argocd/commands/gpg.go
index 7a48a915bebec..73768fc18a324 100644
--- a/cmd/argocd/commands/gpg.go
+++ b/cmd/argocd/commands/gpg.go
@@ -14,6 +14,7 @@ import (
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
+ "github.com/argoproj/argo-cd/v2/util/templates"
)
// NewGPGCommand returns a new instance of an `argocd repo` command
@@ -42,6 +43,17 @@ func NewGPGListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "list",
Short: "List configured GPG public keys",
+ Example: templates.Examples(`
+ # List all configured GPG public keys in wide format (default).
+ argocd gpg list
+
+ # List all configured GPG public keys in JSON format.
+ argocd gpg list -o json
+
+ # List all configured GPG public keys in YAML format.
+ argocd gpg list -o yaml
+ `),
+
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -72,6 +84,17 @@ func NewGPGGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "get KEYID",
Short: "Get the GPG public key with ID from the server",
+ Example: templates.Examples(`
+ # Get a GPG public key with the specified KEYID in wide format (default).
+ argocd gpg get KEYID
+
+ # Get a GPG public key with the specified KEYID in JSON format.
+ argocd gpg get KEYID -o json
+
+ # Get a GPG public key with the specified KEYID in YAML format.
+ argocd gpg get KEYID -o yaml
+ `),
+
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -109,6 +132,11 @@ func NewGPGAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add",
Short: "Adds a GPG public key to the server's keyring",
+ Example: templates.Examples(`
+ # Add a GPG public key to the server's keyring from a file.
+ argocd gpg add --from /path/to/keyfile
+ `),
+
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
diff --git a/cmd/argocd/commands/headless/headless.go b/cmd/argocd/commands/headless/headless.go
index 070d9c9c83bcb..d48019a2216b9 100644
--- a/cmd/argocd/commands/headless/headless.go
+++ b/cmd/argocd/commands/headless/headless.go
@@ -78,6 +78,12 @@ func (c *forwardCacheClient) Set(item *cache.Item) error {
})
}
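+// Rename forwards the rename to the lazily-initialized cache client, mirroring the Set and Get wrappers above.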
+func (c *forwardCacheClient) Rename(oldKey string, newKey string, expiration time.Duration) error {
+ return c.doLazy(func(client cache.CacheClient) error {
+ return client.Rename(oldKey, newKey, expiration)
+ })
+}
+
func (c *forwardCacheClient) Get(key string, obj interface{}) error {
return c.doLazy(func(client cache.CacheClient) error {
return client.Get(key, obj)
@@ -153,9 +159,11 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
//
// If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
// not start the local server.
-func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error {
- flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
- clientConfig := cli.AddKubectlFlagsToSet(flags)
+func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType, clientConfig clientcmd.ClientConfig) error {
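+ // A nil clientConfig preserves the previous behavior of building the kube client config from a temporary kubectl flag set.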
+ if clientConfig == nil {
+ flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
+ clientConfig = cli.AddKubectlFlagsToSet(flags)
+ }
startInProcessAPI := clientOpts.Core
if !startInProcessAPI {
// Core mode is enabled on client options. Check the local config to see if we should start the API server.
@@ -244,6 +252,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
if !cache2.WaitForCacheSync(ctx.Done(), srv.Initialized) {
log.Fatal("Timed out waiting for project cache to sync")
}
+
tries := 5
for i := 0; i < tries; i++ {
err = testAPI(ctx, clientOpts)
@@ -265,7 +274,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
// If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
// If we're not in core mode, this function call will do nothing.
- err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone)
+ err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone, nil)
if err != nil {
log.Fatal(err)
}
diff --git a/cmd/argocd/commands/login.go b/cmd/argocd/commands/login.go
index 3e2ad4e7d1b73..abb2b004291c2 100644
--- a/cmd/argocd/commands/login.go
+++ b/cmd/argocd/commands/login.go
@@ -106,6 +106,7 @@ argocd login cd.argoproj.io --core`,
PortForwardNamespace: globalClientOpts.PortForwardNamespace,
Headers: globalClientOpts.Headers,
KubeOverrides: globalClientOpts.KubeOverrides,
+ ServerName: globalClientOpts.ServerName,
}
if ctxName == "" {
diff --git a/cmd/argocd/commands/project.go b/cmd/argocd/commands/project.go
index dc894b4a79f27..32fb9e779e8ed 100644
--- a/cmd/argocd/commands/project.go
+++ b/cmd/argocd/commands/project.go
@@ -106,7 +106,7 @@ func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
# Create a new project with name PROJECT
argocd proj create PROJECT
- # Create a new project with name PROJECT from a file or URL to a kubernetes manifest
+ # Create a new project with name PROJECT from a file or URL to a Kubernetes manifest
argocd proj create PROJECT -f FILE|URL
`),
Run: func(c *cobra.Command, args []string) {
diff --git a/cmd/argocd/commands/project_role.go b/cmd/argocd/commands/project_role.go
index 987e61914d858..5920bac0dc8e4 100644
--- a/cmd/argocd/commands/project_role.go
+++ b/cmd/argocd/commands/project_role.go
@@ -18,6 +18,7 @@ import (
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/jwt"
+ "github.com/argoproj/argo-cd/v2/util/templates"
)
const (
@@ -56,6 +57,30 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob
var command = &cobra.Command{
Use: "add-policy PROJECT ROLE-NAME",
Short: "Add a policy to a project role",
+ Example: `# Before adding new policy
+$ argocd proj role get test-project test-role
+Role Name: test-role
+Description:
+Policies:
+p, proj:test-project:test-role, projects, get, test-project, allow
+JWT Tokens:
+ID ISSUED-AT EXPIRES-AT
+1696759698 2023-10-08T11:08:18+01:00 (3 hours ago)
+
+# Add a new policy to allow update to the project
+$ argocd proj role add-policy test-project test-role -a update -p allow -o project
+
+# Policy should be updated
+$ argocd proj role get test-project test-role
+Role Name: test-role
+Description:
+Policies:
+p, proj:test-project:test-role, projects, get, test-project, allow
+p, proj:test-project:test-role, applications, update, test-project/project, allow
+JWT Tokens:
+ID ISSUED-AT EXPIRES-AT
+1696759698 2023-10-08T11:08:18+01:00 (3 hours ago)
+`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -93,6 +118,30 @@ func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *
var command = &cobra.Command{
Use: "remove-policy PROJECT ROLE-NAME",
Short: "Remove a policy from a role within a project",
+ Example: `# List the policies of test-role before removing a policy
+$ argocd proj role get test-project test-role
+Role Name: test-role
+Description:
+Policies:
+p, proj:test-project:test-role, projects, get, test-project, allow
+p, proj:test-project:test-role, applications, update, test-project/project, allow
+JWT Tokens:
+ID ISSUED-AT EXPIRES-AT
+1696759698 2023-10-08T11:08:18+01:00 (3 hours ago)
+
+# Remove the policy to allow update to objects
+$ argocd proj role remove-policy test-project test-role -a update -p allow -o project
+
+# The role should be removed now.
+$ argocd proj role get test-project test-role
+Role Name: test-role
+Description:
+Policies:
+p, proj:test-project:test-role, projects, get, test-project, allow
+JWT Tokens:
+ID ISSUED-AT EXPIRES-AT
+1696759698 2023-10-08T11:08:18+01:00 (4 hours ago)
+`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -140,6 +189,11 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
var command = &cobra.Command{
Use: "create PROJECT ROLE-NAME",
Short: "Create a project role",
+ Example: templates.Examples(`
+ # Create a project role in the "my-project" project with the name "my-role".
+ argocd proj role create my-project my-role --description "My project role description"
+ `),
+
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -174,8 +228,9 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
// NewProjectRoleDeleteCommand returns a new instance of an `argocd proj role delete` command
func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
- Use: "delete PROJECT ROLE-NAME",
- Short: "Delete a project role",
+ Use: "delete PROJECT ROLE-NAME",
+ Short: "Delete a project role",
+ Example: `$ argocd proj role delete test-project test-role`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -223,8 +278,15 @@ func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *c
tokenID string
)
var command = &cobra.Command{
- Use: "create-token PROJECT ROLE-NAME",
- Short: "Create a project token",
+ Use: "create-token PROJECT ROLE-NAME",
+ Short: "Create a project token",
+ Example: `$ argocd proj role create-token test-project test-role
+Create token succeeded for proj:test-project:test-role.
+ ID: f316c466-40bd-4cfd-8a8c-1392e92255d4
+ Issued At: 2023-10-08T15:21:40+01:00
+ Expires At: Never
+ Token: xxx
+`,
Aliases: []string{"token-create"},
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -288,8 +350,13 @@ func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *co
useUnixTime bool
)
var command = &cobra.Command{
- Use: "list-tokens PROJECT ROLE-NAME",
- Short: "List tokens for a given role.",
+ Use: "list-tokens PROJECT ROLE-NAME",
+ Short: "List tokens for a given role.",
+ Example: `$ argocd proj role list-tokens test-project test-role
+ID ISSUED AT EXPIRES AT
+f316c466-40bd-4cfd-8a8c-1392e92255d4 2023-10-08T15:21:40+01:00 Never
+fa9d3517-c52d-434c-9bff-215b38508842 2023-10-08T11:08:18+01:00 Never
+`,
Aliases: []string{"list-token", "token-list"},
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -339,8 +406,35 @@ func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *co
// NewProjectRoleDeleteTokenCommand returns a new instance of an `argocd proj role delete-token` command
func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
- Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
- Short: "Delete a project token",
+ Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
+ Short: "Delete a project token",
+ Example: `# Create project test-project
+$ argocd proj create test-project
+
+# Create a role associated with test-project
+$ argocd proj role create test-project test-role
+Role 'test-role' created
+
+# Create a token for test-role associated with test-project
+$ argocd proj role create-token test-project test-role
+Create token succeeded for proj:test-project:test-role.
+ ID: c312450e-12e1-4e0d-9f65-fac9cb027b32
+ Issued At: 2023-10-08T13:58:57+01:00
+ Expires At: Never
+ Token: xxx
+
+# Get the test-role token ID to pass to the delete-token command below
+$ argocd proj role get test-project test-role
+Role Name: test-role
+Description:
+Policies:
+p, proj:test-project:test-role, projects, get, test-project, allow
+JWT Tokens:
+ID ISSUED-AT EXPIRES-AT
+1696769937 2023-10-08T13:58:57+01:00 (6 minutes ago)
+
+$ argocd proj role delete-token test-project test-role 1696769937
+`,
Aliases: []string{"token-delete", "remove-token"},
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -389,6 +483,15 @@ func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List all the roles in a project",
+ Example: templates.Examples(`
+ # List all the roles in the specified project in the default table format.
+ argocd proj role list PROJECT
+
+ # List the roles in the project in formats like json, yaml, wide, or name.
+ argocd proj role list PROJECT --output json
+
+ `),
+
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -424,6 +527,16 @@ func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
var command = &cobra.Command{
Use: "get PROJECT ROLE-NAME",
Short: "Get the details of a specific role",
+ Example: `$ argocd proj role get test-project test-role
+Role Name: test-role
+Description:
+Policies:
+p, proj:test-project:test-role, projects, get, test-project, allow
+JWT Tokens:
+ID ISSUED-AT EXPIRES-AT
+1696774900 2023-10-08T15:21:40+01:00 (4 minutes ago)
+1696759698 2023-10-08T11:08:18+01:00 (4 hours ago)
+`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
diff --git a/cmd/argocd/commands/projectwindows.go b/cmd/argocd/commands/projectwindows.go
index 0bc867cc6cf68..93843130ebb13 100644
--- a/cmd/argocd/commands/projectwindows.go
+++ b/cmd/argocd/commands/projectwindows.go
@@ -22,6 +22,18 @@ func NewProjectWindowsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
roleCommand := &cobra.Command{
Use: "windows",
Short: "Manage a project's sync windows",
+ Example: `
+#Add a sync window to a project
+argocd proj windows add my-project \
+--schedule "0 0 * * 1-5" \
+--duration 1h \
+--prune
+
+#Delete a sync window from a project
+argocd proj windows delete
+
+#List project sync windows
+argocd proj windows list `,
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -42,6 +54,12 @@ func NewProjectWindowsDisableManualSyncCommand(clientOpts *argocdclient.ClientOp
Use: "disable-manual-sync PROJECT ID",
Short: "Disable manual sync for a sync window",
Long: "Disable manual sync for a sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
+ Example: `
+#Disable manual sync for a sync window in a project
+argocd proj windows disable-manual-sync PROJECT ID
+
+#Disable manual sync for a window set on the default project with ID 0
+argocd proj windows disable-manual-sync default 0`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -79,6 +97,15 @@ func NewProjectWindowsEnableManualSyncCommand(clientOpts *argocdclient.ClientOpt
Use: "enable-manual-sync PROJECT ID",
Short: "Enable manual sync for a sync window",
Long: "Enable manual sync for a sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
+ Example: `
+#Enable manual sync for a sync window (general case)
+argocd proj windows enable-manual-sync PROJECT ID
+
+#Enable manual sync for a window set on the default project with ID 2
+argocd proj windows enable-manual-sync default 2
+
+#Enabling manual sync with a custom message
+argocd proj windows enable-manual-sync my-app-project --message "Manual sync initiated by admin"`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -125,6 +152,24 @@ func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) *
var command = &cobra.Command{
Use: "add PROJECT",
Short: "Add a sync window to a project",
+ Example: `
+#Add a 1 hour allow sync window
+argocd proj windows add PROJECT \
+ --kind allow \
+ --schedule "0 22 * * *" \
+ --duration 1h \
+ --applications "*"
+
+#Add a deny sync window with the ability to manually sync.
+argocd proj windows add PROJECT \
+ --kind deny \
+ --schedule "30 10 * * *" \
+ --duration 30m \
+ --applications "prod-\\*,website" \
+ --namespaces "default,\\*-prod" \
+ --clusters "prod,staging" \
+ --manual-sync
+ `,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -158,11 +203,17 @@ func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) *
return command
}
-// NewProjectWindowsAddWindowCommand returns a new instance of an `argocd proj windows delete` command
+// NewProjectWindowsDeleteCommand returns a new instance of an `argocd proj windows delete` command
func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ID",
Short: "Delete a sync window from a project. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
+ Example: `
+#Delete a sync window from a project (default) with ID 0
+argocd proj windows delete default 0
+
+#Delete a sync window from a project (new-project) with ID 1
+argocd proj windows delete new-project 1`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -205,6 +256,10 @@ func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cob
Use: "update PROJECT ID",
Short: "Update a project sync window",
Long: "Update a project sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
+ Example: `# Change a sync window's schedule
+argocd proj windows update PROJECT ID \
+ --schedule "0 20 * * *"
+`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -253,6 +308,15 @@ func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List project sync windows",
+ Example: `
+#List project windows
+argocd proj windows list PROJECT
+
+#List project windows in yaml format
+argocd proj windows list PROJECT -o yaml
+
+#List project windows info for a project name (test-project)
+argocd proj windows list test-project`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -285,8 +349,8 @@ func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra
func printSyncWindows(proj *v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var fmtStr string
- headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC"}
- fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
+ headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC", "TIMEZONE"}
+ fmtStr = strings.Repeat("%s\t", len(headers)) + "\n"
fmt.Fprintf(w, fmtStr, headers...)
if proj.Spec.SyncWindows.HasWindows() {
for i, window := range proj.Spec.SyncWindows {
@@ -300,6 +364,7 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
formatListOutput(window.Namespaces),
formatListOutput(window.Clusters),
formatManualOutput(window.ManualSync),
+ window.TimeZone,
}
fmt.Fprintf(w, fmtStr, vals...)
}
diff --git a/cmd/argocd/commands/repo.go b/cmd/argocd/commands/repo.go
index 2bf9714a06f11..1a5b4388fbeba 100644
--- a/cmd/argocd/commands/repo.go
+++ b/cmd/argocd/commands/repo.go
@@ -64,6 +64,12 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
# Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here
argocd repo add ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa
+ # Add a Git repository via SSH using socks5 proxy with no proxy credentials
+ argocd repo add ssh://git@github.com/argoproj/argocd-example-apps --ssh-private-key-path ~/id_rsa --proxy socks5://your.proxy.server.ip:1080
+
+ # Add a Git repository via SSH using socks5 proxy with proxy credentials
+ argocd repo add ssh://git@github.com/argoproj/argocd-example-apps --ssh-private-key-path ~/id_rsa --proxy socks5://username:password@your.proxy.server.ip:1080
+
# Add a private Git repository via HTTPS using username/password and TLS client certificates:
argocd repo add https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key
diff --git a/cmd/argocd/commands/repocreds.go b/cmd/argocd/commands/repocreds.go
index cf764e7d84de9..e43b9713a2927 100644
--- a/cmd/argocd/commands/repocreds.go
+++ b/cmd/argocd/commands/repocreds.go
@@ -247,11 +247,17 @@ func NewRepoCredsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
Use: "list",
Short: "List configured repository credentials",
Example: templates.Examples(`
- # List all the configured repository credentials
+ # List all repo URLs
argocd repocreds list
- # List all the configured repository credentials in json format
+ # List all repo URLs in json format
argocd repocreds list -o json
+
+ # List all repo URLs in yaml format
+ argocd repocreds list -o yaml
+
+ # List all repo URLs in url format
+ argocd repocreds list -o url
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
diff --git a/cmd/util/app.go b/cmd/util/app.go
index d64c5ed02e6cb..e08ee80305c48 100644
--- a/cmd/util/app.go
+++ b/cmd/util/app.go
@@ -295,7 +295,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
Backoff: &argoappv1.Backoff{
Duration: appOpts.retryBackoffDuration.String(),
MaxDuration: appOpts.retryBackoffMaxDuration.String(),
- Factor: pointer.Int64Ptr(appOpts.retryBackoffFactor),
+ Factor: pointer.Int64(appOpts.retryBackoffFactor),
},
}
} else if appOpts.retryLimit == 0 {
diff --git a/cmd/util/cluster.go b/cmd/util/cluster.go
index 95c071c882b12..dffb52e775a97 100644
--- a/cmd/util/cluster.go
+++ b/cmd/util/cluster.go
@@ -144,6 +144,7 @@ type ClusterOptions struct {
Upsert bool
ServiceAccount string
AwsRoleArn string
+ AwsProfile string
AwsClusterName string
SystemNamespace string
Namespaces []string
@@ -169,6 +170,7 @@ func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) {
command.Flags().BoolVar(&opts.InCluster, "in-cluster", false, "Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
command.Flags().StringVar(&opts.AwsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws cli eks token command will be used to access cluster")
command.Flags().StringVar(&opts.AwsRoleArn, "aws-role-arn", "", "Optional AWS role arn. If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain.")
+ command.Flags().StringVar(&opts.AwsProfile, "aws-profile", "", "Optional AWS profile. If set then AWS IAM Authenticator uses this profile to perform cluster operations instead of the default AWS credential provider chain.")
command.Flags().StringArrayVar(&opts.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage")
command.Flags().BoolVar(&opts.ClusterResources, "cluster-resources", false, "Indicates if cluster level resources should be managed. The setting is used only if list of managed namespaces is not empty.")
command.Flags().StringVar(&opts.Name, "name", "", "Overwrite the cluster name")
diff --git a/cmd/util/project.go b/cmd/util/project.go
index ef157f6873081..fa446ceb3b41c 100644
--- a/cmd/util/project.go
+++ b/cmd/util/project.go
@@ -115,7 +115,7 @@ func GetOrphanedResourcesSettings(flagSet *pflag.FlagSet, opts ProjectOpts) *v1a
if opts.orphanedResourcesEnabled || warnChanged {
settings := v1alpha1.OrphanedResourcesMonitorSettings{}
if warnChanged {
- settings.Warn = pointer.BoolPtr(opts.orphanedResourcesWarn)
+ settings.Warn = pointer.Bool(opts.orphanedResourcesWarn)
}
return &settings
}
diff --git a/cmpserver/plugin/plugin.go b/cmpserver/plugin/plugin.go
index f03b73f24dcf6..ca1e7592218ea 100644
--- a/cmpserver/plugin/plugin.go
+++ b/cmpserver/plugin/plugin.go
@@ -120,11 +120,16 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
logCtx.Error(err.Error())
return strings.TrimSuffix(output, "\n"), err
}
+
+ logCtx = logCtx.WithFields(log.Fields{
+ "stderr": stderr.String(),
+ "command": command,
+ })
if len(output) == 0 {
- log.WithFields(log.Fields{
- "stderr": stderr.String(),
- "command": command,
- }).Warn("Plugin command returned zero output")
+ logCtx.Warn("Plugin command returned zero output")
+ } else {
+ // Log stderr even on successful commands to help develop plugins
+ logCtx.Info("Plugin command successful")
}
return strings.TrimSuffix(output, "\n"), nil
diff --git a/cmpserver/server.go b/cmpserver/server.go
index bbb493f6b1d66..1d07e531394d3 100644
--- a/cmpserver/server.go
+++ b/cmpserver/server.go
@@ -65,7 +65,7 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize),
grpc.KeepaliveEnforcementPolicy(
keepalive.EnforcementPolicy{
- MinTime: common.GRPCKeepAliveEnforcementMinimum,
+ MinTime: common.GetGRPCKeepAliveEnforcementMinimum(),
},
),
}
diff --git a/common/common.go b/common/common.go
index d7c2d24738b58..2f053d7a28198 100644
--- a/common/common.go
+++ b/common/common.go
@@ -115,9 +115,9 @@ const (
LegacyShardingAlgorithm = "legacy"
// RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm; it uses an equal distribution across all shards
RoundRobinShardingAlgorithm = "round-robin"
- DefaultShardingAlgorithm = LegacyShardingAlgorithm
// AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
AppControllerHeartbeatUpdateRetryCount = 3
+ DefaultShardingAlgorithm = LegacyShardingAlgorithm
)
// Dex related constants
@@ -258,6 +258,11 @@ const (
EnvRedisName = "ARGOCD_REDIS_NAME"
// EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key.
EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME"
+ // EnvGRPCKeepAliveMin defines the GRPCKeepAliveEnforcementMinimum, used in the grpc.KeepaliveEnforcementPolicy. Expects a "Duration" format (e.g. 10s).
+ EnvGRPCKeepAliveMin = "ARGOCD_GRPC_KEEP_ALIVE_MIN"
+ // EnvServerSideDiff defines the env var used to enable ServerSide Diff feature.
+ // If defined, value must be "true" or "false".
+ EnvServerSideDiff = "ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF"
)
// Config Management Plugin related constants
@@ -351,11 +356,26 @@ const (
// gRPC settings
const (
- GRPCKeepAliveEnforcementMinimum = 10 * time.Second
- // GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors
- GRPCKeepAliveTime = 2 * GRPCKeepAliveEnforcementMinimum
+ defaultGRPCKeepAliveEnforcementMinimum = 10 * time.Second
)
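+// GetGRPCKeepAliveEnforcementMinimum returns the MinTime used for gRPC keepalive enforcement,
+// honoring an ARGOCD_GRPC_KEEP_ALIVE_MIN override when it parses as a valid duration.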
+func GetGRPCKeepAliveEnforcementMinimum() time.Duration {
+ if GRPCKeepAliveMinStr := os.Getenv(EnvGRPCKeepAliveMin); GRPCKeepAliveMinStr != "" {
+ GRPCKeepAliveMin, err := time.ParseDuration(GRPCKeepAliveMinStr)
+ if err != nil {
+ logrus.Warnf("invalid env var value for %s: cannot parse: %s. Default value %s will be used.", EnvGRPCKeepAliveMin, err, defaultGRPCKeepAliveEnforcementMinimum)
+ return defaultGRPCKeepAliveEnforcementMinimum
+ }
+ return GRPCKeepAliveMin
+ }
+ return defaultGRPCKeepAliveEnforcementMinimum
+}
+
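+// GetGRPCKeepAliveTime returns the gRPC keepalive time, kept at twice the enforcement minimum.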
+func GetGRPCKeepAliveTime() time.Duration {
+ // GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors
+ return 2 * GetGRPCKeepAliveEnforcementMinimum()
+}
+
// Security severity logging
const (
SecurityField = "security"
diff --git a/common/common_test.go b/common/common_test.go
new file mode 100644
index 0000000000000..5632c1e7a78cc
--- /dev/null
+++ b/common/common_test.go
@@ -0,0 +1,46 @@
+package common
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Test env var not set for EnvGRPCKeepAliveMin
+func Test_GRPCKeepAliveMinNotSet(t *testing.T) {
+ grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
+ grpcKeepAliveExpectedMin := defaultGRPCKeepAliveEnforcementMinimum
+ assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)
+
+ grpcKeepAliveTime := GetGRPCKeepAliveTime()
+ assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
+}
+
+// Test valid env var set for EnvGRPCKeepAliveMin
+func Test_GRPCKeepAliveMinIsSet(t *testing.T) {
+ numSeconds := 15
+ os.Setenv(EnvGRPCKeepAliveMin, fmt.Sprintf("%ds", numSeconds))
+
+ grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
+ grpcKeepAliveExpectedMin := time.Duration(numSeconds) * time.Second
+ assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)
+
+ grpcKeepAliveTime := GetGRPCKeepAliveTime()
+ assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
+}
+
+// Test invalid env var set for EnvGRPCKeepAliveMin
+func Test_GRPCKeepAliveMinIncorrectlySet(t *testing.T) {
+ numSeconds := 15
+ os.Setenv(EnvGRPCKeepAliveMin, fmt.Sprintf("%d", numSeconds))
+
+ grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
+ grpcKeepAliveExpectedMin := defaultGRPCKeepAliveEnforcementMinimum
+ assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)
+
+ grpcKeepAliveTime := GetGRPCKeepAliveTime()
+ assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
+}
diff --git a/controller/appcontroller.go b/controller/appcontroller.go
index afa2a2d7b8186..f038b770c29c4 100644
--- a/controller/appcontroller.go
+++ b/controller/appcontroller.go
@@ -3,8 +3,10 @@ package controller
import (
"context"
"encoding/json"
+ goerrors "errors"
"fmt"
"math"
+ "math/rand"
"net/http"
"reflect"
"runtime/debug"
@@ -46,7 +48,6 @@ import (
"github.com/argoproj/argo-cd/v2/controller/sharding"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
- argov1alpha "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions/application/v1alpha1"
applisters "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
@@ -57,6 +58,7 @@ import (
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
+ "github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
@@ -68,7 +70,7 @@ import (
const (
updateOperationStateTimeout = 1 * time.Second
- defaultDeploymentInformerResyncDuration = 10
+ defaultDeploymentInformerResyncDuration = 10 * time.Second
// orphanedIndex contains application which monitor orphaned resources by namespace
orphanedIndex = "orphaned"
)
@@ -111,11 +113,11 @@ type ApplicationController struct {
appInformer cache.SharedIndexInformer
appLister applisters.ApplicationLister
projInformer cache.SharedIndexInformer
- deploymentInformer informerv1.DeploymentInformer
appStateManager AppStateManager
stateCache statecache.LiveStateCache
statusRefreshTimeout time.Duration
statusHardRefreshTimeout time.Duration
+ statusRefreshJitter time.Duration
selfHealTimeout time.Duration
repoClientset apiclient.Clientset
db db.ArgoDB
@@ -124,9 +126,13 @@ type ApplicationController struct {
refreshRequestedAppsMutex *sync.Mutex
metricsServer *metrics.MetricsServer
kubectlSemaphore *semaphore.Weighted
- clusterFilter func(cluster *appv1.Cluster) bool
+ clusterSharding sharding.ClusterShardingCache
projByNameCache sync.Map
applicationNamespaces []string
+
+ // dynamicClusterDistributionEnabled toggles dynamic cluster distribution; when disabled, deploymentInformer is never initialized
+ dynamicClusterDistributionEnabled bool
+ deploymentInformer informerv1.DeploymentInformer
}
// NewApplicationController creates new instance of ApplicationController.
@@ -140,39 +146,50 @@ func NewApplicationController(
kubectl kube.Kubectl,
appResyncPeriod time.Duration,
appHardResyncPeriod time.Duration,
+ appResyncJitter time.Duration,
selfHealTimeout time.Duration,
+ repoErrorGracePeriod time.Duration,
metricsPort int,
metricsCacheExpiration time.Duration,
metricsApplicationLabels []string,
kubectlParallelismLimit int64,
persistResourceHealth bool,
- clusterFilter func(cluster *appv1.Cluster) bool,
+ clusterSharding sharding.ClusterShardingCache,
applicationNamespaces []string,
+ rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
+ serverSideDiff bool,
+ dynamicClusterDistributionEnabled bool,
) (*ApplicationController, error) {
- log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod)
+ log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
+ if rateLimiterConfig == nil {
+ rateLimiterConfig = ratelimiter.GetDefaultAppRateLimiterConfig()
+ log.Info("Using default workqueue rate limiter config")
+ }
ctrl := ApplicationController{
- cache: argoCache,
- namespace: namespace,
- kubeClientset: kubeClientset,
- kubectl: kubectl,
- applicationClientset: applicationClientset,
- repoClientset: repoClientset,
- appRefreshQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_reconciliation_queue"),
- appOperationQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_operation_processing_queue"),
- projectRefreshQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "project_reconciliation_queue"),
- appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
- db: db,
- statusRefreshTimeout: appResyncPeriod,
- statusHardRefreshTimeout: appHardResyncPeriod,
- refreshRequestedApps: make(map[string]CompareWith),
- refreshRequestedAppsMutex: &sync.Mutex{},
- auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
- settingsMgr: settingsMgr,
- selfHealTimeout: selfHealTimeout,
- clusterFilter: clusterFilter,
- projByNameCache: sync.Map{},
- applicationNamespaces: applicationNamespaces,
+ cache: argoCache,
+ namespace: namespace,
+ kubeClientset: kubeClientset,
+ kubectl: kubectl,
+ applicationClientset: applicationClientset,
+ repoClientset: repoClientset,
+ appRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
+ appOperationQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
+ projectRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
+ appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
+ db: db,
+ statusRefreshTimeout: appResyncPeriod,
+ statusHardRefreshTimeout: appHardResyncPeriod,
+ statusRefreshJitter: appResyncJitter,
+ refreshRequestedApps: make(map[string]CompareWith),
+ refreshRequestedAppsMutex: &sync.Mutex{},
+ auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
+ settingsMgr: settingsMgr,
+ selfHealTimeout: selfHealTimeout,
+ clusterSharding: clusterSharding,
+ projByNameCache: sync.Map{},
+ applicationNamespaces: applicationNamespaces,
+ dynamicClusterDistributionEnabled: dynamicClusterDistributionEnabled,
}
if kubectlParallelismLimit > 0 {
ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
@@ -181,10 +198,11 @@ func NewApplicationController(
appInformer, appLister := ctrl.newApplicationInformerAndLister()
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, indexers)
- projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ var err error
+ _, err = projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
- ctrl.projectRefreshQueue.Add(key)
+ ctrl.projectRefreshQueue.AddRateLimited(key)
if projMeta, ok := obj.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}
@@ -193,7 +211,7 @@ func NewApplicationController(
},
UpdateFunc: func(old, new interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(new); err == nil {
- ctrl.projectRefreshQueue.Add(key)
+ ctrl.projectRefreshQueue.AddRateLimited(key)
if projMeta, ok := new.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}
@@ -201,6 +219,7 @@ func NewApplicationController(
},
DeleteFunc: func(obj interface{}) {
if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
+ // immediately push to queue for deletes
ctrl.projectRefreshQueue.Add(key)
if projMeta, ok := obj.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
@@ -208,34 +227,45 @@ func NewApplicationController(
}
},
})
+ if err != nil {
+ return nil, err
+ }
factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
- deploymentInformer := factory.Apps().V1().Deployments()
+
+ var deploymentInformer informerv1.DeploymentInformer
+
+ // only initialize deployment informer if dynamic distribution is enabled
+ if dynamicClusterDistributionEnabled {
+ deploymentInformer = factory.Apps().V1().Deployments()
+ }
readinessHealthCheck := func(r *http.Request) error {
- applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
- appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
- if err != nil {
- if kubeerrors.IsNotFound(err) {
- appControllerDeployment = nil
- } else {
- return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
- }
- }
- if appControllerDeployment != nil {
- if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
- return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
+ if dynamicClusterDistributionEnabled {
+ applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
+ appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
+ if err != nil {
+ if kubeerrors.IsNotFound(err) {
+ appControllerDeployment = nil
+ } else {
+ return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
+ }
}
- shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
- if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
- return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
+ if appControllerDeployment != nil {
+ if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
+ return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
+ }
+ shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
+ if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
+ return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
+ }
}
}
return nil
}
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
- var err error
+
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
if err != nil {
return nil, err
@@ -246,8 +276,8 @@ func NewApplicationController(
return nil, err
}
}
- stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking())
- appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth)
+ stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
+ appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff)
ctrl.appInformer = appInformer
ctrl.appLister = appLister
ctrl.projInformer = projInformer
@@ -756,7 +786,18 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
go ctrl.appInformer.Run(ctx.Done())
go ctrl.projInformer.Run(ctx.Done())
- go ctrl.deploymentInformer.Informer().Run(ctx.Done())
+
+ if ctrl.dynamicClusterDistributionEnabled {
+ // only start deployment informer if dynamic distribution is enabled
+ go ctrl.deploymentInformer.Informer().Run(ctx.Done())
+ }
+
+ clusters, err := ctrl.db.ListClusters(ctx)
+ if err != nil {
+ log.Warnf("Cannot init sharding. Error while querying clusters list from database: %v", err)
+ } else {
+ ctrl.clusterSharding.Init(clusters)
+ }
errors.CheckError(ctrl.stateCache.Init())
@@ -811,8 +852,8 @@ func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith
ctrl.appRefreshQueue.AddAfter(key, *after)
ctrl.appOperationQueue.AddAfter(key, *after)
} else {
- ctrl.appRefreshQueue.Add(key)
- ctrl.appOperationQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
+ ctrl.appOperationQueue.AddRateLimited(key)
}
}
}
@@ -871,11 +912,10 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b
if app.Operation != nil {
ctrl.processRequestedAppOperation(app)
- } else if app.DeletionTimestamp != nil && app.CascadedDeletion() {
- _, err = ctrl.finalizeApplicationDeletion(app, func(project string) ([]*appv1.Cluster, error) {
+ } else if app.DeletionTimestamp != nil {
+ if err = ctrl.finalizeApplicationDeletion(app, func(project string) ([]*appv1.Cluster, error) {
return ctrl.db.GetProjectClusters(context.Background(), project)
- })
- if err != nil {
+ }); err != nil {
ctrl.setAppCondition(app, appv1.ApplicationCondition{
Type: appv1.ApplicationConditionDeletionError,
Message: err.Error(),
@@ -1010,57 +1050,63 @@ func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Applica
return objsMap, nil
}
-func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application, projectClusters func(project string) ([]*appv1.Cluster, error)) ([]*unstructured.Unstructured, error) {
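+// isValidDestination reports whether the Application's destination resolves to a cluster managed
+// by Argo CD and, when it does, returns that cluster.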
+func (ctrl *ApplicationController) isValidDestination(app *appv1.Application) (bool, *appv1.Cluster) {
+ // Validate the cluster using the Application destination's `name` field, if applicable,
+ // and set the Server field, if needed.
+ if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
+ log.Warnf("Unable to validate destination of the Application being deleted: %v", err)
+ return false, nil
+ }
+
+ cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
+ if err != nil {
+ log.Warnf("Unable to locate cluster URL for Application being deleted: %v", err)
+ return false, nil
+ }
+ return true, cluster
+}
+
+func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application, projectClusters func(project string) ([]*appv1.Cluster, error)) error {
logCtx := log.WithField("application", app.QualifiedName())
- logCtx.Infof("Deleting resources")
// Get refreshed application info, since informer app copy might be stale
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{})
if err != nil {
if !apierr.IsNotFound(err) {
logCtx.Errorf("Unable to get refreshed application info prior deleting resources: %v", err)
}
- return nil, nil
+ return nil
}
proj, err := ctrl.getAppProj(app)
if err != nil {
- return nil, err
- }
-
- // validDestination is true if the Application destination points to a cluster that is managed by Argo CD
- // (and thus either a cluster secret exists for it, or it's local); validDestination is false otherwise.
- validDestination := true
-
- // Validate the cluster using the Application destination's `name` field, if applicable,
- // and set the Server field, if needed.
- if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
- log.Warnf("Unable to validate destination of the Application being deleted: %v", err)
- validDestination = false
+ return err
}
- objs := make([]*unstructured.Unstructured, 0)
- var cluster *appv1.Cluster
-
- // Attempt to validate the destination via its URL
- if validDestination {
- if cluster, err = ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server); err != nil {
- log.Warnf("Unable to locate cluster URL for Application being deleted: %v", err)
- validDestination = false
+ isValid, cluster := ctrl.isValidDestination(app)
+ if !isValid {
+ app.UnSetCascadedDeletion()
+ app.UnSetPostDeleteFinalizer()
+ if err := ctrl.updateFinalizers(app); err != nil {
+ return err
}
+ logCtx.Infof("Resource entries removed from undefined cluster")
+ return nil
}
+ config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig())
- if validDestination {
+ if app.CascadedDeletion() {
+ logCtx.Infof("Deleting resources")
// ApplicationDestination points to a valid cluster, so we may clean up the live objects
-
+ objs := make([]*unstructured.Unstructured, 0)
objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters)
if err != nil {
- return nil, err
+ return err
}
for k := range objsMap {
// Wait for objects pending deletion to complete before proceeding with next sync wave
if objsMap[k].GetDeletionTimestamp() != nil {
logCtx.Infof("%d objects remaining for deletion", len(objsMap))
- return objs, nil
+ return nil
}
if ctrl.shouldBeDeleted(app, objsMap[k]) {
@@ -1068,8 +1114,6 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
}
}
- config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig())
-
filteredObjs := FilterObjectsForDeletion(objs)
propagationPolicy := metav1.DeletePropagationForeground
@@ -1083,12 +1127,12 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
return ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
})
if err != nil {
- return objs, err
+ return err
}
objsMap, err = ctrl.getPermittedAppLiveObjects(app, proj, projectClusters)
if err != nil {
- return nil, err
+ return err
}
for k, obj := range objsMap {
@@ -1098,38 +1142,67 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
}
if len(objsMap) > 0 {
logCtx.Infof("%d objects remaining for deletion", len(objsMap))
- return objs, nil
+ return nil
}
+ logCtx.Infof("Successfully deleted %d resources", len(objs))
+ app.UnSetCascadedDeletion()
+ return ctrl.updateFinalizers(app)
}
- if err := ctrl.cache.SetAppManagedResources(app.Name, nil); err != nil {
- return objs, err
- }
+ if app.HasPostDeleteFinalizer() {
+ objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters)
+ if err != nil {
+ return err
+ }
- if err := ctrl.cache.SetAppResourcesTree(app.Name, nil); err != nil {
- return objs, err
+ done, err := ctrl.executePostDeleteHooks(app, proj, objsMap, config, logCtx)
+ if err != nil {
+ return err
+ }
+ if !done {
+ return nil
+ }
+ app.UnSetPostDeleteFinalizer()
+ return ctrl.updateFinalizers(app)
}
- if err := ctrl.removeCascadeFinalizer(app); err != nil {
- return objs, err
+ if app.HasPostDeleteFinalizer("cleanup") {
+ objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters)
+ if err != nil {
+ return err
+ }
+
+ done, err := ctrl.cleanupPostDeleteHooks(objsMap, config, logCtx)
+ if err != nil {
+ return err
+ }
+ if !done {
+ return nil
+ }
+ app.UnSetPostDeleteFinalizer("cleanup")
+ return ctrl.updateFinalizers(app)
}
- if validDestination {
- logCtx.Infof("Successfully deleted %d resources", len(objs))
- } else {
- logCtx.Infof("Resource entries removed from undefined cluster")
+ if !app.CascadedDeletion() && !app.HasPostDeleteFinalizer() {
+ if err := ctrl.cache.SetAppManagedResources(app.Name, nil); err != nil {
+ return err
+ }
+
+ if err := ctrl.cache.SetAppResourcesTree(app.Name, nil); err != nil {
+ return err
+ }
+ ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, app.Spec.GetProject()))
}
- ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, app.Spec.GetProject()))
- return objs, nil
+ return nil
}
-func (ctrl *ApplicationController) removeCascadeFinalizer(app *appv1.Application) error {
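+// updateFinalizers persists the finalizers currently set on the in-memory app by patching the
+// Application's metadata.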
+func (ctrl *ApplicationController) updateFinalizers(app *appv1.Application) error {
_, err := ctrl.getAppProj(app)
if err != nil {
return fmt.Errorf("error getting project: %w", err)
}
- app.UnSetCascadedDeletion()
+
var patch []byte
patch, _ = json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
@@ -1319,8 +1392,7 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
}
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
- appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
- _, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
+ _, err := ctrl.PatchAppWithWriteBack(context.Background(), app.Name, app.Namespace, types.MergePatchType, patchJSON, metav1.PatchOptions{})
if err != nil {
// Stop retrying updating deleted application
if apierr.IsNotFound(err) {
@@ -1358,6 +1430,27 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
}
}
+// writeBackToInformer writes a recently updated App back into the informer cache.
+// This prevents the situation where the controller operates on a stale app and repeats work
+func (ctrl *ApplicationController) writeBackToInformer(app *appv1.Application) {
+ logCtx := log.WithFields(log.Fields{"application": app.Name, "appNamespace": app.Namespace, "project": app.Spec.Project, "informer-writeBack": true})
+ err := ctrl.appInformer.GetStore().Update(app)
+ if err != nil {
+ logCtx.Errorf("failed to update informer store: %v", err)
+ return
+ }
+}
+
+// PatchAppWithWriteBack patches an application and writes it back to the informer cache
+func (ctrl *ApplicationController) PatchAppWithWriteBack(ctx context.Context, name, ns string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appv1.Application, err error) {
+ patchedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ns).Patch(ctx, name, pt, data, opts, subresources...)
+ if err != nil {
+ return patchedApp, err
+ }
+ ctrl.writeBackToInformer(patchedApp)
+ return patchedApp, err
+}
+
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
patchMs := time.Duration(0) // time spent in doing patch/update calls
setOpMs := time.Duration(0) // time spent in doing Operation patch calls in autosync
@@ -1480,10 +1573,15 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
}
now := metav1.Now()
- compareResult := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
+ compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
refreshType == appv1.RefreshTypeHard,
comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)
+ if goerrors.Is(err, CompareStateRepoError) {
+ logCtx.Warnf("Ignoring temporary failed attempt to compare app state against repo: %v", err)
+ return // short circuit if git error is encountered
+ }
+
for k, v := range compareResult.timings {
logCtx = logCtx.WithField(k, v.Milliseconds())
}
@@ -1528,6 +1626,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
app.Status.SourceTypes = compareResult.appSourceTypes
app.Status.ControllerNamespace = ctrl.namespace
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
+ if (compareResult.hasPostDeleteHooks != app.HasPostDeleteFinalizer() || compareResult.hasPostDeleteHooks != app.HasPostDeleteFinalizer("cleanup")) &&
+ app.GetDeletionTimestamp() == nil {
+ if compareResult.hasPostDeleteHooks {
+ app.SetPostDeleteFinalizer()
+ app.SetPostDeleteFinalizer("cleanup")
+ } else {
+ app.UnSetPostDeleteFinalizer()
+ app.UnSetPostDeleteFinalizer("cleanup")
+ }
+
+ if err := ctrl.updateFinalizers(app); err != nil {
+ logCtx.Errorf("Failed to update finalizers: %v", err)
+ }
+ }
return
}
@@ -1551,6 +1663,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
var reason string
compareWith := CompareWithLatest
refreshType := appv1.RefreshTypeNormal
+
softExpired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
hardExpired := (app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusHardRefreshTimeout).Before(time.Now().UTC())) && statusHardRefreshTimeout.Seconds() != 0
@@ -1569,7 +1682,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
} else if hardExpired || softExpired {
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
- //TODO: find existing Golang bug or create a new one
+ // TODO: find existing Golang bug or create a new one
reconciledAtStr := "never"
if app.Status.ReconciledAt != nil {
reconciledAtStr = app.Status.ReconciledAt.String()
@@ -1631,8 +1744,7 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica
if err != nil {
logCtx.Errorf("error constructing app spec patch: %v", err)
} else if modified {
- appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
- _, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
+ _, err := ctrl.PatchAppWithWriteBack(context.Background(), app.Name, app.Namespace, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
logCtx.Errorf("Error persisting normalized application spec: %v", err)
} else {
@@ -1676,8 +1788,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
defer func() {
patchMs = time.Since(start)
}()
- appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
- _, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
+ _, err = ctrl.PatchAppWithWriteBack(context.Background(), orig.Name, orig.Namespace, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
logCtx.Warnf("Error updating application: %v", err)
} else {
@@ -1787,11 +1898,20 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
start := time.Now()
- _, err := argo.SetAppOperation(appIf, app.Name, &op)
+ updatedApp, err := argo.SetAppOperation(appIf, app.Name, &op)
setOpTime := time.Since(start)
if err != nil {
+ if goerrors.Is(err, argo.ErrAnotherOperationInProgress) {
+ // skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
+ // it is safe to skip auto-sync because it is already running
+ logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
+ return nil, 0
+ }
+
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
+ } else {
+ ctrl.writeBackToInformer(updatedApp)
}
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "")
@@ -1884,15 +2004,11 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
}
}
- if ctrl.clusterFilter != nil {
- cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
- if err != nil {
- return ctrl.clusterFilter(nil)
- }
- return ctrl.clusterFilter(cluster)
+ cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
+ if err != nil {
+ return ctrl.clusterSharding.IsManagedCluster(nil)
}
-
- return true
+ return ctrl.clusterSharding.IsManagedCluster(cluster)
}
func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {
@@ -1979,7 +2095,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
},
)
lister := applisters.NewApplicationLister(informer.GetIndexer())
- informer.AddEventHandler(
+ _, err := informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if !ctrl.canProcessApp(obj) {
@@ -1987,8 +2103,8 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
}
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
- ctrl.appRefreshQueue.Add(key)
- ctrl.appOperationQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
+ ctrl.appOperationQueue.AddRateLimited(key)
}
},
UpdateFunc: func(old, new interface{}) {
@@ -2000,15 +2116,26 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
if err != nil {
return
}
+
var compareWith *CompareWith
+ var delay *time.Duration
+
oldApp, oldOK := old.(*appv1.Application)
newApp, newOK := new.(*appv1.Application)
- if oldOK && newOK && automatedSyncEnabled(oldApp, newApp) {
- log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync")
- compareWith = CompareWithLatest.Pointer()
+ if oldOK && newOK {
+ if automatedSyncEnabled(oldApp, newApp) {
+ log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync")
+ compareWith = CompareWithLatest.Pointer()
+ }
+ if ctrl.statusRefreshJitter != 0 && oldApp.ResourceVersion == newApp.ResourceVersion {
+ // Handler is refreshing the apps; add a random jitter to spread the load and avoid spikes
+ jitter := time.Duration(float64(ctrl.statusRefreshJitter) * rand.Float64())
+ delay = &jitter
+ }
}
- ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, nil)
- ctrl.appOperationQueue.Add(key)
+
+ ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, delay)
+ ctrl.appOperationQueue.AddRateLimited(key)
},
DeleteFunc: func(obj interface{}) {
if !ctrl.canProcessApp(obj) {
@@ -2018,11 +2145,15 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
+ // for deletes, we immediately add to the refresh queue
ctrl.appRefreshQueue.Add(key)
}
},
},
)
+ if err != nil {
+ return nil, nil
+ }
return informer, lister
}
@@ -2040,7 +2171,7 @@ func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1
}
func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
- updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace)
+ updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterSharding.IsManagedCluster, ctrl.getAppProj, ctrl.namespace)
go updater.Run(ctx)
}
@@ -2092,4 +2223,4 @@ func (ctrl *ApplicationController) toAppQualifiedName(appName, appNamespace stri
return fmt.Sprintf("%s/%s", appNamespace, appName)
}
-type ClusterFilterFunction func(c *argov1alpha.Cluster, distributionFunction sharding.DistributionFunction) bool
+type ClusterFilterFunction func(c *appv1.Cluster, distributionFunction sharding.DistributionFunction) bool
diff --git a/controller/appcontroller_test.go b/controller/appcontroller_test.go
index cfb2141664348..33a29bc5ca3f8 100644
--- a/controller/appcontroller_test.go
+++ b/controller/appcontroller_test.go
@@ -7,18 +7,22 @@ import (
"testing"
"time"
+ "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
"github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/client-go/rest"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/argo-cd/v2/common"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
+ "github.com/argoproj/argo-cd/v2/controller/sharding"
+ dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
"github.com/argoproj/gitops-engine/pkg/cache/mocks"
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
- "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
@@ -59,7 +63,24 @@ type fakeData struct {
applicationNamespaces []string
}
-func newFakeController(data *fakeData) *ApplicationController {
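+// MockKubectl wraps kube.Kubectl and records which resources the controller creates and deletes so
+// tests can assert on them.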
+type MockKubectl struct {
+ kube.Kubectl
+
+ DeletedResources []kube.ResourceKey
+ CreatedResources []*unstructured.Unstructured
+}
+
+func (m *MockKubectl) CreateResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, obj *unstructured.Unstructured, createOptions metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
+ m.CreatedResources = append(m.CreatedResources, obj)
+ return m.Kubectl.CreateResource(ctx, config, gvk, name, namespace, obj, createOptions, subresources...)
+}
+
+func (m *MockKubectl) DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error {
+ m.DeletedResources = append(m.DeletedResources, kube.NewResourceKey(gvk.Group, gvk.Kind, namespace, name))
+ return m.Kubectl.DeleteResource(ctx, config, gvk, name, namespace, deleteOptions)
+}
+
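+// newFakeController builds an ApplicationController wired to fake clients; a non-nil repoErr makes
+// the mocked repo-server return that error from GenerateManifest calls.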
+func newFakeController(data *fakeData, repoErr error) *ApplicationController {
var clust corev1.Secret
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
if err != nil {
@@ -71,10 +92,18 @@ func newFakeController(data *fakeData) *ApplicationController {
if len(data.manifestResponses) > 0 {
for _, response := range data.manifestResponses {
- mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, nil).Once()
+ if repoErr != nil {
+ mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, repoErr).Once()
+ } else {
+ mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, nil).Once()
+ }
}
} else {
- mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil)
+ if repoErr != nil {
+ mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, repoErr).Once()
+ } else {
+ mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil).Once()
+ }
}
mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}
@@ -101,7 +130,7 @@ func newFakeController(data *fakeData) *ApplicationController {
}
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
- kubectl := &kubetest.MockKubectlCmd{}
+ kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}}
ctrl, err := NewApplicationController(
test.FakeArgoCDNamespace,
settingsMgr,
@@ -115,7 +144,9 @@ func newFakeController(data *fakeData) *ApplicationController {
kubectl,
time.Minute,
time.Hour,
+ time.Second,
time.Minute,
+ time.Second*10,
common.DefaultPortArgoCDMetrics,
data.metricsCacheExpiration,
[]string{},
@@ -123,7 +154,15 @@ func newFakeController(data *fakeData) *ApplicationController {
true,
nil,
data.applicationNamespaces,
+ nil,
+
+ false,
+ false,
)
+ db := &dbmocks.ArgoDB{}
+ db.On("GetApplicationControllerReplicas").Return(1)
+ // Setting a default sharding algorithm for the tests where we cannot set it.
+ ctrl.clusterSharding = sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm)
if err != nil {
panic(err)
}
@@ -327,6 +366,38 @@ metadata:
data:
`
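+// fakePostDeleteHook is a Pod manifest annotated as a PostDelete hook with the HookSucceeded
+// delete policy, used to exercise the post-delete finalizer logic.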
+var fakePostDeleteHook = `
+{
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "name": "post-delete-hook",
+ "namespace": "default",
+ "labels": {
+ "app.kubernetes.io/instance": "my-app"
+ },
+ "annotations": {
+ "argocd.argoproj.io/hook": "PostDelete",
+ "argocd.argoproj.io/hook-delete-policy": "HookSucceeded"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "post-delete-hook",
+ "image": "busybox",
+ "restartPolicy": "Never",
+ "command": [
+ "/bin/sh",
+ "-c",
+ "sleep 5 && echo hello from the post-delete-hook pod"
+ ]
+ }
+ ]
+ }
+}
+`
+
func newFakeApp() *v1alpha1.Application {
return createFakeApp(fakeApp)
}
@@ -361,9 +432,18 @@ func newFakeCM() map[string]interface{} {
return cm
}
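+// newFakePostDeleteHook unmarshals the fakePostDeleteHook manifest into a map.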
+func newFakePostDeleteHook() map[string]interface{} {
+ var cm map[string]interface{}
+ err := yaml.Unmarshal([]byte(fakePostDeleteHook), &cm)
+ if err != nil {
+ panic(err)
+ }
+ return cm
+}
+
func TestAutoSync(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -380,7 +460,7 @@ func TestAutoSync(t *testing.T) {
func TestAutoSyncNotAllowEmpty(t *testing.T) {
app := newFakeApp()
app.Spec.SyncPolicy.Automated.Prune = true
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -393,7 +473,7 @@ func TestAutoSyncAllowEmpty(t *testing.T) {
app := newFakeApp()
app.Spec.SyncPolicy.Automated.Prune = true
app.Spec.SyncPolicy.Automated.AllowEmpty = true
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -407,7 +487,7 @@ func TestSkipAutoSync(t *testing.T) {
// Set current to 'aaaaa', desired to 'aaaa' and mark system OutOfSync
t.Run("PreviouslySyncedToRevision", func(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
@@ -422,7 +502,7 @@ func TestSkipAutoSync(t *testing.T) {
// Verify we skip when we are already Synced (even if revision is different)
t.Run("AlreadyInSyncedState", func(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -438,7 +518,7 @@ func TestSkipAutoSync(t *testing.T) {
t.Run("AutoSyncIsDisabled", func(t *testing.T) {
app := newFakeApp()
app.Spec.SyncPolicy = nil
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -455,7 +535,7 @@ func TestSkipAutoSync(t *testing.T) {
app := newFakeApp()
now := metav1.Now()
app.DeletionTimestamp = &now
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -481,7 +561,7 @@ func TestSkipAutoSync(t *testing.T) {
Source: *app.Spec.Source.DeepCopy(),
},
}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -495,7 +575,7 @@ func TestSkipAutoSync(t *testing.T) {
t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -521,7 +601,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
},
},
}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
@@ -556,7 +636,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
},
},
}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
@@ -609,12 +689,12 @@ func TestFinalizeAppDeletion(t *testing.T) {
// Ensure app can be deleted cascading
t.Run("CascadingDelete", func(t *testing.T) {
app := newFakeApp()
+ app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName)
app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
- }})
-
+ }}, nil)
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -624,9 +704,9 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
- _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
})
assert.NoError(t, err)
@@ -652,6 +732,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
},
}
app := newFakeApp()
+ app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName)
app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
app.Spec.Project = "restricted"
appObj := kube.MustToUnstructured(&app)
@@ -663,7 +744,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
kube.GetResourceKey(appObj): appObj,
kube.GetResourceKey(strayObj): strayObj,
},
- })
+ }, nil)
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
@@ -674,9 +755,9 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
- objs, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
})
assert.NoError(t, err)
@@ -687,18 +768,20 @@ func TestFinalizeAppDeletion(t *testing.T) {
}
// Managed objects must be empty
assert.Empty(t, objsMap)
+
// Loop through all deleted objects, ensure that test-cm is none of them
- for _, o := range objs {
- assert.NotEqual(t, "test-cm", o.GetName())
+ for _, o := range ctrl.kubectl.(*MockKubectl).DeletedResources {
+ assert.NotEqual(t, "test-cm", o.Name)
}
})
t.Run("DeleteWithDestinationClusterName", func(t *testing.T) {
app := newFakeAppWithDestName()
+ app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName)
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
- }})
+ }}, nil)
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -708,9 +791,9 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
- _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
})
assert.NoError(t, err)
@@ -727,7 +810,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
- }})
+ }}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -735,7 +818,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
return defaultReactor.React(action)
})
- _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
})
assert.NoError(t, err)
@@ -756,6 +839,109 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
+ t.Run("PostDelete_HookIsCreated", func(t *testing.T) {
+ app := newFakeApp()
+ app.SetPostDeleteFinalizer()
+ app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
+ ctrl := newFakeController(&fakeData{
+ manifestResponses: []*apiclient.ManifestResponse{{
+ Manifests: []string{fakePostDeleteHook},
+ }},
+ apps: []runtime.Object{app, &defaultProj},
+ managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{}}, nil)
+
+ patched := false
+ fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
+ defaultReactor := fakeAppCs.ReactionChain[0]
+ fakeAppCs.ReactionChain = nil
+ fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
+ return defaultReactor.React(action)
+ })
+ fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
+ patched = true
+ return true, &v1alpha1.Application{}, nil
+ })
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ return []*v1alpha1.Cluster{}, nil
+ })
+ assert.NoError(t, err)
+ // finalizer is not deleted
+ assert.False(t, patched)
+ // post-delete hook is created
+ require.Len(t, ctrl.kubectl.(*MockKubectl).CreatedResources, 1)
+ require.Equal(t, "post-delete-hook", ctrl.kubectl.(*MockKubectl).CreatedResources[0].GetName())
+ })
+
+ t.Run("PostDelete_HookIsExecuted", func(t *testing.T) {
+ app := newFakeApp()
+ app.SetPostDeleteFinalizer()
+ app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
+ liveHook := &unstructured.Unstructured{Object: newFakePostDeleteHook()}
+ require.NoError(t, unstructured.SetNestedField(liveHook.Object, "Succeeded", "status", "phase"))
+ ctrl := newFakeController(&fakeData{
+ manifestResponses: []*apiclient.ManifestResponse{{
+ Manifests: []string{fakePostDeleteHook},
+ }},
+ apps: []runtime.Object{app, &defaultProj},
+ managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
+ kube.GetResourceKey(liveHook): liveHook,
+ }}, nil)
+
+ patched := false
+ fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
+ defaultReactor := fakeAppCs.ReactionChain[0]
+ fakeAppCs.ReactionChain = nil
+ fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
+ return defaultReactor.React(action)
+ })
+ fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
+ patched = true
+ return true, &v1alpha1.Application{}, nil
+ })
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ return []*v1alpha1.Cluster{}, nil
+ })
+ assert.NoError(t, err)
+ // finalizer is removed
+ assert.True(t, patched)
+ })
+
+ t.Run("PostDelete_HookIsDeleted", func(t *testing.T) {
+ app := newFakeApp()
+ app.SetPostDeleteFinalizer("cleanup")
+ app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
+ liveHook := &unstructured.Unstructured{Object: newFakePostDeleteHook()}
+ require.NoError(t, unstructured.SetNestedField(liveHook.Object, "Succeeded", "status", "phase"))
+ ctrl := newFakeController(&fakeData{
+ manifestResponses: []*apiclient.ManifestResponse{{
+ Manifests: []string{fakePostDeleteHook},
+ }},
+ apps: []runtime.Object{app, &defaultProj},
+ managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
+ kube.GetResourceKey(liveHook): liveHook,
+ }}, nil)
+
+ patched := false
+ fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
+ defaultReactor := fakeAppCs.ReactionChain[0]
+ fakeAppCs.ReactionChain = nil
+ fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
+ return defaultReactor.React(action)
+ })
+ fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
+ patched = true
+ return true, &v1alpha1.Application{}, nil
+ })
+ err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
+ return []*v1alpha1.Cluster{}, nil
+ })
+ assert.NoError(t, err)
+ // post-delete hook is deleted
+ require.Len(t, ctrl.kubectl.(*MockKubectl).DeletedResources, 1)
+ require.Equal(t, "post-delete-hook", ctrl.kubectl.(*MockKubectl).DeletedResources[0].Name)
+ // finalizer is not removed
+ assert.False(t, patched)
+ })
}
// TestNormalizeApplication verifies we normalize an application during reconciliation
@@ -791,9 +977,9 @@ func TestNormalizeApplication(t *testing.T) {
{
// Verify we normalize the app because project is missing
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
key, _ := cache.MetaNamespaceKeyFunc(app)
- ctrl.appRefreshQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
@@ -803,7 +989,7 @@ func TestNormalizeApplication(t *testing.T) {
normalized = true
}
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.processAppRefreshQueueItem()
assert.True(t, normalized)
@@ -813,9 +999,9 @@ func TestNormalizeApplication(t *testing.T) {
// Verify we don't unnecessarily normalize app when project is set
app.Spec.Project = "default"
data.apps[0] = app
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
key, _ := cache.MetaNamespaceKeyFunc(app)
- ctrl.appRefreshQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
@@ -825,7 +1011,7 @@ func TestNormalizeApplication(t *testing.T) {
normalized = true
}
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.processAppRefreshQueueItem()
assert.False(t, normalized)
@@ -838,7 +1024,7 @@ func TestHandleAppUpdated(t *testing.T) {
app.Spec.Destination.Server = v1alpha1.KubernetesInternalAPIServerAddr
proj := defaultProj.DeepCopy()
proj.Spec.SourceNamespaces = []string{test.FakeArgoCDNamespace}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}}, nil)
ctrl.handleObjectUpdated(map[string]bool{app.InstanceName(ctrl.namespace): true}, kube.GetObjectRef(kube.MustToUnstructured(app)))
isRequested, level := ctrl.isRefreshRequested(app.QualifiedName())
@@ -865,7 +1051,7 @@ func TestHandleOrphanedResourceUpdated(t *testing.T) {
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &v1alpha1.OrphanedResourcesMonitorSettings{}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}}, nil)
ctrl.handleObjectUpdated(map[string]bool{}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: test.FakeArgoCDNamespace})
@@ -900,7 +1086,7 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
kube.NewResourceKey("apps", "Deployment", "default", "deploy1"): {ResourceNode: orphanedDeploy1},
kube.NewResourceKey("apps", "Deployment", "default", "deploy2"): {ResourceNode: orphanedDeploy2},
},
- })
+ }, nil)
tree, err := ctrl.getResourceTree(app, []*v1alpha1.ResourceDiff{{
Namespace: "default",
Name: "nginx-deployment",
@@ -916,13 +1102,13 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
}
func TestSetOperationStateOnDeletedApp(t *testing.T) {
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
patched := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
- return true, nil, apierr.NewNotFound(schema.GroupResource{}, "my-app")
+ return true, &v1alpha1.Application{}, apierr.NewNotFound(schema.GroupResource{}, "my-app")
})
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
assert.True(t, patched)
@@ -947,16 +1133,16 @@ func TestSetOperationStateLogRetries(t *testing.T) {
t.Cleanup(func() {
logrus.StandardLogger().ReplaceHooks(logrus.LevelHooks{})
})
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
patched := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if !patched {
patched = true
- return true, nil, errors.New("fake error")
+ return true, &v1alpha1.Application{}, errors.New("fake error")
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
assert.True(t, patched)
@@ -998,7 +1184,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
t.Run("no need to refresh just reconciled application", func(t *testing.T) {
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
@@ -1010,7 +1196,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
assert.False(t, needRefresh)
// use a one-off controller so other tests don't have a manual refresh request
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
// refresh app using the 'deepest' requested comparison level
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
@@ -1038,7 +1224,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
app := app.DeepCopy()
// use a one-off controller so other tests don't have a manual refresh request
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)
@@ -1068,7 +1254,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
}
// use a one-off controller so other tests don't have a manual refresh request
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)
@@ -1148,7 +1334,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
}
func TestUpdatedManagedNamespaceMetadata(t *testing.T) {
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
app := newFakeApp()
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{
Labels: map[string]string{
@@ -1172,7 +1358,7 @@ func TestUpdatedManagedNamespaceMetadata(t *testing.T) {
}
func TestUnchangedManagedNamespaceMetadata(t *testing.T) {
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
app := newFakeApp()
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{
Labels: map[string]string{
@@ -1215,7 +1401,7 @@ func TestRefreshAppConditions(t *testing.T) {
t.Run("NoErrorConditions", func(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}, nil)
_, hasErrors := ctrl.refreshAppConditions(app)
assert.False(t, hasErrors)
@@ -1226,7 +1412,7 @@ func TestRefreshAppConditions(t *testing.T) {
app := newFakeApp()
app.Status.SetConditions([]v1alpha1.ApplicationCondition{{Type: v1alpha1.ApplicationConditionExcludedResourceWarning}}, nil)
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}, nil)
_, hasErrors := ctrl.refreshAppConditions(app)
assert.False(t, hasErrors)
@@ -1239,7 +1425,7 @@ func TestRefreshAppConditions(t *testing.T) {
app.Spec.Project = "wrong project"
app.Status.SetConditions([]v1alpha1.ApplicationCondition{{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: "old message"}}, nil)
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}, nil)
_, hasErrors := ctrl.refreshAppConditions(app)
assert.True(t, hasErrors)
@@ -1263,7 +1449,7 @@ func TestUpdateReconciledAt(t *testing.T) {
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
- })
+ }, nil)
key, _ := cache.MetaNamespaceKeyFunc(app)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
@@ -1272,13 +1458,13 @@ func TestUpdateReconciledAt(t *testing.T) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
t.Run("UpdatedOnFullReconciliation", func(t *testing.T) {
receivedPatch = map[string]interface{}{}
ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), nil)
- ctrl.appRefreshQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.processAppRefreshQueueItem()
@@ -1293,7 +1479,7 @@ func TestUpdateReconciledAt(t *testing.T) {
t.Run("NotUpdatedOnPartialReconciliation", func(t *testing.T) {
receivedPatch = map[string]interface{}{}
- ctrl.appRefreshQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
ctrl.processAppRefreshQueueItem()
@@ -1321,9 +1507,9 @@ func TestProjectErrorToCondition(t *testing.T) {
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
- })
+ }, nil)
key, _ := cache.MetaNamespaceKeyFunc(app)
- ctrl.appRefreshQueue.Add(key)
+ ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
ctrl.processAppRefreshQueueItem()
@@ -1340,13 +1526,13 @@ func TestProjectErrorToCondition(t *testing.T) {
func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
app := newFakeApp()
proj := &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
patched := false
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
err := ctrl.finalizeProjectDeletion(proj)
@@ -1356,7 +1542,7 @@ func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
proj := &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{&defaultProj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{&defaultProj}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
@@ -1364,7 +1550,7 @@ func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.AppProject{}, nil
})
err := ctrl.finalizeProjectDeletion(proj)
@@ -1382,14 +1568,14 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1407,7 +1593,7 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
proj := defaultProj
proj.Name = "test-project"
proj.Spec.SourceNamespaces = []string{test.FakeArgoCDNamespace}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &proj}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &proj}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
func() {
@@ -1417,7 +1603,7 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
}()
@@ -1436,14 +1622,14 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{Limit: 1},
}
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1479,14 +1665,14 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
Revision: "abc123",
},
}
- ctrl := newFakeController(data)
+ ctrl := newFakeController(data, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1512,14 +1698,14 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
Revision: "abc123",
},
}
- ctrl := newFakeController(data)
+ ctrl := newFakeController(data, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
- return true, nil, nil
+ return true, &v1alpha1.Application{}, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1539,7 +1725,7 @@ func TestGetAppHosts(t *testing.T) {
Revision: "abc123",
},
}
- ctrl := newFakeController(data)
+ ctrl := newFakeController(data, nil)
mockStateCache := &mockstatecache.LiveStateCache{}
mockStateCache.On("IterateResources", mock.Anything, mock.MatchedBy(func(callback func(res *clustercache.Resource, info *statecache.ResourceInfo)) bool {
// node resource
@@ -1589,15 +1775,15 @@ func TestGetAppHosts(t *testing.T) {
func TestMetricsExpiration(t *testing.T) {
app := newFakeApp()
// Check expiration is disabled by default
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
assert.False(t, ctrl.metricsServer.HasExpiration())
// Check expiration is enabled if set
- ctrl = newFakeController(&fakeData{apps: []runtime.Object{app}, metricsCacheExpiration: 10 * time.Second})
+ ctrl = newFakeController(&fakeData{apps: []runtime.Object{app}, metricsCacheExpiration: 10 * time.Second}, nil)
assert.True(t, ctrl.metricsServer.HasExpiration())
}
func TestToAppKey(t *testing.T) {
- ctrl := newFakeController(&fakeData{})
+ ctrl := newFakeController(&fakeData{}, nil)
tests := []struct {
name string
input string
@@ -1617,7 +1803,7 @@ func TestToAppKey(t *testing.T) {
func Test_canProcessApp(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl.applicationNamespaces = []string{"good"}
t.Run("without cluster filter, good namespace", func(t *testing.T) {
app.Namespace = "good"
@@ -1631,13 +1817,11 @@ func Test_canProcessApp(t *testing.T) {
})
t.Run("with cluster filter, good namespace", func(t *testing.T) {
app.Namespace = "good"
- ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
canProcess := ctrl.canProcessApp(app)
assert.True(t, canProcess)
})
t.Run("with cluster filter, bad namespace", func(t *testing.T) {
app.Namespace = "bad"
- ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
canProcess := ctrl.canProcessApp(app)
assert.False(t, canProcess)
})
@@ -1650,7 +1834,7 @@ func Test_canProcessAppSkipReconcileAnnotation(t *testing.T) {
appSkipReconcileFalse.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "false"}
appSkipReconcileTrue := newFakeApp()
appSkipReconcileTrue.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "true"}
- ctrl := newFakeController(&fakeData{})
+ ctrl := newFakeController(&fakeData{}, nil)
tests := []struct {
name string
input interface{}
@@ -1671,7 +1855,7 @@ func Test_canProcessAppSkipReconcileAnnotation(t *testing.T) {
func Test_syncDeleteOption(t *testing.T) {
app := newFakeApp()
- ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
+ ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
cm := newFakeCM()
t.Run("without delete option object is deleted", func(t *testing.T) {
cmObj := kube.MustToUnstructured(&cm)
@@ -1698,7 +1882,7 @@ func TestAddControllerNamespace(t *testing.T) {
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{},
- })
+ }, nil)
ctrl.processAppRefreshQueueItem()
@@ -1717,7 +1901,7 @@ func TestAddControllerNamespace(t *testing.T) {
apps: []runtime.Object{app, &proj},
manifestResponse: &apiclient.ManifestResponse{},
applicationNamespaces: []string{appNamespace},
- })
+ }, nil)
ctrl.processAppRefreshQueueItem()
diff --git a/controller/cache/cache.go b/controller/cache/cache.go
index 9eac161714089..e3b1d7b77f19d 100644
--- a/controller/cache/cache.go
+++ b/controller/cache/cache.go
@@ -29,6 +29,7 @@ import (
"k8s.io/client-go/tools/cache"
"github.com/argoproj/argo-cd/v2/controller/metrics"
+ "github.com/argoproj/argo-cd/v2/controller/sharding"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo"
@@ -168,7 +169,7 @@ func NewLiveStateCache(
kubectl kube.Kubectl,
metricsServer *metrics.MetricsServer,
onObjectUpdated ObjectUpdatedHandler,
- clusterFilter func(cluster *appv1.Cluster) bool,
+ clusterSharding sharding.ClusterShardingCache,
resourceTracking argo.ResourceTracking) LiveStateCache {
return &liveStateCache{
@@ -179,7 +180,7 @@ func NewLiveStateCache(
kubectl: kubectl,
settingsMgr: settingsMgr,
metricsServer: metricsServer,
- clusterFilter: clusterFilter,
+ clusterSharding: clusterSharding,
resourceTracking: resourceTracking,
}
}
@@ -202,7 +203,7 @@ type liveStateCache struct {
kubectl kube.Kubectl
settingsMgr *settings.SettingsManager
metricsServer *metrics.MetricsServer
- clusterFilter func(cluster *appv1.Cluster) bool
+ clusterSharding sharding.ClusterShardingCache
resourceTracking argo.ResourceTracking
clusters map[string]clustercache.ClusterCache
@@ -722,22 +723,24 @@ func (c *liveStateCache) Run(ctx context.Context) error {
}
func (c *liveStateCache) canHandleCluster(cluster *appv1.Cluster) bool {
- if c.clusterFilter == nil {
- return true
- }
- return c.clusterFilter(cluster)
+ return c.clusterSharding.IsManagedCluster(cluster)
}
func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
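+	// register the cluster with the sharding cache so the shard distribution stays up to date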
+ c.clusterSharding.Add(cluster)
if !c.canHandleCluster(cluster) {
log.Infof("Ignoring cluster %s", cluster.Server)
return
}
-
c.lock.Lock()
_, ok := c.clusters[cluster.Server]
c.lock.Unlock()
if !ok {
+ log.Debugf("Checking if cache %v / cluster %v has appInformer %v", c, cluster, c.appInformer)
+ if c.appInformer == nil {
+ log.Warn("Cannot get a cluster appInformer. The cache may not have been started yet")
+ return
+ }
if c.isClusterHasApps(c.appInformer.GetStore().List(), cluster) {
go func() {
// warm up cache for cluster with apps
@@ -748,6 +751,7 @@ func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
}
func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *appv1.Cluster) {
+ c.clusterSharding.Update(newCluster)
c.lock.Lock()
cluster, ok := c.clusters[newCluster.Server]
c.lock.Unlock()
@@ -790,6 +794,7 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a
func (c *liveStateCache) handleDeleteEvent(clusterServer string) {
c.lock.RLock()
+ c.clusterSharding.Delete(clusterServer)
cluster, ok := c.clusters[clusterServer]
c.lock.RUnlock()
if ok {
diff --git a/controller/cache/cache_test.go b/controller/cache/cache_test.go
index de2d96eb7aa28..53a03ca81995e 100644
--- a/controller/cache/cache_test.go
+++ b/controller/cache/cache_test.go
@@ -21,7 +21,11 @@ import (
"github.com/stretchr/testify/mock"
"k8s.io/client-go/kubernetes/fake"
+ "github.com/argoproj/argo-cd/v2/common"
+ "github.com/argoproj/argo-cd/v2/controller/metrics"
+ "github.com/argoproj/argo-cd/v2/controller/sharding"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+ dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
)
@@ -35,11 +39,13 @@ func TestHandleModEvent_HasChanges(t *testing.T) {
clusterCache := &mocks.ClusterCache{}
clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
clusterCache.On("EnsureSynced").Return(nil).Once()
-
+ db := &dbmocks.ArgoDB{}
+ db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
"https://mycluster": clusterCache,
},
+ clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
}
clustersCache.handleModEvent(&appv1.Cluster{
@@ -56,14 +62,22 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
clusterCache := &mocks.ClusterCache{}
clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
clusterCache.On("EnsureSynced").Return(nil).Once()
-
+ db := &dbmocks.ArgoDB{}
+ db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
- clusters: map[string]cache.ClusterCache{
- "https://mycluster": clusterCache,
- },
- clusterFilter: func(cluster *appv1.Cluster) bool {
- return false
+ db: nil,
+ appInformer: nil,
+ onObjectUpdated: func(managedByApp map[string]bool, ref v1.ObjectReference) {
},
+ kubectl: nil,
+ settingsMgr: &argosettings.SettingsManager{},
+ metricsServer: &metrics.MetricsServer{},
+ // with a single replica the no-op distribution assigns every cluster to shard 0, so this shard manages the cluster
+ clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
+ resourceTracking: nil,
+ clusters: map[string]cache.ClusterCache{"https://mycluster": clusterCache},
+ cacheSettings: cacheSettings{},
+ lock: sync.RWMutex{},
}
clustersCache.handleModEvent(&appv1.Cluster{
@@ -75,18 +89,20 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
Namespaces: []string{"default"},
})
- assert.Len(t, clustersCache.clusters, 0)
+ assert.Len(t, clustersCache.clusters, 1)
}
func TestHandleModEvent_NoChanges(t *testing.T) {
clusterCache := &mocks.ClusterCache{}
clusterCache.On("Invalidate", mock.Anything).Panic("should not invalidate")
clusterCache.On("EnsureSynced").Return(nil).Panic("should not re-sync")
-
+ db := &dbmocks.ArgoDB{}
+ db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
"https://mycluster": clusterCache,
},
+ clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
}
clustersCache.handleModEvent(&appv1.Cluster{
@@ -99,11 +115,11 @@ func TestHandleModEvent_NoChanges(t *testing.T) {
}
func TestHandleAddEvent_ClusterExcluded(t *testing.T) {
+ db := &dbmocks.ArgoDB{}
+ db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
- clusters: map[string]cache.ClusterCache{},
- clusterFilter: func(cluster *appv1.Cluster) bool {
- return false
- },
+ clusters: map[string]cache.ClusterCache{},
+ clusterSharding: sharding.NewClusterSharding(db, 0, 2, common.DefaultShardingAlgorithm),
}
clustersCache.handleAddEvent(&appv1.Cluster{
Server: "https://mycluster",
@@ -118,25 +134,28 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
Server: "https://mycluster",
Config: appv1.ClusterConfig{Username: "bar"},
}
+ db := &dbmocks.ArgoDB{}
+ db.On("GetApplicationControllerReplicas").Return(1)
fakeClient := fake.NewSimpleClientset()
settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
- externalLockRef := sync.RWMutex{}
+ liveStateCacheLock := sync.RWMutex{}
gitopsEngineClusterCache := &mocks.ClusterCache{}
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
testCluster.Server: gitopsEngineClusterCache,
},
- clusterFilter: func(cluster *appv1.Cluster) bool {
- return true
- },
- settingsMgr: settingsMgr,
+ clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
+ settingsMgr: settingsMgr,
// Set the lock here so we can reference it later
// nolint We need to overwrite here to have access to the lock
- lock: externalLockRef,
+ lock: liveStateCacheLock,
}
channel := make(chan string)
// Mocked lock held by the gitops-engine cluster cache
- mockMutex := sync.RWMutex{}
+ gitopsEngineClusterCacheLock := sync.Mutex{}
+ // Ensure completion of both EnsureSynced and Invalidate
+ ensureSyncedCompleted := sync.Mutex{}
+ invalidateCompleted := sync.Mutex{}
// Locks to force trigger condition during test
// Condition order:
// EnsuredSynced -> Locks gitops-engine
@@ -144,40 +163,39 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
// EnsureSynced via sync, newResource, populateResourceInfoHandler -> attempts to Lock liveStateCache
// handleDeleteEvent via cluster.Invalidate -> attempts to Lock gitops-engine
handleDeleteWasCalled := sync.Mutex{}
- engineHoldsLock := sync.Mutex{}
+ engineHoldsEngineLock := sync.Mutex{}
+ ensureSyncedCompleted.Lock()
+ invalidateCompleted.Lock()
handleDeleteWasCalled.Lock()
- engineHoldsLock.Lock()
+ engineHoldsEngineLock.Lock()
+
gitopsEngineClusterCache.On("EnsureSynced").Run(func(args mock.Arguments) {
- // Held by EnsureSync calling into sync and watchEvents
- mockMutex.Lock()
- defer mockMutex.Unlock()
- // Continue Execution of timer func
- engineHoldsLock.Unlock()
- // Wait for handleDeleteEvent to be called triggering the lock
- // on the liveStateCache
+ gitopsEngineClusterCacheLock.Lock()
+ t.Log("EnsureSynced: Engine has engine lock")
+ engineHoldsEngineLock.Unlock()
+ defer gitopsEngineClusterCacheLock.Unlock()
+ // Wait until handleDeleteEvent holds the liveStateCache lock
handleDeleteWasCalled.Lock()
- t.Logf("handleDelete was called, EnsureSynced continuing...")
- handleDeleteWasCalled.Unlock()
- // Try and obtain the lock on the liveStateCache
- alreadyFailed := !externalLockRef.TryLock()
- if alreadyFailed {
- channel <- "DEADLOCKED -- EnsureSynced could not obtain lock on liveStateCache"
- return
- }
- externalLockRef.Lock()
- t.Logf("EnsureSynce was able to lock liveStateCache")
- externalLockRef.Unlock()
+ // Try and obtain the liveStateCache lock
+ clustersCache.lock.Lock()
+ t.Log("EnsureSynced: Engine has LiveStateCache lock")
+ clustersCache.lock.Unlock()
+ ensureSyncedCompleted.Unlock()
}).Return(nil).Once()
+
gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) {
- // If deadlock is fixed should be able to acquire lock here
- alreadyFailed := !mockMutex.TryLock()
- if alreadyFailed {
- channel <- "DEADLOCKED -- Invalidate could not obtain lock on gitops-engine"
- return
- }
- mockMutex.Lock()
- t.Logf("Invalidate was able to lock gitops-engine cache")
- mockMutex.Unlock()
+ // Allow EnsureSynced to continue now that we're in the deadlock condition
+ handleDeleteWasCalled.Unlock()
+ // Wait until gitops engine holds the gitops lock
+ // This prevents timing issues if we reach this point before EnsureSynced has obtained the lock
+ engineHoldsEngineLock.Lock()
+ t.Log("Invalidate: Engine has engine lock")
+ engineHoldsEngineLock.Unlock()
+ // Lock engine lock
+ gitopsEngineClusterCacheLock.Lock()
+ t.Log("Invalidate: Invalidate has engine lock")
+ gitopsEngineClusterCacheLock.Unlock()
+ invalidateCompleted.Unlock()
}).Return()
go func() {
// Start the gitops-engine lock holds
@@ -187,14 +205,14 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
assert.Fail(t, err.Error())
}
}()
- // Wait for EnsureSynced to grab the lock for gitops-engine
- engineHoldsLock.Lock()
- t.Log("EnsureSynced has obtained lock on gitops-engine")
- engineHoldsLock.Unlock()
// Run in background
go clustersCache.handleDeleteEvent(testCluster.Server)
// Allow execution to continue on clusters cache call to trigger lock
- handleDeleteWasCalled.Unlock()
+ ensureSyncedCompleted.Lock()
+ invalidateCompleted.Lock()
+ t.Log("Competing functions were able to obtain locks")
+ invalidateCompleted.Unlock()
+ ensureSyncedCompleted.Unlock()
channel <- "PASSED"
}()
select {
diff --git a/controller/cache/info.go b/controller/cache/info.go
index cf0d12318a447..53512de6b713a 100644
--- a/controller/cache/info.go
+++ b/controller/cache/info.go
@@ -37,6 +37,16 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
}
}
}
+
+ for k, v := range un.GetAnnotations() {
+ if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) {
+ if res.NetworkingInfo == nil {
+ res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{}
+ }
+ res.NetworkingInfo.ExternalURLs = append(res.NetworkingInfo.ExternalURLs, v)
+ }
+ }
+
switch gvk.Group {
case "":
switch gvk.Kind {
@@ -58,15 +68,6 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
populateIstioVirtualServiceInfo(un, res)
}
}
-
- for k, v := range un.GetAnnotations() {
- if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) {
- if res.NetworkingInfo == nil {
- res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{}
- }
- res.NetworkingInfo.ExternalURLs = append(res.NetworkingInfo.ExternalURLs, v)
- }
- }
}
func getIngress(un *unstructured.Unstructured) []v1.LoadBalancerIngress {
@@ -93,7 +94,13 @@ func populateServiceInfo(un *unstructured.Unstructured, res *ResourceInfo) {
if serviceType, ok, err := unstructured.NestedString(un.Object, "spec", "type"); ok && err == nil && serviceType == string(v1.ServiceTypeLoadBalancer) {
ingress = getIngress(un)
}
- res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress}
+
+ var urls []string
+ if res.NetworkingInfo != nil {
+ urls = res.NetworkingInfo.ExternalURLs
+ }
+
+ res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress, ExternalURLs: urls}
}
func getServiceName(backend map[string]interface{}, gvk schema.GroupVersionKind) (string, error) {
@@ -263,7 +270,12 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
targets = append(targets, target)
}
- res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets}
+ var urls []string
+ if res.NetworkingInfo != nil {
+ urls = res.NetworkingInfo.ExternalURLs
+ }
+
+ res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
}
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
@@ -374,7 +386,13 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
if restarts > 0 {
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Restart Count", Value: fmt.Sprintf("%d", restarts)})
}
- res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels()}
+
+ var urls []string
+ if res.NetworkingInfo != nil {
+ urls = res.NetworkingInfo.ExternalURLs
+ }
+
+ res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels(), ExternalURLs: urls}
}
func populateHostNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {
diff --git a/controller/cache/info_test.go b/controller/cache/info_test.go
index 8a06d3745e13b..7b48040009284 100644
--- a/controller/cache/info_test.go
+++ b/controller/cache/info_test.go
@@ -52,7 +52,7 @@ var (
resourceVersion: "123"
uid: "4"
annotations:
- link.argocd.argoproj.io/external-link: http://my-grafana.com/pre-generated-link
+ link.argocd.argoproj.io/external-link: http://my-grafana.example.com/pre-generated-link
spec:
selector:
app: guestbook
@@ -74,7 +74,7 @@ var (
serviceName: not-found-service
servicePort: 443
rules:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
http:
paths:
- backend:
@@ -86,7 +86,7 @@ var (
servicePort: https
path: /
tls:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
secretName: my-tls-secret
status:
loadBalancer:
@@ -101,13 +101,13 @@ var (
namespace: default
uid: "4"
annotations:
- link.argocd.argoproj.io/external-link: http://my-grafana.com/ingress-link
+ link.argocd.argoproj.io/external-link: http://my-grafana.example.com/ingress-link
spec:
backend:
serviceName: not-found-service
servicePort: 443
rules:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
http:
paths:
- backend:
@@ -119,7 +119,7 @@ var (
servicePort: https
path: /
tls:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
secretName: my-tls-secret
status:
loadBalancer:
@@ -138,7 +138,7 @@ var (
serviceName: not-found-service
servicePort: 443
rules:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
http:
paths:
- backend:
@@ -150,7 +150,7 @@ var (
servicePort: https
path: /*
tls:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
secretName: my-tls-secret
status:
loadBalancer:
@@ -169,7 +169,7 @@ var (
serviceName: not-found-service
servicePort: 443
rules:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
http:
paths:
- backend:
@@ -199,7 +199,7 @@ var (
port:
number: 443
rules:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
http:
paths:
- backend:
@@ -215,7 +215,7 @@ var (
name: https
path: /
tls:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
secretName: my-tls-secret
status:
loadBalancer:
@@ -327,7 +327,7 @@ func TestGetLinkAnnotatedServiceInfo(t *testing.T) {
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
TargetLabels: map[string]string{"app": "guestbook"},
Ingress: []v1.LoadBalancerIngress{{Hostname: "localhost"}},
- ExternalURLs: []string{"http://my-grafana.com/pre-generated-link"},
+ ExternalURLs: []string{"http://my-grafana.example.com/pre-generated-link"},
}, info.NetworkingInfo)
}
@@ -381,7 +381,7 @@ func TestGetIngressInfo(t *testing.T) {
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
- ExternalURLs: []string{"https://helm-guestbook.com/"},
+ ExternalURLs: []string{"https://helm-guestbook.example.com/"},
}, info.NetworkingInfo)
}
}
@@ -406,7 +406,7 @@ func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
- ExternalURLs: []string{"https://helm-guestbook.com/", "http://my-grafana.com/ingress-link"},
+ ExternalURLs: []string{"http://my-grafana.example.com/ingress-link", "https://helm-guestbook.example.com/"},
}, info.NetworkingInfo)
}
@@ -430,7 +430,7 @@ func TestGetIngressInfoWildCardPath(t *testing.T) {
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
- ExternalURLs: []string{"https://helm-guestbook.com/"},
+ ExternalURLs: []string{"https://helm-guestbook.example.com/"},
}, info.NetworkingInfo)
}
@@ -454,7 +454,7 @@ func TestGetIngressInfoWithoutTls(t *testing.T) {
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
- ExternalURLs: []string{"http://helm-guestbook.com/"},
+ ExternalURLs: []string{"http://helm-guestbook.example.com/"},
}, info.NetworkingInfo)
}
@@ -563,7 +563,7 @@ func TestExternalUrlWithMultipleSubPaths(t *testing.T) {
namespace: default
spec:
rules:
- - host: helm-guestbook.com
+ - host: helm-guestbook.example.com
http:
paths:
- backend:
@@ -587,7 +587,7 @@ func TestExternalUrlWithMultipleSubPaths(t *testing.T) {
info := &ResourceInfo{}
populateNodeInfo(ingress, info, []string{})
- expectedExternalUrls := []string{"https://helm-guestbook.com/my/sub/path/", "https://helm-guestbook.com/my/sub/path/2", "https://helm-guestbook.com"}
+ expectedExternalUrls := []string{"https://helm-guestbook.example.com/my/sub/path/", "https://helm-guestbook.example.com/my/sub/path/2", "https://helm-guestbook.example.com"}
actualURLs := info.NetworkingInfo.ExternalURLs
sort.Strings(expectedExternalUrls)
sort.Strings(actualURLs)
diff --git a/controller/hook.go b/controller/hook.go
new file mode 100644
index 0000000000000..0c019ac6a1e08
--- /dev/null
+++ b/controller/hook.go
@@ -0,0 +1,158 @@
+package controller
+
+import (
+ "context"
+
+ "github.com/argoproj/gitops-engine/pkg/health"
+ "github.com/argoproj/gitops-engine/pkg/sync/common"
+ "github.com/argoproj/gitops-engine/pkg/sync/hook"
+ "github.com/argoproj/gitops-engine/pkg/utils/kube"
+ log "github.com/sirupsen/logrus"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/client-go/rest"
+
+ "github.com/argoproj/argo-cd/v2/util/lua"
+
+ "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+)
+
+var (
+ postDeleteHook = "PostDelete"
+ postDeleteHooks = map[string]string{
+ "argocd.argoproj.io/hook": postDeleteHook,
+ "helm.sh/hook": "post-delete",
+ }
+)
+
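+// isHook reports whether the object is a gitops-engine sync hook or an Argo CD post-delete hook.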
+func isHook(obj *unstructured.Unstructured) bool {
+ return hook.IsHook(obj) || isPostDeleteHook(obj)
+}
+
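+// isPostDeleteHook checks the object's annotations for the Argo CD "PostDelete" hook
+// or the Helm "post-delete" hook.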
+func isPostDeleteHook(obj *unstructured.Unstructured) bool {
+ if obj == nil || obj.GetAnnotations() == nil {
+ return false
+ }
+ for k, v := range postDeleteHooks {
+ if val, ok := obj.GetAnnotations()[k]; ok && val == v {
+ return true
+ }
+ }
+ return false
+}
+
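+// executePostDeleteHooks creates any post-delete hooks declared in the app's target
+// manifests that are not yet running and reports whether all of them have finished.
+// It returns false while hooks are still being created or are still progressing.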
+func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Application, proj *v1alpha1.AppProject, liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) {
+ appLabelKey, err := ctrl.settingsMgr.GetAppInstanceLabelKey()
+ if err != nil {
+ return false, err
+ }
+ var revisions []string
+ for _, src := range app.Spec.GetSources() {
+ revisions = append(revisions, src.TargetRevision)
+ }
+
+ targets, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj)
+ if err != nil {
+ return false, err
+ }
+ runningHooks := map[kube.ResourceKey]*unstructured.Unstructured{}
+ for key, obj := range liveObjs {
+ if isPostDeleteHook(obj) {
+ runningHooks[key] = obj
+ }
+ }
+
+ expectedHook := map[kube.ResourceKey]*unstructured.Unstructured{}
+ for _, obj := range targets {
+ if obj.GetNamespace() == "" {
+ obj.SetNamespace(app.Spec.Destination.Namespace)
+ }
+ if !isPostDeleteHook(obj) {
+ continue
+ }
+ if runningHook := runningHooks[kube.GetResourceKey(obj)]; runningHook == nil {
+ expectedHook[kube.GetResourceKey(obj)] = obj
+ }
+ }
+ createdCnt := 0
+ for _, obj := range expectedHook {
+ _, err = ctrl.kubectl.CreateResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), obj, v1.CreateOptions{})
+ if err != nil {
+ return false, err
+ }
+ createdCnt++
+ }
+ if createdCnt > 0 {
+ logCtx.Infof("Created %d post-delete hooks", createdCnt)
+ return false, nil
+ }
+ resourceOverrides, err := ctrl.settingsMgr.GetResourceOverrides()
+ if err != nil {
+ return false, err
+ }
+ healthOverrides := lua.ResourceHealthOverrides(resourceOverrides)
+
+ progressingHooksCnt := 0
+ for _, obj := range runningHooks {
+ hookHealth, err := health.GetResourceHealth(obj, healthOverrides)
+ if err != nil {
+ return false, err
+ }
+ // resources without a registered health check return a nil status; treat them as completed
+ if hookHealth != nil && hookHealth.Status == health.HealthStatusProgressing {
+ progressingHooksCnt++
+ }
+ }
+ if progressingHooksCnt > 0 {
+ logCtx.Infof("Waiting for %d post-delete hooks to complete", progressingHooksCnt)
+ return false, nil
+ }
+
+ return true, nil
+}
+
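+// cleanupPostDeleteHooks deletes completed post-delete hooks according to their delete
+// policies (HookSucceeded/HookFailed, evaluated against the aggregated hook health) and
+// returns true once no hooks remain pending deletion.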
+func (ctrl *ApplicationController) cleanupPostDeleteHooks(liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) {
+ resourceOverrides, err := ctrl.settingsMgr.GetResourceOverrides()
+ if err != nil {
+ return false, err
+ }
+ healthOverrides := lua.ResourceHealthOverrides(resourceOverrides)
+
+ pendingDeletionCount := 0
+ aggregatedHealth := health.HealthStatusHealthy
+ var hooks []*unstructured.Unstructured
+ for _, obj := range liveObjs {
+ if !isPostDeleteHook(obj) {
+ continue
+ }
+ hookHealth, err := health.GetResourceHealth(obj, healthOverrides)
+ if err != nil {
+ return false, err
+ }
+ // resources without a registered health check return a nil status; skip them when aggregating
+ if hookHealth != nil && health.IsWorse(aggregatedHealth, hookHealth.Status) {
+ aggregatedHealth = hookHealth.Status
+ }
+ hooks = append(hooks, obj)
+ }
+
+ for _, obj := range hooks {
+ for _, policy := range hook.DeletePolicies(obj) {
+ if (policy == common.HookDeletePolicyHookFailed && aggregatedHealth == health.HealthStatusDegraded) || (policy == common.HookDeletePolicyHookSucceeded && aggregatedHealth == health.HealthStatusHealthy) {
+ pendingDeletionCount++
+ if obj.GetDeletionTimestamp() != nil {
+ continue
+ }
+ logCtx.Infof("Deleting post-delete hook %s/%s", obj.GetNamespace(), obj.GetName())
+ err = ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), v1.DeleteOptions{})
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+
+ }
+ if pendingDeletionCount > 0 {
+ logCtx.Infof("Waiting for %d post-delete hooks to be deleted", pendingDeletionCount)
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/controller/sharding/cache.go b/controller/sharding/cache.go
new file mode 100644
index 0000000000000..d16574accdf8a
--- /dev/null
+++ b/controller/sharding/cache.go
@@ -0,0 +1,163 @@
+package sharding
+
+import (
+ "sync"
+
+ "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+ "github.com/argoproj/argo-cd/v2/util/db"
+ log "github.com/sirupsen/logrus"
+)
+
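+// ClusterShardingCache keeps track of the cluster-to-shard assignment and answers
+// whether a given cluster is managed by the current application controller shard.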
+type ClusterShardingCache interface {
+ Init(clusters *v1alpha1.ClusterList)
+ Add(c *v1alpha1.Cluster)
+ Delete(clusterServer string)
+ Update(c *v1alpha1.Cluster)
+ IsManagedCluster(c *v1alpha1.Cluster) bool
+ GetDistribution() map[string]int
+}
+
+type ClusterSharding struct {
+ Shard int
+ Replicas int
+ Shards map[string]int
+ Clusters map[string]*v1alpha1.Cluster
+ lock sync.RWMutex
+ getClusterShard DistributionFunction
+}
+
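+// NewClusterSharding returns a ClusterShardingCache for the given shard. With a single
+// replica the no-op distribution function is used, so every cluster maps to shard 0.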
+func NewClusterSharding(db db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
+ log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
+ clusterSharding := &ClusterSharding{
+ Shard: shard,
+ Replicas: replicas,
+ Shards: make(map[string]int),
+ Clusters: make(map[string]*v1alpha1.Cluster),
+ }
+ distributionFunction := NoShardingDistributionFunction()
+ if replicas > 1 {
+ log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
+ distributionFunction = GetDistributionFunction(clusterSharding.GetClusterAccessor(), shardingAlgorithm, replicas)
+ } else {
+ log.Info("Processing all cluster shards")
+ }
+ clusterSharding.getClusterShard = distributionFunction
+ return clusterSharding
+}
+
+// IsManagedCluster returns whether the cluster should be processed by the current shard.
+func (s *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
+ return true
+ }
+ clusterShard := 0
+ if shard, ok := s.Shards[c.Server]; ok {
+ clusterShard = shard
+ } else {
+ log.Warnf("The cluster %s has no assigned shard.", c.Server)
+ }
+ log.Debugf("Checking if cluster %s with clusterShard %d should be processed by shard %d", c.Server, clusterShard, s.Shard)
+ return clusterShard == s.Shard
+}
+
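+// Init replaces the cached cluster list with the provided clusters and recomputes
+// the shard distribution.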
+func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList) {
+ sharding.lock.Lock()
+ defer sharding.lock.Unlock()
+ newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
+ for _, c := range clusters.Items {
+ // copy the loop variable so each map entry points to a distinct cluster
+ c := c
+ newClusters[c.Server] = &c
+ }
+ sharding.Clusters = newClusters
+ sharding.updateDistribution()
+}
+
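+// Add caches the cluster and recomputes the distribution when the cluster is new
+// or its shard assignment changed.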
+func (sharding *ClusterSharding) Add(c *v1alpha1.Cluster) {
+ sharding.lock.Lock()
+ defer sharding.lock.Unlock()
+
+ old, ok := sharding.Clusters[c.Server]
+ sharding.Clusters[c.Server] = c
+ if !ok || hasShardingUpdates(old, c) {
+ sharding.updateDistribution()
+ } else {
+ log.Debugf("Skipping sharding distribution update. Cluster already added and shard assignment unchanged")
+ }
+}
+
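+// Delete removes the cluster from the cache and recomputes the distribution if it
+// was present.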
+func (sharding *ClusterSharding) Delete(clusterServer string) {
+ sharding.lock.Lock()
+ defer sharding.lock.Unlock()
+ if _, ok := sharding.Clusters[clusterServer]; ok {
+ delete(sharding.Clusters, clusterServer)
+ delete(sharding.Shards, clusterServer)
+ sharding.updateDistribution()
+ }
+}
+
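+// Update caches the new version of the cluster and recomputes the distribution when
+// the cluster is unknown or its shard assignment changed.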
+func (sharding *ClusterSharding) Update(c *v1alpha1.Cluster) {
+ sharding.lock.Lock()
+ defer sharding.lock.Unlock()
+
+ old, ok := sharding.Clusters[c.Server]
+ sharding.Clusters[c.Server] = c
+ if !ok || hasShardingUpdates(old, c) {
+ sharding.updateDistribution()
+ } else {
+ log.Debugf("Skipping sharding distribution update. No relevant changes")
+ }
+}
+
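+// GetDistribution returns a copy of the current cluster-to-shard mapping.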
+func (sharding *ClusterSharding) GetDistribution() map[string]int {
+ sharding.lock.RLock()
+ shards := sharding.Shards
+ sharding.lock.RUnlock()
+
+ distribution := make(map[string]int, len(shards))
+ for k, v := range shards {
+ distribution[k] = v
+ }
+ return distribution
+}
+
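+// updateDistribution recomputes the shard of every cached cluster, honoring an
+// explicitly requested shard when it is lower than the replica count.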
+func (sharding *ClusterSharding) updateDistribution() {
+ log.Info("Updating cluster shards")
+
+ for _, c := range sharding.Clusters {
+ shard := 0
+ if c.Shard != nil {
+ requestedShard := int(*c.Shard)
+ if requestedShard < sharding.Replicas {
+ shard = requestedShard
+ } else {
+ log.Warnf("Specified cluster shard (%d) for cluster: %s is greater than or equal to the number of available shards (%d). Using shard 0.", requestedShard, c.Server, sharding.Replicas)
+ }
+ } else {
+ shard = sharding.getClusterShard(c)
+ }
+ shard64 := int64(shard)
+ c.Shard = &shard64
+ sharding.Shards[c.Server] = shard
+ }
+}
+
+// hasShardingUpdates returns true if the cluster's shard assignment changed between
+// the old and new versions of the cluster. nil checking is done for the corner case
+// of the in-cluster cluster which may have a nil shard assigned.
+func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
+ if old == nil || new == nil || (old.Shard == nil && new.Shard == nil) {
+ return false
+ }
+ if old.Shard == nil || new.Shard == nil {
+ return true
+ }
+ // compare the shard values rather than the pointers, which would almost always differ
+ return *old.Shard != *new.Shard
+}
+
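+// GetClusterAccessor returns a function that lists the clusters currently held in
+// the sharding cache.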
+func (d *ClusterSharding) GetClusterAccessor() clusterAccessor {
+ return func() []*v1alpha1.Cluster {
+ clusters := make([]*v1alpha1.Cluster, 0, len(d.Clusters))
+ for _, c := range d.Clusters {
+ clusters = append(clusters, c)
+ }
+ return clusters
+ }
+}
diff --git a/controller/sharding/sharding.go b/controller/sharding/sharding.go
index 526896531dbca..2b86ed3f82bc6 100644
--- a/controller/sharding/sharding.go
+++ b/controller/sharding/sharding.go
@@ -40,6 +40,7 @@ const ShardControllerMappingKey = "shardControllerMapping"
type DistributionFunction func(c *v1alpha1.Cluster) int
type ClusterFilterFunction func(c *v1alpha1.Cluster) bool
+type clusterAccessor func() []*v1alpha1.Cluster
// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap.
// It also stores the heartbeat of last synced time of the application controller.
@@ -53,8 +54,7 @@ type shardApplicationControllerMapping struct {
// and returns wheter or not the cluster should be processed by a given shard. It calls the distributionFunction
// to determine which shard will process the cluster, and if the given shard is equal to the calculated shard
// the function will return true.
-func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, shard int) ClusterFilterFunction {
- replicas := db.GetApplicationControllerReplicas()
+func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, replicas, shard int) ClusterFilterFunction {
return func(c *v1alpha1.Cluster) bool {
clusterShard := 0
if c != nil && c.Shard != nil {
@@ -73,14 +73,14 @@ func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, s
// GetDistributionFunction returns which DistributionFunction should be used based on the passed algorithm and
// the current datas.
-func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) DistributionFunction {
- log.Infof("Using filter function: %s", shardingAlgorithm)
- distributionFunction := LegacyDistributionFunction(db)
+func GetDistributionFunction(clusters clusterAccessor, shardingAlgorithm string, replicasCount int) DistributionFunction {
+ log.Debugf("Using filter function: %s", shardingAlgorithm)
+ distributionFunction := LegacyDistributionFunction(replicasCount)
switch shardingAlgorithm {
case common.RoundRobinShardingAlgorithm:
- distributionFunction = RoundRobinDistributionFunction(db)
+ distributionFunction = RoundRobinDistributionFunction(clusters, replicasCount)
case common.LegacyShardingAlgorithm:
- distributionFunction = LegacyDistributionFunction(db)
+ distributionFunction = LegacyDistributionFunction(replicasCount)
default:
log.Warnf("distribution type %s is not supported, defaulting to %s", shardingAlgorithm, common.DefaultShardingAlgorithm)
}
@@ -92,15 +92,21 @@ func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) Distributio
// is lightweight and can be distributed easily, however, it does not ensure an homogenous distribution as
// some shards may get assigned more clusters than others. It is the legacy function distribution that is
// kept for compatibility reasons
-func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
- replicas := db.GetApplicationControllerReplicas()
+func LegacyDistributionFunction(replicas int) DistributionFunction {
return func(c *v1alpha1.Cluster) int {
if replicas == 0 {
+ log.Debugf("Replicas count is : %d, returning -1", replicas)
return -1
}
if c == nil {
+ log.Debug("In-cluster: returning 0")
return 0
}
+ // if Shard is manually set and the assigned value is lower than the number of replicas,
+ // then its value is returned; otherwise the default calculated value is used
+ if c.Shard != nil && int(*c.Shard) < replicas {
+ return int(*c.Shard)
+ }
id := c.ID
log.Debugf("Calculating cluster shard for cluster id: %s", id)
if id == "" {
@@ -121,14 +127,19 @@ func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
// This function ensures an homogenous distribution: each shards got assigned the same number of
// clusters +/-1 , but with the drawback of a reshuffling of clusters accross shards in case of some changes
// in the cluster list
-func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
- replicas := db.GetApplicationControllerReplicas()
+
+func RoundRobinDistributionFunction(clusters clusterAccessor, replicas int) DistributionFunction {
return func(c *v1alpha1.Cluster) int {
if replicas > 0 {
if c == nil { // in-cluster does not necessarly have a secret assigned. So we are receiving a nil cluster here.
return 0
+ }
+ // if Shard is manually set and the assigned value is lower than the number of replicas,
+ // then its value is returned; otherwise the default calculated value is used
+ if c.Shard != nil && int(*c.Shard) < replicas {
+ return int(*c.Shard)
} else {
- clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(db)
+ clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(clusters)
clusterIndex, ok := clusterIndexdByClusterIdMap[c.ID]
if !ok {
log.Warnf("Cluster with id=%s not found in cluster map.", c.ID)
@@ -144,6 +155,12 @@ func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
}
}
+// NoShardingDistributionFunction returns a DistributionFunction that assigns every
+// cluster to shard 0. It is used when sharding is disabled (a single replica) and
+// exists mainly for API compatibility.
+func NoShardingDistributionFunction() DistributionFunction {
+ return func(c *v1alpha1.Cluster) int { return 0 }
+}
+
// InferShard extracts the shard index based on its hostname.
func InferShard() (int, error) {
hostname, err := osHostnameFunction()
@@ -152,33 +169,29 @@ func InferShard() (int, error) {
}
parts := strings.Split(hostname, "-")
if len(parts) == 0 {
- return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
+ log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
+ return 0, nil
}
shard, err := strconv.Atoi(parts[len(parts)-1])
if err != nil {
- return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
+ log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
+ return 0, nil
}
return int(shard), nil
}
-func getSortedClustersList(db db.ArgoDB) []v1alpha1.Cluster {
- ctx := context.Background()
- clustersList, dbErr := db.ListClusters(ctx)
- if dbErr != nil {
- log.Warnf("Error while querying clusters list from database: %v", dbErr)
- return []v1alpha1.Cluster{}
- }
- clusters := clustersList.Items
+func getSortedClustersList(getCluster clusterAccessor) []*v1alpha1.Cluster {
+ clusters := getCluster()
sort.Slice(clusters, func(i, j int) bool {
return clusters[i].ID < clusters[j].ID
})
return clusters
}
-func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
- clusters := getSortedClustersList(db)
+func createClusterIndexByClusterIdMap(getCluster clusterAccessor) map[string]int {
+ clusters := getSortedClustersList(getCluster)
log.Debugf("ClustersList has %d items", len(clusters))
- clusterById := make(map[string]v1alpha1.Cluster)
+ clusterById := make(map[string]*v1alpha1.Cluster)
clusterIndexedByClusterId := make(map[string]int)
for i, cluster := range clusters {
log.Debugf("Adding cluster with id=%s and name=%s to cluster's map", cluster.ID, cluster.Name)
@@ -194,7 +207,6 @@ func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
// If the shard value passed to this function is -1, that is, the shard was not set as an environment variable,
// we default the shard number to 0 for computing the default config map.
func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
-
hostname, err := osHostnameFunction()
if err != nil {
return -1, err
diff --git a/controller/sharding/sharding_test.go b/controller/sharding/sharding_test.go
index a8a25e11c4978..0992f7a9dfd7f 100644
--- a/controller/sharding/sharding_test.go
+++ b/controller/sharding/sharding_test.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"os"
+ "strconv"
"testing"
"time"
@@ -19,18 +20,20 @@ import (
func TestGetShardByID_NotEmptyID(t *testing.T) {
db := &dbmocks.ArgoDB{}
- db.On("GetApplicationControllerReplicas").Return(1)
- assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "1"}))
- assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "2"}))
- assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "3"}))
- assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "4"}))
+ replicasCount := 1
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+ assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "1"}))
+ assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "2"}))
+ assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "3"}))
+ assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "4"}))
}
func TestGetShardByID_EmptyID(t *testing.T) {
db := &dbmocks.ArgoDB{}
- db.On("GetApplicationControllerReplicas").Return(1)
+ replicasCount := 1
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
distributionFunction := LegacyDistributionFunction
- shard := distributionFunction(db)(&v1alpha1.Cluster{})
+ shard := distributionFunction(replicasCount)(&v1alpha1.Cluster{})
assert.Equal(t, 0, shard)
}
@@ -38,7 +41,7 @@ func TestGetShardByID_NoReplicas(t *testing.T) {
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(0)
distributionFunction := LegacyDistributionFunction
- shard := distributionFunction(db)(&v1alpha1.Cluster{})
+ shard := distributionFunction(0)(&v1alpha1.Cluster{})
assert.Equal(t, -1, shard)
}
@@ -46,16 +49,16 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunction(t *testing.T) {
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(0)
distributionFunction := LegacyDistributionFunction
- shard := distributionFunction(db)(&v1alpha1.Cluster{})
+ shard := distributionFunction(0)(&v1alpha1.Cluster{})
assert.Equal(t, -1, shard)
}
func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *testing.T) {
- db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
+ clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
// Test with replicas set to 0
db.On("GetApplicationControllerReplicas").Return(0)
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
- distributionFunction := RoundRobinDistributionFunction(db)
+ distributionFunction := RoundRobinDistributionFunction(clusters, 0)
assert.Equal(t, -1, distributionFunction(nil))
assert.Equal(t, -1, distributionFunction(&cluster1))
assert.Equal(t, -1, distributionFunction(&cluster2))
@@ -65,137 +68,112 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *tes
}
func TestGetClusterFilterDefault(t *testing.T) {
- shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
+ //shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
+ clusterAccessor, _, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
os.Unsetenv(common.EnvControllerShardingAlgorithm)
- db := &dbmocks.ArgoDB{}
- db.On("GetApplicationControllerReplicas").Return(2)
- filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
- assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
- assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
+ replicasCount := 2
+ distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
+ assert.Equal(t, 0, distributionFunction(nil))
+ assert.Equal(t, 0, distributionFunction(&cluster1))
+ assert.Equal(t, 1, distributionFunction(&cluster2))
+ assert.Equal(t, 0, distributionFunction(&cluster3))
+ assert.Equal(t, 1, distributionFunction(&cluster4))
}
func TestGetClusterFilterLegacy(t *testing.T) {
- shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
- db := &dbmocks.ArgoDB{}
- db.On("GetApplicationControllerReplicas").Return(2)
+ //shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
+ clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
+ replicasCount := 2
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
- filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
- assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
- assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
+ distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
+ assert.Equal(t, 0, distributionFunction(nil))
+ assert.Equal(t, 0, distributionFunction(&cluster1))
+ assert.Equal(t, 1, distributionFunction(&cluster2))
+ assert.Equal(t, 0, distributionFunction(&cluster3))
+ assert.Equal(t, 1, distributionFunction(&cluster4))
}
func TestGetClusterFilterUnknown(t *testing.T) {
- shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
- db := &dbmocks.ArgoDB{}
- db.On("GetApplicationControllerReplicas").Return(2)
+ clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
+ // Test with replicas set to 2
+ t.Setenv(common.EnvControllerReplicas, "2")
+ os.Unsetenv(common.EnvControllerShardingAlgorithm)
t.Setenv(common.EnvControllerShardingAlgorithm, "unknown")
- filter := GetClusterFilter(db, GetDistributionFunction(db, "unknown"), shardIndex)
- assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
- assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
+ replicasCount := 2
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+ distributionFunction := GetDistributionFunction(clusterAccessor, "unknown", replicasCount)
+ assert.Equal(t, 0, distributionFunction(nil))
+ assert.Equal(t, 0, distributionFunction(&cluster1))
+ assert.Equal(t, 1, distributionFunction(&cluster2))
+ assert.Equal(t, 0, distributionFunction(&cluster3))
+ assert.Equal(t, 1, distributionFunction(&cluster4))
}
func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
- shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
- db := &dbmocks.ArgoDB{}
- db.On("GetApplicationControllerReplicas").Return(2)
- filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
- assert.False(t, filter(nil))
- assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
- assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
- assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
+ //shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
+ t.Setenv(common.EnvControllerReplicas, "5")
+ clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
+ replicasCount := 5
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+ filter := GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
+ assert.Equal(t, 0, filter(nil))
+ assert.Equal(t, 4, filter(&cluster1))
+ assert.Equal(t, 1, filter(&cluster2))
+ assert.Equal(t, 2, filter(&cluster3))
+ assert.Equal(t, 2, filter(&cluster4))
var fixedShard int64 = 4
- filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
- assert.False(t, filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
+ cluster5 := &v1alpha1.Cluster{ID: "5", Shard: &fixedShard}
+ clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
+ filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
+ assert.Equal(t, int(fixedShard), filter(cluster5))
fixedShard = 1
- filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
- assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
+ cluster5.Shard = &fixedShard
+ clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
+ filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
+ assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
}
func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
- shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
- db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
- db.On("GetApplicationControllerReplicas").Return(2)
- filter := GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex)
- assert.False(t, filter(nil))
- assert.False(t, filter(&cluster1))
- assert.True(t, filter(&cluster2))
- assert.False(t, filter(&cluster3))
- assert.True(t, filter(&cluster4))
+ //shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
+ t.Setenv(common.EnvControllerReplicas, "4")
+ clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
+ replicasCount := 4
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+
+ filter := GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
+ assert.Equal(t, filter(nil), 0)
+ assert.Equal(t, filter(&cluster1), 0)
+ assert.Equal(t, filter(&cluster2), 1)
+ assert.Equal(t, filter(&cluster3), 2)
+ assert.Equal(t, filter(&cluster4), 3)
// a cluster with a fixed shard should be processed by the specified exact
// same shard unless the specified shard index is greater than the number of replicas.
- var fixedShard int64 = 4
- filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
- assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
+ var fixedShard int64 = 1
+ cluster5 := v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
+ clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
+ clusterAccessor = getClusterAccessor(clusters)
+ filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
+ assert.Equal(t, int(fixedShard), filter(&cluster5))
fixedShard = 1
- filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
- assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
-}
-
-func TestGetClusterFilterLegacyHash(t *testing.T) {
- shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
- t.Setenv(common.EnvControllerShardingAlgorithm, "hash")
- db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
- db.On("GetApplicationControllerReplicas").Return(2)
- filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
- assert.False(t, filter(&cluster1))
- assert.True(t, filter(&cluster2))
- assert.False(t, filter(&cluster3))
- assert.True(t, filter(&cluster4))
-
- // a cluster with a fixed shard should be processed by the specified exact
- // same shard unless the specified shard index is greater than the number of replicas.
- var fixedShard int64 = 4
- filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
- assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
-
- fixedShard = 1
- filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
- assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
-}
-
-func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) {
- db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
- shardIndex := 1
- db.On("GetApplicationControllerReplicas").Return(2)
-
- t.Run("legacy", func(t *testing.T) {
- t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
- shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
- assert.False(t, shardShouldProcessCluster(&cluster1))
- assert.True(t, shardShouldProcessCluster(&cluster2))
- assert.False(t, shardShouldProcessCluster(&cluster3))
- assert.True(t, shardShouldProcessCluster(&cluster4))
- assert.False(t, shardShouldProcessCluster(nil))
- })
-
- t.Run("roundrobin", func(t *testing.T) {
- t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
- shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
- assert.False(t, shardShouldProcessCluster(&cluster1))
- assert.True(t, shardShouldProcessCluster(&cluster2))
- assert.False(t, shardShouldProcessCluster(&cluster3))
- assert.True(t, shardShouldProcessCluster(&cluster4))
- assert.False(t, shardShouldProcessCluster(nil))
- })
+ cluster5 = v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
+ clusters = []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
+ clusterAccessor = getClusterAccessor(clusters)
+ filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
+ assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
}
func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
- db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
+ clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
t.Run("replicas set to 1", func(t *testing.T) {
- db.On("GetApplicationControllerReplicas").Return(1).Once()
- distributionFunction := RoundRobinDistributionFunction(db)
+ replicasCount := 1
+ db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
+ distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 0, distributionFunction(&cluster2))
@@ -205,8 +183,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
})
t.Run("replicas set to 2", func(t *testing.T) {
- db.On("GetApplicationControllerReplicas").Return(2).Once()
- distributionFunction := RoundRobinDistributionFunction(db)
+ replicasCount := 2
+ db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
+ distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -216,8 +195,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
})
t.Run("replicas set to 3", func(t *testing.T) {
- db.On("GetApplicationControllerReplicas").Return(3).Once()
- distributionFunction := RoundRobinDistributionFunction(db)
+ replicasCount := 3
+ db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
+ distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -233,17 +213,19 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumber
// Initial tests were showing that under 1024 clusters, execution time was around 400ms
// and for 4096 clusters, execution time was under 9s
// The other implementation was giving almost linear time of 400ms up to 10'000 clusters
- db := dbmocks.ArgoDB{}
- clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{}}
+ clusterPointers := []*v1alpha1.Cluster{}
for i := 0; i < 2048; i++ {
cluster := createCluster(fmt.Sprintf("cluster-%d", i), fmt.Sprintf("%d", i))
- clusterList.Items = append(clusterList.Items, cluster)
+ clusterPointers = append(clusterPointers, &cluster)
}
- db.On("ListClusters", mock.Anything).Return(clusterList, nil)
- db.On("GetApplicationControllerReplicas").Return(2)
- distributionFunction := RoundRobinDistributionFunction(&db)
- for i, c := range clusterList.Items {
- assert.Equal(t, i%2, distributionFunction(&c))
+ replicasCount := 2
+ t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
+ _, db, _, _, _, _, _ := createTestClusters()
+ clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+ distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
+ for i, c := range clusterPointers {
+ assert.Equal(t, i%2, distributionFunction(c))
}
}
@@ -256,12 +238,15 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
cluster5 := createCluster("cluster5", "5")
cluster6 := createCluster("cluster6", "6")
+ clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
+ clusterAccessor := getClusterAccessor(clusters)
+
clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
-
// Test with replicas set to 2
- db.On("GetApplicationControllerReplicas").Return(2)
- distributionFunction := RoundRobinDistributionFunction(&db)
+ replicasCount := 2
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+ distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -272,17 +257,20 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
// Now, the database knows cluster6. Shard should be assigned a proper shard
clusterList.Items = append(clusterList.Items, cluster6)
+ distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
assert.Equal(t, 1, distributionFunction(&cluster6))
// Now, we remove the last added cluster, it should be unassigned as well
clusterList.Items = clusterList.Items[:len(clusterList.Items)-1]
+ distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
assert.Equal(t, -1, distributionFunction(&cluster6))
}
func TestGetShardByIndexModuloReplicasCountDistributionFunction(t *testing.T) {
- db, cluster1, cluster2, _, _, _ := createTestClusters()
- db.On("GetApplicationControllerReplicas").Return(2)
- distributionFunction := RoundRobinDistributionFunction(db)
+ clusters, db, cluster1, cluster2, _, _, _ := createTestClusters()
+ replicasCount := 2
+ db.On("GetApplicationControllerReplicas").Return(replicasCount)
+ distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
// Test that the function returns the correct shard for cluster1 and cluster2
expectedShardForCluster1 := 0
@@ -315,14 +303,14 @@ func TestInferShard(t *testing.T) {
osHostnameFunction = func() (string, error) { return "exampleshard", nil }
_, err = InferShard()
- assert.NotNil(t, err)
+ assert.Nil(t, err)
osHostnameFunction = func() (string, error) { return "example-shard", nil }
_, err = InferShard()
- assert.NotNil(t, err)
+ assert.Nil(t, err)
}
-func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
+func createTestClusters() (clusterAccessor, *dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
db := dbmocks.ArgoDB{}
cluster1 := createCluster("cluster1", "1")
cluster2 := createCluster("cluster2", "2")
@@ -330,10 +318,27 @@ func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster,
cluster4 := createCluster("cluster4", "4")
cluster5 := createCluster("cluster5", "5")
+ clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
+
db.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
cluster1, cluster2, cluster3, cluster4, cluster5,
}}, nil)
- return &db, cluster1, cluster2, cluster3, cluster4, cluster5
+ return getClusterAccessor(clusters), &db, cluster1, cluster2, cluster3, cluster4, cluster5
+}
+
+func getClusterAccessor(clusters []v1alpha1.Cluster) clusterAccessor {
+ // Convert the array to a slice of pointers
+ clusterPointers := getClusterPointers(clusters)
+ clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
+ return clusterAccessor
+}
+
+func getClusterPointers(clusters []v1alpha1.Cluster) []*v1alpha1.Cluster {
+ var clusterPointers []*v1alpha1.Cluster
+ for i := range clusters {
+ clusterPointers = append(clusterPointers, &clusters[i])
+ }
+ return clusterPointers
}
func createCluster(name string, id string) v1alpha1.Cluster {
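The test changes above exercise the refactored sharding API: distribution functions now take an explicit replica count and a cluster accessor closure instead of an ArgoDB handle. Below is a minimal sketch of that wiring, using only the signatures visible in the tests above; the standalone main program and its cluster data are illustrative assumptions and not part of this patch.

```go
// Illustrative sketch only: builds a cluster accessor and asks the sharding
// package for a distribution function, mirroring the getClusterAccessor
// helper added in the tests above.
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	clusters := []v1alpha1.Cluster{
		{Name: "cluster1", ID: "1"},
		{Name: "cluster2", ID: "2"},
		{Name: "cluster3", ID: "3"},
	}

	// Convert to a slice of pointers and wrap it in an accessor closure,
	// exactly as getClusterPointers/getClusterAccessor do in the tests.
	clusterPointers := make([]*v1alpha1.Cluster, 0, len(clusters))
	for i := range clusters {
		clusterPointers = append(clusterPointers, &clusters[i])
	}
	accessor := func() []*v1alpha1.Cluster { return clusterPointers }

	// The replica count is now passed explicitly instead of being read from ArgoDB.
	replicas := 2
	distribute := sharding.GetDistributionFunction(accessor, common.RoundRobinShardingAlgorithm, replicas)
	for i := range clusters {
		fmt.Printf("%s -> shard %d\n", clusters[i].Name, distribute(&clusters[i]))
	}
}
```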
diff --git a/controller/sharding/shuffle_test.go b/controller/sharding/shuffle_test.go
index 9e089e31bad0f..1cca783a2afe9 100644
--- a/controller/sharding/shuffle_test.go
+++ b/controller/sharding/shuffle_test.go
@@ -3,6 +3,7 @@ package sharding
import (
"fmt"
"math"
+ "strconv"
"testing"
"github.com/argoproj/argo-cd/v2/common"
@@ -22,9 +23,11 @@ func TestLargeShuffle(t *testing.T) {
clusterList.Items = append(clusterList.Items, cluster)
}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
+ clusterAccessor := getClusterAccessor(clusterList.Items)
// Test with replicas set to 256
- t.Setenv(common.EnvControllerReplicas, "256")
- distributionFunction := RoundRobinDistributionFunction(&db)
+ replicasCount := 256
+ t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
+ distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
for i, c := range clusterList.Items {
assert.Equal(t, i%2567, distributionFunction(&c))
}
@@ -44,10 +47,11 @@ func TestShuffle(t *testing.T) {
clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5, cluster6}}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
-
+ clusterAccessor := getClusterAccessor(clusterList.Items)
// Test with replicas set to 3
t.Setenv(common.EnvControllerReplicas, "3")
- distributionFunction := RoundRobinDistributionFunction(&db)
+ replicasCount := 3
+ distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
diff --git a/controller/state.go b/controller/state.go
index 19757510aa71d..704411558669b 100644
--- a/controller/state.go
+++ b/controller/state.go
@@ -3,12 +3,15 @@ package controller
import (
"context"
"encoding/json"
+ "errors"
"fmt"
- v1 "k8s.io/api/core/v1"
"reflect"
"strings"
+ goSync "sync"
"time"
+ v1 "k8s.io/api/core/v1"
+
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/sync"
@@ -40,6 +43,10 @@ import (
"github.com/argoproj/argo-cd/v2/util/stats"
)
+var (
+ CompareStateRepoError = errors.New("failed to get repo objects")
+)
+
type resourceInfoProviderStub struct {
}
@@ -62,8 +69,9 @@ type managedResource struct {
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
- CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) *comparisonResult
+ CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
+ GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error)
}
// comparisonResult holds the state of an application after the reconciliation
@@ -78,8 +86,9 @@ type comparisonResult struct {
// appSourceTypes stores the SourceType for each application source under sources field
appSourceTypes []v1alpha1.ApplicationSourceType
// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
- timings map[string]time.Duration
- diffResultList *diff.DiffResultList
+ timings map[string]time.Duration
+ diffResultList *diff.DiffResultList
+ hasPostDeleteHooks bool
}
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
@@ -105,10 +114,16 @@ type appStateManager struct {
statusRefreshTimeout time.Duration
resourceTracking argo.ResourceTracking
persistResourceHealth bool
+ repoErrorCache goSync.Map
+ repoErrorGracePeriod time.Duration
+ serverSideDiff bool
}
-func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {
-
+// GetRepoObjs will generate the manifests for the given application delegating the
+// task to the repo-server. It returns the list of generated manifests as unstructured
+// objects. It also returns the full response from all calls to the repo server as the
+// second argument.
+func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {
ts := stats.NewTimingStats()
helmRepos, err := m.db.ListHelmRepositories(context.Background())
if err != nil {
@@ -224,7 +239,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alp
logCtx = logCtx.WithField(k, v.Milliseconds())
}
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
- logCtx.Info("getRepoObjs stats")
+ logCtx.Info("GetRepoObjs stats")
return targetObjs, manifestInfos, nil
}
@@ -345,7 +360,7 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
-func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) *comparisonResult {
+func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
ts := stats.NewTimingStats()
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
@@ -361,7 +376,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
Revisions: revisions,
},
healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
- }
+ }, nil
} else {
return &comparisonResult{
syncStatus: &v1alpha1.SyncStatus{
@@ -370,7 +385,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
Revision: revisions[0],
},
healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
- }
+ }, nil
}
}
@@ -391,6 +406,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
now := metav1.Now()
var manifestInfos []*apiclient.ManifestResponse
+ targetNsExists := false
if len(localManifests) == 0 {
// If the length of revisions is not same as the length of sources,
@@ -402,12 +418,26 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
}
}
- targetObjs, manifestInfos, err = m.getRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project)
+ targetObjs, manifestInfos, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project)
if err != nil {
targetObjs = make([]*unstructured.Unstructured, 0)
msg := fmt.Sprintf("Failed to load target state: %s", err.Error())
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
+ if firstSeen, ok := m.repoErrorCache.Load(app.Name); ok {
+ if time.Since(firstSeen.(time.Time)) <= m.repoErrorGracePeriod && !noRevisionCache {
+ // if the error was first seen within the grace period and this is not a Level 3 comparison,
+ // ignore the error and short-circuit
+ logCtx.Debugf("Ignoring repo error %v, already encountered error in grace period", err.Error())
+ return nil, CompareStateRepoError
+ }
+ } else if !noRevisionCache {
+ logCtx.Debugf("Ignoring repo error %v, new occurrence", err.Error())
+ m.repoErrorCache.Store(app.Name, time.Now())
+ return nil, CompareStateRepoError
+ }
failedToLoadObjs = true
+ } else {
+ m.repoErrorCache.Delete(app.Name)
}
} else {
// Prevent applying local manifests for now when signature verification is enabled
@@ -453,6 +483,13 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
LastTransitionTime: &now,
})
}
+
+ // If we reach this path, this means that a namespace has been both defined in Git, as well as in the
+ // application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead
+ // of what is present in managedNamespaceMetadata.
+ if isManagedNamespace(targetObj, app) {
+ targetNsExists = true
+ }
}
ts.AddCheckpoint("dedup_ms")
@@ -511,7 +548,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a
// namespace which we can compare the live namespace with. For that, we'll do the same as is done in
// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.
- if isManagedNamespace(liveObj, app) {
+ //
+ // targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the
+ // targetObjs array.
+ if isManagedNamespace(liveObj, app) && !targetNsExists {
nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}
managedNs, err := kubeutil.ToUnstructured(nsSpec)
@@ -532,6 +572,12 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
}
}
}
+ hasPostDeleteHooks := false
+ for _, obj := range targetObjs {
+ if isPostDeleteHook(obj) {
+ hasPostDeleteHooks = true
+ }
+ }
reconciliation := sync.Reconcile(targetObjs, liveObjByKey, app.Spec.Destination.Namespace, infoProvider)
ts.AddCheckpoint("live_ms")
@@ -547,21 +593,29 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
}
- // restore comparison using cached diff result if previous comparison was performed for the same revision
- revisionChanged := len(manifestInfos) != len(sources) || !reflect.DeepEqual(app.Status.Sync.Revisions, manifestRevisions)
- specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences})
+ serverSideDiff := m.serverSideDiff ||
+ resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=true")
- _, refreshRequested := app.IsRefreshRequested()
- noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) || specChanged || revisionChanged
+ // This allows turning SSD off for a given app if it is enabled at the
+ // controller level
+ if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=false") {
+ serverSideDiff = false
+ }
+
+ useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, serverSideDiff, logCtx)
diffConfigBuilder := argodiff.NewDiffConfigBuilder().
WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
WithTracking(appLabelKey, string(trackingMethod))
- if noCache {
- diffConfigBuilder.WithNoCache()
+ if useDiffCache {
+ diffConfigBuilder.WithCache(m.cache, app.InstanceName(m.namespace))
} else {
- diffConfigBuilder.WithCache(m.cache, app.GetName())
+ diffConfigBuilder.WithNoCache()
+ }
+
+ if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "IncludeMutationWebhook=true") {
+ diffConfigBuilder.WithIgnoreMutationWebhook(false)
}
gvkParser, err := m.getGVKParser(app.Spec.Destination.Server)
@@ -571,6 +625,18 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
diffConfigBuilder.WithGVKParser(gvkParser)
diffConfigBuilder.WithManager(common.ArgoCDSSAManager)
+ diffConfigBuilder.WithServerSideDiff(serverSideDiff)
+
+ if serverSideDiff {
+ resourceOps, cleanup, err := m.getResourceOperations(app.Spec.Destination.Server)
+ if err != nil {
+ log.Errorf("CompareAppState error getting resource operations: %s", err)
+ conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
+ }
+ defer cleanup()
+ diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(resourceOps))
+ }
+
// enable structured merge diff if application syncs with server-side apply
if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.SyncOptions.HasOption("ServerSideApply=true") {
diffConfigBuilder.WithStructuredMergeDiff(true)
@@ -611,7 +677,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
Kind: gvk.Kind,
Version: gvk.Version,
Group: gvk.Group,
- Hook: hookutil.IsHook(obj),
+ Hook: isHook(obj),
RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj,
}
if targetObj != nil {
@@ -744,6 +810,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
reconciliationResult: reconciliation,
diffConfig: diffConfig,
diffResultList: diffResults,
+ hasPostDeleteHooks: hasPostDeleteHooks,
}
if hasMultipleSources {
@@ -765,10 +832,64 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
})
ts.AddCheckpoint("health_ms")
compRes.timings = ts.Timings()
- return &compRes
+ return &compRes, nil
+}
+
+// useDiffCache will determine if the diff should be calculated based
+// on the existing live state cache or not.
+func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sources []v1alpha1.ApplicationSource, app *v1alpha1.Application, manifestRevisions []string, statusRefreshTimeout time.Duration, serverSideDiff bool, log *log.Entry) bool {
+
+ if noCache {
+ log.WithField("useDiffCache", "false").Debug("noCache is true")
+ return false
+ }
+ refreshType, refreshRequested := app.IsRefreshRequested()
+ if refreshRequested {
+ log.WithField("useDiffCache", "false").Debugf("refresh type %s requested", string(refreshType))
+ return false
+ }
+ // serverSideDiff should still use cache even if status is expired.
+ // This is an attempt to avoid hitting k8s API server too frequently during
+ // app refresh when serverSideDiff is enabled. If there are negative side
+ // effects identified with this approach, the serverSideDiff should be removed
+ // from this condition.
+ if app.Status.Expired(statusRefreshTimeout) && !serverSideDiff {
+ log.WithField("useDiffCache", "false").Debug("app.status.expired")
+ return false
+ }
+
+ if len(manifestInfos) != len(sources) {
+ log.WithField("useDiffCache", "false").Debug("manifestInfos len != sources len")
+ return false
+ }
+
+ revisionChanged := !reflect.DeepEqual(app.Status.GetRevisions(), manifestRevisions)
+ if revisionChanged {
+ log.WithField("useDiffCache", "false").Debug("revisionChanged")
+ return false
+ }
+
+ currentSpec := app.BuildComparedToStatus()
+ specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
+ if specChanged {
+ log.WithField("useDiffCache", "false").Debug("specChanged")
+ return false
+ }
+
+ log.WithField("useDiffCache", "true").Debug("using diff cache")
+ return true
}
-func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error {
+func (m *appStateManager) persistRevisionHistory(
+ app *v1alpha1.Application,
+ revision string,
+ source v1alpha1.ApplicationSource,
+ revisions []string,
+ sources []v1alpha1.ApplicationSource,
+ hasMultipleSources bool,
+ startedAt metav1.Time,
+ initiatedBy v1alpha1.OperationInitiator,
+) error {
var nextID int64
if len(app.Status.History) > 0 {
nextID = app.Status.History.LastRevisionHistory().ID + 1
@@ -781,6 +902,7 @@ func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revi
ID: nextID,
Sources: sources,
Revisions: revisions,
+ InitiatedBy: initiatedBy,
})
} else {
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
@@ -789,6 +911,7 @@ func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revi
DeployStartedAt: &startedAt,
ID: nextID,
Source: source,
+ InitiatedBy: initiatedBy,
})
}
@@ -821,6 +944,8 @@ func NewAppStateManager(
statusRefreshTimeout time.Duration,
resourceTracking argo.ResourceTracking,
persistResourceHealth bool,
+ repoErrorGracePeriod time.Duration,
+ serverSideDiff bool,
) AppStateManager {
return &appStateManager{
liveStateCache: liveStateCache,
@@ -836,6 +961,8 @@ func NewAppStateManager(
statusRefreshTimeout: statusRefreshTimeout,
resourceTracking: resourceTracking,
persistResourceHealth: persistResourceHealth,
+ repoErrorGracePeriod: repoErrorGracePeriod,
+ serverSideDiff: serverSideDiff,
}
}
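The state.go changes above make CompareAppState return (*comparisonResult, error) and introduce the CompareStateRepoError sentinel together with repoErrorGracePeriod. The following is a hedged sketch of how a caller inside the controller package might distinguish that sentinel from other failures; the helper name and the requeue decision are assumptions, not code from this patch.

```go
// Hypothetical caller-side handling (not part of this commit): CompareAppState
// now returns an error, and CompareStateRepoError marks repo failures that are
// still inside repoErrorGracePeriod.
package controller

import (
	"errors"

	log "github.com/sirupsen/logrus"
)

// requeueOnRepoError is illustrative only. It returns the comparison result
// when available and reports whether the caller should simply requeue the app.
func requeueOnRepoError(compRes *comparisonResult, err error) (*comparisonResult, bool) {
	if err == nil {
		return compRes, false
	}
	if errors.Is(err, CompareStateRepoError) {
		// Within the grace period the comparison was skipped entirely, so keep
		// the previous app status and retry the refresh later.
		log.Debug("comparison skipped: repo error still within grace period")
		return nil, true
	}
	// Any other error is unexpected and should surface to the operator.
	log.Errorf("CompareAppState failed: %v", err)
	return nil, false
}
```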
diff --git a/controller/state_test.go b/controller/state_test.go
index dcb48e87fce9b..1a55e25b262d1 100644
--- a/controller/state_test.go
+++ b/controller/state_test.go
@@ -2,6 +2,7 @@ package controller
import (
"encoding/json"
+ "fmt"
"os"
"testing"
"time"
@@ -10,6 +11,9 @@ import (
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+ "github.com/imdario/mergo"
+ "github.com/sirupsen/logrus"
+ logrustest "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -19,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"github.com/argoproj/argo-cd/v2/common"
+ "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/test"
@@ -37,12 +42,13 @@ func TestCompareAppStateEmpty(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -51,6 +57,31 @@ func TestCompareAppStateEmpty(t *testing.T) {
assert.Len(t, app.Status.Conditions, 0)
}
+// TestCompareAppStateRepoError tests the case when CompareAppState notices a repo error
+func TestCompareAppStateRepoError(t *testing.T) {
+ app := newFakeApp()
+ ctrl := newFakeController(&fakeData{manifestResponses: make([]*apiclient.ManifestResponse, 3)}, fmt.Errorf("test repo error"))
+ sources := make([]argoappv1.ApplicationSource, 0)
+ sources = append(sources, app.Spec.GetSource())
+ revisions := make([]string, 0)
+ revisions = append(revisions, "")
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, compRes)
+ assert.EqualError(t, err, CompareStateRepoError.Error())
+
+ // expect to still get the compare state error, as we are still inside the grace period
+ compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, compRes)
+ assert.EqualError(t, err, CompareStateRepoError.Error())
+
+ time.Sleep(10 * time.Second)
+ // expect no error now that we are outside the grace period, but the status should be unknown
+ compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.NotNil(t, compRes)
+ assert.Nil(t, err)
+ assert.Equal(t, compRes.syncStatus.Status, argoappv1.SyncStatusCodeUnknown)
+}
+
// TestCompareAppStateNamespaceMetadataDiffers tests comparison when managed namespace metadata differs
func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
app := newFakeApp()
@@ -75,12 +106,13 @@ func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -89,6 +121,124 @@ func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
assert.Len(t, app.Status.Conditions, 0)
}
+// TestCompareAppStateNamespaceMetadataDiffersToManifest tests comparison when managed namespace metadata differs from both the live and manifest namespace
+func TestCompareAppStateNamespaceMetadataDiffersToManifest(t *testing.T) {
+ ns := NewNamespace()
+ ns.SetName(test.FakeDestNamespace)
+ ns.SetNamespace(test.FakeDestNamespace)
+ ns.SetAnnotations(map[string]string{"bar": "bat"})
+
+ app := newFakeApp()
+ app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{
+ Labels: map[string]string{
+ "foo": "bar",
+ },
+ Annotations: map[string]string{
+ "foo": "bar",
+ },
+ }
+ app.Status.OperationState = &argoappv1.OperationState{
+ SyncResult: &argoappv1.SyncOperationResult{},
+ }
+
+ liveNs := ns.DeepCopy()
+ liveNs.SetAnnotations(nil)
+
+ data := fakeData{
+ manifestResponse: &apiclient.ManifestResponse{
+ Manifests: []string{toJSON(t, liveNs)},
+ Namespace: test.FakeDestNamespace,
+ Server: test.FakeClusterURL,
+ Revision: "abc123",
+ },
+ managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
+ kube.GetResourceKey(ns): ns,
+ },
+ }
+ ctrl := newFakeController(&data, nil)
+ sources := make([]argoappv1.ApplicationSource, 0)
+ sources = append(sources, app.Spec.GetSource())
+ revisions := make([]string, 0)
+ revisions = append(revisions, "")
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
+ assert.NotNil(t, compRes)
+ assert.NotNil(t, compRes.syncStatus)
+ assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
+ assert.Len(t, compRes.resources, 1)
+ assert.Len(t, compRes.managedResources, 1)
+ assert.NotNil(t, compRes.diffResultList)
+ assert.Len(t, compRes.diffResultList.Diffs, 1)
+
+ result := NewNamespace()
+ assert.NoError(t, json.Unmarshal(compRes.diffResultList.Diffs[0].PredictedLive, result))
+
+ labels := result.GetLabels()
+ delete(labels, "kubernetes.io/metadata.name")
+
+ assert.Equal(t, map[string]string{}, labels)
+ // Manifests override definitions in managedNamespaceMetadata
+ assert.Equal(t, map[string]string{"bar": "bat"}, result.GetAnnotations())
+ assert.Len(t, app.Status.Conditions, 0)
+}
+
+// TestCompareAppStateNamespaceMetadata tests comparison when managed namespace metadata differs from the live namespace
+func TestCompareAppStateNamespaceMetadata(t *testing.T) {
+ ns := NewNamespace()
+ ns.SetName(test.FakeDestNamespace)
+ ns.SetNamespace(test.FakeDestNamespace)
+ ns.SetAnnotations(map[string]string{"bar": "bat"})
+
+ app := newFakeApp()
+ app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{
+ Labels: map[string]string{
+ "foo": "bar",
+ },
+ Annotations: map[string]string{
+ "foo": "bar",
+ },
+ }
+ app.Status.OperationState = &argoappv1.OperationState{
+ SyncResult: &argoappv1.SyncOperationResult{},
+ }
+
+ data := fakeData{
+ manifestResponse: &apiclient.ManifestResponse{
+ Manifests: []string{},
+ Namespace: test.FakeDestNamespace,
+ Server: test.FakeClusterURL,
+ Revision: "abc123",
+ },
+ managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
+ kube.GetResourceKey(ns): ns,
+ },
+ }
+ ctrl := newFakeController(&data, nil)
+ sources := make([]argoappv1.ApplicationSource, 0)
+ sources = append(sources, app.Spec.GetSource())
+ revisions := make([]string, 0)
+ revisions = append(revisions, "")
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
+ assert.NotNil(t, compRes)
+ assert.NotNil(t, compRes.syncStatus)
+ assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
+ assert.Len(t, compRes.resources, 1)
+ assert.Len(t, compRes.managedResources, 1)
+ assert.NotNil(t, compRes.diffResultList)
+ assert.Len(t, compRes.diffResultList.Diffs, 1)
+
+ result := NewNamespace()
+ assert.NoError(t, json.Unmarshal(compRes.diffResultList.Diffs[0].PredictedLive, result))
+
+ labels := result.GetLabels()
+ delete(labels, "kubernetes.io/metadata.name")
+
+ assert.Equal(t, map[string]string{"foo": "bar"}, labels)
+ assert.Equal(t, map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true", "bar": "bat", "foo": "bar"}, result.GetAnnotations())
+ assert.Len(t, app.Status.Conditions, 0)
+}
+
// TestCompareAppStateNamespaceMetadataIsTheSame tests comparison when managed namespace metadata is the same
func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) {
app := newFakeApp()
@@ -122,12 +272,13 @@ func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -149,12 +300,13 @@ func TestCompareAppStateMissing(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -180,12 +332,13 @@ func TestCompareAppStateExtra(t *testing.T) {
key: pod,
},
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
@@ -210,12 +363,13 @@ func TestCompareAppStateHook(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 0, len(compRes.resources))
@@ -241,12 +395,13 @@ func TestCompareAppStateSkipHook(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
@@ -270,13 +425,14 @@ func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -303,12 +459,13 @@ func TestCompareAppStateExtraHook(t *testing.T) {
key: pod,
},
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -331,12 +488,13 @@ func TestAppRevisionsSingleSource(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
app := newFakeApp()
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.NotEmpty(t, compRes.syncStatus.Revision)
@@ -370,12 +528,13 @@ func TestAppRevisionsMultiSource(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
app := newFakeMultiSourceApp()
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Empty(t, compRes.syncStatus.Revision)
@@ -417,12 +576,13 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
kube.GetResourceKey(obj3): obj3,
},
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, 1, len(app.Status.Conditions))
@@ -457,8 +617,9 @@ func TestCompareAppStateManagedNamespaceMetadataWithLiveNsDoesNotGetPruned(t *te
kube.GetResourceKey(ns): ns,
},
}
- ctrl := newFakeController(&data)
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false)
+ ctrl := newFakeController(&data, nil)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.Equal(t, 0, len(app.Status.Conditions))
@@ -512,13 +673,14 @@ func TestSetHealth(t *testing.T) {
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(deployment): deployment,
},
- })
+ }, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
}
@@ -548,13 +710,14 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
kube.GetResourceKey(deployment): deployment,
kube.GetResourceKey(unstructuredApp): unstructuredApp,
},
- })
+ }, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
}
@@ -574,7 +737,7 @@ func TestSetManagedResourcesWithOrphanedResources(t *testing.T) {
AppName: "",
},
},
- })
+ }, nil)
tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})
@@ -603,7 +766,7 @@ func TestSetManagedResourcesWithResourcesOfAnotherApp(t *testing.T) {
AppName: "app2",
},
},
- })
+ }, nil)
tree, err := ctrl.setAppManagedResources(app1, &comparisonResult{managedResources: make([]managedResource, 0)})
@@ -622,13 +785,14 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
configMapData: map[string]string{
"resource.customizations": "invalid setting",
},
- })
+ }, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.Equal(t, health.HealthStatusUnknown, compRes.healthStatus.Status)
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
@@ -655,7 +819,7 @@ func TestSetManagedResourcesKnownOrphanedResourceExceptions(t *testing.T) {
ResourceNode: argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: kube.ServiceAccountKind, Name: "kubernetes", Namespace: app.Namespace}},
},
},
- })
+ }, nil)
tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})
@@ -668,14 +832,14 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app},
- })
+ }, nil)
manager := ctrl.appStateManager.(*appStateManager)
setRevisionHistoryLimit := func(value int) {
i := int64(value)
app.Spec.RevisionHistoryLimit = &i
}
addHistory := func() {
- err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{})
+ err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{}, v1alpha1.OperationInitiator{})
assert.NoError(t, err)
}
addHistory()
@@ -711,7 +875,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
assert.Len(t, app.Status.History, 9)
metav1NowTime := metav1.NewTime(time.Now())
- err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime)
+ err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime, v1alpha1.OperationInitiator{})
assert.NoError(t, err)
assert.Equal(t, app.Status.History.LastRevisionHistory().DeployStartedAt, &metav1NowTime)
}
@@ -763,12 +927,13 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -789,12 +954,13 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -820,12 +986,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -846,12 +1013,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -872,12 +1040,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -898,12 +1067,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -925,14 +1095,15 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
testProj := signedProj
testProj.Spec.SignatureKeys[0].KeyID = "4AEE18F83AFDEB24"
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -956,12 +1127,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
}
// it doesn't matter for our test whether local manifests are valid
localManifests := []string{"foobar"}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
@@ -985,12 +1157,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1014,12 +1187,13 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
}
// it doesn't matter for our test whether local manifests are valid
localManifests := []string{""}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
- compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
+ compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
+ assert.Nil(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1154,7 +1328,7 @@ func TestIsLiveResourceManaged(t *testing.T) {
kube.GetResourceKey(unmanagedObjWrongGroup): unmanagedObjWrongGroup,
kube.GetResourceKey(unmanagedObjWrongNamespace): unmanagedObjWrongNamespace,
},
- })
+ }, nil)
manager := ctrl.appStateManager.(*appStateManager)
appName := "guestbook"
@@ -1223,3 +1397,272 @@ func TestIsLiveResourceManaged(t *testing.T) {
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
}
+
+func TestUseDiffCache(t *testing.T) {
+ type fixture struct {
+ testName string
+ noCache bool
+ manifestInfos []*apiclient.ManifestResponse
+ sources []argoappv1.ApplicationSource
+ app *argoappv1.Application
+ manifestRevisions []string
+ statusRefreshTimeout time.Duration
+ expectedUseCache bool
+ serverSideDiff bool
+ }
+
+ manifestInfos := func(revision string) []*apiclient.ManifestResponse {
+ return []*apiclient.ManifestResponse{
+ {
+ Manifests: []string{
+ "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"labels\":{\"app.kubernetes.io/instance\":\"httpbin\"},\"name\":\"httpbin-svc\",\"namespace\":\"httpbin\"},\"spec\":{\"ports\":[{\"name\":\"http-port\",\"port\":7777,\"targetPort\":80},{\"name\":\"test\",\"port\":333}],\"selector\":{\"app\":\"httpbin\"}}}",
+ "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"labels\":{\"app.kubernetes.io/instance\":\"httpbin\"},\"name\":\"httpbin-deployment\",\"namespace\":\"httpbin\"},\"spec\":{\"replicas\":2,\"selector\":{\"matchLabels\":{\"app\":\"httpbin\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"httpbin\"}},\"spec\":{\"containers\":[{\"image\":\"kennethreitz/httpbin\",\"imagePullPolicy\":\"Always\",\"name\":\"httpbin\",\"ports\":[{\"containerPort\":80}]}]}}}}",
+ },
+ Namespace: "",
+ Server: "",
+ Revision: revision,
+ SourceType: "Kustomize",
+ VerifyResult: "",
+ },
+ }
+ }
+ sources := func() []argoappv1.ApplicationSource {
+ return []argoappv1.ApplicationSource{
+ {
+ RepoURL: "https://some-repo.com",
+ Path: "argocd/httpbin",
+ TargetRevision: "HEAD",
+ },
+ }
+ }
+
+ app := func(namespace string, revision string, refresh bool, a *argoappv1.Application) *argoappv1.Application {
+ app := &argoappv1.Application{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "httpbin",
+ Namespace: namespace,
+ },
+ Spec: argoappv1.ApplicationSpec{
+ Source: &argoappv1.ApplicationSource{
+ RepoURL: "https://some-repo.com",
+ Path: "argocd/httpbin",
+ TargetRevision: "HEAD",
+ },
+ Destination: argoappv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "httpbin",
+ },
+ Project: "default",
+ SyncPolicy: &argoappv1.SyncPolicy{
+ SyncOptions: []string{
+ "CreateNamespace=true",
+ "ServerSideApply=true",
+ },
+ },
+ },
+ Status: argoappv1.ApplicationStatus{
+ Resources: []argoappv1.ResourceStatus{},
+ Sync: argoappv1.SyncStatus{
+ Status: argoappv1.SyncStatusCodeSynced,
+ ComparedTo: argoappv1.ComparedTo{
+ Source: argoappv1.ApplicationSource{
+ RepoURL: "https://some-repo.com",
+ Path: "argocd/httpbin",
+ TargetRevision: "HEAD",
+ },
+ Destination: argoappv1.ApplicationDestination{
+ Server: "https://kubernetes.default.svc",
+ Namespace: "httpbin",
+ },
+ },
+ Revision: revision,
+ Revisions: []string{},
+ },
+ ReconciledAt: &metav1.Time{
+ Time: time.Now().Add(-time.Hour),
+ },
+ },
+ }
+ if refresh {
+ annotations := make(map[string]string)
+ annotations[argoappv1.AnnotationKeyRefresh] = string(argoappv1.RefreshTypeNormal)
+ app.SetAnnotations(annotations)
+ }
+ if a != nil {
+ err := mergo.Merge(app, a, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue)
+ if err != nil {
+ t.Fatalf("error merging app: %s", err)
+ }
+ }
+ return app
+ }
+
+ cases := []fixture{
+ {
+ testName: "will use diff cache",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, nil),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: true,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will use diff cache for multisource",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "", false, &argoappv1.Application{
+ Spec: argoappv1.ApplicationSpec{
+ Source: nil,
+ Sources: argoappv1.ApplicationSources{
+ {
+ RepoURL: "multisource repo1",
+ },
+ {
+ RepoURL: "multisource repo2",
+ },
+ },
+ },
+ Status: argoappv1.ApplicationStatus{
+ Resources: []argoappv1.ResourceStatus{},
+ Sync: argoappv1.SyncStatus{
+ Status: argoappv1.SyncStatusCodeSynced,
+ ComparedTo: argoappv1.ComparedTo{
+ Source: argoappv1.ApplicationSource{},
+ Sources: argoappv1.ApplicationSources{
+ {
+ RepoURL: "multisource repo1",
+ },
+ {
+ RepoURL: "multisource repo2",
+ },
+ },
+ },
+ Revisions: []string{"rev1", "rev2"},
+ },
+ ReconciledAt: &metav1.Time{
+ Time: time.Now().Add(-time.Hour),
+ },
+ },
+ }),
+ manifestRevisions: []string{"rev1", "rev2"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: true,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will return false if nocache is true",
+ noCache: true,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, nil),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: false,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will return false if requested refresh",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", true, nil),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: false,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will return false if status expired",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, nil),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Minute,
+ expectedUseCache: false,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will return true if status expired and server-side diff",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, nil),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Minute,
+ expectedUseCache: true,
+ serverSideDiff: true,
+ },
+ {
+ testName: "will return false if there is a new revision",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, nil),
+ manifestRevisions: []string{"rev2"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: false,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will return false if app spec repo changed",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, &argoappv1.Application{
+ Spec: argoappv1.ApplicationSpec{
+ Source: &argoappv1.ApplicationSource{
+ RepoURL: "new-repo",
+ },
+ },
+ }),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: false,
+ serverSideDiff: false,
+ },
+ {
+ testName: "will return false if app spec IgnoreDifferences changed",
+ noCache: false,
+ manifestInfos: manifestInfos("rev1"),
+ sources: sources(),
+ app: app("httpbin", "rev1", false, &argoappv1.Application{
+ Spec: argoappv1.ApplicationSpec{
+ IgnoreDifferences: []argoappv1.ResourceIgnoreDifferences{
+ {
+ Group: "app/v1",
+ Kind: "application",
+ Name: "httpbin",
+ Namespace: "httpbin",
+ JQPathExpressions: []string{"."},
+ },
+ },
+ },
+ }),
+ manifestRevisions: []string{"rev1"},
+ statusRefreshTimeout: time.Hour * 24,
+ expectedUseCache: false,
+ serverSideDiff: false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+ t.Run(tc.testName, func(t *testing.T) {
+ // Given
+ t.Parallel()
+ logger, _ := logrustest.NewNullLogger()
+ log := logrus.NewEntry(logger)
+
+ // When
+ useDiffCache := useDiffCache(tc.noCache, tc.manifestInfos, tc.sources, tc.app, tc.manifestRevisions, tc.statusRefreshTimeout, tc.serverSideDiff, log)
+
+ // Then
+ assert.Equal(t, useDiffCache, tc.expectedUseCache)
+ })
+ }
+}
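
For readers scanning the table-driven test above, the expected outcomes reduce to a small predicate. The following is a minimal, hypothetical sketch of that decision logic — `diffCacheInputs` and `useDiffCacheSketch` are invented names, and the real `useDiffCache` helper takes the Application, manifest responses, sources, revisions, refresh timeout, and a logger, as shown in the test — it simply mirrors the outcomes the test cases assert:

```go
package main

import "fmt"

// diffCacheInputs is a hypothetical, flattened view of the checks that
// TestUseDiffCache exercises; the real function derives these from the
// Application spec/status, manifest responses, and revisions.
type diffCacheInputs struct {
	noCache          bool // caller explicitly asked to skip the cache
	refreshRequested bool // refresh annotation present on the app
	statusExpired    bool // last reconciliation older than statusRefreshTimeout
	serverSideDiff   bool // server-side diff enabled
	revisionsMatch   bool // manifest revisions equal the last compared revisions
	specChanged      bool // source(s) or ignoreDifferences changed since ComparedTo
}

// useDiffCacheSketch mirrors the expected outcomes of the table-driven test:
// any invalidating condition disables the cache, except that an expired
// status alone is tolerated when server-side diff is enabled.
func useDiffCacheSketch(in diffCacheInputs) bool {
	if in.noCache || in.refreshRequested || in.specChanged || !in.revisionsMatch {
		return false
	}
	if in.statusExpired && !in.serverSideDiff {
		return false
	}
	return true
}

func main() {
	fmt.Println(useDiffCacheSketch(diffCacheInputs{revisionsMatch: true}))                                            // true
	fmt.Println(useDiffCacheSketch(diffCacheInputs{revisionsMatch: true, statusExpired: true}))                       // false
	fmt.Println(useDiffCacheSketch(diffCacheInputs{revisionsMatch: true, statusExpired: true, serverSideDiff: true})) // true
}
```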
diff --git a/controller/sync.go b/controller/sync.go
index 783183c17fc7c..34c12bdb5da3c 100644
--- a/controller/sync.go
+++ b/controller/sync.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
"encoding/json"
+ goerrors "errors"
"fmt"
"os"
"strconv"
@@ -56,6 +57,27 @@ func (m *appStateManager) getGVKParser(server string) (*managedfields.GvkParser,
return cluster.GetGVKParser(), nil
}
+// getResourceOperations will return the kubectl implementation of the ResourceOperations
+// interface that provides functionality to manage kubernetes resources. Returns a
+// cleanup function that must be called to remove the generated kube config for this
+// server.
+func (m *appStateManager) getResourceOperations(server string) (kube.ResourceOperations, func(), error) {
+ clusterCache, err := m.liveStateCache.GetClusterCache(server)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error getting cluster cache: %w", err)
+ }
+
+ cluster, err := m.db.GetCluster(context.Background(), server)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error getting cluster: %w", err)
+ }
+ ops, cleanup, err := m.kubectl.ManageResources(cluster.RawRestConfig(), clusterCache.GetOpenAPISchema())
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating kubectl ResourceOperations: %w", err)
+ }
+ return ops, cleanup, nil
+}
+
func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState) {
// Sync requests might be requested with ambiguous revisions (e.g. master, HEAD, v1.2.3).
// This can change meaning when resuming operations (e.g a hook sync). After calculating a
@@ -81,7 +103,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
hasSharedResource {
state.Phase = common.OperationFailed
- state.Message = fmt.Sprintf("Shared resouce found: %s", sharedResourceMessage)
+ state.Message = fmt.Sprintf("Shared resource found: %s", sharedResourceMessage)
return
}
@@ -152,7 +174,13 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
revisions = []string{revision}
}
- compareResult := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources())
+	// Ignore the error if it is a CompareStateRepoError; it shouldn't occur here because noRevisionCache is true.
+ compareResult, err := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources())
+ if err != nil && !goerrors.Is(err, CompareStateRepoError) {
+ state.Phase = common.OperationError
+ state.Message = err.Error()
+ return
+ }
// We now have a concrete commit SHA. Save this in the sync result revision so that we remember
// what we should be syncing to when resuming operations.
@@ -276,6 +304,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
sync.WithInitialState(state.Phase, state.Message, initialResourcesRes, state.StartedAt),
sync.WithResourcesFilter(func(key kube.ResourceKey, target *unstructured.Unstructured, live *unstructured.Unstructured) bool {
return (len(syncOp.Resources) == 0 ||
+ isPostDeleteHook(target) ||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
}),
@@ -362,7 +391,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
logEntry.WithField("duration", time.Since(start)).Info("sync/terminate complete")
if !syncOp.DryRun && len(syncOp.Resources) == 0 && state.Phase.Successful() {
- err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt)
+ err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt, state.Operation.InitiatedBy)
if err != nil {
state.Phase = common.OperationError
state.Message = fmt.Sprintf("failed to record sync to history: %v", err)
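
The new error handling around `CompareAppState` in `SyncAppState` relies on sentinel-error matching with `errors.Is`: the sync proceeds when the only failure is the repo-related sentinel and aborts the operation otherwise. Below is a self-contained sketch of that pattern under assumed names (`errSketchCompareStateRepo` and `compareAppStateSketch` are hypothetical stand-ins; the real sentinel is `CompareStateRepoError` in the controller package):

```go
package main

import (
	"errors"
	"fmt"
)

// errSketchCompareStateRepo stands in for the controller's repo-error sentinel;
// callers detect it with errors.Is even after it has been wrapped.
var errSketchCompareStateRepo = errors.New("failed to get repo objects")

// compareAppStateSketch pretends the repo-server call failed and wraps the
// sentinel so errors.Is can still recognize it through the added context.
func compareAppStateSketch() error {
	return fmt.Errorf("comparison error: %w", errSketchCompareStateRepo)
}

func main() {
	err := compareAppStateSketch()
	if err != nil && !errors.Is(err, errSketchCompareStateRepo) {
		// Any other comparison failure marks the operation as errored.
		fmt.Println("operation error:", err)
		return
	}
	// A repo error is tolerated on this path because manifests are re-fetched
	// with the revision cache disabled during the sync.
	fmt.Println("continuing sync; tolerated error:", err)
}
```

Wrapping with `%w` is what lets `errors.Is` see through the extra context added by `fmt.Errorf`.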
diff --git a/controller/sync_test.go b/controller/sync_test.go
index da68e5d9a3dfe..309f846ca6460 100644
--- a/controller/sync_test.go
+++ b/controller/sync_test.go
@@ -41,7 +41,7 @@ func TestPersistRevisionHistory(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -87,7 +87,7 @@ func TestPersistManagedNamespaceMetadataState(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -118,7 +118,7 @@ func TestPersistRevisionHistoryRollback(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
// Sync with source specified
source := v1alpha1.ApplicationSource{
@@ -172,7 +172,7 @@ func TestSyncComparisonError(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -217,7 +217,7 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
- ctrl := newFakeController(&data)
+ ctrl := newFakeController(&data, nil)
return &fixture{
project: project,
diff --git a/docs/assets/api-management.png b/docs/assets/api-management.png
deleted file mode 100644
index ae066f0a6a87d..0000000000000
Binary files a/docs/assets/api-management.png and /dev/null differ
diff --git a/docs/assets/groups-claim.png b/docs/assets/groups-claim.png
deleted file mode 100644
index d27e03b661f82..0000000000000
Binary files a/docs/assets/groups-claim.png and /dev/null differ
diff --git a/docs/assets/groups-scope.png b/docs/assets/groups-scope.png
deleted file mode 100644
index 45557b51ead7f..0000000000000
Binary files a/docs/assets/groups-scope.png and /dev/null differ
diff --git a/docs/assets/identity-center-1.png b/docs/assets/identity-center-1.png
new file mode 100644
index 0000000000000..0cd49528d90f7
Binary files /dev/null and b/docs/assets/identity-center-1.png differ
diff --git a/docs/assets/identity-center-2.png b/docs/assets/identity-center-2.png
new file mode 100644
index 0000000000000..5a96899193168
Binary files /dev/null and b/docs/assets/identity-center-2.png differ
diff --git a/docs/assets/identity-center-3.png b/docs/assets/identity-center-3.png
new file mode 100644
index 0000000000000..79414b119d335
Binary files /dev/null and b/docs/assets/identity-center-3.png differ
diff --git a/docs/assets/identity-center-4.png b/docs/assets/identity-center-4.png
new file mode 100644
index 0000000000000..fbe48e4400974
Binary files /dev/null and b/docs/assets/identity-center-4.png differ
diff --git a/docs/assets/identity-center-5.png b/docs/assets/identity-center-5.png
new file mode 100644
index 0000000000000..f170c8d5069e0
Binary files /dev/null and b/docs/assets/identity-center-5.png differ
diff --git a/docs/assets/identity-center-6.png b/docs/assets/identity-center-6.png
new file mode 100644
index 0000000000000..01fe6f73f0642
Binary files /dev/null and b/docs/assets/identity-center-6.png differ
diff --git a/docs/assets/okta-app.png b/docs/assets/okta-app.png
new file mode 100644
index 0000000000000..bfc4570826b0a
Binary files /dev/null and b/docs/assets/okta-app.png differ
diff --git a/docs/assets/okta-auth-policy.png b/docs/assets/okta-auth-policy.png
new file mode 100644
index 0000000000000..dbf99a88ed6e3
Binary files /dev/null and b/docs/assets/okta-auth-policy.png differ
diff --git a/docs/assets/okta-auth-rule.png b/docs/assets/okta-auth-rule.png
new file mode 100644
index 0000000000000..4e85b062f357b
Binary files /dev/null and b/docs/assets/okta-auth-rule.png differ
diff --git a/docs/assets/okta-create-oidc-app.png b/docs/assets/okta-create-oidc-app.png
new file mode 100644
index 0000000000000..cf0b75b0e4a21
Binary files /dev/null and b/docs/assets/okta-create-oidc-app.png differ
diff --git a/docs/assets/okta-groups-claim.png b/docs/assets/okta-groups-claim.png
new file mode 100644
index 0000000000000..4edb93d42ea91
Binary files /dev/null and b/docs/assets/okta-groups-claim.png differ
diff --git a/docs/assets/okta-groups-scope.png b/docs/assets/okta-groups-scope.png
new file mode 100644
index 0000000000000..6cd1783c72653
Binary files /dev/null and b/docs/assets/okta-groups-scope.png differ
diff --git a/docs/cli_installation.md b/docs/cli_installation.md
index 42938bcd751ba..5a314d4ce6be2 100644
--- a/docs/cli_installation.md
+++ b/docs/cli_installation.md
@@ -37,6 +37,17 @@ sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
rm argocd-linux-amd64
```
+#### Download latest stable version
+
+You can download the latest stable version by running the following commands:
+
+```bash
+VERSION=$(curl -L -s https://raw.githubusercontent.com/argoproj/argo-cd/stable/VERSION)
+curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/download/v$VERSION/argocd-linux-amd64
+sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
+rm argocd-linux-amd64
+```
+
You should now be able to run `argocd` commands.
diff --git a/docs/developer-guide/api-docs.md b/docs/developer-guide/api-docs.md
index 289e4d466652e..63e3cd901e3d3 100644
--- a/docs/developer-guide/api-docs.md
+++ b/docs/developer-guide/api-docs.md
@@ -24,10 +24,9 @@ $ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKE
#### How to Avoid 403 Errors for Missing Applications
-All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter is
-specified, and the specified Application does not exist, or if the Application does exist but is not in the given
-project, the API will return a `404` error.
+All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter
+is specified, and the specified Application does not exist, the API will return a `404` error.
-If the `project` query string parameter is specified, and the Application does not exist, the API will return a `403`
-error. This is to prevent leaking information about the existence of Applications to users who do not have access to
-them.
+Additionally, if the `project` query string parameter is specified and the Application exists but is not in
+the given `project`, the API will return a `403` error. This is to prevent leaking information about the
+existence of Applications to users who do not have access to them.
\ No newline at end of file
diff --git a/docs/developer-guide/architecture/components.md b/docs/developer-guide/architecture/components.md
index eb2904b531ccb..e073751da4867 100644
--- a/docs/developer-guide/architecture/components.md
+++ b/docs/developer-guide/architecture/components.md
@@ -71,7 +71,7 @@ and the CLI functionalities.
### Application Controller
The Application Controller is responsible for reconciling the
-Application resource in Kubernetes syncronizing the desired
+Application resource in Kubernetes synchronizing the desired
application state (provided in Git) with the live state (in
Kubernetes). The Application Controller is also responsible for
reconciling the Project resource.
diff --git a/docs/developer-guide/contributors-quickstart.md b/docs/developer-guide/contributors-quickstart.md
index 0e98fab7ec940..a7646a6cf5f25 100644
--- a/docs/developer-guide/contributors-quickstart.md
+++ b/docs/developer-guide/contributors-quickstart.md
@@ -9,6 +9,8 @@ and the [toolchain guide](toolchain-guide.md).
### Install Go
+
+<https://go.dev/doc/install/>
Install version 1.18 or newer (Verify version by running `go version`)
### Clone the Argo CD repo
@@ -23,16 +25,29 @@ git clone https://github.com/argoproj/argo-cd.git
-### Install or Upgrade `kind` (Optional - Should work with any local cluster)
+### Install or Upgrade a Tool for Running Local Clusters (e.g. kind or minikube)
+
+#### Installation guide for kind: <https://kind.sigs.k8s.io/docs/user/quick-start/>
+#### Installation guide for minikube: <https://minikube.sigs.k8s.io/docs/start/>
+
+
+
### Start Your Local Cluster
+For example, if you are using kind:
```shell
kind create cluster
```
+Or, if you are using minikube:
+
+```shell
+minikube start
+```
+
### Install Argo CD
```shell
diff --git a/docs/developer-guide/debugging-remote-environment.md b/docs/developer-guide/debugging-remote-environment.md
index 7f8102a75c502..5548d3444af8c 100644
--- a/docs/developer-guide/debugging-remote-environment.md
+++ b/docs/developer-guide/debugging-remote-environment.md
@@ -45,7 +45,7 @@ And uninstall telepresence from your cluster:
telepresence helm uninstall
```
-See [this quickstart](https://www.telepresence.io/docs/latest/howtos/intercepts/) for more information on how to intercept services using Telepresence.
+See [this quickstart](https://www.telepresence.io/docs/latest/quick-start/) for more information on how to intercept services using Telepresence.
### Connect (telepresence v1)
Use the following command instead:
diff --git a/docs/developer-guide/extensions/ui-extensions.md b/docs/developer-guide/extensions/ui-extensions.md
index 2c25748beb148..8d3d9dc4a3882 100644
--- a/docs/developer-guide/extensions/ui-extensions.md
+++ b/docs/developer-guide/extensions/ui-extensions.md
@@ -36,7 +36,7 @@ registerResourceExtension(component: ExtensionComponent, group: string, kind: st
- `component: ExtensionComponent` is a React component that receives the following properties:
- application: Application - Argo CD Application resource;
- - resource: State - the kubernetes resource object;
+ - resource: State - the Kubernetes resource object;
- tree: ApplicationTree - includes list of all resources that comprise the application;
See properties interfaces in [models.ts](https://github.com/argoproj/argo-cd/blob/master/ui/src/app/shared/models.ts)
@@ -95,3 +95,66 @@ Below is an example of a simple system level extension:
Since the Argo CD Application is a Kubernetes resource, application tabs can be the same as any other resource tab.
Make sure to use 'argoproj.io'/'Application' as group/kind and an extension will be used to render the application-level tab.
+
+## Application Status Panel Extensions
+
+The status panel is the bar at the top of the application view where the sync status is displayed. Argo CD allows you to add new items to the status panel of an application. The extension should be registered using the `extensionsAPI.registerStatusPanelExtension` method:
+
+```typescript
+registerStatusPanelExtension(component: StatusPanelExtensionComponent, title: string, id: string, flyout?: ExtensionComponent)
+```
+
+Below is an example of a simple extension:
+
+```typescript
+((window) => {
+ const component = () => {
+ return React.createElement(
+ "div",
+ { style: { padding: "10px" } },
+ "Hello World"
+ );
+ };
+ window.extensionsAPI.registerStatusPanelExtension(
+ component,
+ "My Extension",
+ "my_extension"
+ );
+})(window);
+```
+
+### Flyout widget
+
+It is also possible to add an optional flyout widget to your extension. It can be opened by calling `openFlyout()` from your extension's component. Your flyout component will then be rendered in a sliding panel, similar to the panel that opens when clicking on `History and rollback`.
+
+Below is an example of an extension using the flyout widget:
+
+```typescript
+((window) => {
+ const component = (props: {
+ openFlyout: () => any
+ }) => {
+ return React.createElement(
+ "div",
+ {
+ style: { padding: "10px" },
+ onClick: () => props.openFlyout()
+ },
+ "Hello World"
+ );
+ };
+ const flyout = () => {
+ return React.createElement(
+ "div",
+ { style: { padding: "10px" } },
+ "This is a flyout"
+ );
+ };
+ window.extensionsAPI.registerStatusPanelExtension(
+ component,
+ "My Extension",
+ "my_extension",
+ flyout
+ );
+})(window);
+```
diff --git a/docs/developer-guide/release-process-and-cadence.md b/docs/developer-guide/release-process-and-cadence.md
index 051de617f0776..737c6eba6a8d9 100644
--- a/docs/developer-guide/release-process-and-cadence.md
+++ b/docs/developer-guide/release-process-and-cadence.md
@@ -6,14 +6,15 @@
These are the upcoming release dates:
-| Release | Release Planning Meeting | Release Candidate 1 | General Availability | Release Champion | Checklist |
-|---------|--------------------------|-----------------------|----------------------|-------------------------------------------------------|---------------------------------------------------------------|
-| v2.6 | Monday, Dec. 12, 2022 | Monday, Dec. 19, 2022 | Monday, Feb. 6, 2023 | [William Tam](https://github.com/wtam2018) | [checklist](https://github.com/argoproj/argo-cd/issues/11563) |
-| v2.7 | Monday, Mar. 6, 2023 | Monday, Mar. 20, 2023 | Monday, May. 1, 2023 | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/12762) |
-| v2.8 | Monday, Jun. 20, 2023 | Monday, Jun. 26, 2023 | Monday, Aug. 7, 2023 | [Keith Chong](https://github.com/keithchong) | [checklist](https://github.com/argoproj/argo-cd/issues/13742) |
-| v2.9 | Monday, Sep. 4, 2023 | Monday, Sep. 18, 2023 | Monday, Nov. 6, 2023 | [Leonardo Almeida](https://github.com/leoluz) | [checklist](https://github.com/argoproj/argo-cd/issues/14078) |
-| v2.10 | Monday, Dec. 4, 2023 | Monday, Dec. 18, 2023 | Monday, Feb. 5, 2024 |
-
+| Release | Release Candidate 1 | General Availability | Release Champion | Release Approver |Checklist |
+|---------|-----------------------|----------------------|-------------------------------------------------------|-------------------------------------------------------|---------------------------------------------------------------|
+| v2.6 | Monday, Dec. 19, 2022 | Monday, Feb. 6, 2023 | [William Tam](https://github.com/wtam2018) | [William Tam](https://github.com/wtam2018) | [checklist](https://github.com/argoproj/argo-cd/issues/11563) |
+| v2.7 | Monday, Mar. 20, 2023 | Monday, May 1, 2023 | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/12762) |
+| v2.8 | Monday, Jun. 26, 2023 | Monday, Aug. 7, 2023 | [Keith Chong](https://github.com/keithchong) | [Keith Chong](https://github.com/keithchong) | [checklist](https://github.com/argoproj/argo-cd/issues/13742) |
+| v2.9 | Monday, Sep. 18, 2023 | Monday, Nov. 6, 2023 | [Leonardo Almeida](https://github.com/leoluz) | [Leonardo Almeida](https://github.com/leoluz) | [checklist](https://github.com/argoproj/argo-cd/issues/14078) |
+| v2.10 | Monday, Dec. 18, 2023 | Monday, Feb. 5, 2024 | [Katie Lamkin](https://github.com/kmlamkin9) | | [checklist](https://github.com/argoproj/argo-cd/issues/16339) |
+| v2.11 | Monday, Mar. 18, 2024 | Monday, May 6, 2024 |
+| v2.12 | Monday, Jun. 17, 2024 | Monday, Aug. 5, 2024 |
Actual release dates might differ from the plan by a few days.
@@ -22,8 +23,8 @@ Actual release dates might differ from the plan by a few days.
#### Minor Releases (e.g. 2.x.0)
A minor Argo CD release occurs four times a year, once every three months. Each General Availability (GA) release is
-preceded by several Release Candidates (RCs). The first RC is released three weeks before the scheduled GA date. This
-effectively means that there is a three-week feature freeze.
+preceded by several Release Candidates (RCs). The first RC is released seven weeks before the scheduled GA date. This
+effectively means that there is a seven-week feature freeze.
These are the approximate release dates:
@@ -40,17 +41,6 @@ Argo CD patch releases occur on an as-needed basis. Only the three most recent m
releases. Versions older than the three most recent minor versions are considered EOL and will not receive bug fixes or
security updates.
-#### Minor Release Planning Meeting
-
-Roughly two weeks before the RC date, there will be a meeting to discuss which features are planned for the RC. This meeting is
-for contributors to advocate for certain features. Features which have at least one approver (besides the contributor)
-who can assure they will review/merge by the RC date will be included in the release milestone. All other features will
-be dropped from the milestone (and potentially shifted to the next one).
-
-Since not everyone will be able to attend the meeting, there will be a meeting doc. Contributors can add their feature
-to a table, and Approvers can add their name to the table. Features with a corresponding approver will remain in the
-release milestone.
-
#### Release Champion
To help manage all the steps involved in a release, we will have a Release Champion. The Release Champion will be
@@ -78,3 +68,21 @@ The feature PR must include:
If these criteria are not met by the RC date, the feature will be ineligible for inclusion in the RC series or GA for
that minor release. It will have to wait for the next minor release.
+
+### Security Patch Policy
+
+CVEs in Argo CD code will be patched for all [supported versions](../operator-manual/installation.md#supported-versions).
+
+### Dependencies Lifecycle Policy
+
+Dependencies are evaluated before being introduced to ensure they:
+
+1) are actively maintained
+2) are maintained by trustworthy maintainers
+
+These evaluations vary from dependency to dependency.
+
+A dependency is also scheduled for removal if its project has been deprecated or is no longer maintained.
+
+CVEs in dependencies will be patched for all supported versions if the CVE is applicable and is assessed by Snyk to be
+of high or critical severity. Automation generates a [new Snyk scan weekly](../snyk).
diff --git a/docs/developer-guide/site.md b/docs/developer-guide/site.md
index 47c1f57e29bb7..efd6aece9aedb 100644
--- a/docs/developer-guide/site.md
+++ b/docs/developer-guide/site.md
@@ -2,24 +2,19 @@
## Developing And Testing
-The website is build using `mkdocs` and `mkdocs-material`.
+The website is built using `mkdocs` and `mkdocs-material`.
To test:
```bash
+make build-docs
make serve-docs
```
Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).
-## Deploying
-
-```bash
-make publish-docs
-```
-
## Analytics
!!! tip
Don't forget to disable your ad-blocker when testing.
-We collect [Google Analytics](https://analytics.google.com/analytics/web/#/report-home/a105170809w198079555p192782995).
\ No newline at end of file
+We collect [Google Analytics](https://analytics.google.com/analytics/web/#/report-home/a105170809w198079555p192782995).
diff --git a/docs/developer-guide/toolchain-guide.md b/docs/developer-guide/toolchain-guide.md
index 42ca7fac87404..335180438dac6 100644
--- a/docs/developer-guide/toolchain-guide.md
+++ b/docs/developer-guide/toolchain-guide.md
@@ -304,7 +304,7 @@ For installing the tools required to build and test Argo CD on your local system
You can change the target location by setting the `BIN` environment before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment, i.e. `export PATH=~/go/bin:$PATH`):
```shell
-make BIN=~/go/bin install-tools-local
+BIN=~/go/bin make install-tools-local
```
Additionally, you have to install at least the following tools via your OS's package manager (this list might not be always up-to-date):
diff --git a/docs/faq.md b/docs/faq.md
index 19273acc04d23..83bdf8d7d38b5 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -36,6 +36,15 @@ which might cause health check to return `Progressing` state instead of `Healthy
As workaround Argo CD allows providing [health check](operator-manual/health.md) customization which overrides default
behavior.
+If you are using Traefik for your Ingress, you can update the Traefik config to publish the loadBalancer IP using [publishedservice](https://doc.traefik.io/traefik/providers/kubernetes-ingress/#publishedservice), which will resolve this issue.
+
+```yaml
+providers:
+ kubernetesIngress:
+ publishedService:
+ enabled: true
+```
+
## I forgot the admin password, how do I reset it?
For Argo CD v1.8 and earlier, the initial password is set to the name of the server pod, as
@@ -88,7 +97,7 @@ data:
## After deploying my Helm application with Argo CD I cannot see it with `helm ls` and other Helm commands
-When deploying a Helm application Argo CD is using Helm
+When deploying a Helm application Argo CD is using Helm
only as a template mechanism. It runs `helm template` and
then deploys the resulting manifests on the cluster instead of doing `helm install`. This means that you cannot use any Helm command
to view/verify the application. It is fully managed by Argo CD.
@@ -131,15 +140,15 @@ Argo CD automatically sets the `app.kubernetes.io/instance` label and uses it to
If the tool does this too, this causes confusion. You can change this label by setting
the `application.instanceLabelKey` value in the `argocd-cm`. We recommend that you use `argocd.argoproj.io/instance`.
-!!! note
+!!! note
When you make this change your applications will become out of sync and will need re-syncing.
See [#1482](https://github.com/argoproj/argo-cd/issues/1482).
## How often does Argo CD check for changes to my Git or Helm repository ?
-The default polling interval is 3 minutes (180 seconds).
-You can change the setting by updating the `timeout.reconciliation` value in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.
+The default polling interval is 3 minutes (180 seconds) with a configurable jitter.
+You can change the setting by updating the `timeout.reconciliation` and `timeout.reconciliation.jitter` values in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set `timeout.reconciliation` to `0`, Argo CD will stop polling Git repositories automatically, and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.
## Why Are My Resource Limits `Out Of Sync`?
@@ -241,7 +250,7 @@ There are two parts to the message:
> map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something]
- You'll want to identify the keys that are duplicated -- you can focus on the first part, as each duplicated key will appear, once for each of its value with its value in the first list. The second list is really just
+ You'll want to identify the keys that are duplicated -- you can focus on the first part, as each duplicated key will appear, once for each of its value with its value in the first list. The second list is really just
`]`
@@ -250,7 +259,7 @@ There are two parts to the message:
This includes all of the keys. It's included for debugging purposes -- you don't need to pay much attention to it. It will give you a hint about the precise location in the list for the duplicated keys:
> map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**]
-
+
`]`
In this case, the duplicated keys have been **emphasized** to help you identify the problematic keys. Many editors have the ability to highlight all instances of a string, using such an editor can help with such problems.
diff --git a/docs/getting_started.md b/docs/getting_started.md
index d81bd08897ad8..1000206eaf972 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -22,12 +22,8 @@ This will create a new namespace, `argocd`, where Argo CD services and applicati
The installation manifests include `ClusterRoleBinding` resources that reference `argocd` namespace. If you are installing Argo CD into a different
namespace then make sure to update the namespace reference.
-If you are not interested in UI, SSO, multi-cluster features then you can install [core](operator-manual/installation.md#core) Argo CD components only:
-
-```bash
-kubectl create namespace argocd
-kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/core-install.yaml
-```
+!!! tip
+ If you are not interested in UI, SSO, and multi-cluster features, then you can install only the [core](operator-manual/core/#installing) Argo CD components.
This default installation will have a self-signed certificate and cannot be accessed without a bit of extra work.
Do one of:
diff --git a/docs/index.md b/docs/index.md
index 6315ced37efad..ddb17c2bdc36a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -53,7 +53,7 @@ meeting:
![Argo CD Architecture](assets/argocd_architecture.png)
-Argo CD is implemented as a kubernetes controller which continuously monitors running applications
+Argo CD is implemented as a Kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the Git repo).
A deployed application whose live state deviates from the target state is considered `OutOfSync`.
Argo CD reports & visualizes the differences, while providing facilities to automatically or
diff --git a/docs/operator-manual/app-any-namespace.md b/docs/operator-manual/app-any-namespace.md
index 21743b7bc003d..21bfa5c4f5a0b 100644
--- a/docs/operator-manual/app-any-namespace.md
+++ b/docs/operator-manual/app-any-namespace.md
@@ -15,7 +15,10 @@ Some manual steps will need to be performed by the Argo CD administrator in orde
!!! note
This feature is considered beta as of now. Some of the implementation details may change over the course of time until it is promoted to a stable status. We will be happy if early adopters use this feature and provide us with bug reports and feedback.
-
+
+
+One additional advantage of adopting applications in any namespace is that end-users can configure notifications for their Argo CD application in the namespace where the Argo CD application is running. See the notifications [namespace based configuration](notifications/index.md#namespace-based-configuration) page for more information.
+
## Prerequisites
### Cluster-scoped Argo CD installation
diff --git a/docs/operator-manual/application.yaml b/docs/operator-manual/application.yaml
index 75a0d3b0df8ae..864a293ce6890 100644
--- a/docs/operator-manual/application.yaml
+++ b/docs/operator-manual/application.yaml
@@ -119,7 +119,7 @@ spec:
extVars:
- name: foo
value: bar
- # You can use "code to determine if the value is either string (false, the default) or Jsonnet code (if code is true).
+ # You can use "code" to determine if the value is either string (false, the default) or Jsonnet code (if code is true).
- code: true
name: baz
value: "true"
@@ -189,6 +189,7 @@ spec:
- PrunePropagationPolicy=foreground # Supported policies are background, foreground and orphan.
- PruneLast=true # Allow the ability for resource pruning to happen as a final, implicit wave of a sync operation
- RespectIgnoreDifferences=true # When syncing changes, respect fields ignored by the ignoreDifferences configuration
+ - ApplyOutOfSyncOnly=true # Only sync out-of-sync resources, rather than applying every object in the application
managedNamespaceMetadata: # Sets the metadata for the application namespace. Only valid if CreateNamespace=true (see above), otherwise it's a no-op.
labels: # The labels to set on the application namespace
any: label
diff --git a/docs/operator-manual/applicationset.yaml b/docs/operator-manual/applicationset.yaml
index 65935802c674a..d05b08f1101a0 100644
--- a/docs/operator-manual/applicationset.yaml
+++ b/docs/operator-manual/applicationset.yaml
@@ -33,6 +33,6 @@ spec:
- jsonPointers:
- /spec/source/targetRevision
- name: some-app
- jqExpressions:
+ jqPathExpressions:
- .spec.source.helm.values
diff --git a/docs/operator-manual/applicationset/Appset-Any-Namespace.md b/docs/operator-manual/applicationset/Appset-Any-Namespace.md
index 61716414aeb69..bf3f8ffecfaf1 100644
--- a/docs/operator-manual/applicationset/Appset-Any-Namespace.md
+++ b/docs/operator-manual/applicationset/Appset-Any-Namespace.md
@@ -35,6 +35,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- scmProvider:
gitea:
@@ -137,17 +139,19 @@ metadata:
name: team-one-product-one
namespace: team-one-cd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
list:
- - id: infra
+ - name: infra
project: infra-project
- - id: team-two
+ - name: team-two
project: team-two-project
- template:
- metadata:
- name: '{{name}}-escalation'
- spec:
- project: "{{project}}"
+ template:
+ metadata:
+ name: '{{.name}}-escalation'
+ spec:
+ project: "{{.project}}"
```
### ApplicationSet names
diff --git a/docs/operator-manual/applicationset/Controlling-Resource-Modification.md b/docs/operator-manual/applicationset/Controlling-Resource-Modification.md
index 73f8a5a3eeb50..d72cee60ad401 100644
--- a/docs/operator-manual/applicationset/Controlling-Resource-Modification.md
+++ b/docs/operator-manual/applicationset/Controlling-Resource-Modification.md
@@ -6,7 +6,7 @@ These settings allow you to exert control over when, and how, changes are made t
Here are some of the controller settings that may be modified to alter the ApplicationSet controller's resource-handling behaviour.
-### Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
+## Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
To prevent the ApplicationSet controller from creating, modifying, or deleting any `Application` resources, you may enable `dry-run` mode. This essentially switches the controller into a "read only" mode, where the controller Reconcile loop will run, but no resources will be modified.
@@ -14,7 +14,7 @@ To enable dry-run, add `--dryrun true` to the ApplicationSet Deployment's contai
See 'How to modify ApplicationSet container parameters' below for detailed steps on how to add this parameter to the controller.
-### Managed Applications modification Policies
+## Managed Applications modification Policies
The ApplicationSet controller supports a parameter `--policy`, which is specified on launch (within the controller Deployment container), and which restricts what types of modifications will be made to managed Argo CD `Application` resources.
@@ -32,16 +32,14 @@ spec:
```
-- Policy `create-only`: Prevents ApplicationSet controller from modifying or deleting Applications.
-- Policy `create-update`: Prevents ApplicationSet controller from deleting Applications. Update is allowed.
+- Policy `create-only`: Prevents ApplicationSet controller from modifying or deleting Applications. Prevents Application controller from deleting Applications according to [ownerReferences](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/).
+- Policy `create-update`: Prevents ApplicationSet controller from deleting Applications. Update is allowed. Prevents Application controller from deleting Applications according to [ownerReferences](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/).
- Policy `create-delete`: Prevents ApplicationSet controller from modifying Applications. Delete is allowed.
- Policy `sync`: Update and Delete are allowed.
If the controller parameter `--policy` is set, it takes precedence over the field `applicationsSync`. A per-ApplicationSet sync policy can be allowed by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_POLICY_OVERRIDE`, the argocd-cmd-params-cm key `applicationsetcontroller.enable.policy.override`, or the controller parameter `--enable-policy-override` directly (defaults to `false`).
-This does not prevent deletion of Applications if the ApplicationSet is deleted
-
-#### Controller parameter
+### Controller parameter
To allow the ApplicationSet controller to *create* `Application` resources, but prevent any further modification, such as deletion, or modification of Application fields, add this parameter in the ApplicationSet controller:
```
@@ -59,7 +57,7 @@ spec:
applicationsSync: create-only
```
-### Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
+## Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
To allow the ApplicationSet controller to create or modify `Application` resources, but prevent Applications from being deleted, add the following parameter to the ApplicationSet controller `Deployment`:
```
@@ -79,7 +77,7 @@ spec:
applicationsSync: create-update
```
-### Ignore certain changes to Applications
+## Ignore certain changes to Applications
The ApplicationSet spec includes an `ignoreApplicationDifferences` field, which allows you to specify which fields of
the ApplicationSet should be ignored when comparing Applications.
@@ -98,11 +96,94 @@ spec:
- jsonPointers:
- /spec/source/targetRevision
- name: some-app
- jqExpressions:
+ jqPathExpressions:
- .spec.source.helm.values
```
-### Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
+### Allow temporarily toggling auto-sync
+
+One of the most common use cases for ignoring differences is to allow temporarily toggling auto-sync for an Application.
+
+For example, if you have an ApplicationSet that is configured to automatically sync Applications, you may want to temporarily
+disable auto-sync for a specific Application. You can do this by adding an ignore rule for the `spec.syncPolicy.automated` field.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+spec:
+ ignoreApplicationDifferences:
+ - jsonPointers:
+ - /spec/syncPolicy
+```
+
+### Limitations of `ignoreApplicationDifferences`
+
+When an ApplicationSet is reconciled, the controller will compare the ApplicationSet spec with the spec of each Application
+that it manages. If there are any differences, the controller will generate a patch to update the Application to match the
+ApplicationSet spec.
+
+The generated patch is a MergePatch. According to the MergePatch documentation, "existing lists will be completely
+replaced by new lists" when there is a change to the list.
+
+This limits the effectiveness of `ignoreApplicationDifferences` when the ignored field is in a list. For example, if you
+have an application with multiple sources, and you want to ignore changes to the `targetRevision` of one of the sources,
+changes in other fields or in other sources will cause the entire `sources` list to be replaced, and the `targetRevision`
+field will be reset to the value defined in the ApplicationSet.
+
+For example, consider this ApplicationSet:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+spec:
+ ignoreApplicationDifferences:
+ - jqPathExpressions:
+ - .spec.sources[] | select(.repoURL == "https://git.example.com/org/repo1").targetRevision
+ template:
+ spec:
+ sources:
+ - repoURL: https://git.example.com/org/repo1
+ targetRevision: main
+ - repoURL: https://git.example.com/org/repo2
+ targetRevision: main
+```
+
+You can freely change the `targetRevision` of the `repo1` source, and the ApplicationSet controller will not overwrite
+your change.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+spec:
+ sources:
+ - repoURL: https://git.example.com/org/repo1
+ targetRevision: fix/bug-123
+ - repoURL: https://git.example.com/org/repo2
+ targetRevision: main
+```
+
+However, if you change the `targetRevision` of the `repo2` source, the ApplicationSet controller will overwrite the entire
+`sources` field.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+spec:
+ sources:
+ - repoURL: https://git.example.com/org/repo1
+ targetRevision: main
+ - repoURL: https://git.example.com/org/repo2
+ targetRevision: main
+```
+
+!!! note
+ [Future improvements](https://github.com/argoproj/argo-cd/issues/15975) to the ApplicationSet controller may
+ eliminate this problem. For example, the `ref` field might be made a merge key, allowing the ApplicationSet
+ controller to generate and use a StrategicMergePatch instead of a MergePatch. You could then target a specific
+ source by `ref`, ignore changes to a field in that source, and changes to other sources would not cause the ignored
+ field to be overwritten.
+
+## Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
By default, when an `Application` resource is deleted by the ApplicationSet controller, all of the child resources of the Application will be deleted as well (such as all of the Application's `Deployments`, `Services`, etc.).
@@ -119,7 +200,7 @@ spec:
More information on the specific behaviour of `preserveResourcesOnDeletion`, and deletion in ApplicationSet controller and Argo CD in general, can be found on the [Application Deletion](Application-Deletion.md) page.
-### Prevent an Application's child resources from being modified
+## Prevent an Application's child resources from being modified
Changes made to the ApplicationSet will propagate to the Applications managed by the ApplicationSet, and then Argo CD will propagate the Application changes to the underlying cluster resources (as per [Argo CD Integration](Argo-CD-Integration.md)).
@@ -185,6 +266,11 @@ kubectl apply -n argocd -f install.yaml
## Preserving changes made to an Application's annotations and labels
+!!! note
+ The same behavior can be achieved on a per-app basis using the [`ignoreApplicationDifferences`](#ignore-certain-changes-to-applications)
+ feature described above. However, preserved fields may be configured globally, a feature that is not yet available
+ for `ignoreApplicationDifferences`.
+
It is common practice in Kubernetes to store state in annotations; operators will often make use of this. To allow for this, it is possible to configure a list of annotations that the ApplicationSet should preserve when reconciling.
For example, imagine that we have an Application created from an ApplicationSet, but a custom annotation and label has since been added (to the Application) that does not exist in the `ApplicationSet` resource:
@@ -220,3 +306,18 @@ By default, the Argo CD notifications and the Argo CD refresh type annotations a
!!!note
One can also set global preserved fields for the controller by passing a comma-separated list of annotations and labels to
`ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS` and `ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS` respectively.
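
For illustration, a hedged fragment of the ApplicationSet controller `Deployment` showing where these global preserved annotation/label variables could be set (the Deployment name assumes a default installation; the key names in `value` are illustrative):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: argocd-applicationset-controller
  namespace: argocd
spec:
  template:
    spec:
      containers:
        - name: argocd-applicationset-controller
          env:
            # Comma-separated lists of annotation and label keys to preserve globally.
            - name: ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS
              value: "my-annotation-key,my-other-annotation-key"
            - name: ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS
              value: "my-label-key"
```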
+
+## Debugging unexpected changes to Applications
+
+When the ApplicationSet controller makes a change to an application, it logs the patch at the debug level. To see these
+logs, set the log level to debug in the `argocd-cmd-params-cm` ConfigMap in the `argocd` namespace:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: argocd-cmd-params-cm
+ namespace: argocd
+data:
+ applicationsetcontroller.log.level: debug
+```
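+
+Once the controller picks up the new log level (changes to `argocd-cmd-params-cm` typically require a restart of the
+controller Pod), the patches can be followed in its logs. A minimal sketch, assuming the default install's
+`argocd-applicationset-controller` Deployment:
+
+```bash
+# Restart the ApplicationSet controller so it reads the new log level,
+# then follow its logs to see the generated Application patches.
+kubectl -n argocd rollout restart deployment argocd-applicationset-controller
+kubectl -n argocd logs deployment/argocd-applicationset-controller -f
+```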
diff --git a/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md b/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md
index 8f5bb491b8b44..95c60d95cd68c 100644
--- a/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md
+++ b/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md
@@ -1,6 +1,6 @@
# Cluster Decision Resource Generator
-The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator:
+The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced Kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator:
```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
@@ -8,6 +8,8 @@ metadata:
name: guestbook
namespace: argocd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- clusterDecisionResource:
# ConfigMap with GVK information for the duck type resource
@@ -26,7 +28,7 @@ spec:
requeueAfterSeconds: 60
template:
metadata:
- name: '{{name}}-guestbook'
+ name: '{{.name}}-guestbook'
spec:
project: "default"
source:
@@ -34,7 +36,7 @@ spec:
targetRevision: HEAD
path: guestbook
destination:
- server: '{{clusterName}}' # 'server' field of the secret
+ server: '{{.clusterName}}' # 'server' field of the secret
namespace: guestbook
```
The `quak` resource, referenced by the ApplicationSet `clusterDecisionResource` generator:
diff --git a/docs/operator-manual/applicationset/Generators-Cluster.md b/docs/operator-manual/applicationset/Generators-Cluster.md
index 92507645a4ffe..ca1a49aad295b 100644
--- a/docs/operator-manual/applicationset/Generators-Cluster.md
+++ b/docs/operator-manual/applicationset/Generators-Cluster.md
@@ -39,11 +39,13 @@ metadata:
name: guestbook
namespace: argocd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- clusters: {} # Automatically use all clusters defined within Argo CD
template:
metadata:
- name: '{{name}}-guestbook' # 'name' field of the Secret
+ name: '{{.name}}-guestbook' # 'name' field of the Secret
spec:
project: "my-project"
source:
@@ -51,7 +53,7 @@ spec:
targetRevision: HEAD
path: guestbook
destination:
- server: '{{server}}' # 'server' field of the secret
+ server: '{{.server}}' # 'server' field of the secret
namespace: guestbook
```
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/cluster).*)
@@ -67,6 +69,8 @@ metadata:
name: guestbook
namespace: argocd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -105,6 +109,8 @@ The cluster generator will automatically target both local and non-local cluster
If you wish to target only remote clusters with your Applications (e.g. you want to exclude the local cluster), then use a cluster selector with labels, for example:
```yaml
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -137,6 +143,8 @@ You may pass additional, arbitrary string key-value pairs via the `values` field
In this example, a `revision` parameter value is passed, based on matching labels on the cluster secret:
```yaml
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -154,16 +162,16 @@ spec:
revision: stable
template:
metadata:
- name: '{{name}}-guestbook'
+ name: '{{.name}}-guestbook'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
# The cluster values field for each generator will be substituted here:
- targetRevision: '{{values.revision}}'
+ targetRevision: '{{.values.revision}}'
path: guestbook
destination:
- server: '{{server}}'
+ server: '{{.server}}'
namespace: guestbook
```
@@ -184,6 +192,8 @@ Extending the example above, we could do something like this:
```yaml
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -192,8 +202,8 @@ spec:
# A key-value map for arbitrary parameters
values:
# If `my-custom-annotation` is in your cluster secret, `revision` will be substituted with it.
- revision: '{{metadata.annotations.my-custom-annotation}}'
- clusterName: '{{name}}'
+ revision: '{{index .metadata.annotations "my-custom-annotation"}}'
+ clusterName: '{{.name}}'
- clusters:
selector:
matchLabels:
@@ -201,19 +211,19 @@ spec:
values:
# production uses a different revision value, for 'stable' branch
revision: stable
- clusterName: '{{name}}'
+ clusterName: '{{.name}}'
template:
metadata:
- name: '{{name}}-guestbook'
+ name: '{{.name}}-guestbook'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
# The cluster values field for each generator will be substituted here:
- targetRevision: '{{values.revision}}'
+ targetRevision: '{{.values.revision}}'
path: guestbook
destination:
# In this case this is equivalent to just using {{name}}
- server: '{{values.clusterName}}'
+ server: '{{.values.clusterName}}'
namespace: guestbook
```
diff --git a/docs/operator-manual/applicationset/Generators-Git.md b/docs/operator-manual/applicationset/Generators-Git.md
index 1dcd85ea24b2a..7e4aa5fdb1c24 100644
--- a/docs/operator-manual/applicationset/Generators-Git.md
+++ b/docs/operator-manual/applicationset/Generators-Git.md
@@ -210,6 +210,8 @@ metadata:
name: cluster-addons
namespace: argocd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/example/example-repo.git
@@ -217,19 +219,19 @@ spec:
directories:
- path: '*'
values:
- cluster: '{{branch}}-{{path}}'
+ cluster: '{{.branch}}-{{.path.basename}}'
template:
metadata:
- name: '{{path.basename}}'
+ name: '{{.path.basename}}'
spec:
project: "my-project"
source:
repoURL: https://github.com/example/example-repo.git
targetRevision: HEAD
- path: '{{path}}'
+ path: '{{.path.path}}'
destination:
server: https://kubernetes.default.svc
- namespace: '{{values.cluster}}'
+ namespace: '{{.values.cluster}}'
```
!!! note
@@ -323,15 +325,15 @@ As with other generators, clusters *must* already be defined within Argo CD, in
In addition to the flattened key/value pairs from the configuration file, the following generator parameters are provided:
-- `{{path}}`: The path to the directory containing matching configuration file within the Git repository. Example: `/clusters/clusterA`, if the config file was `/clusters/clusterA/config.json`
-- `{{path[n]}}`: The path to the matching configuration file within the Git repository, split into array elements (`n` - array index). Example: `path[0]: clusters`, `path[1]: clusterA`
-- `{{path.basename}}`: Basename of the path to the directory containing the configuration file (e.g. `clusterA`, with the above example.)
-- `{{path.basenameNormalized}}`: This field is the same as `path.basename` with unsupported characters replaced with `-` (e.g. a `path` of `/directory/directory_2`, and `path.basename` of `directory_2` would produce `directory-2` here).
-- `{{path.filename}}`: The matched filename. e.g., `config.json` in the above example.
-- `{{path.filenameNormalized}}`: The matched filename with unsupported characters replaced with `-`.
+- `{{.path.path}}`: The path to the directory containing matching configuration file within the Git repository. Example: `/clusters/clusterA`, if the config file was `/clusters/clusterA/config.json`
+- `{{index .path n}}`: The path to the matching configuration file within the Git repository, split into array elements (`n` - array index). Example: `index .path 0: clusters`, `index .path 1: clusterA`
+- `{{.path.basename}}`: Basename of the path to the directory containing the configuration file (e.g. `clusterA`, with the above example.)
+- `{{.path.basenameNormalized}}`: This field is the same as `.path.basename` with unsupported characters replaced with `-` (e.g. a `path` of `/directory/directory_2`, and `.path.basename` of `directory_2` would produce `directory-2` here).
+- `{{.path.filename}}`: The matched filename. e.g., `config.json` in the above example.
+- `{{.path.filenameNormalized}}`: The matched filename with unsupported characters replaced with `-`.
-**Note**: The right-most *directory* name always becomes `{{path.basename}}`. For example, from `- path: /one/two/three/four/config.json`, `{{path.basename}}` will be `four`.
-The filename can always be accessed using `{{path.filename}}`.
+**Note**: The right-most *directory* name always becomes `{{.path.basename}}`. For example, from `- path: /one/two/three/four/config.json`, `{{.path.basename}}` will be `four`.
+The filename can always be accessed using `{{.path.filename}}`.
**Note**: If the `pathParamPrefix` option is specified, all `path`-related parameter names above will be prefixed with the specified value and a dot separator. E.g., if `pathParamPrefix` is `myRepo`, then the generated parameter name would be `myRepo.path` instead of `path`. Using this option is necessary in a Matrix generator where both child generators are Git generators (to avoid conflicts when merging the child generators’ items).
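
For example, a hedged sketch of a Git files generator using `pathParamPrefix` (the glob path is illustrative); with `goTemplate: true`, the prefixed parameters would then be referenced as `{{.myRepo.path.path}}`, `{{.myRepo.path.basename}}`, and so on:

```yaml
generators:
  - git:
      repoURL: https://github.com/example/example-repo.git
      revision: HEAD
      # Prefix all path-related parameters with "myRepo." to avoid clashes
      # with a second Git generator inside a Matrix generator.
      pathParamPrefix: myRepo
      files:
        - path: "clusters/**/config.json"
```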
@@ -349,6 +351,8 @@ metadata:
name: guestbook
namespace: argocd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
@@ -356,18 +360,18 @@ spec:
files:
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
values:
- base_dir: "{{path[0]}}/{{path[1]}}/{{path[2]}}"
+ base_dir: "{{index .path 0}}/{{index .path 1}}/{{index .path 2}}"
template:
metadata:
- name: '{{cluster.name}}-guestbook'
+ name: '{{.cluster.name}}-guestbook'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
- path: "{{values.base_dir}}/apps/guestbook"
+ path: "{{.values.base_dir}}/apps/guestbook"
destination:
- server: '{{cluster.address}}'
+ server: '{{.cluster.address}}'
namespace: guestbook
```
@@ -405,15 +409,15 @@ the contents of webhook payloads are considered untrusted, and will only result
application (a process which already occurs at three-minute intervals). If ApplicationSet is publicly
accessible, then configuring a webhook secret is recommended to prevent a DDoS attack.
-In the `argocd-secret` kubernetes secret, include the Git provider's webhook secret configured in step 1.
+In the `argocd-secret` Kubernetes secret, include the Git provider's webhook secret configured in step 1.
-Edit the Argo CD kubernetes secret:
+Edit the Argo CD Kubernetes secret:
```bash
kubectl edit secret argocd-secret -n argocd
```
-TIP: for ease of entering secrets, kubernetes supports inputting secrets in the `stringData` field,
+TIP: for ease of entering secrets, Kubernetes supports inputting secrets in the `stringData` field,
which saves you the trouble of base64-encoding the values and copying them to the `data` field.
Simply copy the shared webhook secret created in step 1, to the corresponding
GitHub/GitLab/BitBucket key under the `stringData` field:
diff --git a/docs/operator-manual/applicationset/Generators-List.md b/docs/operator-manual/applicationset/Generators-List.md
index a99229f858da4..e5696f37b9745 100644
--- a/docs/operator-manual/applicationset/Generators-List.md
+++ b/docs/operator-manual/applicationset/Generators-List.md
@@ -8,25 +8,26 @@ metadata:
name: guestbook
namespace: argocd
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
-# - cluster: engineering-prod
-# url: https://kubernetes.default.svc
-# foo: bar
+ - cluster: engineering-prod
+ url: https://kubernetes.default.svc
template:
metadata:
- name: '{{cluster}}-guestbook'
+ name: '{{.cluster}}-guestbook'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
- path: applicationset/examples/list-generator/guestbook/{{cluster}}
+ path: applicationset/examples/list-generator/guestbook/{{.cluster}}
destination:
- server: '{{url}}'
+ server: '{{.url}}'
namespace: guestbook
```
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/list-generator).*)
diff --git a/docs/operator-manual/applicationset/Generators-Matrix.md b/docs/operator-manual/applicationset/Generators-Matrix.md
index 6684cdc90f73b..0396b8c0e06d3 100644
--- a/docs/operator-manual/applicationset/Generators-Matrix.md
+++ b/docs/operator-manual/applicationset/Generators-Matrix.md
@@ -35,6 +35,8 @@ kind: ApplicationSet
metadata:
name: cluster-git
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
# matrix 'parent' generator
- matrix:
@@ -52,16 +54,16 @@ spec:
argocd.argoproj.io/secret-type: cluster
template:
metadata:
- name: '{{path.basename}}-{{name}}'
+ name: '{{.path.basename}}-{{.name}}'
spec:
- project: '{{metadata.labels.environment}}'
+ project: '{{index .metadata.labels "environment"}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
- path: '{{path}}'
+ path: '{{.path.path}}'
destination:
- server: '{{server}}'
- namespace: '{{path.basename}}'
+ server: '{{.server}}'
+ namespace: '{{.path.basename}}'
```
First, the Git directory generator will scan the Git repository, discovering directories under the specified path. It discovers the argo-workflows and prometheus-operator applications, and produces two corresponding sets of parameters:
@@ -117,6 +119,8 @@ kind: ApplicationSet
metadata:
name: cluster-git
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
# matrix 'parent' generator
- matrix:
@@ -132,10 +136,10 @@ spec:
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
- kubernetes.io/environment: '{{path.basename}}'
+ kubernetes.io/environment: '{{.path.basename}}'
template:
metadata:
- name: '{{name}}-guestbook'
+ name: '{{.name}}-guestbook'
spec:
project: default
source:
@@ -143,7 +147,7 @@ spec:
targetRevision: HEAD
path: "examples/git-generator-files-discovery/apps/guestbook"
destination:
- server: '{{server}}'
+ server: '{{.server}}'
namespace: guestbook
```
Here is the corresponding folder structure for the git repository used by the git-files generator:
@@ -162,8 +166,8 @@ Here is the corresponding folder structure for the git repository used by the gi
│ └── config.json
└── git-generator-files.yaml
```
-In the above example, the `{{path.basename}}` parameters produced by the git-files generator will resolve to `dev` and `prod`.
-In the 2nd child generator, the label selector with label `kubernetes.io/environment: {{path.basename}}` will resolve with the values produced by the first child generator's parameters (`kubernetes.io/environment: prod` and `kubernetes.io/environment: dev`).
+In the above example, the `{{.path.basename}}` parameters produced by the git-files generator will resolve to `dev` and `prod`.
+In the 2nd child generator, the label selector with label `kubernetes.io/environment: {{.path.basename}}` will resolve with the values produced by the first child generator's parameters (`kubernetes.io/environment: prod` and `kubernetes.io/environment: dev`).
So in the above example, clusters with the label `kubernetes.io/environment: prod` will have only prod-specific configuration (i.e. `prod/config.json`) applied to them, whereas clusters
with the label `kubernetes.io/environment: dev` will have only dev-specific configuration (i.e. `dev/config.json`).
@@ -262,6 +266,8 @@ kind: ApplicationSet
metadata:
name: two-gits-with-path-param-prefix
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- matrix:
generators:
@@ -280,7 +286,7 @@ spec:
repoURL: https://github.com/some-org/some-repo.git
revision: HEAD
files:
- - path: "targets/{{appName}}/*.json"
+ - path: "targets/{{.appName}}/*.json"
pathParamPrefix: target
template: {} # ...
```
@@ -390,7 +396,7 @@ For example, the below example would be invalid (cluster-generator must come aft
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
- kubernetes.io/environment: '{{path.basename}}' # {{path.basename}} is produced by git-files generator
+ kubernetes.io/environment: '{{.path.basename}}' # {{.path.basename}} is produced by git-files generator
# git generator, 'child' #2
- git:
repoURL: https://github.com/argoproj/applicationset.git
@@ -398,7 +404,7 @@ For example, the below example would be invalid (cluster-generator must come aft
files:
- path: "examples/git-generator-files-discovery/cluster-config/**/config.json"
-1. You cannot have both child generators consuming parameters from each another. In the example below, the cluster generator is consuming the `{{path.basename}}` parameter produced by the git-files generator, whereas the git-files generator is consuming the `{{name}}` parameter produced by the cluster generator. This will result in a circular dependency, which is invalid.
+1. You cannot have both child generators consuming parameters from each other. In the example below, the cluster generator is consuming the `{{.path.basename}}` parameter produced by the git-files generator, whereas the git-files generator is consuming the `{{.name}}` parameter produced by the cluster generator. This will result in a circular dependency, which is invalid.
- matrix:
generators:
@@ -407,13 +413,13 @@ For example, the below example would be invalid (cluster-generator must come aft
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
- kubernetes.io/environment: '{{path.basename}}' # {{path.basename}} is produced by git-files generator
+ kubernetes.io/environment: '{{.path.basename}}' # {{.path.basename}} is produced by git-files generator
# git generator, 'child' #2
- git:
repoURL: https://github.com/argoproj/applicationset.git
revision: HEAD
files:
- - path: "examples/git-generator-files-discovery/cluster-config/engineering/{{name}}**/config.json" # {{name}} is produced by cluster generator
+ - path: "examples/git-generator-files-discovery/cluster-config/engineering/{{.name}}**/config.json" # {{.name}} is produced by cluster generator
1. When using a Matrix generator nested inside another Matrix or Merge generator, [Post Selectors](Generators-Post-Selector.md) for this nested generator's generators will only be applied when enabled via `spec.applyNestedSelectors`. You may also need to enable this even if your Post Selectors are not within the nested matrix or Merge generator, but are instead a sibling of a nested Matrix or Merge generator.
diff --git a/docs/operator-manual/applicationset/Generators-Merge.md b/docs/operator-manual/applicationset/Generators-Merge.md
index 50da174cf349a..b2ccfe86fb66d 100644
--- a/docs/operator-manual/applicationset/Generators-Merge.md
+++ b/docs/operator-manual/applicationset/Generators-Merge.md
@@ -17,6 +17,8 @@ kind: ApplicationSet
metadata:
name: cluster-git
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
# merge 'parent' generator
- merge:
@@ -41,9 +43,9 @@ spec:
values.redis: 'true'
template:
metadata:
- name: '{{name}}'
+ name: '{{.name}}'
spec:
- project: '{{metadata.labels.environment}}'
+ project: '{{index .metadata.labels "environment"}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
@@ -51,11 +53,11 @@ spec:
helm:
parameters:
- name: kafka
- value: '{{values.kafka}}'
+ value: '{{.values.kafka}}'
- name: redis
- value: '{{values.redis}}'
+ value: '{{.values.redis}}'
destination:
- server: '{{server}}'
+ server: '{{.server}}'
namespace: default
```
@@ -122,6 +124,8 @@ kind: ApplicationSet
metadata:
name: cluster-git
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
# merge 'parent' generator:
# Use the selector set by both child generators to combine them.
@@ -135,7 +139,7 @@ spec:
# Set the selector to this location.
- clusters:
values:
- selector: '{{ metadata.labels.location }}'
+ selector: '{{index .metadata.labels "location"}}'
# The git repo may have different directories which correspond to the
# cluster locations, using these as a selector.
- git:
@@ -144,19 +148,19 @@ spec:
directories:
- path: '*'
values:
- selector: '{{ path }}'
+ selector: '{{.path.path}}'
template:
metadata:
- name: '{{name}}'
+ name: '{{.name}}'
spec:
- project: '{{metadata.labels.environment}}'
+ project: '{{index .metadata.labels "environment"}}'
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
# The cluster values field for each generator will be substituted here:
targetRevision: HEAD
- path: '{{path}}'
+ path: '{{.path.path}}'
destination:
- server: '{{server}}'
+ server: '{{.server}}'
namespace: default
```
diff --git a/docs/operator-manual/applicationset/Generators-Plugin.md b/docs/operator-manual/applicationset/Generators-Plugin.md
index 3747c38865df5..d0888b9949b8e 100644
--- a/docs/operator-manual/applicationset/Generators-Plugin.md
+++ b/docs/operator-manual/applicationset/Generators-Plugin.md
@@ -22,6 +22,8 @@ kind: ApplicationSet
metadata:
name: myplugin
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- plugin:
# Specify the configMap where the plugin configuration is located.
@@ -51,10 +53,10 @@ spec:
metadata:
name: myplugin
annotations:
- example.from.input.parameters: "{{ generator.input.parameters.map.key1 }}"
- example.from.values: "{{ values.value1 }}"
+ example.from.input.parameters: "{{ index .generator.input.parameters.map "key1" }}"
+ example.from.values: "{{ .values.value1 }}"
# The plugin determines what else it produces.
- example.from.plugin.output: "{{ something.from.the.plugin }}"
+ example.from.plugin.output: "{{ .something.from.the.plugin }}"
```
- `configMapRef.name`: A `ConfigMap` name containing the plugin configuration to use for RPC call.
@@ -230,6 +232,7 @@ metadata:
name: fb-matrix
spec:
goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- matrix:
generators:
diff --git a/docs/operator-manual/applicationset/Generators-Post-Selector.md b/docs/operator-manual/applicationset/Generators-Post-Selector.md
index d8570859084ff..896e89e267d7c 100644
--- a/docs/operator-manual/applicationset/Generators-Post-Selector.md
+++ b/docs/operator-manual/applicationset/Generators-Post-Selector.md
@@ -1,6 +1,6 @@
# Post Selector all generators
-The Selector allows to post-filter based on generated values using the kubernetes common labelSelector format. In the example, the list generator generates a set of two application which then filter by the key value to only select the `env` with value `staging`:
+The Selector allows post-filtering based on generated values using the common Kubernetes labelSelector format. In the example, the list generator generates a set of two applications, which are then filtered by key/value so that only the element with `env` set to `staging` is selected:
## Example: List generator + Post Selector
```yaml
@@ -9,6 +9,8 @@ kind: ApplicationSet
metadata:
name: guestbook
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:
@@ -23,15 +25,15 @@ spec:
env: staging
template:
metadata:
- name: '{{cluster}}-guestbook'
+ name: '{{.cluster}}-guestbook'
spec:
project: default
source:
repoURL: https://github.com/argoproj-labs/applicationset.git
targetRevision: HEAD
- path: examples/list-generator/guestbook/{{cluster}}
+ path: examples/list-generator/guestbook/{{.cluster}}
destination:
- server: '{{url}}'
+ server: '{{.url}}'
namespace: guestbook
```
diff --git a/docs/operator-manual/applicationset/Generators-Pull-Request.md b/docs/operator-manual/applicationset/Generators-Pull-Request.md
index 298e5135392ce..e54fc385d7d28 100644
--- a/docs/operator-manual/applicationset/Generators-Pull-Request.md
+++ b/docs/operator-manual/applicationset/Generators-Pull-Request.md
@@ -8,6 +8,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
# When using a Pull Request generator, the ApplicationSet controller polls every `requeueAfterSeconds` interval (defaulting to every 30 minutes) to detect changes.
@@ -33,6 +35,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
github:
@@ -75,6 +79,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
gitlab:
@@ -117,6 +123,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
gitea:
@@ -153,6 +161,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
bitbucketServer:
@@ -195,6 +205,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
bitbucket:
@@ -251,6 +263,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
azuredevops:
@@ -292,6 +306,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
# ...
@@ -319,21 +335,23 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
# ...
template:
metadata:
- name: 'myapp-{{branch}}-{{number}}'
+ name: 'myapp-{{.branch}}-{{.number}}'
spec:
source:
repoURL: 'https://github.com/myorg/myrepo.git'
- targetRevision: '{{head_sha}}'
+ targetRevision: '{{.head_sha}}'
path: kubernetes/
helm:
parameters:
- name: "image.tag"
- value: "pull-{{head_sha}}"
+ value: "pull-{{.head_sha}}"
project: "my-project"
destination:
server: https://kubernetes.default.svc
@@ -348,23 +366,25 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
# ...
template:
metadata:
- name: 'myapp-{{branch}}-{{number}}'
+ name: 'myapp-{{.branch}}-{{.number}}'
spec:
source:
repoURL: 'https://github.com/myorg/myrepo.git'
- targetRevision: '{{head_sha}}'
+ targetRevision: '{{.head_sha}}'
path: kubernetes/
kustomize:
- nameSuffix: {{branch}}
+ nameSuffix: '{{.branch}}'
commonLabels:
- app.kubernetes.io/instance: {{branch}}-{{number}}
+ app.kubernetes.io/instance: '{{.branch}}-{{.number}}'
images:
- - ghcr.io/myorg/myrepo:{{head_sha}}
+ - 'ghcr.io/myorg/myrepo:{{.head_sha}}'
project: "my-project"
destination:
server: https://kubernetes.default.svc
diff --git a/docs/operator-manual/applicationset/Generators-SCM-Provider.md b/docs/operator-manual/applicationset/Generators-SCM-Provider.md
index 5e3c4a6ab8aa4..40c8e552fe573 100644
--- a/docs/operator-manual/applicationset/Generators-SCM-Provider.md
+++ b/docs/operator-manual/applicationset/Generators-SCM-Provider.md
@@ -111,7 +111,7 @@ spec:
* `tokenRef`: A `Secret` name and key containing the GitLab access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories.
* `insecure`: By default (false) - Skip checking the validity of the SCM's certificate - useful for self-signed TLS certificates.
-For label filtering, the repository tags are used.
+For label filtering, the repository topics are used.
Available clone protocols are `ssh` and `https`.
@@ -395,16 +395,18 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- scmProvider:
# ...
template:
metadata:
- name: '{{ repository }}'
+ name: '{{ .repository }}'
spec:
source:
- repoURL: '{{ url }}'
- targetRevision: '{{ branch }}'
+ repoURL: '{{ .url }}'
+ targetRevision: '{{ .branch }}'
path: kubernetes/
project: default
destination:
@@ -433,6 +435,8 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- scmProvider:
bitbucketServer:
@@ -445,15 +449,15 @@ spec:
secretName: mypassword
key: password
values:
- name: "{{organization}}-{{repository}}"
+ name: "{{.organization}}-{{.repository}}"
template:
metadata:
- name: '{{ values.name }}'
+ name: '{{ .values.name }}'
spec:
source:
- repoURL: '{{ url }}'
- targetRevision: '{{ branch }}'
+ repoURL: '{{ .url }}'
+ targetRevision: '{{ .branch }}'
path: kubernetes/
project: default
destination:
diff --git a/docs/operator-manual/applicationset/GoTemplate.md b/docs/operator-manual/applicationset/GoTemplate.md
index 08c1f3feb035a..1d62eeea9f93a 100644
--- a/docs/operator-manual/applicationset/GoTemplate.md
+++ b/docs/operator-manual/applicationset/GoTemplate.md
@@ -12,6 +12,29 @@ An additional `normalize` function makes any string parameter usable as a valid
with hyphens and truncating at 253 characters. This is useful when making parameters safe for things like Application
names.
+Another function, `slugify`, has been added. By default, it sanitizes the input and smart-truncates it (it doesn't cut a word in two). This function accepts the following arguments:
+- The first argument (if provided) is an integer specifying the maximum length of the slug.
+- The second argument (if provided) is a boolean indicating whether smart truncation is enabled.
+- The last argument (if provided) is the input name that needs to be slugified.
+
+#### Usage example
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+ name: test-appset
+spec:
+ ...
+ template:
+ metadata:
+ name: 'hellos3-{{.name}}-{{ cat .branch | slugify 23 }}'
+ annotations:
+ label-1: '{{ cat .branch | slugify }}'
+ label-2: '{{ cat .branch | slugify 23 }}'
+ label-3: '{{ cat .branch | slugify 50 false }}'
+```
+
If you want to customize [options defined by text/template](https://pkg.go.dev/text/template#Template.Option), you can
add the `goTemplateOptions: ["opt1", "opt2", ...]` key to your ApplicationSet next to `goTemplate: true`. Note that at
the time of writing, there is only one useful option defined, which is `missingkey=error`.
@@ -183,6 +206,8 @@ ApplicationSet controller provides:
1. contains no more than 253 characters
2. contains only lowercase alphanumeric characters, '-' or '.'
3. starts and ends with an alphanumeric character
+
+- `slugify`: sanitizes like `normalize` and smart-truncates (it doesn't cut a word in two), as described in the [introduction](#introduction) section.
- `toYaml` / `fromYaml` / `fromYamlArray` Helm-like functions
diff --git a/docs/operator-manual/applicationset/Progressive-Syncs.md b/docs/operator-manual/applicationset/Progressive-Syncs.md
index 8864151e9dcb7..edfe0dad101f2 100644
--- a/docs/operator-manual/applicationset/Progressive-Syncs.md
+++ b/docs/operator-manual/applicationset/Progressive-Syncs.md
@@ -52,8 +52,7 @@ Once a change is pushed, the following will happen in order.
* The rollout will wait for all `env-qa` Applications to be manually synced via the `argocd` CLI or by clicking the Sync button in the UI.
* 10% of all `env-prod` Applications will be updated at a time until all `env-prod` Applications have been updated.
-```
----
+```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
diff --git a/docs/operator-manual/applicationset/Template.md b/docs/operator-manual/applicationset/Template.md
index f66a403586bbd..9a7cd574453b4 100644
--- a/docs/operator-manual/applicationset/Template.md
+++ b/docs/operator-manual/applicationset/Template.md
@@ -108,3 +108,71 @@ spec:
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/template-override).*)
In this example, the ApplicationSet controller will generate an `Application` resource using the `path` generated by the List generator, rather than the `path` value defined in `.spec.template`.
+
+## Template Patch
+
+Templating is only available for string fields. However, some use cases may require applying templating to other types.
+
+Example:
+
+- Conditionally set the automated sync policy.
+- Conditionally switch prune boolean to `true`.
+- Add multiple helm value files from a list.
+
+The `templatePatch` feature enables advanced templating, with support for `json` and `yaml`.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+ name: guestbook
+spec:
+ goTemplate: true
+ generators:
+ - list:
+ elements:
+ - cluster: engineering-dev
+ url: https://kubernetes.default.svc
+ autoSync: true
+ prune: true
+ valueFiles:
+ - values.large.yaml
+ - values.debug.yaml
+ template:
+ metadata:
+ name: '{{.cluster}}-deployment'
+ spec:
+ project: "default"
+ source:
+ repoURL: https://github.com/infra-team/cluster-deployments.git
+ targetRevision: HEAD
+ path: guestbook/{{ .cluster }}
+ destination:
+ server: '{{.url}}'
+ namespace: guestbook
+ templatePatch: |
+ spec:
+ source:
+ helm:
+ valueFiles:
+ {{- range $valueFile := .valueFiles }}
+ - {{ $valueFile }}
+ {{- end }}
+ {{- if .autoSync }}
+ syncPolicy:
+ automated:
+ prune: {{ .prune }}
+ {{- end }}
+```
+
+!!! important
+ The `templatePatch` can apply arbitrary changes to the template. If parameters include untrustworthy user input, it
+ may be possible to inject malicious changes into the template. It is recommended to use `templatePatch` only with
+ trusted input or to carefully escape the input before using it in the template. Piping input to `toJson` should help
+ prevent, for example, a user from successfully injecting a string with newlines.
+
+ The `spec.project` field is not supported in `templatePatch`. If you need to change the project, you can use the
+ `spec.project` field in the `template` field.
+
+!!! important
+ When writing a `templatePatch`, you're crafting a patch. So, if the patch includes an empty `spec: # nothing in here`, it will effectively clear out existing fields. See [#17040](https://github.com/argoproj/argo-cd/issues/17040) for an example of this behavior.
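+
+As noted above, piping untrusted parameters through `toJson` renders them as quoted JSON strings so that, for example,
+embedded newlines cannot change the structure of the patch. A hypothetical sketch (the `description` parameter is
+illustrative):
+
+```yaml
+  templatePatch: |
+    metadata:
+      annotations:
+        # toJson quotes and escapes the value, preventing YAML injection.
+        example.com/description: {{ .description | toJson }}
+```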
diff --git a/docs/operator-manual/applicationset/Use-Cases.md b/docs/operator-manual/applicationset/Use-Cases.md
index 0e9c65d3963ee..a13c6598072ca 100644
--- a/docs/operator-manual/applicationset/Use-Cases.md
+++ b/docs/operator-manual/applicationset/Use-Cases.md
@@ -68,10 +68,26 @@ Thus in the self-service use case, administrators desire to only allow some fiel
Fortunately, the ApplicationSet controller presents an alternative solution to this use case: cluster administrators may safely create an `ApplicationSet` resource containing a Git generator that restricts deployment of application resources to fixed values with the `template` field, while allowing customization of 'safe' fields by developers, at will.
+The `config.json` files contain information describing the app.
+
+```json
+{
+ (...)
+ "app": {
+ "source": "https://github.com/argoproj/argo-cd",
+ "revision": "HEAD",
+ "path": "applicationset/examples/git-generator-files-discovery/apps/guestbook"
+ }
+ (...)
+}
+```
+
```yaml
kind: ApplicationSet
# (...)
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
@@ -82,9 +98,9 @@ spec:
project: dev-team-one # project is restricted
source:
# developers may customize app details using JSON files from above repo URL
- repoURL: {{app.source}}
- targetRevision: {{app.revision}}
- path: {{app.path}}
+ repoURL: {{.app.source}}
+ targetRevision: {{.app.revision}}
+ path: {{.app.path}}
destination:
name: production-cluster # cluster is restricted
namespace: dev-team-one # namespace is restricted
diff --git a/docs/operator-manual/applicationset/index.md b/docs/operator-manual/applicationset/index.md
index 1fe83fb2a0952..ea7c0f3deaf5d 100644
--- a/docs/operator-manual/applicationset/index.md
+++ b/docs/operator-manual/applicationset/index.md
@@ -27,6 +27,8 @@ kind: ApplicationSet
metadata:
name: guestbook
spec:
+ goTemplate: true
+ goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:
@@ -38,15 +40,15 @@ spec:
url: https://9.8.7.6
template:
metadata:
- name: '{{cluster}}-guestbook'
+ name: '{{.cluster}}-guestbook'
spec:
project: my-project
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
- path: guestbook/{{cluster}}
+ path: guestbook/{{.cluster}}
destination:
- server: '{{url}}'
+ server: '{{.url}}'
namespace: guestbook
```
diff --git a/docs/operator-manual/argocd-cm.yaml b/docs/operator-manual/argocd-cm.yaml
index 5e4ed095be56d..4355354d2faef 100644
--- a/docs/operator-manual/argocd-cm.yaml
+++ b/docs/operator-manual/argocd-cm.yaml
@@ -308,14 +308,22 @@ data:
# have either a permanent banner or a regular closeable banner, and NOT both. eg. A user can't dismiss a
# notification message (closeable) banner, to then immediately see a permanent banner.
# ui.bannerpermanent: "true"
- # An option to specify the position of the banner, either the top or bottom of the page. The default is at the top.
- # Uncomment to make the banner appear at the bottom of the page. Any value other than "bottom" will make the banner appear at the top.
+ # An option to specify the position of the banner, either the top or bottom of the page, or both. The valid values
+  # are: "top", "bottom" and "both". The default (if the option is not provided) is "top". If "both" is specified, then
+ # the content appears both at the top and the bottom of the page. Uncomment the following line to make the banner appear
+ # at the bottom of the page. Change the value as needed.
# ui.bannerposition: "bottom"
# Application reconciliation timeout is the max amount of time required to discover if a new manifests version got
# published to the repository. Reconciliation by timeout is disabled if timeout is set to 0. Three minutes by default.
# > Note: argocd-repo-server deployment must be manually restarted after changing the setting.
timeout.reconciliation: 180s
+ # With a large number of applications, the periodic refresh for each application can cause a spike in the refresh queue
+ # and can cause a spike in the repo-server component. To avoid this, you can set a jitter to the sync timeout, which will
+ # spread out the refreshes and give time to the repo-server to catch up. The jitter is the maximum duration that can be
+ # added to the sync timeout. So, if the sync timeout is 3 minutes and the jitter is 1 minute, then the actual timeout will
+ # be between 3 and 4 minutes. Disabled when the value is 0, defaults to 0.
+ timeout.reconciliation.jitter: 0
# cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
cluster.inClusterEnabled: "true"
diff --git a/docs/operator-manual/argocd-cmd-params-cm.yaml b/docs/operator-manual/argocd-cmd-params-cm.yaml
index 7d38506d0b7ec..3cb79d85f3150 100644
--- a/docs/operator-manual/argocd-cmd-params-cm.yaml
+++ b/docs/operator-manual/argocd-cmd-params-cm.yaml
@@ -17,7 +17,11 @@ data:
redis.db:
# Open-Telemetry collector address: (e.g. "otel-collector:4317")
- otlp.address:
+ otlp.address: ""
+ # Open-Telemetry collector insecure: (e.g. "true")
+ otlp.insecure: "true"
+ # Open-Telemetry collector headers: (e.g. "key1=value1,key2=value2")
+ otlp.headers: ""
# List of additional namespaces where applications may be created in and
# reconciled from. The namespace where Argo CD is installed to will always
@@ -58,6 +62,16 @@ data:
controller.sharding.algorithm: legacy
# Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.
controller.kubectl.parallelism.limit: "20"
+ # The maximum number of retries for each request
+ controller.k8sclient.retry.max: "0"
+ # The initial backoff delay on the first retry attempt in ms. Subsequent retries will double this backoff time up to a maximum threshold
+ controller.k8sclient.retry.base.backoff: "100"
+ # Grace period in seconds for ignoring consecutive errors while communicating with repo server.
+ controller.repo.error.grace.period.seconds: "180"
+ # Enables the server side diff feature at the application controller level.
+ # Diff calculation will be done by running a server side apply dryrun (when
+ # diff cache is unavailable).
+ controller.diff.server.side: "false"
## Server properties
# Listen on given address for incoming connections (default "0.0.0.0")
@@ -72,6 +86,13 @@ data:
server.rootpath: ""
# Directory path that contains additional static assets
server.staticassets: "/shared/app"
+ # The maximum number of retries for each request
+ server.k8sclient.retry.max: "0"
+ # The initial backoff delay on the first retry attempt in ms. Subsequent retries will double this backoff time up to a maximum threshold
+ server.k8sclient.retry.base.backoff: "100"
+ # Semicolon-separated list of content types allowed on non-GET requests. Set an empty string to allow all. Be aware
+ # that allowing content types besides application/json may make your API more vulnerable to CSRF attacks.
+ server.api.content.types: "application/json"
# Set the logging format. One of: text|json (default "text")
server.log.format: "text"
@@ -154,6 +175,10 @@ data:
reposerver.streamed.manifest.max.extracted.size: "1G"
# Enable git submodule support
reposerver.enable.git.submodule: "true"
+ # Number of concurrent git ls-remote requests. Any value less than 1 means no limit.
+ reposerver.git.lsremote.parallelism.limit: "0"
+ # Git requests timeout.
+ reposerver.git.request.timeout: "15s"
# Disable TLS on the HTTP endpoint
dexserver.disable.tls: "false"
@@ -192,3 +217,5 @@ data:
notificationscontroller.log.level: "info"
# Set the logging format. One of: text|json (default "text")
notificationscontroller.log.format: "text"
+ # Enable self-service notifications config. Used in conjunction with apps-in-any-namespace. (default "false")
+ notificationscontroller.selfservice.enabled: "false"
diff --git a/docs/operator-manual/cluster-management.md b/docs/operator-manual/cluster-management.md
new file mode 100644
index 0000000000000..bd0d28e08dba7
--- /dev/null
+++ b/docs/operator-manual/cluster-management.md
@@ -0,0 +1,23 @@
+# Cluster Management
+
+This guide is for operators looking to manage clusters on the CLI. If you want to use Kubernetes resources for this, check out [Declarative Setup](./declarative-setup.md#clusters).
+
+Not all commands are described here, see the [argocd cluster Command Reference](../user-guide/commands/argocd_cluster.md) for all available commands.
+
+## Adding a cluster
+
+Run `argocd cluster add context-name`.
+
+If you're unsure about the context names, run `kubectl config get-contexts` to get them all listed.
+
+This will connect to the cluster and install the necessary resources for Argo CD to connect to it.
+Note that you will need privileged access to the cluster.
+
+## Removing a cluster
+
+Run `argocd cluster rm context-name`.
+
+This removes the cluster with the specified name.
+
+!!!note "in-cluster cannot be removed"
+    The `in-cluster` cluster cannot be removed with this command. If you want to disable the `in-cluster` configuration, you need to update your `argocd-cm` ConfigMap. Set [`cluster.inClusterEnabled`](./argocd-cm-yaml.md) to `"false"`.
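+
+For reference, a minimal sketch of that ConfigMap change (assuming the default `argocd` namespace):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+  namespace: argocd
+data:
+  # Disable the default in-cluster destination.
+  cluster.inClusterEnabled: "false"
+```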
diff --git a/docs/operator-manual/config-management-plugins.md b/docs/operator-manual/config-management-plugins.md
index ee805b71cd604..7c86075ff2f7f 100644
--- a/docs/operator-manual/config-management-plugins.md
+++ b/docs/operator-manual/config-management-plugins.md
@@ -34,6 +34,8 @@ metadata:
# The name of the plugin must be unique within a given Argo CD instance.
name: my-plugin
spec:
+ # The version of your plugin. Optional. If specified, the Application's spec.source.plugin.name field
+  # must be <plugin name>-<plugin version>.
version: v1.0
# The init command runs in the Application source directory at the beginning of each manifest generation. The init
# command can output anything. A non-zero status code will fail manifest generation.
@@ -44,6 +46,7 @@ spec:
args: [-c, 'echo "Initializing..."']
# The generate command runs in the Application source directory each time manifests are generated. Standard output
# must be ONLY valid Kubernetes Objects in either YAML or JSON. A non-zero exit code will fail manifest generation.
+  # To write log messages from the command, write them to stderr; they will always be displayed.
# Error output will be sent to the UI, so avoid printing sensitive information (such as secrets).
generate:
command: [sh, -c]
@@ -107,9 +110,9 @@ spec:
# static parameter announcements list.
command: [echo, '[{"name": "example-param", "string": "default-string-value"}]']
- # If set to `true` then the plugin receives repository files with original file mode. Dangerous since the repository
- # might have executable files. Set to true only if you trust the CMP plugin authors.
- preserveFileMode: false
+ # If set to `true` then the plugin receives repository files with original file mode. Dangerous since the repository
+ # might have executable files. Set to true only if you trust the CMP plugin authors.
+ preserveFileMode: false
```
!!! note
@@ -333,6 +336,7 @@ If you are actively developing a sidecar-installed CMP, keep a few things in min
3. CMP errors are cached by the repo-server in Redis. Restarting the repo-server Pod will not clear the cache. Always
do a "Hard Refresh" when actively developing a CMP so you have the latest output.
4. Verify your sidecar has started properly by viewing the Pod and seeing that two containers are running `kubectl get pod -l app.kubernetes.io/component=repo-server -n argocd`
+5. Write log messages to stderr and set the `--loglevel=info` flag in the sidecar. This will print everything written to stderr, even on successful command execution (see the sketch below).
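+
+A hedged sketch of passing that flag to a CMP sidecar container (the `argocd-cmp-server` entrypoint is the conventional
+one for sidecar plugins; adjust the container name and the rest of the spec to your setup):
+
+```yaml
+containers:
+  - name: my-plugin
+    command: [/var/run/argocd/argocd-cmp-server, --loglevel, info]
+    # image, securityContext, and volumeMounts as in your existing sidecar definition
+```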
### Other Common Errors
diff --git a/docs/operator-manual/custom-styles.md b/docs/operator-manual/custom-styles.md
index 8f2499a2d636a..6f68d5e23b128 100644
--- a/docs/operator-manual/custom-styles.md
+++ b/docs/operator-manual/custom-styles.md
@@ -21,7 +21,7 @@ metadata:
...
name: argocd-cm
data:
- ui.cssurl: "https://www.myhost.com/my-styles.css"
+ ui.cssurl: "https://www.example.com/my-styles.css"
```
## Adding Styles Via Volume Mounts
@@ -100,7 +100,7 @@ experience, you may wish to build a separate project using the [Argo CD UI dev s
## Banners
-Argo CD can optionally display a banner that can be used to notify your users of upcoming maintenance and operational changes. This feature can be enabled by specifying the banner message using the `ui.bannercontent` field in the `argocd-cm` ConfigMap and Argo CD will display this message at the top of every UI page. You can optionally add a link to this message by setting `ui.bannerurl`. You can also make the banner sticky (permanent) by setting `ui.bannerpermanent` to `true` and change it's position to the bottom by using `ui.bannerposition: "bottom"`
+Argo CD can optionally display a banner that can be used to notify your users of upcoming maintenance and operational changes. This feature can be enabled by specifying the banner message using the `ui.bannercontent` field in the `argocd-cm` ConfigMap, and Argo CD will display this message at the top of every UI page. You can optionally add a link to this message by setting `ui.bannerurl`. You can also make the banner sticky (permanent) by setting `ui.bannerpermanent` to true, and change its position with `ui.bannerposition: "both"` (display the banner at both the top and the bottom of the page) or `ui.bannerposition: "bottom"` (display it exclusively at the bottom).
### argocd-cm
```yaml
diff --git a/docs/operator-manual/declarative-setup.md b/docs/operator-manual/declarative-setup.md
index 5353f70cf14ef..4d87ae9f80286 100644
--- a/docs/operator-manual/declarative-setup.md
+++ b/docs/operator-manual/declarative-setup.md
@@ -266,7 +266,7 @@ metadata:
argocd.argoproj.io/secret-type: repository
stringData:
type: git
- repo: https://source.developers.google.com/p/my-google-project/r/my-repo
+ url: https://source.developers.google.com/p/my-google-project/r/my-repo
gcpServiceAccountKey: |
{
"type": "service_account",
@@ -490,7 +490,7 @@ stringData:
### Legacy behaviour
-In Argo CD version 2.0 and earlier, repositories where stored as part of the `argocd-cm` config map. For
+In Argo CD version 2.0 and earlier, repositories were stored as part of the `argocd-cm` config map. For
backward-compatibility, Argo CD will still honor repositories in the config map, but this style of repository
configuration is deprecated and support for it will be removed in a future version.
@@ -549,6 +549,7 @@ bearerToken: string
awsAuthConfig:
clusterName: string
roleARN: string
+ profile: string
# Configure external command to supply client credentials
# See https://godoc.org/k8s.io/client-go/tools/clientcmd/api#ExecConfig
execProviderConfig:
@@ -590,8 +591,8 @@ metadata:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
- name: mycluster.com
- server: https://mycluster.com
+ name: mycluster.example.com
+ server: https://mycluster.example.com
config: |
{
"bearerToken": "",
@@ -615,8 +616,8 @@ metadata:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
- name: "mycluster.com"
- server: "https://mycluster.com"
+ name: "mycluster.example.com"
+ server: "https://mycluster.example.com"
config: |
{
"awsAuthConfig": {
@@ -676,8 +677,10 @@ extended to allow assumption of multiple roles, either as an explicit array of r
}
```
-Example service account configs for `argocd-application-controller` and `argocd-server`. Note that once the annotations
-have been set on the service accounts, both the application controller and server pods need to be restarted.
+Example service account configs for `argocd-application-controller` and `argocd-server`.
+
+!!! warning
+ Once the annotations have been set on the service accounts, both the application controller and server pods need to be restarted.
```yaml
apiVersion: v1
@@ -742,8 +745,8 @@ metadata:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
- name: mycluster.com
- server: https://mycluster.com
+ name: mycluster.example.com
+ server: https://mycluster.example.com
config: |
{
"execProviderConfig": {
@@ -795,8 +798,8 @@ metadata:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
- name: mycluster.com
- server: https://mycluster.com
+ name: mycluster.example.com
+ server: https://mycluster.example.com
config: |
{
"execProviderConfig": {
@@ -830,8 +833,8 @@ metadata:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
- name: mycluster.com
- server: https://mycluster.com
+ name: mycluster.example.com
+ server: https://mycluster.example.com
config: |
{
"execProviderConfig": {
diff --git a/docs/operator-manual/dynamic-cluster-distribution.md b/docs/operator-manual/dynamic-cluster-distribution.md
index a32258c3f2f0a..9d5d2104a1795 100644
--- a/docs/operator-manual/dynamic-cluster-distribution.md
+++ b/docs/operator-manual/dynamic-cluster-distribution.md
@@ -17,16 +17,10 @@ which does not require a restart of the application controller pods.
## Enabling Dynamic Distribution of Clusters
-This feature is disabled by default while it is in alpha. To enable it, you must set the environment `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to true when running the Application Controller.
-
-In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize
-overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. The
-dynamic distribution code automatically kicks in when the controller is deployed as a Deployment.
+This feature is disabled by default while it is in alpha. To use the feature, the manifests under `manifests/ha/base/controller-deployment/` can be applied as a Kustomize overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. You must also set the environment variable `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to `true` when running the Application Controller as a Deployment.
!!! important
- The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of
- this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes
- to avoid issues.
+ The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes to avoid issues.
Note the introduction of new environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME`. The environment variable is explained in [working of Dynamic Distribution Heartbeat Process](#working-of-dynamic-distribution)
diff --git a/docs/operator-manual/health.md b/docs/operator-manual/health.md
index 5cc80de6538c5..8566d6460e6db 100644
--- a/docs/operator-manual/health.md
+++ b/docs/operator-manual/health.md
@@ -3,7 +3,7 @@
## Overview
Argo CD provides built-in health assessment for several standard Kubernetes types, which is then
surfaced to the overall Application health status as a whole. The following checks are made for
-specific types of kubernetes resources:
+specific types of Kubernetes resources:
### Deployment, ReplicaSet, StatefulSet, DaemonSet
* Observed generation is equal to desired generation.
diff --git a/docs/operator-manual/high_availability.md b/docs/operator-manual/high_availability.md
index ac59c333ba7cb..0a011104967f1 100644
--- a/docs/operator-manual/high_availability.md
+++ b/docs/operator-manual/high_availability.md
@@ -57,7 +57,7 @@ performance. For performance reasons the controller monitors and caches only the
preferred version into a version of the resource stored in Git. If `kubectl convert` fails because the conversion is not supported then the controller falls back to Kubernetes API query which slows down
reconciliation. In this case, we advise to use the preferred resource version in Git.
-* The controller polls Git every 3m by default. You can change this duration using the `timeout.reconciliation` setting in the `argocd-cm` ConfigMap. The value of `timeout.reconciliation` is a duration string e.g `60s`, `1m`, `1h` or `1d`.
+* The controller polls Git every 3m by default. You can change this duration using the `timeout.reconciliation` and `timeout.reconciliation.jitter` settings in the `argocd-cm` ConfigMap. The value of each field is a duration string, e.g. `60s`, `1m`, `1h` or `1d`.
* If the controller is managing too many clusters and uses too much memory then you can shard clusters across multiple
controller replicas. To enable sharding, increase the number of replicas in `argocd-application-controller` `StatefulSet`
@@ -98,8 +98,8 @@ metadata:
type: Opaque
stringData:
shard: 1
- name: mycluster.com
- server: https://mycluster.com
+ name: mycluster.example.com
+ server: https://mycluster.example.com
config: |
{
"bearerToken": "",
@@ -243,3 +243,102 @@ spec:
path: my-application
# ...
```
+
+### Application Sync Timeout & Jitter
+
+Argo CD has a timeout for application syncs. It will trigger a refresh for each application periodically when the timeout expires.
+With a large number of applications, this causes a spike in the refresh queue and a corresponding load spike on the repo-server component. To avoid this, you can add a jitter to the sync timeout, which spreads out the refreshes and gives the repo-server time to catch up.
+
+The jitter is the maximum duration that can be added to the sync timeout, so if the sync timeout is 5 minutes and the jitter is 1 minute, then the actual timeout will be between 5 and 6 minutes.
+
+To configure the jitter, set the following environment variable:
+
+* `ARGOCD_RECONCILIATION_JITTER` - The jitter to apply to the sync timeout. Disabled when value is 0. Defaults to 0.
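+
+As a concrete sketch, the 5-minute timeout with 1-minute jitter described above could also be expressed with the `timeout.reconciliation` and `timeout.reconciliation.jitter` settings in `argocd-cm` mentioned earlier; the values below are illustrative:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+  namespace: argocd
+  labels:
+    app.kubernetes.io/part-of: argocd
+data:
+  timeout.reconciliation: 300s       # base sync timeout (5 minutes)
+  timeout.reconciliation.jitter: 60s # up to 1 additional minute of jitter
+```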
+
+## Rate Limiting Application Reconciliations
+
+To prevent high controller resource usage or sync loops caused by misbehaving apps or other environment-specific factors,
+we can configure rate limits on the workqueues used by the application controller. There are two types of rate limits that can be configured:
+
+ * Global rate limits
+ * Per item rate limits
+
+The final rate limiter uses a combination of both and calculates the final backoff as `max(globalBackoff, perItemBackoff)`.
+
+### Global rate limits
+
+This limiter is enabled by default. It is a simple bucket-based rate limiter that limits the number of items that can be queued per second.
+This is useful to prevent a large number of apps from being queued at the same time.
+
+To configure the bucket limiter you can set the following environment variables:
+
+ * `WORKQUEUE_BUCKET_SIZE` - The number of items that can be queued in a single burst. Defaults to 500.
+ * `WORKQUEUE_BUCKET_QPS` - The number of items that can be queued per second. Defaults to 50.
+
+### Per item rate limits
+
+The per-item rate limiter limits the number of times a particular item can be queued. By default it returns a fixed base delay/backoff value, but it can be configured to return exponential values.
+The limiter is based on exponential backoff: the backoff time for an item keeps increasing exponentially if it is queued multiple times in a short period, but the backoff is reset automatically once a configured `cool down` period has elapsed since the last time the item was queued.
+
+To configure the per item limiter you can set the following environment variables:
+
+ * `WORKQUEUE_FAILURE_COOLDOWN_NS` - The cool down period in nanoseconds. Once this period has elapsed for an item, the backoff is reset. Exponential backoff is disabled if set to 0 (default). Example value: 10 * 10^9 (=10s).
+ * `WORKQUEUE_BASE_DELAY_NS` - The base delay in nanoseconds; this is the initial backoff used in the exponential backoff formula. Defaults to 1000 (=1μs).
+ * `WORKQUEUE_MAX_DELAY_NS` - The max delay in nanoseconds; this is the maximum backoff limit. Defaults to 3 * 10^9 (=3s).
+ * `WORKQUEUE_BACKOFF_FACTOR` - The backoff factor; this is the factor by which the backoff is increased for each retry. Defaults to 1.5.
+
+The formula used to calculate the backoff time for an item, where `numRequeue` is the number of times the item has been queued
+and `lastRequeueTime` is the time at which the item was last queued:
+
+- When `WORKQUEUE_FAILURE_COOLDOWN_NS` != 0 :
+
+```
+backoff = time.Since(lastRequeueTime) >= WORKQUEUE_FAILURE_COOLDOWN_NS ?
+ WORKQUEUE_BASE_DELAY_NS :
+ min(
+ WORKQUEUE_MAX_DELAY_NS,
+ WORKQUEUE_BASE_DELAY_NS * WORKQUEUE_BACKOFF_FACTOR ^ (numRequeue)
+ )
+```
+
+- When `WORKQUEUE_FAILURE_COOLDOWN_NS` = 0 :
+
+```
+backoff = WORKQUEUE_BASE_DELAY_NS
+```
+
+## HTTP Request Retry Strategy
+
+In scenarios where network instability or transient server errors occur, the retry strategy ensures the robustness of HTTP communication by automatically resending failed requests. It uses a combination of maximum retries and backoff intervals to prevent overwhelming the server or thrashing the network.
+
+### Configuring Retries
+
+The retry logic can be fine-tuned with the following environment variables:
+
+* `ARGOCD_K8SCLIENT_RETRY_MAX` - The maximum number of retries for each request. The request will be dropped after this count is reached. Defaults to 0 (no retries).
+* `ARGOCD_K8SCLIENT_RETRY_BASE_BACKOFF` - The initial backoff delay on the first retry attempt in ms. Subsequent retries will double this backoff time up to a maximum threshold. Defaults to 100ms.
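+
+A minimal sketch of enabling retries, assuming the variables are added to the environment of the component whose Kubernetes client should retry (values are illustrative):
+
+```yaml
+env:
+  - name: ARGOCD_K8SCLIENT_RETRY_MAX
+    value: "3"
+  - name: ARGOCD_K8SCLIENT_RETRY_BASE_BACKOFF
+    value: "100" # milliseconds
+```
+
+With these values and the backoff strategy described below, a failing request is retried after roughly 100 ms, 200 ms, and 400 ms before being dropped.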
+
+### Backoff Strategy
+
+The backoff strategy employed is a simple exponential backoff without jitter. The backoff time increases exponentially with each retry attempt until a maximum backoff duration is reached.
+
+The formula for calculating the backoff time is:
+
+```
+backoff = min(retryWaitMax, baseRetryBackoff * (2 ^ retryAttempt))
+```
+Where `retryAttempt` starts at 0 and increments by 1 for each subsequent retry.
+
+### Maximum Wait Time
+
+There is a cap on the backoff time to prevent excessive wait times between retries. This cap is defined by:
+
+`retryWaitMax` - The maximum duration to wait before retrying. This ensures that retries happen within a reasonable timeframe. Defaults to 10 seconds.
+
+### Non-Retriable Conditions
+
+Not all HTTP responses are eligible for retries. The following conditions will not trigger a retry:
+
+* Responses with a status code indicating client errors (4xx) except for 429 Too Many Requests.
+* Responses with the status code 501 Not Implemented.
diff --git a/docs/operator-manual/ingress.md b/docs/operator-manual/ingress.md
index 84b2bcaf34a67..aad2208c21873 100644
--- a/docs/operator-manual/ingress.md
+++ b/docs/operator-manual/ingress.md
@@ -166,6 +166,43 @@ The argocd-server Service needs to be annotated with `projectcontour.io/upstream
The API server should then be run with TLS disabled. Edit the `argocd-server` deployment to add the
`--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md).
+Contour HTTPProxy CRD:
+
+Using a Contour HTTPProxy CRD allows you to use the same hostname for the gRPC and REST API.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: argocd-server
+ namespace: argocd
+spec:
+ ingressClassName: contour
+ virtualhost:
+ fqdn: path.to.argocd.io
+ tls:
+ secretName: wildcard-tls
+ routes:
+ - conditions:
+ - prefix: /
+ - header:
+ name: Content-Type
+ contains: application/grpc
+ services:
+ - name: argocd-server
+ port: 80
+ protocol: h2c # allows for unencrypted http2 connections
+ timeoutPolicy:
+ response: 1h
+ idle: 600s
+ idleConnection: 600s
+ - conditions:
+ - prefix: /
+ services:
+ - name: argocd-server
+ port: 80
+```
+
## [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx)
### Option 1: SSL-Passthrough
@@ -661,9 +698,9 @@ metadata:
networking.gke.io/v1beta1.FrontendConfig: argocd-frontend-config
spec:
tls:
- - secretName: secret-yourdomain-com
+ - secretName: secret-example-com
rules:
- - host: argocd.yourdomain.com
+ - host: argocd.example.com
http:
paths:
- pathType: ImplementationSpecific
@@ -686,9 +723,9 @@ metadata:
networking.gke.io/v1beta1.FrontendConfig: argocd-frontend-config
spec:
tls:
- - secretName: secret-yourdomain-com
+ - secretName: secret-example-com
rules:
- - host: argocd.yourdomain.com
+ - host: argocd.example.com
http:
paths:
- pathType: Prefix
@@ -700,7 +737,7 @@ spec:
number: 80
```
-As you may know already, it can take some minutes to deploy the load balancer and become ready to accept connections. Once it's ready, get the public IP address for your Load Balancer, go to your DNS server (Google or third party) and point your domain or subdomain (i.e. argocd.yourdomain.com) to that IP address.
+As you may know already, it can take a few minutes for the load balancer to be deployed and become ready to accept connections. Once it's ready, get the public IP address of your Load Balancer, go to your DNS server (Google or third party) and point your domain or subdomain (e.g. argocd.example.com) to that IP address.
You can get that IP address describing the Ingress object like this:
diff --git a/docs/operator-manual/metrics.md b/docs/operator-manual/metrics.md
index 174b08fd75c2c..634684a430045 100644
--- a/docs/operator-manual/metrics.md
+++ b/docs/operator-manual/metrics.md
@@ -8,12 +8,12 @@ Metrics about applications. Scraped at the `argocd-metrics:8082/metrics` endpoin
| Metric | Type | Description |
|--------|:----:|-------------|
| `argocd_app_info` | gauge | Information about Applications. It contains labels such as `sync_status` and `health_status` that reflect the application state in Argo CD. |
-| `argocd_app_k8s_request_total` | counter | Number of kubernetes requests executed during application reconciliation |
+| `argocd_app_k8s_request_total` | counter | Number of Kubernetes requests executed during application reconciliation |
| `argocd_app_labels` | gauge | Argo Application labels converted to Prometheus labels. Disabled by default. See section below about how to enable it. |
| `argocd_app_reconcile` | histogram | Application reconciliation performance. |
| `argocd_app_sync_total` | counter | Counter for application sync history |
| `argocd_cluster_api_resource_objects` | gauge | Number of k8s resource objects in the cache. |
-| `argocd_cluster_api_resources` | gauge | Number of monitored kubernetes API resources. |
+| `argocd_cluster_api_resources` | gauge | Number of monitored Kubernetes API resources. |
| `argocd_cluster_cache_age_seconds` | gauge | Cluster cache age in seconds. |
| `argocd_cluster_connection_status` | gauge | The k8s cluster current connection status. |
| `argocd_cluster_events_total` | counter | Number of processes k8s resource events. |
@@ -67,9 +67,11 @@ Scraped at the `argocd-server-metrics:8083/metrics` endpoint.
| Metric | Type | Description |
|--------|:----:|-------------|
| `argocd_redis_request_duration` | histogram | Redis requests duration. |
-| `argocd_redis_request_total` | counter | Number of kubernetes requests executed during application reconciliation. |
+| `argocd_redis_request_total` | counter | Number of Redis requests executed during application reconciliation. |
| `grpc_server_handled_total` | counter | Total number of RPCs completed on the server, regardless of success or failure. |
| `grpc_server_msg_sent_total` | counter | Total number of gRPC stream messages sent by the server. |
+| `argocd_proxy_extension_request_total` | counter | Number of requests sent to the configured proxy extensions. |
+| `argocd_proxy_extension_request_duration_seconds` | histogram | Request duration in seconds between the Argo CD API server and the proxy extension backend. |
## Repo Server Metrics
Metrics about the Repo Server.
@@ -80,13 +82,13 @@ Scraped at the `argocd-repo-server:8084/metrics` endpoint.
| `argocd_git_request_duration_seconds` | histogram | Git requests duration seconds. |
| `argocd_git_request_total` | counter | Number of git requests performed by repo server |
| `argocd_redis_request_duration_seconds` | histogram | Redis requests duration seconds. |
-| `argocd_redis_request_total` | counter | Number of kubernetes requests executed during application reconciliation. |
+| `argocd_redis_request_total` | counter | Number of Redis requests executed during application reconciliation. |
| `argocd_repo_pending_request_total` | gauge | Number of pending requests requiring repository lock |
## Prometheus Operator
If using Prometheus Operator, the following ServiceMonitor example manifests can be used.
-Change `metadata.labels.release` to the name of label selected by your Prometheus.
+Add the namespace where Argo CD is installed and change `metadata.labels.release` to the name of the label selected by your Prometheus.
```yaml
apiVersion: monitoring.coreos.com/v1
@@ -148,6 +150,52 @@ spec:
- port: metrics
```
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: argocd-dex-server
+ labels:
+ release: prometheus-operator
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: argocd-dex-server
+ endpoints:
+ - port: metrics
+```
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: argocd-redis-haproxy-metrics
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: argocd-redis-ha-haproxy
+ endpoints:
+ - port: http-exporter-port
+```
+
+For the notifications controller, you additionally need to add the following:
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: argocd-notifications-controller
+ labels:
+ release: prometheus-operator
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: argocd-notifications-controller-metrics
+ endpoints:
+ - port: metrics
+```
+
## Dashboards
You can find an example Grafana dashboard [here](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard.json) or check demo instance
diff --git a/docs/operator-manual/notifications/functions.md b/docs/operator-manual/notifications/functions.md
index 3d614e4e53a55..c50d122024b76 100644
--- a/docs/operator-manual/notifications/functions.md
+++ b/docs/operator-manual/notifications/functions.md
@@ -48,6 +48,16 @@ Transforms given GIT URL into HTTPs format.
Returns repository URL full name `(/)`. Currently supports only Github, GitLab and Bitbucket.
+
+**`repo.QueryEscape(s string) string`**
+
+QueryEscape escapes the string so it can be safely placed inside a URL.
+
+Example:
+```
+/projects/{{ call .repo.QueryEscape (call .repo.FullNameByRepoURL .app.status.RepoURL) }}/merge_requests
+```
+
**`repo.GetCommitMetadata(sha string) CommitMetadata`**
diff --git a/docs/operator-manual/notifications/index.md b/docs/operator-manual/notifications/index.md
index c719d10e7611c..eccca906ae91b 100644
--- a/docs/operator-manual/notifications/index.md
+++ b/docs/operator-manual/notifications/index.md
@@ -45,3 +45,71 @@ So you can just use them instead of reinventing new ones.
```
Try syncing an application to get notified when the sync is completed.
+
+## Namespace based configuration
+
+A common installation method for Argo CD Notifications is to install it in a dedicated namespace to manage a whole cluster. In this case, generally only the administrator
+can configure notifications in that namespace. However, in some cases it is desirable to allow end users to configure notifications
+for their own Argo CD applications. For example, an end user can configure notifications for their Argo CD application in the namespace they have access to and where the application is running.
+
+This feature relies on applications in any namespace. See the [applications in any namespace](../app-any-namespace.md) page for more information.
+
+In order to enable this feature, the Argo CD administrator must reconfigure the `argocd-notifications-controller` workload to add the `--application-namespaces` and `--self-service-notification-enabled` parameters to the container's startup command.
+`--application-namespaces` controls the list of namespaces that Argo CD applications are in. `--self-service-notification-enabled` turns on this feature.
+
+The startup parameters for both can also be conveniently set up and kept in sync by specifying
+`application.namespaces` and `notificationscontroller.selfservice.enabled` in the `argocd-cmd-params-cm` ConfigMap instead of changing the manifests for the respective workloads. For example:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: argocd-cmd-params-cm
+data:
+ application.namespaces: app-team-one, app-team-two
+ notificationscontroller.selfservice.enabled: "true"
+```
+
+To use this feature, deploy a ConfigMap named `argocd-notifications-cm`, and optionally a Secret named `argocd-notifications-secret`, in the namespace where the Argo CD application lives.
+
+When configured this way, the controller sends notifications using both the controller-level configuration (the ConfigMap located in the same namespace as the controller) and
+the configuration located in the namespace where the Argo CD application is.
+
+Example: an application team wants to receive notifications using PagerDutyV2, while the controller-level configuration only supports Slack.
+
+The following two resources are deployed in the namespace where the Argo CD application lives.
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: argocd-notifications-cm
+data:
+ service.pagerdutyv2: |
+ serviceKeys:
+ my-service: $pagerduty-key-my-service
+...
+```
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: argocd-notifications-secret
+type: Opaque
+data:
+ pagerduty-key-my-service:
+```
+
+When an Argo CD application has the following subscription, the user receives an application sync failure message from PagerDuty.
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ annotations:
+ notifications.argoproj.io/subscribe.on-sync-failed.pagerdutyv2: ""
+```
+
+!!! note
+    When the same notification service and trigger are defined in both the controller-level and the application-level configuration,
+    both notifications will be sent according to their own configuration.
+
+The [Defining and using secrets within notification templates](templates.md#defining-and-using-secrets-within-notification-templates) feature is not available when the `--self-service-notification-enabled` flag is on.
diff --git a/docs/operator-manual/notifications/services/alertmanager.md b/docs/operator-manual/notifications/services/alertmanager.md
index e0f9d7e4e7889..033a76a29ea65 100755
--- a/docs/operator-manual/notifications/services/alertmanager.md
+++ b/docs/operator-manual/notifications/services/alertmanager.md
@@ -43,7 +43,7 @@ You should turn off "send_resolved" or you will receive unnecessary recovery not
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.alertmanager: |
targets:
@@ -58,7 +58,7 @@ If your alertmanager has changed the default api, you can customize "apiPath".
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.alertmanager: |
targets:
@@ -89,7 +89,7 @@ stringData:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.alertmanager: |
targets:
@@ -110,7 +110,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.alertmanager: |
targets:
diff --git a/docs/operator-manual/notifications/services/awssqs.md b/docs/operator-manual/notifications/services/awssqs.md
index 6bbc47cbbc0b5..5331533826348 100755
--- a/docs/operator-manual/notifications/services/awssqs.md
+++ b/docs/operator-manual/notifications/services/awssqs.md
@@ -1,13 +1,13 @@
-# AWS SQS
+# AWS SQS
## Parameters
-This notification service is capable of sending simple messages to AWS SQS queue.
+This notification service is capable of sending simple messages to an AWS SQS queue.
-* `queue` - name of the queue you are intending to send messages to. Can be overwriten with target destination annotation.
+* `queue` - name of the queue you are intending to send messages to. Can be overridden with target destination annotation.
* `region` - region of the sqs queue can be provided via env variable AWS_DEFAULT_REGION
* `key` - optional, aws access key must be either referenced from a secret via variable or via env variable AWS_ACCESS_KEY_ID
-* `secret` - optional, aws access secret must be either referenced from a secret via variableor via env variable AWS_SECRET_ACCESS_KEY
+* `secret` - optional, aws access secret must be either referenced from a secret via variable or via env variable AWS_SECRET_ACCESS_KEY
* `account` optional, external accountId of the queue
* `endpointUrl` optional, useful for development with localstack
@@ -30,7 +30,7 @@ metadata:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.awssqs: |
region: "us-east-2"
@@ -63,7 +63,7 @@ stringData:
### Minimal configuration using AWS Env variables
-Ensure following list of enviromental variable is injected via OIDC, or other method. And assuming SQS is local to the account.
+Ensure the following environment variables are injected via OIDC or another method. This example assumes SQS is local to the account.
You may skip usage of secret for sensitive data and omit other parameters. (Setting parameters via ConfigMap takes precedent.)
Variables:
@@ -89,7 +89,7 @@ metadata:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.awssqs: |
queue: "myqueue"
@@ -104,3 +104,16 @@ data:
- oncePer: obj.metadata.annotations["generation"]
```
+
+## FIFO SQS Queues
+
+FIFO queues require a [MessageGroupId](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html#SQS-SendMessage-request-MessageGroupId) to be sent along with every message; messages with a matching MessageGroupId are processed one by one, in order.
+
+To send to a FIFO SQS queue, you must include a `messageGroupId` in the template, as in the example below:
+
+```yaml
+template.deployment-ready: |
+ message: |
+ Deployment {{.obj.metadata.name}} is ready!
+ messageGroupId: {{.obj.metadata.name}}-deployment
+```
diff --git a/docs/operator-manual/notifications/services/email.md b/docs/operator-manual/notifications/services/email.md
index b81ab6cde8b4c..7fd3f0e22379c 100755
--- a/docs/operator-manual/notifications/services/email.md
+++ b/docs/operator-manual/notifications/services/email.md
@@ -20,7 +20,7 @@ The following snippet contains sample Gmail service configuration:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.email.gmail: |
username: $email-username
@@ -36,7 +36,7 @@ Without authentication:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.email.example: |
host: smtp.example.com
@@ -52,7 +52,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
template.app-sync-succeeded: |
email:
diff --git a/docs/operator-manual/notifications/services/github.md b/docs/operator-manual/notifications/services/github.md
index a3f89f8c87ef0..1fa1a985d2682 100755
--- a/docs/operator-manual/notifications/services/github.md
+++ b/docs/operator-manual/notifications/services/github.md
@@ -12,7 +12,7 @@ The GitHub notification service changes commit status using [GitHub Apps](https:
## Configuration
1. Create a GitHub Apps using https://github.com/settings/apps/new
-2. Change repository permissions to enable write commit statuses and/or deployments
+2. Change repository permissions to enable writing commit statuses and/or deployments and/or pull request comments
![2](https://user-images.githubusercontent.com/18019529/108397381-3ca57980-725b-11eb-8d17-5b8992dc009e.png)
3. Generate a private key, and download it automatically
![3](https://user-images.githubusercontent.com/18019529/108397926-d4a36300-725b-11eb-83fe-74795c8c3e03.png)
@@ -24,7 +24,7 @@ in `argocd-notifications-cm` ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.github: |
appID:
@@ -76,6 +76,11 @@ template.app-deployed: |
logURL: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true"
requiredContexts: []
autoMerge: true
+ transientEnvironment: false
+ pullRequestComment:
+ content: |
+ Application {{.app.metadata.name}} is now running new version of deployments manifests.
+ See more here: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true
```
**Notes**:
@@ -83,4 +88,5 @@ template.app-deployed: |
- If `github.repoURLPath` and `github.revisionPath` are same as above, they can be omitted.
- Automerge is optional and `true` by default for github deployments to ensure the requested ref is up to date with the default branch.
Setting this option to `false` is required if you would like to deploy older refs in your default branch.
- For more information see the [Github Deployment API Docs](https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#create-a-deployment).
+ For more information see the [GitHub Deployment API Docs](https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#create-a-deployment).
+- If `github.pullRequestComment.content` is set to 65536 characters or more, it will be truncated.
diff --git a/docs/operator-manual/notifications/services/googlechat.md b/docs/operator-manual/notifications/services/googlechat.md
index 041ea6e022ef5..821c23023e863 100755
--- a/docs/operator-manual/notifications/services/googlechat.md
+++ b/docs/operator-manual/notifications/services/googlechat.md
@@ -19,7 +19,7 @@ The Google Chat notification service send message notifications to a google chat
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.googlechat: |
webhooks:
@@ -59,24 +59,27 @@ A card message can be defined as follows:
```yaml
template.app-sync-succeeded: |
googlechat:
- cards: |
+ cardsV2: |
- header:
title: ArgoCD Bot Notification
sections:
- widgets:
- - textParagraph:
+ - decoratedText:
text: The app {{ .app.metadata.name }} has successfully synced!
- widgets:
- - keyValue:
+ - decoratedText:
topLabel: Repository
- content: {{ call .repo.RepoURLToHTTPS .app.spec.source.repoURL }}
- - keyValue:
+ text: {{ call .repo.RepoURLToHTTPS .app.spec.source.repoURL }}
+ - decoratedText:
topLabel: Revision
- content: {{ .app.spec.source.targetRevision }}
- - keyValue:
+ text: {{ .app.spec.source.targetRevision }}
+ - decoratedText:
topLabel: Author
- content: {{ (call .repo.GetCommitMetadata .app.status.sync.revision).Author }}
+ text: {{ (call .repo.GetCommitMetadata .app.status.sync.revision).Author }}
```
+All [Card fields](https://developers.google.com/chat/api/reference/rest/v1/cards#Card_1) are supported and can be used
+in notifications. It is also possible to use the older `cards` key with the legacy card fields,
+but this is not recommended, as Google has deprecated that field in favor of the newer `cardsV2`.
The card message can be written in JSON too.
@@ -86,7 +89,7 @@ It is possible send both simple text and card messages in a chat thread by speci
```yaml
template.app-sync-succeeded: |
- message: The app {{ .app.metadata.name }} has succesfully synced!
+ message: The app {{ .app.metadata.name }} has successfully synced!
googlechat:
threadKey: {{ .app.metadata.name }}
```
diff --git a/docs/operator-manual/notifications/services/grafana.md b/docs/operator-manual/notifications/services/grafana.md
index a36672d0fa423..1f3e77701f044 100755
--- a/docs/operator-manual/notifications/services/grafana.md
+++ b/docs/operator-manual/notifications/services/grafana.md
@@ -21,7 +21,7 @@ Available parameters :
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.grafana: |
apiUrl: https://grafana.example.com/api
diff --git a/docs/operator-manual/notifications/services/mattermost.md b/docs/operator-manual/notifications/services/mattermost.md
index 98e0d0fd7b82f..d1f187e955b9c 100755
--- a/docs/operator-manual/notifications/services/mattermost.md
+++ b/docs/operator-manual/notifications/services/mattermost.md
@@ -19,7 +19,7 @@ in `argocd-notifications-cm` ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.mattermost: |
apiURL:
diff --git a/docs/operator-manual/notifications/services/newrelic.md b/docs/operator-manual/notifications/services/newrelic.md
index d98288a846422..b0c7e340c9b28 100755
--- a/docs/operator-manual/notifications/services/newrelic.md
+++ b/docs/operator-manual/notifications/services/newrelic.md
@@ -14,7 +14,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.newrelic: |
apiURL:
diff --git a/docs/operator-manual/notifications/services/opsgenie.md b/docs/operator-manual/notifications/services/opsgenie.md
index 665d0081e7c73..e92ee99756ab8 100755
--- a/docs/operator-manual/notifications/services/opsgenie.md
+++ b/docs/operator-manual/notifications/services/opsgenie.md
@@ -12,14 +12,15 @@ To be able to send notifications with argocd-notifications you have to create an
8. Give your integration a name, copy the "API key" and safe it somewhere for later
9. Make sure the checkboxes for "Create and Update Access" and "enable" are selected, disable the other checkboxes to remove unnecessary permissions
10. Click "Safe Integration" at the bottom
-11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the us/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (european api).
-12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the opsgenie integration in the `argocd-notifications-secret` secret.
+11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the US/international API URL `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API).
+12. You are finished with configuring Opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the Opsgenie integration in the `argocd-notifications-secret` secret.
+
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.opsgenie: |
apiUrl:
diff --git a/docs/operator-manual/notifications/services/pagerduty.md b/docs/operator-manual/notifications/services/pagerduty.md
index 0e1ab965332e1..c6e1e41dac81d 100755
--- a/docs/operator-manual/notifications/services/pagerduty.md
+++ b/docs/operator-manual/notifications/services/pagerduty.md
@@ -1,17 +1,17 @@
-# Pagerduty
+# PagerDuty
## Parameters
-The Pagerduty notification service is used to create pagerduty incidents and requires specifying the following settings:
+The PagerDuty notification service is used to create PagerDuty incidents and requires specifying the following settings:
-* `pagerdutyToken` - the pagerduty auth token
+* `pagerdutyToken` - the PagerDuty auth token
* `from` - email address of a valid user associated with the account making the request.
* `serviceID` - The ID of the resource.
## Example
-The following snippet contains sample Pagerduty service configuration:
+The following snippet contains sample PagerDuty service configuration:
```yaml
apiVersion: v1
@@ -26,7 +26,7 @@ stringData:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.pagerduty: |
token: $pagerdutyToken
@@ -35,13 +35,13 @@ data:
## Template
-[Notification templates](../templates.md) support specifying subject for pagerduty notifications:
+[Notification templates](../templates.md) support specifying subject for PagerDuty notifications:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
template.rollout-aborted: |
message: Rollout {{.rollout.metadata.name}} is aborted.
@@ -62,5 +62,5 @@ apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
annotations:
- notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: ""
+ notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: ""
```
diff --git a/docs/operator-manual/notifications/services/pagerduty_v2.md b/docs/operator-manual/notifications/services/pagerduty_v2.md
index 21e8d942e4e93..549cdc937b150 100755
--- a/docs/operator-manual/notifications/services/pagerduty_v2.md
+++ b/docs/operator-manual/notifications/services/pagerduty_v2.md
@@ -28,7 +28,7 @@ stringData:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.pagerdutyv2: |
serviceKeys:
@@ -43,7 +43,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
template.rollout-aborted: |
message: Rollout {{.rollout.metadata.name}} is aborted.
@@ -74,5 +74,5 @@ apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
annotations:
- notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: ""
+ notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: ""
```
diff --git a/docs/operator-manual/notifications/services/pushover.md b/docs/operator-manual/notifications/services/pushover.md
index 37cb20b277dcc..a09b3660f9233 100755
--- a/docs/operator-manual/notifications/services/pushover.md
+++ b/docs/operator-manual/notifications/services/pushover.md
@@ -1,13 +1,13 @@
# Pushover
1. Create an app at [pushover.net](https://pushover.net/apps/build).
-2. Store the API key in `` Secret and define the secret name in `` ConfigMap:
+2. Store the API key in `` Secret and define the secret name in `argocd-notifications-cm` ConfigMap:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.pushover: |
token: $pushover-token
diff --git a/docs/operator-manual/notifications/services/rocketchat.md b/docs/operator-manual/notifications/services/rocketchat.md
index f1157050139d0..20aaa405c80d0 100755
--- a/docs/operator-manual/notifications/services/rocketchat.md
+++ b/docs/operator-manual/notifications/services/rocketchat.md
@@ -43,7 +43,7 @@ stringData:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.rocketchat: |
email: $rocketchat-email
diff --git a/docs/operator-manual/notifications/services/slack.md b/docs/operator-manual/notifications/services/slack.md
index 15937597c19f2..41bdddd7617c4 100755
--- a/docs/operator-manual/notifications/services/slack.md
+++ b/docs/operator-manual/notifications/services/slack.md
@@ -6,11 +6,16 @@ If you want to send message using incoming webhook, you can use [webhook](./webh
The Slack notification service configuration includes following settings:
-* `token` - the app token
-* `apiURL` - optional, the server url, e.g. https://example.com/api
-* `username` - optional, the app username
-* `icon` - optional, the app icon, e.g. :robot_face: or https://example.com/image.png
-* `insecureSkipVerify` - optional bool, true or false
+| **Option** | **Required** | **Type** | **Description** | **Example** |
+| -------------------- | ------------ | -------------- | --------------- | ----------- |
+| `apiURL` | False | `string` | The server URL. | `https://example.com/api` |
+| `channels` | False | `list[string]` | | `["my-channel-1", "my-channel-2"]` |
+| `icon` | False | `string` | The app icon. | `:robot_face:` or `https://example.com/image.png` |
+| `insecureSkipVerify` | False | `bool` | | `true` |
+| `signingSecret` | False | `string` | | `8f742231b10e8888abcd99yyyzzz85a5` |
+| `token` | **True** | `string` | The app's OAuth access token. | `xoxb-1234567890-1234567890123-5n38u5ed63fgzqlvuyxvxcx6` |
+| `username` | False | `string` | The app username. | `argocd` |
+| `disableUnfurl`      | False        | `bool`         | Disable Slack unfurling of links in messages. | `true` |
## Configuration
@@ -44,7 +49,7 @@ The Slack notification service configuration includes following settings:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.slack: |
token: $slack-token
diff --git a/docs/operator-manual/notifications/services/teams.md b/docs/operator-manual/notifications/services/teams.md
index b5b9a228c43eb..0e44456d4de19 100755
--- a/docs/operator-manual/notifications/services/teams.md
+++ b/docs/operator-manual/notifications/services/teams.md
@@ -18,7 +18,7 @@ The Teams notification service send message notifications using Teams bot and re
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.teams: |
recipientUrls:
@@ -113,7 +113,7 @@ template.app-sync-succeeded: |
### summary field
-You can set a summary of the message that will be shown on Notifcation & Activity Feed
+You can set a summary of the message that will be shown on Notification & Activity Feed
![](https://user-images.githubusercontent.com/6957724/116587921-84c4d480-a94d-11eb-9da4-f365151a12e7.jpg)
diff --git a/docs/operator-manual/notifications/services/telegram.md b/docs/operator-manual/notifications/services/telegram.md
index 953c2a9fca0bf..8612a09d1ca84 100755
--- a/docs/operator-manual/notifications/services/telegram.md
+++ b/docs/operator-manual/notifications/services/telegram.md
@@ -2,13 +2,13 @@
1. Get an API token using [@Botfather](https://t.me/Botfather).
2. Store token in `` Secret and configure telegram integration
-in `` ConfigMap:
+in `argocd-notifications-cm` ConfigMap:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.telegram: |
token: $telegram-token
diff --git a/docs/operator-manual/notifications/services/webex.md b/docs/operator-manual/notifications/services/webex.md
index 440ed1ddc738f..eba4c5e11b8dc 100755
--- a/docs/operator-manual/notifications/services/webex.md
+++ b/docs/operator-manual/notifications/services/webex.md
@@ -24,7 +24,7 @@ The Webex Teams notification service configuration includes following settings:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webex: |
token: $webex-token
diff --git a/docs/operator-manual/notifications/services/webhook.md b/docs/operator-manual/notifications/services/webhook.md
index bd45b1f69e40b..4b8ca38a685ad 100755
--- a/docs/operator-manual/notifications/services/webhook.md
+++ b/docs/operator-manual/notifications/services/webhook.md
@@ -1,7 +1,7 @@
# Webhook
The webhook notification service allows sending a generic HTTP request using the templatized request body and URL.
-Using Webhook you might trigger a Jenkins job, update Github commit status.
+Using a webhook you might, for example, trigger a Jenkins job or update a GitHub commit status.
## Parameters
@@ -9,8 +9,17 @@ The Webhook notification service configuration includes following settings:
- `url` - the url to send the webhook to
- `headers` - optional, the headers to pass along with the webhook
-- `basicAuth` - optional, the basic authentication to pass along with the webook
+- `basicAuth` - optional, the basic authentication to pass along with the webhook
- `insecureSkipVerify` - optional bool, true or false
+- `retryWaitMin` - Optional, the minimum wait time between retries. Default value: 1s.
+- `retryWaitMax` - Optional, the maximum wait time between retries. Default value: 5s.
+- `retryMax` - Optional, the maximum number of retries. Default value: 3.
+
+## Retry Behavior
+
+The webhook service will automatically retry the request if it fails due to network errors or if the server returns a 5xx status code. The number of retries and the wait time between retries can be configured using the `retryMax`, `retryWaitMin`, and `retryWaitMax` parameters.
+
+The wait time between retries is between `retryWaitMin` and `retryWaitMax`. If all retries fail, sending the notification returns an error.
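+
+A minimal sketch of a webhook service with the retry parameters set; the service name, URL, and values are illustrative assumptions:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-notifications-cm
+data:
+  service.webhook.sample-webhook: |
+    url: https://webhook.example.com/endpoint
+    retryMax: 5
+    retryWaitMin: 2s
+    retryWaitMax: 10s
+```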
## Configuration
@@ -22,7 +31,7 @@ Use the following steps to configure webhook:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webhook.: |
url: https:///
@@ -41,7 +50,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
template.github-commit-status: |
webhook:
@@ -67,13 +76,13 @@ metadata:
## Examples
-### Set Github commit status
+### Set GitHub commit status
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webhook.github: |
url: https://api.github.com
@@ -88,7 +97,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webhook.github: |
url: https://api.github.com
@@ -119,7 +128,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webhook.jenkins: |
url: http:///job//build?token=
@@ -136,7 +145,7 @@ type: Opaque
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webhook.form: |
url: https://form.example.com
@@ -157,7 +166,7 @@ data:
apiVersion: v1
kind: ConfigMap
metadata:
- name:
+ name: argocd-notifications-cm
data:
service.webhook.slack_webhook: |
url: https://hooks.slack.com/services/xxxxx
diff --git a/docs/operator-manual/notifications/templates.md b/docs/operator-manual/notifications/templates.md
index f865229e12835..1d80f20953b24 100644
--- a/docs/operator-manual/notifications/templates.md
+++ b/docs/operator-manual/notifications/templates.md
@@ -20,6 +20,7 @@ Each template has access to the following fields:
- `app` holds the application object.
- `context` is a user-defined string map and might include any string keys and values.
+- `secrets` provides access to sensitive data stored in `argocd-notifications-secret`.
- `serviceType` holds the notification service type name (such as "slack" or "email). The field can be used to conditionally
render service-specific fields.
- `recipient` holds the recipient name.
@@ -43,6 +44,39 @@ data:
message: "Something happened in {{ .context.environmentName }} in the {{ .context.region }} data center!"
```
+## Defining and using secrets within notification templates
+
+Some notification service use cases require secrets within templates. This can be achieved with
+the `secrets` data variable available within the templates.
+
+Given that we have the following `argocd-notifications-secret`:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: argocd-notifications-secret
+stringData:
+ sampleWebhookToken: secret-token
+type: Opaque
+```
+
+We can use the defined `sampleWebhookToken` in a template as follows:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: argocd-notifications-cm
+data:
+ template.trigger-webhook: |
+ webhook:
+ sample-webhook:
+ method: POST
+ path: 'webhook/endpoint/with/auth'
+        body: 'token={{ .secrets.sampleWebhookToken }}&variables[APP_SOURCE_PATH]={{ .app.spec.source.path }}'
+```
+
## Notification Service Specific Fields
The `message` field of the template definition allows creating a basic notification for any notification service. You can leverage notification service-specific
diff --git a/docs/operator-manual/notifications/triggers.md b/docs/operator-manual/notifications/triggers.md
index c3e2dc601296b..02d0228c40997 100644
--- a/docs/operator-manual/notifications/triggers.md
+++ b/docs/operator-manual/notifications/triggers.md
@@ -1,7 +1,7 @@
The trigger defines the condition when the notification should be sent. The definition includes name, condition
and notification templates reference. The condition is a predicate expression that returns true if the notification
should be sent. The trigger condition evaluation is powered by [antonmedv/expr](https://github.com/antonmedv/expr).
-The condition language syntax is described at [Language-Definition.md](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md).
+The condition language syntax is described at [language-definition.md](https://github.com/antonmedv/expr/blob/master/docs/language-definition.md).
The trigger is configured in the `argocd-notifications-cm` ConfigMap. For example the following trigger sends a notification
when application sync status changes to `Unknown` using the `app-sync-status` template:
diff --git a/docs/operator-manual/notifications/troubleshooting-commands.md b/docs/operator-manual/notifications/troubleshooting-commands.md
index 633eb47d71690..8674e9677c1eb 100644
--- a/docs/operator-manual/notifications/troubleshooting-commands.md
+++ b/docs/operator-manual/notifications/troubleshooting-commands.md
@@ -39,6 +39,7 @@ argocd admin notifications template get app-sync-succeeded -o=yaml
--cluster string The name of the kubeconfig cluster to use
--config-map string argocd-notifications-cm.yaml file path
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
--kubeconfig string Path to a kube config. Only required if out-of-cluster
-n, --namespace string If present, the namespace scope for this CLI request
@@ -95,6 +96,7 @@ argocd admin notifications template notify app-sync-succeeded guestbook
--cluster string The name of the kubeconfig cluster to use
--config-map string argocd-notifications-cm.yaml file path
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
--kubeconfig string Path to a kube config. Only required if out-of-cluster
-n, --namespace string If present, the namespace scope for this CLI request
@@ -150,6 +152,7 @@ argocd admin notifications trigger get on-sync-failed -o=yaml
--cluster string The name of the kubeconfig cluster to use
--config-map string argocd-notifications-cm.yaml file path
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
--kubeconfig string Path to a kube config. Only required if out-of-cluster
-n, --namespace string If present, the namespace scope for this CLI request
@@ -205,6 +208,7 @@ argocd admin notifications trigger run on-sync-status-unknown ./sample-app.yaml
--cluster string The name of the kubeconfig cluster to use
--config-map string argocd-notifications-cm.yaml file path
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
--kubeconfig string Path to a kube config. Only required if out-of-cluster
-n, --namespace string If present, the namespace scope for this CLI request
diff --git a/docs/operator-manual/rbac.md b/docs/operator-manual/rbac.md
index 0f15a18be1973..b1d386fb5eb8e 100644
--- a/docs/operator-manual/rbac.md
+++ b/docs/operator-manual/rbac.md
@@ -159,6 +159,7 @@ data:
g, your-github-org:your-team, role:org-admin
```
+
----
Another `policy.csv` example might look as follows:
diff --git a/docs/operator-manual/secret-management.md b/docs/operator-manual/secret-management.md
index ab06a46014b20..aa224e20ff742 100644
--- a/docs/operator-manual/secret-management.md
+++ b/docs/operator-manual/secret-management.md
@@ -10,7 +10,7 @@ Here are some ways people are doing GitOps secrets:
* [Bitnami Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets)
* [External Secrets Operator](https://github.com/external-secrets/external-secrets)
* [Hashicorp Vault](https://www.vaultproject.io)
-* [Bank-Vaults]((https://bank-vaults.dev/))
+* [Bank-Vaults](https://bank-vaults.dev/)
* [Helm Secrets](https://github.com/jkroepke/helm-secrets)
* [Kustomize secret generator plugins](https://github.com/kubernetes-sigs/kustomize/blob/fd7a353df6cece4629b8e8ad56b71e30636f38fc/examples/kvSourceGoPlugin.md#secret-values-from-anywhere)
* [aws-secret-operator](https://github.com/mumoshu/aws-secret-operator)
diff --git a/docs/operator-manual/security.md b/docs/operator-manual/security.md
index 3ba9fdfe39363..47c5d3aa1accc 100644
--- a/docs/operator-manual/security.md
+++ b/docs/operator-manual/security.md
@@ -45,7 +45,7 @@ Communication with Redis is performed over plain HTTP by default. TLS can be set
Git and helm repositories are managed by a stand-alone service, called the repo-server. The
repo-server does not carry any Kubernetes privileges and does not store credentials to any services
(including git). The repo-server is responsible for cloning repositories which have been permitted
-and trusted by Argo CD operators, and generating kubernetes manifests at a given path in the
+and trusted by Argo CD operators, and generating Kubernetes manifests at a given path in the
repository. For performance and bandwidth efficiency, the repo-server maintains local clones of
these repositories so that subsequent commits to the repository are efficiently downloaded.
@@ -109,7 +109,7 @@ The information is used to reconstruct a REST config and kubeconfig to the clust
services.
To rotate the bearer token used by Argo CD, the token can be deleted (e.g. using kubectl) which
-causes kubernetes to generate a new secret with a new bearer token. The new token can be re-inputted
+causes Kubernetes to generate a new secret with a new bearer token. The new token can be re-inputted
to Argo CD by re-running `argocd cluster add`. Run the following commands against the *_managed_*
cluster:
diff --git a/docs/operator-manual/server-commands/argocd-application-controller.md b/docs/operator-manual/server-commands/argocd-application-controller.md
index 21d26b29c572e..f4057bf7b04cc 100644
--- a/docs/operator-manual/server-commands/argocd-application-controller.md
+++ b/docs/operator-manual/server-commands/argocd-application-controller.md
@@ -17,6 +17,7 @@ argocd-application-controller [flags]
```
--app-hard-resync int Time period in seconds for application hard resync.
--app-resync int Time period in seconds for application resync. (default 180)
+ --app-resync-jitter int Maximum time period in seconds to add as a delay jitter for application resync.
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
--application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from
--as string Username to impersonate for the operation
@@ -28,6 +29,7 @@ argocd-application-controller [flags]
--cluster string The name of the kubeconfig cluster to use
--context string The name of the kubeconfig context to use
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
+ --disable-compression If true, opt-out of response compression for all requests to the server
--dynamic-cluster-distribution-enabled Enables dynamic cluster distribution.
--gloglevel int Set the glog logging level
-h, --help help for argocd-application-controller
@@ -43,6 +45,8 @@ argocd-application-controller [flags]
--operation-processors int Number of application operation processors (default 10)
--otlp-address string OpenTelemetry collector address to send traces to
--otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
+ --otlp-headers stringToString List of OpenTelemetry collector extra headers sent with traces, headers are comma-separated key-value pairs(e.g. key1=value1,key2=value2) (default [])
+ --otlp-insecure OpenTelemetry collector insecure mode (default true)
--password string Password for basic authentication to the API server
--persist-resource-health Enables storing the managed resources health in the Application CRD (default true)
--proxy-url string If provided, this URL will be used to connect via proxy
@@ -54,6 +58,7 @@ argocd-application-controller [flags]
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
--redis-use-tls Use TLS when connecting to Redis.
--redisdb int Redis database.
+ --repo-error-grace-period-seconds int Grace period in seconds for ignoring consecutive errors while communicating with repo server. (default 180)
--repo-server string Repo server address. (default "argocd-repo-server:8081")
--repo-server-plaintext Disable TLS on connections to repo server
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
@@ -63,11 +68,18 @@ argocd-application-controller [flags]
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
--sentinelmaster string Redis sentinel master group name. (default "master")
--server string The address and port of the Kubernetes API server
+ --server-side-diff-enabled Feature flag to enable ServerSide diff. Default ("false")
--sharding-method string Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy")
--status-processors int Number of application status processors (default 20)
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
--token string Bearer token for authentication to the API server
--user string The name of the kubeconfig user to use
--username string Username for basic authentication to the API server
+ --wq-backoff-factor float Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5 (default 1.5)
+ --wq-basedelay-ns duration Set Workqueue Per Item Rate Limiter Base Delay duration in nanoseconds, default 1000000 (1ms) (default 1ms)
+ --wq-bucket-qps int Set Workqueue Rate Limiter Bucket QPS, default 50 (default 50)
+ --wq-bucket-size int Set Workqueue Rate Limiter Bucket Size, default 500 (default 500)
+ --wq-cooldown-ns duration Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)
+ --wq-maxdelay-ns duration Set Workqueue Per Item Rate Limiter Max Delay duration in nanoseconds, default 1000000000 (1s) (default 1s)
```
diff --git a/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md b/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md
index 1e784e94a2620..a889b64133a93 100644
--- a/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md
+++ b/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md
@@ -19,6 +19,7 @@ argocd-dex gendexcfg [flags]
--client-key string Path to a client key file for TLS
--cluster string The name of the kubeconfig cluster to use
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--disable-tls Disable TLS on the HTTP endpoint
-h, --help help for gendexcfg
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
diff --git a/docs/operator-manual/server-commands/argocd-dex_rundex.md b/docs/operator-manual/server-commands/argocd-dex_rundex.md
index 16e2b15abbece..b2d453feba613 100644
--- a/docs/operator-manual/server-commands/argocd-dex_rundex.md
+++ b/docs/operator-manual/server-commands/argocd-dex_rundex.md
@@ -19,6 +19,7 @@ argocd-dex rundex [flags]
--client-key string Path to a client key file for TLS
--cluster string The name of the kubeconfig cluster to use
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--disable-tls Disable TLS on the HTTP endpoint
-h, --help help for rundex
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
diff --git a/docs/operator-manual/server-commands/argocd-repo-server.md b/docs/operator-manual/server-commands/argocd-repo-server.md
index 33ecaf7c76dd4..7be45fe18d26f 100644
--- a/docs/operator-manual/server-commands/argocd-repo-server.md
+++ b/docs/operator-manual/server-commands/argocd-repo-server.md
@@ -29,6 +29,8 @@ argocd-repo-server [flags]
--metrics-port int Start metrics server on given port (default 8084)
--otlp-address string OpenTelemetry collector address to send traces to
--otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
+ --otlp-headers stringToString List of OpenTelemetry collector extra headers sent with traces, headers are comma-separated key-value pairs(e.g. key1=value1,key2=value2) (default [])
+ --otlp-insecure OpenTelemetry collector insecure mode (default true)
--parallelismlimit int Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit.
--plugin-tar-exclude stringArray Globs to filter when sending tarballs to plugins.
--port int Listen on given port for incoming connections (default 8081)
diff --git a/docs/operator-manual/server-commands/argocd-server.md b/docs/operator-manual/server-commands/argocd-server.md
index d39459ad181d6..a72cc041299ad 100644
--- a/docs/operator-manual/server-commands/argocd-server.md
+++ b/docs/operator-manual/server-commands/argocd-server.md
@@ -12,73 +12,100 @@ The API server is a gRPC/REST server which exposes the API consumed by the Web U
argocd-server [flags]
```
+### Examples
+
+```
+ # Start the Argo CD API server with default settings
+ $ argocd-server
+
+ # Start the Argo CD API server on a custom port and enable tracing
+ $ argocd-server --port 8888 --otlp-address localhost:4317
+```
+
### Options
```
- --address string Listen on given address (default "0.0.0.0")
- --app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
- --application-namespaces strings List of additional namespaces where application resources can be managed in
- --as string Username to impersonate for the operation
- --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
- --as-uid string UID to impersonate for the operation
- --basehref string Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / (default "/")
- --certificate-authority string Path to a cert file for the certificate authority
- --client-certificate string Path to a client certificate file for TLS
- --client-key string Path to a client key file for TLS
- --cluster string The name of the kubeconfig cluster to use
- --connection-status-cache-expiration duration Cache expiration for cluster/repo connection status (default 1h0m0s)
- --content-security-policy value Set Content-Security-Policy header in HTTP responses to value. To disable, set to "". (default "frame-ancestors 'self';")
- --context string The name of the kubeconfig context to use
- --default-cache-expiration duration Cache expiration default (default 24h0m0s)
- --dex-server string Dex server address (default "argocd-dex-server:5556")
- --dex-server-plaintext Use a plaintext client (non-TLS) to connect to dex server
- --dex-server-strict-tls Perform strict validation of TLS certificates when connecting to dex server
- --disable-auth Disable client authentication
- --enable-gzip Enable GZIP compression (default true)
- --enable-proxy-extension Enable Proxy Extension feature
- --gloglevel int Set the glog logging level
- -h, --help help for argocd-server
- --insecure Run server without TLS
- --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
- --kubeconfig string Path to a kube config. Only required if out-of-cluster
- --logformat string Set the logging format. One of: text|json (default "text")
- --login-attempts-expiration duration Cache expiration for failed login attempts (default 24h0m0s)
- --loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
- --metrics-address string Listen for metrics on given address (default "0.0.0.0")
- --metrics-port int Start metrics on given port (default 8083)
- -n, --namespace string If present, the namespace scope for this CLI request
- --oidc-cache-expiration duration Cache expiration for OIDC state (default 3m0s)
- --otlp-address string OpenTelemetry collector address to send traces to
- --otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
- --password string Password for basic authentication to the API server
- --port int Listen on given port (default 8080)
- --proxy-url string If provided, this URL will be used to connect via proxy
- --redis string Redis server hostname and port (e.g. argocd-redis:6379).
- --redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
- --redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
- --redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
- --redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
- --redis-insecure-skip-tls-verify Skip Redis server certificate validation.
- --redis-use-tls Use TLS when connecting to Redis.
- --redisdb int Redis database.
- --repo-server string Repo server address (default "argocd-repo-server:8081")
- --repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server
- --repo-server-strict-tls Perform strict validation of TLS certificates when connecting to repo server
- --repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
- --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
- --rootpath string Used if Argo CD is running behind reverse proxy under subpath different from /
- --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
- --sentinelmaster string Redis sentinel master group name. (default "master")
- --server string The address and port of the Kubernetes API server
- --staticassets string Directory path that contains additional static assets (default "/shared/app")
- --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
- --tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384")
- --tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3")
- --tlsminversion string The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2")
- --token string Bearer token for authentication to the API server
- --user string The name of the kubeconfig user to use
- --username string Username for basic authentication to the API server
- --x-frame-options value Set X-Frame-Options header in HTTP responses to value. To disable, set to "". (default "sameorigin")
+ --address string Listen on given address (default "0.0.0.0")
+ --api-content-types string Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty. (default "application/json")
+ --app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
+ --application-namespaces strings List of additional namespaces where application resources can be managed in
+ --as string Username to impersonate for the operation
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation
+ --basehref string Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / (default "/")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --connection-status-cache-expiration duration Cache expiration for cluster/repo connection status (default 1h0m0s)
+ --content-security-policy value Set Content-Security-Policy header in HTTP responses to value. To disable, set to "". (default "frame-ancestors 'self';")
+ --context string The name of the kubeconfig context to use
+ --default-cache-expiration duration Cache expiration default (default 24h0m0s)
+ --dex-server string Dex server address (default "argocd-dex-server:5556")
+ --dex-server-plaintext Use a plaintext client (non-TLS) to connect to dex server
+ --dex-server-strict-tls Perform strict validation of TLS certificates when connecting to dex server
+ --disable-auth Disable client authentication
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --enable-gzip Enable GZIP compression (default true)
+ --enable-proxy-extension Enable Proxy Extension feature
+ --gloglevel int Set the glog logging level
+ -h, --help help for argocd-server
+ --insecure Run server without TLS
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to a kube config. Only required if out-of-cluster
+ --logformat string Set the logging format. One of: text|json (default "text")
+ --login-attempts-expiration duration Cache expiration for failed login attempts (default 24h0m0s)
+ --loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
+ --metrics-address string Listen for metrics on given address (default "0.0.0.0")
+ --metrics-port int Start metrics on given port (default 8083)
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --oidc-cache-expiration duration Cache expiration for OIDC state (default 3m0s)
+ --otlp-address string OpenTelemetry collector address to send traces to
+ --otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
+ --otlp-headers stringToString List of OpenTelemetry collector extra headers sent with traces, headers are comma-separated key-value pairs(e.g. key1=value1,key2=value2) (default [])
+ --otlp-insecure OpenTelemetry collector insecure mode (default true)
+ --password string Password for basic authentication to the API server
+ --port int Listen on given port (default 8080)
+ --proxy-url string If provided, this URL will be used to connect via proxy
+ --redis string Redis server hostname and port (e.g. argocd-redis:6379).
+ --redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
+ --redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
+ --redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
+ --redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
+ --redis-insecure-skip-tls-verify Skip Redis server certificate validation.
+ --redis-use-tls Use TLS when connecting to Redis.
+ --redisdb int Redis database.
+ --repo-cache-expiration duration Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data (default 24h0m0s)
+ --repo-server string Repo server address (default "argocd-repo-server:8081")
+ --repo-server-default-cache-expiration duration Cache expiration default (default 24h0m0s)
+ --repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server
+ --repo-server-redis string Redis server hostname and port (e.g. argocd-redis:6379).
+ --repo-server-redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
+ --repo-server-redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
+ --repo-server-redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
+ --repo-server-redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
+ --repo-server-redis-insecure-skip-tls-verify Skip Redis server certificate validation.
+ --repo-server-redis-use-tls Use TLS when connecting to Redis.
+ --repo-server-redisdb int Redis database.
+ --repo-server-sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
+ --repo-server-sentinelmaster string Redis sentinel master group name. (default "master")
+ --repo-server-strict-tls Perform strict validation of TLS certificates when connecting to repo server
+ --repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ --revision-cache-expiration duration Cache expiration for cached revision (default 3m0s)
+ --rootpath string Used if Argo CD is running behind reverse proxy under subpath different from /
+ --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
+ --sentinelmaster string Redis sentinel master group name. (default "master")
+ --server string The address and port of the Kubernetes API server
+ --staticassets string Directory path that contains additional static assets (default "/shared/app")
+ --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
+ --tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384")
+ --tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3")
+ --tlsminversion string The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2")
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+ --username string Username for basic authentication to the API server
+ --x-frame-options value Set X-Frame-Options header in HTTP responses to value. To disable, set to "". (default "sameorigin")
```
### SEE ALSO
diff --git a/docs/operator-manual/server-commands/argocd-server_version.md b/docs/operator-manual/server-commands/argocd-server_version.md
index 2d7d9d1151e8a..2659c99e87219 100644
--- a/docs/operator-manual/server-commands/argocd-server_version.md
+++ b/docs/operator-manual/server-commands/argocd-server_version.md
@@ -26,6 +26,7 @@ argocd-server version [flags]
--client-key string Path to a client key file for TLS
--cluster string The name of the kubeconfig cluster to use
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
--kubeconfig string Path to a kube config. Only required if out-of-cluster
-n, --namespace string If present, the namespace scope for this CLI request
diff --git a/docs/operator-manual/signed-release-assets.md b/docs/operator-manual/signed-release-assets.md
index 9aec6bb071047..b4e4f3fc97418 100644
--- a/docs/operator-manual/signed-release-assets.md
+++ b/docs/operator-manual/signed-release-assets.md
@@ -92,7 +92,7 @@ The attestation payload contains a non-forgeable provenance which is base64 enco
```bash
slsa-verifier verify-image "$IMAGE" \
--source-uri github.com/argoproj/argo-cd \
- --source-tag v2.7.0
+ --source-tag v2.7.0 \
--print-provenance | jq
```
diff --git a/docs/operator-manual/troubleshooting.md b/docs/operator-manual/troubleshooting.md
index 884045410b0b8..0e0159e5def4f 100644
--- a/docs/operator-manual/troubleshooting.md
+++ b/docs/operator-manual/troubleshooting.md
@@ -25,7 +25,7 @@ argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argo
**Health Assessment**
-Argo CD provides built-in [health assessment](./health.md) for several kubernetes resources which can be further
+Argo CD provides built-in [health assessment](./health.md) for several Kubernetes resources which can be further
customized by writing your own health checks in [Lua](https://www.lua.org/).
The health checks are configured in the `resource.customizations` field of `argocd-cm` ConfigMap.
diff --git a/docs/operator-manual/upgrading/2.10-2.11.md b/docs/operator-manual/upgrading/2.10-2.11.md
new file mode 100644
index 0000000000000..4cf5c8ed02b0b
--- /dev/null
+++ b/docs/operator-manual/upgrading/2.10-2.11.md
@@ -0,0 +1,5 @@
+# v2.10 to 2.11
+
+## initiatedBy added in Application CRD
+
+In order to address [argoproj/argo-cd#16612](https://github.com/argoproj/argo-cd/issues/16612), the `initiatedBy` field has been added to the Application CRD.
\ No newline at end of file
diff --git a/docs/operator-manual/upgrading/2.7-2.8.md b/docs/operator-manual/upgrading/2.7-2.8.md
index 1e403bf981ab4..c42a97a1f429c 100644
--- a/docs/operator-manual/upgrading/2.7-2.8.md
+++ b/docs/operator-manual/upgrading/2.7-2.8.md
@@ -11,7 +11,7 @@ to upgrade your plugin.
With the 2.8 release `entrypoint.sh` will be removed from the containers,
because starting with 2.7, the implicit entrypoint is set to `tini` in the
-`Dockerfile` explicitly, and the kubernetes manifests has been updated to use
+`Dockerfile` explicitly, and the Kubernetes manifests have been updated to use
it. Simply updating the containers without updating the deployment manifests
will result in pod startup failures, as the old manifests are relying on
`entrypoint.sh` instead of `tini`. Please make sure the manifests are updated
diff --git a/docs/operator-manual/upgrading/2.8-2.9.md b/docs/operator-manual/upgrading/2.8-2.9.md
new file mode 100644
index 0000000000000..ef99e09587814
--- /dev/null
+++ b/docs/operator-manual/upgrading/2.8-2.9.md
@@ -0,0 +1,5 @@
+# v2.8 to 2.9
+
+## Upgraded Kustomize Version
+
+Note that the bundled Kustomize version has been upgraded from 5.1.0 to 5.2.1.
diff --git a/docs/operator-manual/upgrading/2.9-2.10.md b/docs/operator-manual/upgrading/2.9-2.10.md
new file mode 100644
index 0000000000000..cfb3e286649ac
--- /dev/null
+++ b/docs/operator-manual/upgrading/2.9-2.10.md
@@ -0,0 +1,16 @@
+# v2.9 to 2.10
+
+## `managedNamespaceMetadata` no longer preserves client-side-applied labels or annotations
+
+Argo CD 2.10 upgraded kubectl from 1.24 to 1.26. This upgrade introduced a change where client-side-applied labels and
+annotations are no longer preserved when using a server-side kubectl apply. This change affects the
+`managedNamespaceMetadata` field of the `Application` CRD. Previously, labels and annotations applied via a client-side
+apply would be preserved when `managedNamespaceMetadata` was enabled. Now, those existing labels and annotations will be
+removed.
+
+To avoid unexpected behavior, follow the [client-side to server-side resource upgrade guide](https://kubernetes.io/docs/reference/using-api/server-side-apply/#upgrading-from-client-side-apply-to-server-side-apply)
+before enabling `managedNamespaceMetadata` on an existing namespace.
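+
+For reference, a minimal sketch of what enabling `managedNamespaceMetadata` on an Application looks like (the label and annotation values below are purely illustrative):
+
+```yaml
+spec:
+  syncPolicy:
+    syncOptions:
+    - CreateNamespace=true
+    managedNamespaceMetadata:
+      labels:
+        team: my-team          # illustrative value
+      annotations:
+        owner: platform-team   # illustrative value
+```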
+
+## Upgraded Helm Version
+
+Note that the bundled Helm version has been upgraded from 3.13.2 to 3.14.0.
diff --git a/docs/operator-manual/upgrading/overview.md b/docs/operator-manual/upgrading/overview.md
index 419fc7bbb1353..742c7b191b57a 100644
--- a/docs/operator-manual/upgrading/overview.md
+++ b/docs/operator-manual/upgrading/overview.md
@@ -37,6 +37,8 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/
+* [v2.9 to v2.10](./2.9-2.10.md)
+* [v2.8 to v2.9](./2.8-2.9.md)
* [v2.7 to v2.8](./2.7-2.8.md)
* [v2.6 to v2.7](./2.6-2.7.md)
* [v2.5 to v2.6](./2.5-2.6.md)
diff --git a/docs/operator-manual/user-management/identity-center.md b/docs/operator-manual/user-management/identity-center.md
new file mode 100644
index 0000000000000..0fd78b1aaf62f
--- /dev/null
+++ b/docs/operator-manual/user-management/identity-center.md
@@ -0,0 +1,79 @@
+# Identity Center (AWS SSO)
+
+!!! note "Are you using this? Please contribute!"
+    If you're using this IdP, please consider [contributing](../../developer-guide/site.md) to this document.
+
+A working Single Sign-On configuration using Identity Center (AWS SSO) has been achieved using the following method:
+
+* [SAML (with Dex)](#saml-with-dex)
+
+## SAML (with Dex)
+
+1. Create a new SAML application in Identity Center and download the certificate.
+ * ![Identity Center SAML App 1](../../assets/identity-center-1.png)
+ * ![Identity Center SAML App 2](../../assets/identity-center-2.png)
+2. Click `Assign Users` after creating the application in Identity Center, and select the users or user groups you wish to grant access to this application.
+ * ![Identity Center SAML App 3](../../assets/identity-center-3.png)
+3. Copy the Argo CD URL into the `data.url` field in the `argocd-cm` ConfigMap.
+
+ data:
+ url: https://argocd.example.com
+
+4. Configure Attribute mappings.
+
+    !!! note "Group attribute mapping is not officially supported!"
+        Group attribute mapping is not officially supported in the AWS docs; however, the workaround currently works.
+
+ * ![Identity Center SAML App 4](../../assets/identity-center-4.png)
+ * ![Identity Center SAML App 5](../../assets/identity-center-5.png)
+
+
+
+5. Download the CA certificate to use in the `argocd-cm` configuration.
+ * If using the `caData` field, you'll need to base64-encode the entire certificate, including the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` stanzas (e.g., `base64 my_cert.pem`).
+ * If using the `ca` field and storing the CA certificate separately as a secret, you will need to mount the secret onto the `dex` container in the `argocd-dex-server` Deployment.
+ * ![Identity Center SAML App 6](../../assets/identity-center-6.png)
+6. Edit the `argocd-cm` and configure the `data.dex.config` section:
+
+
+```yaml
+dex.config: |
+ logger:
+ level: debug
+ format: json
+ connectors:
+ - type: saml
+ id: aws
+ name: "AWS IAM Identity Center"
+ config:
+      # Value of the Identity Center SAML app's "IAM Identity Center sign-in URL"
+ ssoURL: https://portal.sso.yourregion.amazonaws.com/saml/assertion/id
+ # You need `caData` _OR_ `ca`, but not both.
+      caData: <base64-encoded CA certificate>
+      # ca: <path where the CA certificate secret is mounted in the dex container>
+ entityIssuer: https://external.path.to.argocd.io/api/dex/callback
+ redirectURI: https://external.path.to.argocd.io/api/dex/callback
+ usernameAttr: email
+ emailAttr: email
+ groupsAttr: groups
+```
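+
+If you use the `ca` field instead of `caData`, the CA certificate secret must be mounted into the dex container. A minimal sketch of such a mount on the `argocd-dex-server` Deployment (the secret name `identity-center-ca` and the mount path are illustrative assumptions):
+
+```yaml
+# Sketch of a patch for the argocd-dex-server Deployment
+spec:
+  template:
+    spec:
+      containers:
+      - name: dex
+        volumeMounts:
+        - name: identity-center-ca            # illustrative name
+          mountPath: /tmp/identity-center-ca  # then set `ca` to the mounted file path
+          readOnly: true
+      volumes:
+      - name: identity-center-ca
+        secret:
+          secretName: identity-center-ca      # assumes you created this Secret beforehand
+```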
+
+
+### Connect Identity Center Groups to Argo CD Roles
+Argo CD recognizes user memberships in Identity Center groups that match the **Group Attribute Statements** regex.
+
+In the example above, the regex `argocd-*` is used, making Argo CD aware of a group named `argocd-admins`.
+
+Modify the `argocd-rbac-cm` ConfigMap to connect the `ArgoCD-administrators` Identity Center group to the builtin Argo CD `admin` role.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: argocd-rbac-cm
+data:
+ policy.csv: |
+    g, ArgoCD-administrators, role:admin
+ scopes: '[groups, email]'
+```
+
diff --git a/docs/operator-manual/user-management/index.md b/docs/operator-manual/user-management/index.md
index 8c3f2e169597c..496dd17a83e9f 100644
--- a/docs/operator-manual/user-management/index.md
+++ b/docs/operator-manual/user-management/index.md
@@ -201,7 +201,7 @@ data:
id: acme-github
name: Acme GitHub
config:
- hostName: github.acme.com
+ hostName: github.acme.example.com
clientID: abcdefghijklmnopqrst
clientSecret: $dex.acme.clientSecret # Alternatively $:dex.acme.clientSecret
orgs:
@@ -242,7 +242,7 @@ data:
id: oidc
name: OIDC
config:
- issuer: https://example-OIDC-provider.com
+ issuer: https://example-OIDC-provider.example.com
clientID: aaaabbbbccccddddeee
clientSecret: $dex.oidc.clientSecret
```
@@ -264,7 +264,7 @@ data:
id: oidc
name: OIDC
config:
- issuer: https://example-OIDC-provider.com
+ issuer: https://example-OIDC-provider.example.com
clientID: aaaabbbbccccddddeee
clientSecret: $dex.oidc.clientSecret
insecureEnableGroups: true
@@ -294,7 +294,7 @@ data:
id: oidc
name: OIDC
config:
- issuer: https://example-OIDC-provider.com
+ issuer: https://example-OIDC-provider.example.com
clientID: aaaabbbbccccddddeee
clientSecret: $dex.oidc.clientSecret
insecureEnableGroups: true
@@ -344,6 +344,12 @@ data:
# for the 'localhost' (CLI) client to Dex. This field is optional. If omitted, the CLI will
# use the same clientID as the Argo CD server
cliClientID: vvvvwwwwxxxxyyyyzzzz
+
+  # The PKCE authentication flow handles the authorization flow from the browser only - default false
+  # uses the clientID
+  # make sure the Identity Provider (IdP) is public and doesn't need a clientSecret
+  # make sure the Identity Provider (IdP) has this redirect URI registered: https://argocd.example.com/pkce/verify
+ enablePKCEAuthentication: true
```
!!! note
@@ -381,6 +387,20 @@ For a simple case this can be:
oidc.config: |
requestedIDTokenClaims: {"groups": {"essential": true}}
```
+
+### Retrieving group claims when not in the token
+
+Some OIDC providers don't return the group information for a user in the ID token, even if explicitly requested using the `requestedIDTokenClaims` setting (Okta, for example). They instead provide the groups on the user info endpoint. With the following config, Argo CD queries the user info endpoint during login for a user's group information:
+
+```yaml
+oidc.config: |
+ enableUserInfoGroups: true
+ userInfoPath: /userinfo
+ userInfoCacheExpiration: "5m"
+```
+
+**Note: If you omit the `userInfoCacheExpiration` setting or if it's greater than the expiration of the ID token, the argocd-server will cache group information as long as the ID token is valid!**
+
### Configuring a custom logout URL for your OIDC provider
Optionally, if your OIDC provider exposes a logout API and you wish to configure a custom logout URL for the purposes of invalidating
@@ -389,18 +409,18 @@ any active session post logout, you can do so by specifying it as follows:
```yaml
oidc.config: |
name: example-OIDC-provider
- issuer: https://example-OIDC-provider.com
+ issuer: https://example-OIDC-provider.example.com
clientID: xxxxxxxxx
clientSecret: xxxxxxxxx
requestedScopes: ["openid", "profile", "email", "groups"]
requestedIDTokenClaims: {"groups": {"essential": true}}
- logoutURL: https://example-OIDC-provider.com/logout?id_token_hint={{token}}
+ logoutURL: https://example-OIDC-provider.example.com/logout?id_token_hint={{token}}
```
By default, this would take the user to their OIDC provider's login page after logout. If you also wish to redirect the user back to Argo CD after logout, you can specify the logout URL as follows:
```yaml
...
- logoutURL: https://example-OIDC-provider.com/logout?id_token_hint={{token}}&post_logout_redirect_uri={{logoutRedirectURL}}
+ logoutURL: https://example-OIDC-provider.example.com/logout?id_token_hint={{token}}&post_logout_redirect_uri={{logoutRedirectURL}}
```
You are not required to specify a logoutRedirectURL as this is automatically generated by ArgoCD as your base ArgoCD url + Rootpath
@@ -436,7 +456,7 @@ Add a `rootCA` to your `oidc.config` which contains the PEM encoded root certifi
#### Example
-SSO `clientSecret` can thus be stored as a kubernetes secret with the following manifests
+SSO `clientSecret` can thus be stored as a Kubernetes secret with the following manifests
`argocd-secret`:
```yaml
diff --git a/docs/operator-manual/user-management/microsoft.md b/docs/operator-manual/user-management/microsoft.md
index 33a6b3e945940..486d647fde3d0 100644
--- a/docs/operator-manual/user-management/microsoft.md
+++ b/docs/operator-manual/user-management/microsoft.md
@@ -1,13 +1,16 @@
# Microsoft
-* [Azure AD SAML Enterprise App Auth using Dex](#azure-ad-saml-enterprise-app-auth-using-dex)
-* [Azure AD App Registration Auth using OIDC](#azure-ad-app-registration-auth-using-oidc)
-* [Azure AD App Registration Auth using Dex](#azure-ad-app-registration-auth-using-dex)
+!!! note ""
+ Entra ID was formerly known as Azure AD.
-## Azure AD SAML Enterprise App Auth using Dex
-### Configure a new Azure AD Enterprise App
+* [Entra ID SAML Enterprise App Auth using Dex](#entra-id-saml-enterprise-app-auth-using-dex)
+* [Entra ID App Registration Auth using OIDC](#entra-id-app-registration-auth-using-oidc)
+* [Entra ID App Registration Auth using Dex](#entra-id-app-registration-auth-using-dex)
-1. From the `Azure Active Directory` > `Enterprise applications` menu, choose `+ New application`
+## Entra ID SAML Enterprise App Auth using Dex
+### Configure a new Entra ID Enterprise App
+
+1. From the `Microsoft Entra ID` > `Enterprise applications` menu, choose `+ New application`
2. Select `Non-gallery application`
3. Enter a `Name` for the application (e.g. `Argo CD`), then choose `Add`
4. Once the application is created, open it from the `Enterprise applications` menu.
@@ -31,9 +34,9 @@
- *Keep a copy of the encoded output to be used in the next section.*
9. From the `Single sign-on` menu, copy the `Login URL` parameter, to be used in the next section.
-### Configure Argo to use the new Azure AD Enterprise App
+### Configure Argo to use the new Entra ID Enterprise App
-1. Edit `argocd-cm` and add the following `dex.config` to the data section, replacing the `caData`, `my-argo-cd-url` and `my-login-url` your values from the Azure AD App:
+1. Edit `argocd-cm` and add the following `dex.config` to the data section, replacing the `caData`, `my-argo-cd-url` and `my-login-url` with your values from the Entra ID App:
data:
url: https://my-argo-cd-url
@@ -56,7 +59,7 @@
groupsAttr: Group
2. Edit `argocd-rbac-cm` to configure permissions, similar to example below.
- - Use Azure AD `Group IDs` for assigning roles.
+ - Use Entra ID `Group IDs` for assigning roles.
- See [RBAC Configurations](../rbac.md) for more detailed scenarios.
# example policy
@@ -70,11 +73,11 @@
p, role:org-admin, repositories, delete, *, allow
g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin # (azure group assigned to role)
-## Azure AD App Registration Auth using OIDC
-### Configure a new Azure AD App registration
-#### Add a new Azure AD App registration
+## Entra ID App Registration Auth using OIDC
+### Configure a new Entra ID App registration
+#### Add a new Entra ID App registration
-1. From the `Azure Active Directory` > `App registrations` menu, choose `+ New registration`
+1. From the `Microsoft Entra ID` > `App registrations` menu, choose `+ New registration`
2. Enter a `Name` for the application (e.g. `Argo CD`).
3. Specify who can use the application (e.g. `Accounts in this organizational directory only`).
4. Enter Redirect URI (optional) as follows (replacing `my-argo-cd-url` with your Argo URL), then choose `Add`.
@@ -92,29 +95,29 @@
- **Redirect URI:** `http://localhost:8085/auth/callback`
![Azure App registration's Authentication](../../assets/azure-app-registration-authentication.png "Azure App registration's Authentication")
-#### Add credentials a new Azure AD App registration
+#### Add credentials to a new Entra ID App registration
1. From the `Certificates & secrets` menu, choose `+ New client secret`
2. Enter a `Name` for the secret (e.g. `ArgoCD-SSO`).
- Make sure to copy and save generated value. This is a value for the `client_secret`.
![Azure App registration's Secret](../../assets/azure-app-registration-secret.png "Azure App registration's Secret")
-#### Setup permissions for Azure AD Application
+#### Setup permissions for Entra ID Application
1. From the `API permissions` menu, choose `+ Add a permission`
2. Find `User.Read` permission (under `Microsoft Graph`) and grant it to the created application:
- ![Azure AD API permissions](../../assets/azure-api-permissions.png "Azure AD API permissions")
+ ![Entra ID API permissions](../../assets/azure-api-permissions.png "Entra ID API permissions")
3. From the `Token Configuration` menu, choose `+ Add groups claim`
- ![Azure AD token configuration](../../assets/azure-token-configuration.png "Azure AD token configuration")
+ ![Entra ID token configuration](../../assets/azure-token-configuration.png "Entra ID token configuration")
-### Associate an Azure AD group to your Azure AD App registration
+### Associate an Entra ID group to your Entra ID App registration
-1. From the `Azure Active Directory` > `Enterprise applications` menu, search the App that you created (e.g. `Argo CD`).
- - An Enterprise application with the same name of the Azure AD App registration is created when you add a new Azure AD App registration.
+1. From the `Microsoft Entra ID` > `Enterprise applications` menu, search the App that you created (e.g. `Argo CD`).
+ - An Enterprise application with the same name of the Entra ID App registration is created when you add a new Entra ID App registration.
2. From the `Users and groups` menu of the app, add any users or groups requiring access to the service.
![Azure Enterprise SAML Users](../../assets/azure-enterprise-users.png "Azure Enterprise SAML Users")
-### Configure Argo to use the new Azure AD App registration
+### Configure Argo to use the new Entra ID App registration
1. Edit `argocd-cm` and configure the `data.oidc.config` and `data.url` section:
@@ -173,7 +176,7 @@
Refer to [operator-manual/argocd-rbac-cm.yaml](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-rbac-cm.yaml) for all of the available variables.
-## Azure AD App Registration Auth using Dex
+## Entra ID App Registration Auth using Dex
Configure a new AD App Registration, as above.
Then, add the `dex.config` to `argocd-cm`:
@@ -200,9 +203,9 @@ data:
1. Open a new browser tab and enter your ArgoCD URI: https://``
![Azure SSO Web Log In](../../assets/azure-sso-web-log-in-via-azure.png "Azure SSO Web Log In")
-3. Click `LOGIN VIA AZURE` button to log in with your Azure Active Directory account. You’ll see the ArgoCD applications screen.
+3. Click the `LOGIN VIA AZURE` button to log in with your Microsoft Entra ID account. You’ll see the ArgoCD applications screen.
![Azure SSO Web Application](../../assets/azure-sso-web-application.png "Azure SSO Web Application")
-4. Navigate to User Info and verify Group ID. Groups will have your group’s Object ID that you added in the `Setup permissions for Azure AD Application` step.
+4. Navigate to User Info and verify Group ID. Groups will have your group’s Object ID that you added in the `Setup permissions for Entra ID Application` step.
![Azure SSO Web User Info](../../assets/azure-sso-web-user-info.png "Azure SSO Web User Info")
### Log in to ArgoCD using CLI
diff --git a/docs/operator-manual/user-management/okta.md b/docs/operator-manual/user-management/okta.md
index 09d7099d19954..308254759de6e 100644
--- a/docs/operator-manual/user-management/okta.md
+++ b/docs/operator-manual/user-management/okta.md
@@ -118,34 +118,81 @@ data:
## OIDC (without Dex)
-!!! warning "Do you want groups for RBAC later?"
- If you want `groups` scope returned from Okta you need to unfortunately contact support to enable [API Access Management with Okta](https://developer.okta.com/docs/concepts/api-access-management/) or [_just use SAML above!_](#saml-with-dex)
+!!! warning "Okta groups for RBAC"
+    If you want the `groups` scope returned from Okta, you will need to enable [API Access Management with Okta](https://developer.okta.com/docs/concepts/api-access-management/). This add-on is free and automatically enabled on the Okta developer edition. However, it's an optional add-on for production environments, with an additional associated cost.
- Next you may need the API Access Management feature, which the support team can enable for your OktaPreview domain for testing, to enable "custom scopes" and a separate endpoint to use instead of the "public" `/oauth2/v1/authorize` API Access Management endpoint. This might be a paid feature if you want OIDC unfortunately. The free alternative I found was SAML.
+    You may alternatively add a "groups" scope and claim to the default authorization server, and then filter the claim in the Okta application configuration. It's not clear if this requires the Authorization Server add-on.
+
+ If this is not an option for you, use the [SAML (with Dex)](#saml-with-dex) option above instead.
+
+!!! note
+ These instructions and screenshots are of Okta version 2023.05.2 E. You can find the current version in the Okta website footer.
+
+First, create the OIDC integration:
+
+1. On the `Okta Admin` page, navigate to the Okta Applications at `Applications > Applications`.
+1. Choose `Create App Integration`, and choose `OIDC`, and then `Web Application` in the resulting dialogues.
+ ![Okta OIDC app dialogue](../../assets/okta-create-oidc-app.png)
+1. Update the following:
+ 1. `App Integration name` and `Logo` - set these to suit your needs; they'll be displayed in the Okta catalogue.
+    1. `Sign-in redirect URLs`: Add `https://argocd.example.com/auth/callback`, replacing `argocd.example.com` with your ArgoCD web interface URL. Also add `http://localhost:8085/auth/callback` if you would like to be able to log in with the CLI.
+    1. `Sign-out redirect URIs`: Add `https://argocd.example.com`, substituting the correct domain name as above.
+ 1. Either assign groups, or choose to skip this step for now.
+ 1. Leave the rest of the options as-is, and save the integration.
+ ![Okta app settings](../../assets/okta-app.png)
+1. Copy the `Client ID` and the `Client Secret` from the newly created app; you will need these later.
+
+Next, create a custom Authorization server:
1. On the `Okta Admin` page, navigate to the Okta API Management at `Security > API`.
- ![Okta API Management](../../assets/api-management.png)
-1. Choose your `default` authorization server.
-1. Click `Scopes > Add Scope`
- 1. Add a scope called `groups`.
- ![Groups Scope](../../assets/groups-scope.png)
-1. Click `Claims > Add Claim.`
- 1. Add a claim called `groups`
- 1. Choose the matching options you need, one example is:
- * e.g. to match groups starting with `argocd-` you'd return an `ID Token` using your scope name from step 3 (e.g. `groups`) where the groups name `matches` the `regex` `argocd-.*`
- ![Groups Claim](../../assets/groups-claim.png)
-1. Edit the `argocd-cm` and configure the `data.oidc.config` section:
+1. Click `Add Authorization Server`, and assign it a name and a description. The `Audience` should match your ArgoCD URL - `https://argocd.example.com`
+1. Click `Scopes > Add Scope`:
+ 1. Add a scope called `groups`. Leave the rest of the options as default.
+ ![Groups Scope](../../assets/okta-groups-scope.png)
+1. Click `Claims > Add Claim`:
+ 1. Add a claim called `groups`.
+ 1. Adjust the `Include in token type` to `ID Token`, `Always`.
+ 1. Adjust the `Value type` to `Groups`.
+ 1. Add a filter that will match the Okta groups you want passed on to ArgoCD; for example `Regex: argocd-.*`.
+ 1. Set `Include in` to `groups` (the scope you created above).
+ ![Groups Claim](../../assets/okta-groups-claim.png)
+1. Click on `Access Policies` > `Add Policy`. This policy will restrict how this authorization server is used.
+ 1. Add a name and description.
+ 1. Assign the policy to the client (application integration) you created above. The field should auto-complete as you type.
+ 1. Create the policy.
+ ![Auth Policy](../../assets/okta-auth-policy.png)
+1. Add a rule to the policy:
+ 1. Add a name; `default` is a reasonable name for this rule.
+ 1. Fine-tune the settings to suit your organization's security posture. Some ideas:
+ 1. uncheck all the grant types except the Authorization Code.
+ 1. Adjust the token lifetime to govern how long a session can last.
+ 1. Restrict refresh token lifetime, or completely disable it.
+ ![Default rule](../../assets/okta-auth-rule.png)
+1. Finally, click `Back to Authorization Servers`, and copy the `Issuer URI`. You will need this later.
+
+If you haven't yet created Okta groups, and assigned them to the application integration, you should do that now:
+
+1. Go to `Directory > Groups`
+1. For each group you wish to add:
+    1. Click `Add Group`, and choose a meaningful name. It should match the regex or pattern you added to your custom `groups` claim.
+ 1. Click on the group (refresh the page if the new group didn't show up in the list).
+ 1. Assign Okta users to the group.
+ 1. Click on `Applications` and assign the OIDC application integration you created to this group.
+ 1. Repeat as needed.
+
+Finally, configure ArgoCD itself. Edit the `argocd-cm` ConfigMap:
```yaml
+url: https://argocd.example.com
oidc.config: |
name: Okta
- issuer: https://yourorganization.oktapreview.com
- clientID: 0oaltaqg3oAIf2NOa0h3
- clientSecret: ZXF_CfUc-rtwNfzFecGquzdeJ_MxM4sGc8pDT2Tg6t
+ # this is the authorization server URI
+ issuer: https://example.okta.com/oauth2/aus9abcdefgABCDEFGd7
+ clientID: 0oa9abcdefgh123AB5d7
+ clientSecret: ABCDEFG1234567890abcdefg
requestedScopes: ["openid", "profile", "email", "groups"]
requestedIDTokenClaims: {"groups": {"essential": true}}
```
-
-
+You may want to store the `clientSecret` in a Kubernetes secret; see [how to deal with SSO secrets](./index.md#sensitive-data-and-sso-client-secrets) for more details.
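+
+For example, a minimal sketch of referencing a secret-stored value from `oidc.config` (the key name `oidc.okta.clientSecret` is an illustrative choice):
+
+```yaml
+oidc.config: |
+  name: Okta
+  issuer: https://example.okta.com/oauth2/aus9abcdefgABCDEFGd7
+  clientID: 0oa9abcdefgh123AB5d7
+  # Reference a key stored in the argocd-secret Secret instead of an inline value
+  clientSecret: $oidc.okta.clientSecret
+  requestedScopes: ["openid", "profile", "email", "groups"]
+  requestedIDTokenClaims: {"groups": {"essential": true}}
+```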
diff --git a/docs/operator-manual/webhook.md b/docs/operator-manual/webhook.md
index 1d5ad5ec79c96..eb15c4cb02369 100644
--- a/docs/operator-manual/webhook.md
+++ b/docs/operator-manual/webhook.md
@@ -41,7 +41,7 @@ the contents of webhook payloads are considered untrusted, and will only result
application (a process which already occurs at three-minute intervals). If Argo CD is publicly
accessible, then configuring a webhook secret is recommended to prevent a DDoS attack.
-In the `argocd-secret` kubernetes secret, configure one of the following keys with the Git
+In the `argocd-secret` Kubernetes secret, configure one of the following keys with the Git
provider's webhook secret configured in step 1.
| Provider | K8s Secret Key |
@@ -54,13 +54,13 @@ provider's webhook secret configured in step 1.
| Azure DevOps | `webhook.azuredevops.username` |
| | `webhook.azuredevops.password` |
-Edit the Argo CD kubernetes secret:
+Edit the Argo CD Kubernetes secret:
```bash
kubectl edit secret argocd-secret -n argocd
```
-TIP: for ease of entering secrets, kubernetes supports inputting secrets in the `stringData` field,
+TIP: for ease of entering secrets, Kubernetes supports inputting secrets in the `stringData` field,
which saves you the trouble of base64 encoding the values and copying it to the `data` field.
Simply copy the shared webhook secret created in step 1, to the corresponding
GitHub/GitLab/BitBucket key under the `stringData` field:
diff --git a/docs/proposals/config-management-plugin-v2.md b/docs/proposals/config-management-plugin-v2.md
index d5d68cc0af942..549ed3967ef49 100644
--- a/docs/proposals/config-management-plugin-v2.md
+++ b/docs/proposals/config-management-plugin-v2.md
@@ -291,7 +291,7 @@ There aren't any major drawbacks to this proposal. Also, the advantages supersed
However following are few minor drawbacks,
* With addition of plugin.yaml, there will be more yamls to manage
-* Operators need to be aware of the modified kubernetes manifests in the subsequent version.
+* Operators need to be aware of the modified Kubernetes manifests in the subsequent version.
* The format of the CMP manifest is a new "contract" that would need to adhere the usual Argo CD compatibility promises in future.
diff --git a/docs/proposals/decouple-application-sync-user-using-impersonation.md b/docs/proposals/decouple-application-sync-user-using-impersonation.md
new file mode 100644
index 0000000000000..e7e459a7059c0
--- /dev/null
+++ b/docs/proposals/decouple-application-sync-user-using-impersonation.md
@@ -0,0 +1,592 @@
+---
+title: Decouple Control plane and Application Sync privileges
+authors:
+ - "@anandf"
+sponsors:
+ - Red Hat
+reviewers:
+ - "@blakepettersson"
+ - "@crenshaw-dev"
+ - "@jannfis"
+approvers:
+ - "@alexmt"
+ - "@crenshaw-dev"
+ - "@jannfis"
+
+creation-date: 2023-06-23
+last-updated: 2024-02-06
+---
+
+# Decouple Application Sync using Impersonation
+
+Application syncs in Argo CD have the same privileges as the Argo CD control plane. As a consequence, in a multi-tenant setup, the Argo CD control plane privileges need to match those of the tenant that requires the highest privileges. For example, if an Argo CD instance has 10 Applications and only one of them requires admin privileges, then the Argo CD control plane must have admin privileges in order to sync that one Application. Argo CD provides a multi-tenancy model that restricts what each Application can do via `AppProjects`, even though the control plane has higher privileges. However, this creates a large attack surface: if Argo CD is compromised, attackers would have cluster-admin access to the cluster.
+
+The goal of this proposal is to perform the Application sync as a different user using impersonation and use the service account provided in the cluster config purely for control plane operations.
+
+### What is Impersonation
+
+Impersonation is a Kubernetes feature, also exposed through the `kubectl` CLI client, that allows a user to act as another user by way of impersonation headers. For example, an admin could use this feature to debug an authorization policy by temporarily impersonating another user and seeing if a request was denied.
+
+Impersonation requests first authenticate as the requesting user, then switch to the impersonated user info.
+
+```
+kubectl --as <user-to-impersonate> ...
+kubectl --as <user-to-impersonate> --as-group <group-to-impersonate> ...
+```
+
+## Open Questions [optional]
+
+- Should the restrictions imposed as part of the `AppProjects` be honored if the impersonation feature is enabled?
+>Yes, other restrictions implemented by `AppProject` related to whitelisting/blacklisting resources must continue to be honored.
+- Can an Application refer to a service account with elevated privileges, such as `cluster-admin`, `admin`, or the service accounts used for running the ArgoCD controllers themselves?
+>Yes, this is possible as long as the ArgoCD admin user explicitly allows it through the `AppProject` configuration.
+- Among the destinations configured in the `AppProject`, if there are multiple matches for a given destination, which destination option should be used?
+>If there is more than one matching destination, either with a glob pattern match or an exact match, then the first valid match is used to determine the service account for the sync operation.
+- Can the Kubernetes audit trail capture the impersonation?
+>Yes, Kubernetes audit events capture both the requesting user and the impersonated user, so it is possible to track who executed the commands, and with which user's permissions, using the audit trail.
+- Would the sync hooks use the impersonation service account?
+>Yes, if the impersonation feature is enabled and customers use sync hooks, then the impersonation service account would be used for executing the hook jobs as well.
+- If application resources have hardcoded namespaces in the Git repository, would different service accounts be used for each resource during the sync operation?
+>The service account used for impersonation is determined per Application rather than per resource. The value specified in `Application.spec.destination.namespace` determines the service account used for the sync operation of all resources present in the `Application`.
+
+## Summary
+
+In a multi-team/multi-tenant environment, an application team is typically granted access to a namespace to self-manage their Applications in a declarative way. The current implementation of ArgoCD requires the ArgoCD administrator to create an `AppProject` with access settings that replicate the RBAC resources configured for each team. This approach duplicates effort and requires keeping both sets of permissions in sync to maintain the security posture. It would be desirable for users to rely on their existing RBAC rules without having to resort to the Argo CD API to create and manage these Applications. One namespace per team, or even one namespace per application, is what we are looking to address as part of this proposal.
+
+## Motivation
+
+This proposal would allow ArgoCD administrators to manage cluster permissions using the Kubernetes-native RBAC implementation rather than complex configurations in `AppProjects` to restrict access to individual applications. By decoupling the privileges required for application sync from the privileges required for ArgoCD control plane operations, the principle of least privilege can be satisfied, thereby improving the security posture of ArgoCD. This decoupling would be greatly beneficial for implementing multi-team/multi-tenant use cases.
+
+### Assumptions
+
+- Namespaces are pre-populated with one or more `ServiceAccounts` that define the permissions for each `AppProject`.
+- Many users prefer to control access to Kubernetes resources through Kubernetes RBAC constructs instead of Argo-specific constructs.
+- Each tenant is generally given access to a specific namespace along with a service account, role or cluster role and role binding to control access to that namespace.
+- `Applications` created by a tenant manage namespaced resources.
+- An `AppProject` can be mapped either to a single tenant or to multiple related tenants, and the respective destinations that need to be managed via the `AppProject` need to be configured.
+
+
+### Goals
+- Applications may only impersonate ServiceAccounts that live in the same namespace as the destination namespace configured in the application. If the service account is created in a different namespace, then the user can provide the service account name in the format `<namespace>:<service_account_name>`. The ServiceAccount to be used for syncing each application is determined by the target destination configured in the `AppProject` associated with the `Application`.
+- If the impersonation feature is enabled and no service account name is provided in the associated `AppProject`, then the default service account of the destination namespace of the `Application` should be used.
+- Access restrictions implemented through properties in `AppProject` (if configured) must retain their existing behavior. From a security standpoint, any restrictions that were in place before switching to a service-account-based approach should continue to apply even when the impersonation feature is enabled.
+
+### Non-Goals
+
+None
+
+## Proposal
+
+As part of this proposal, it would be possible for an ArgoCD admin to specify a service account name in the `AppProject` CR for a single destination or a group of destinations. A destination is uniquely identified by the combination of a target cluster and a namespace.
+
+When an application gets synced, the `defaultServiceAccount` configured in the `AppProject` for its destination (target cluster and namespace combination) will be selected and used for impersonation when executing the kubectl commands for the sync operation.
+
+We would be introducing a new element `destinationServiceAccounts` in `AppProject.spec`. This element is used for the sole purpose of specifying the impersonation configuration. The `defaultServiceAccount` configured for the `AppProject` would be used for the sync operation for a particular destination cluster and namespace. If the impersonation feature is enabled and no specific service account is provided in the `AppProject` CR, then the `default` service account in the destination namespace would be used for impersonation.
+
+```
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: my-project
+ namespace: argocd
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io
+spec:
+ description: Example Project
+ # Allow manifests to deploy from any Git repos
+ sourceRepos:
+ - '*'
+ destinations:
+  - namespace: '*'
+    server: '*'
+ destinationServiceAccounts:
+ - server: https://kubernetes.default.svc
+ namespace: guestbook
+ defaultServiceAccount: guestbook-deployer
+ - server: https://kubernetes.default.svc
+ namespace: guestbook-dev
+ defaultServiceAccount: guestbook-dev-deployer
+ - server: https://kubernetes.default.svc
+ namespace: guestbook-stage
+ defaultServiceAccount: guestbook-stage-deployer
+```
+
+### Structure of DestinationServiceAccount:
+|Parameter| Type | Required/Optional| Description|
+| ------ | ------ | ------- | -------- |
+| server | string | Required | Server specifies the URL of the target cluster's Kubernetes control plane API. Glob patterns are supported. |
+| namespace | string | Required | Namespace specifies the target namespace for the application's resources. Glob patterns are supported. |
+| defaultServiceAccount | string | Required| DefaultServiceAccount specifies the service account to be impersonated when performing the `Application` sync operation.|
+
+**Note:** Only the server URL of the target cluster is supported; the target cluster name is not supported.
+
+### Future enhancements
+
+In a future release, we plan to support overriding of service accounts at the application level. In that case, we would be adding an element called `allowedServiceAccounts` to `AppProject.spec.destinationServiceAccounts[*]`.
+
+### Use cases
+
+#### Use case 1:
+
+As a user, I would like to use Kubernetes security constructs to restrict user access for application sync,
+so that I can provide granular permissions based on the principle of least privilege required for syncing an application.
+
+#### Use case 2:
+
+As a user, I would like to configure a common service account for all applications associated with an `AppProject`,
+so that I can use a generic naming convention for service accounts and avoid associating a service account with each application.
+
+### Design considerations
+
+- Extending the `destinations` field under `AppProject` was considered, but since its intent is to restrict the destinations that an associated `Application` can use, it was not chosen. In addition, the destination fields allow the negation operator (`!`), which would complicate the service account matching logic. Creating a new struct under `AppProject.Spec` for specifying the service account for each destination was considered the better alternative.
+
+- The field name `defaultServiceAccount` was chosen instead of `serviceAccount` as we wanted to support overriding of the service account at the `Application` level at a later point in time, and wanted to reserve the name `serviceAccount` for that future extension.
+
+- Not all impersonation options are supported at the moment, in order to keep the initial design minimal. Based on need and feedback, support for impersonating users or groups can be added in the future.
+
+### Implementation Details/Notes/Constraints
+
+#### Component : GitOps Engine
+
+- Fix GitOps Engine code to honor Impersonate configuration set in the Application sync context for all kubectl commands that are being executed.
+
+#### Component: ArgoCD API
+
+- Create a new struct type `DestinationServiceAccount` with the fields `namespace`, `server` and `defaultServiceAccount`.
+- Create a new field `DestinationServiceAccounts` under `AppProject.Spec` that takes a list of `DestinationServiceAccount` objects.
+- Add documentation for the newly introduced `DestinationServiceAccount` struct and its fields, and for `DestinationServiceAccounts` under `AppProject.Spec`.
+
+#### Component: ArgoCD Application Controller
+
+- Provide a configuration in `argocd-cm` which can be modified to enable the impersonation feature: set `applicationcontroller.enable.impersonation: true` in the Argo CD ConfigMap (a sketch of this change is shown after this list). The default value of `applicationcontroller.enable.impersonation` would be `false`, and the user has to explicitly override it to use this feature.
+- Provide an option to override the impersonation feature using an environment variable: set `ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true` in the Application controller environment. The default value of the environment variable must be `false`, and the user has to explicitly set it to `true` to use this feature.
+- Provide an option to enable this feature using a command line flag `--enable-impersonation`. This new argument option needs to be added to the Application controller args.
+- Fix the Application Controller `sync.go` to set the impersonation configuration from the AppProject CR on the `SyncContext` object (the `rawConfig` and `restConfig` fields; we need to understand which config is used for the actual sync and whether both configs need to be impersonated).
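+
+A minimal sketch of the `argocd-cm` change described above, assuming the standard `argocd-cm` ConfigMap in the `argocd` namespace:
+
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+  namespace: argocd
+data:
+  # opt-in feature flag; treated as "false" when omitted
+  applicationcontroller.enable.impersonation: "true"
+```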
+
+#### Component: ArgoCD UI
+
+- Provide an option to create a `DestinationServiceAccount` with the fields `namespace`, `server` and `defaultServiceAccount`.
+- Provide an option to add multiple `DestinationServiceAccounts` to an `AppProject` created/updated via the web console.
+- Update the User Guide documentation on how to use these newly added fields from the web console.
+
+#### Component: ArgoCD CLI
+
+- Provide an option to create a `DestinationServiceAccount` with the fields `namespace`, `server` and `defaultServiceAccount`.
+- Provide an option to add multiple `DestinationServiceAccounts` to an `AppProject` created/updated via the CLI.
+- Update the User Guide and other documentation where the CLI option usages are explained.
+
+#### Component: Documentation
+
+- Add note that this is a Beta feature in the documentation.
+- Add a separate section for this feature under user-guide section.
+- Update the ArgoCD CLI command reference documentation.
+- Update the ArgoCD UI command reference documentation.
+
+### Detailed examples
+
+#### Example 1: Service account for application sync specified at the AppProject level for all namespaces
+
+In this scenario, the service account `generic-deployer` will be used for the application sync, as the namespace `guestbook` matches the glob pattern `*`.
+
+- Install ArgoCD in the `argocd` namespace.
+```
+kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml -n argocd
+```
+
+- Enable the impersonation feature in ArgoCD.
+```
+kubectl set env statefulset/argocd-application-controller ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true -n argocd
+```
+
+- Create a namespace called `guestbook` and a service account called `generic-deployer`.
+```
+kubectl create namespace guestbook
+kubectl create serviceaccount generic-deployer -n guestbook
+```
+
+- Create a Role and RoleBinding granting RBAC access for managing `Service` and `Deployment` objects in namespace `guestbook` to the service account `generic-deployer`.
+```
+kubectl create role generic-deployer-role --verb create,get,list,update,delete --resource pods,deployment,service -n guestbook
+kubectl create rolebinding generic-deployer-rb --serviceaccount guestbook:generic-deployer --role generic-deployer-role -n guestbook
+```
+
+- Create the `Application` in the `argocd` namespace and the required `AppProject` as below:
+```
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: guestbook
+ namespace: argocd
+spec:
+ project: my-project
+ source:
+ repoURL: https://github.com/argoproj/argocd-example-apps.git
+ targetRevision: HEAD
+ path: guestbook
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: guestbook
+---
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: my-project
+ namespace: argocd
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io
+spec:
+ description: Example Project
+ # Allow manifests to deploy from any Git repos
+ sourceRepos:
+ - '*'
+ destinations:
+  - namespace: '*'
+ server: https://kubernetes.default.svc
+ destinationServiceAccounts:
+  - namespace: '*'
+ server: https://kubernetes.default.svc
+ defaultServiceAccount: generic-deployer
+```
+
+#### Example 2: Service account for application sync specified at the AppProject level for specific namespaces
+
+In this scenario, the service account `guestbook-deployer` will be used for the application sync, as the application's destination namespace `guestbook` matches the configured target namespace `guestbook`.
+
+- Install ArgoCD in the `argocd` namespace.
+```
+kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml -n argocd
+```
+
+- Enable the impersonation feature in ArgoCD.
+```
+kubectl set env statefulset/argocd-application-controller ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true -n argocd
+```
+
+- Create a namespace called `guestbook` and a service account called `guestbook-deployer`.
+```
+kubectl create namespace guestbook
+kubectl create serviceaccount guestbook-deployer -n guestbook
+```
+- Create a Role and RoleBinding granting RBAC access for managing `Service` and `Deployment` objects in namespace `guestbook` to the service account `guestbook-deployer`.
+```
+kubectl create role guestbook-deployer-role --verb create,get,list,update,delete --resource pods,deployment,service -n guestbook
+kubectl create rolebinding guestbook-deployer-rb --serviceaccount guestbook:guestbook-deployer --role guestbook-deployer-role -n guestbook
+```
+
+In this scenario, the service account `guestbook-deployer` will be used, as it matches the specific namespace `guestbook`.
+```
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: guestbook
+ namespace: argocd
+spec:
+ project: my-project
+ source:
+ repoURL: https://github.com/argoproj/argocd-example-apps.git
+ targetRevision: HEAD
+ path: guestbook
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: guestbook
+---
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: my-project
+ namespace: argocd
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io
+spec:
+ description: Example Project
+ # Allow manifests to deploy from any Git repos
+ sourceRepos:
+ - '*'
+ destinations:
+ - namespace: guestbook
+ server: https://kubernetes.default.svc
+ - namespace: guestbook-ui
+ server: https://kubernetes.default.svc
+ destinationServiceAccounts:
+ - namespace: guestbook
+ server: https://kubernetes.default.svc
+ defaultServiceAccount: guestbook-deployer
+ - namespace: guestbook-ui
+ server: https://kubernetes.default.svc
+ defaultServiceAccount: guestbook-ui-deployer
+```
+
+#### Example 3: Remote destination with cluster-admin access and using different service account for the sync operation
+
+**Note**: In this example, we are relying on the default service account `argocd-manager` with `cluster-admin` privileges, which gets created when adding a remote cluster destination using the ArgoCD CLI.
+
+- Install ArgoCD in the `argocd` namespace.
+```
+kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml -n argocd
+```
+
+- Enable the impersonation feature in ArgoCD.
+```
+kubectl set env statefulset/argocd-application-controller ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true -n argocd
+```
+
+- Add the remote cluster as a destination to Argo CD
+```
+argocd cluster add remote-cluster --name remote-cluster
+```
+**Note:** The above command would create a service account named `argocd-manager` in the `kube-system` namespace, a `ClusterRole` named `argocd-manager-role` with full cluster admin access, and a `ClusterRoleBinding` named `argocd-manager-role-binding` mapping `argocd-manager-role` to the `argocd-manager` service account.
+
+- In the remote cluster, create a namespace called `guestbook` and a service account called `guestbook-deployer`.
+```
+kubectl ctx remote-cluster
+kubectl create namespace guestbook
+kubectl create serviceaccount guestbook-deployer -n guestbook
+```
+
+- In the remote cluster, create a `Role` and `RoleBinding` granting RBAC access for managing `Service` and `Deployment` objects in namespace `guestbook` to the service account `guestbook-deployer`.
+
+```
+kubectl ctx remote-cluster
+kubectl create role guestbook-deployer-role --verb create,get,list,update,delete --resource pods,deployment,service -n guestbook
+kubectl create rolebinding guestbook-deployer-rb --serviceaccount guestbook:guestbook-deployer --role guestbook-deployer-role -n guestbook
+```
+
+- Create the `Application` and `AppProject` for the `guestbook` application.
+```
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: guestbook
+ namespace: argocd
+spec:
+ project: my-project
+ source:
+ repoURL: https://github.com/argoproj/argocd-example-apps.git
+ targetRevision: HEAD
+ path: guestbook
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: guestbook
+---
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: my-project
+ namespace: argocd
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io
+spec:
+ description: Example Project
+ # Allow manifests to deploy from any Git repos
+ sourceRepos:
+ - '*'
+ destinations:
+ - namespace: guestbook
+ server: https://kubernetes.default.svc
+ destinationServiceAccounts:
+ - namespace: guestbook
+ server: https://kubernetes.default.svc
+ defaultServiceAccount: guestbook-deployer
+```
+
+#### Example 4: Remote destination with a custom service account for the sync operation
+
+**Note**: In this example, we are relying on a non-default service account, `guestbook-deployer`, created in the target cluster and namespace for the sync operation. This use case covers scenarios where the remote cluster is managed by a different administrator and providing a service account with `cluster-admin` level access is not feasible.
+
+- Install ArgoCD in the `argocd` namespace.
+```
+kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml -n argocd
+```
+
+- Enable the impersonation feature in ArgoCD.
+```
+kubectl set env statefulset/argocd-application-controller ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true -n argocd
+```
+
+- In the remote cluster, create a service account called `argocd-admin` and grant it cluster-level permissions to impersonate identities and to create `SelfSubjectAccessReview` objects.
+```
+kubectl ctx remote-cluster
+kubectl create serviceaccount argocd-admin -n default
+kubectl create clusterrole argocd-admin-role --verb=impersonate --resource="users,groups,serviceaccounts"
+kubectl create clusterrole argocd-admin-role-access-review --verb=create --resource="selfsubjectaccessreviews"
+kubectl create clusterrolebinding argocd-admin-role-binding --serviceaccount default:argocd-admin --clusterrole argocd-admin-role
+kubectl create clusterrolebinding argocd-admin-access-review-role-binding --serviceaccount default:argocd-admin --clusterrole argocd-admin-role-access-review
+```
+
+- In the remote cluster, create a namespace called `guestbook` and a service account called `guestbook-deployer`.
+```
+kubectl ctx remote-cluster
+kubectl create namespace guestbook
+kubectl create serviceaccount guestbook-deployer -n guestbook
+```
+
+- In the remote cluster, create a `Role` and `RoleBinding` granting RBAC access for managing `Service` and `Deployment` objects in namespace `guestbook` to the service account `guestbook-deployer`.
+```
+kubectl create role guestbook-deployer-role --verb create,get,list,update,delete --resource pods,deployment,service -n guestbook
+kubectl create rolebinding guestbook-deployer-rb --serviceaccount guestbook:guestbook-deployer --role guestbook-deployer-role -n guestbook
+```
+
+In this scenario, the service account `guestbook-deployer` will be used, as it matches the specific namespace `guestbook`.
+```
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: guestbook
+ namespace: argocd
+spec:
+ project: my-project
+ source:
+ repoURL: https://github.com/argoproj/argocd-example-apps.git
+ targetRevision: HEAD
+ path: guestbook
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: guestbook
+---
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: my-project
+ namespace: argocd
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io
+spec:
+ description: Example Project
+ # Allow manifests to deploy from any Git repos
+ sourceRepos:
+ - '*'
+ destinations:
+ - namespace: guestbook
+ server: https://kubernetes.default.svc
+ - namespace: guestbook-ui
+ server: https://kubernetes.default.svc
+ destinationServiceAccounts:
+ - namespace: guestbook
+ server: https://kubernetes.default.svc
+ defaultServiceAccount: guestbook-deployer
+ - namespace: guestbook-ui
+ server: https://kubernetes.default.svc
+ defaultServiceAccount: guestbook-ui-deployer
+```
+
+### Special cases
+
+#### Specifying service account in a different namespace
+
+By default, the service account would be looked up in the Application's destination namespace, configured through the `Application.Spec.Destination.Namespace` field. If the service account is in a different namespace, users can provide the namespace of the service account explicitly in the format `<namespace>:<service_account_name>`, for example:
+```
+ ...
+ destinationServiceAccounts:
+ - server: https://kubernetes.default.svc
+    namespace: '*'
+ defaultServiceAccount: mynamespace:guestbook-deployer
+ ...
+```
+
+#### Multiple matches of destinations
+
+If there are multiple matches for a given destination, the first valid match in the list of `destinationServiceAccounts` would be used.
+
+For example, let's assume that the `AppProject` has the below `destinationServiceAccounts` configured:
+```
+ ...
+ destinationServiceAccounts:
+ - server: https://kubernetes.default.svc
+ namespace: guestbook-prod
+ defaultServiceAccount: guestbook-prod-deployer
+ - server: https://kubernetes.default.svc
+ namespace: guestbook-*
+ defaultServiceAccount: guestbook-generic-deployer
+ - server: https://kubernetes.default.svc
+    namespace: '*'
+ defaultServiceAccount: generic-deployer
+ ...
+```
+- If the application destination namespace is `myns`, then the service account `generic-deployer` would be used as the first valid match is the glob pattern `*` and there are no other valid matches in the list.
+- If the application destination namespace is `guestbook-dev` or `guestbook-stage`, then both glob patterns `*` and `guestbook-*` are valid matches, however `guestbook-*` pattern appears first and hence, the service account `guestbook-generic-deployer` would be used for the impersonation.
+- If the application destination namespace is `guestbook-prod`, then there are three candidates, however the first valid match in the list is the one with service account `guestbook-prod-deployer` and that would be used for the impersonation.
+
+#### Application resources referring to multiple namespaces
+If application resources have hardcoded namespaces in the Git repository, would different service accounts be used for each resource during the sync operation?
+
+The service account to be used for impersonation is determined per `Application` rather than per resource. The value specified in `Application.spec.destination.namespace` would be used to determine the service account used for the sync operation of all resources present in the `Application`.
+
+### Security Considerations
+
+* How does this proposal impact the security aspects of Argo CD workloads?
+* Are there any unresolved follow-ups that need to be done to make the enhancement more robust?
+
+### Risks and Mitigations
+
+#### Privilege Escalation
+
+There could be an issue of privilege escalation if we allow users to impersonate without restrictions. This is mitigated by allowing only admin users to configure the service account used for the sync operation at the `AppProject` level.
+
+Instead of allowing a service account to impersonate all possible users, administrators can restrict the identities a particular service account can impersonate using the `resourceNames` field in the RBAC rule, as sketched below.
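+
+For illustration only, a minimal sketch of such a restriction, following the standard Kubernetes impersonation RBAC model. It assumes the Argo CD application controller runs as the `argocd-application-controller` service account in the `argocd` namespace and should only be allowed to impersonate the `guestbook-deployer` service account in the `guestbook` namespace:
+
+```
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: limited-impersonator
+rules:
+# only the listed identity may be impersonated; all other impersonation requests are denied
+- apiGroups: [""]
+  resources: ["users"]
+  verbs: ["impersonate"]
+  resourceNames: ["system:serviceaccount:guestbook:guestbook-deployer"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: limited-impersonator-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: limited-impersonator
+subjects:
+- kind: ServiceAccount
+  name: argocd-application-controller
+  namespace: argocd
+```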
+
+
+### Upgrade / Downgrade Strategy
+
+If applicable, how will the component be upgraded and downgraded? Make sure this is in the test
+plan.
+
+Consider the following in developing an upgrade/downgrade strategy for this enhancement:
+
+- What changes (in invocations, configurations, API use, etc.) is an existing cluster required to
+ make on upgrade in order to keep previous behavior?
+- What changes (in invocations, configurations, API use, etc.) is an existing cluster required to
+ make on upgrade in order to make use of the enhancement?
+
+- This feature would be implemented as opt-in, controlled by a feature flag, and disabled by default.
+- The new struct added to `AppProject.Spec` would be introduced as an optional field and would be honored only if the feature is explicitly enabled by the feature flag. If the new property is used in the CR but the feature flag is not enabled, a warning message would be displayed during reconciliation of such CRs.
+
+
+## Drawbacks
+
+- When using this feature, there is overhead in creating namespaces, service accounts, and the required RBAC policies, and in mapping the service accounts to the corresponding `AppProject` configuration.
+
+## Alternatives
+
+### Option 1
+Expose all options available in the `ImpersonationConfig` to the user through the `AppProject` CRs.
+
+```
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: my-project
+ namespace: argocd
+spec:
+ description: Example Project
+ # Allow manifests to deploy from any Git repos
+ sourceRepos:
+ - '*'
+  destinations:
+    - namespace: guestbook
+      server: https://kubernetes.default.svc
+      impersonate:
+        user: system:serviceaccount:dev_ns:admin
+        uid: 1234
+        groups:
+          - admin
+          - view
+          - edit
+```
+
+### Related issue
+
+https://github.com/argoproj/argo-cd/issues/7689
+
+
+### Related links
+
+https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation
+
+### Prior art
+
+https://github.com/argoproj/argo-cd/pull/3377
+https://github.com/argoproj/argo-cd/pull/7651
\ No newline at end of file
diff --git a/docs/proposals/feature-bounties/hide-annotations.md b/docs/proposals/feature-bounties/hide-annotations.md
new file mode 100644
index 0000000000000..47c9b943b8f71
--- /dev/null
+++ b/docs/proposals/feature-bounties/hide-annotations.md
@@ -0,0 +1,23 @@
+# Proposal: Allow Hiding Certain Annotations in the Argo CD Web UI
+
+Based on this issue: https://github.com/argoproj/argo-cd/issues/15693
+
+Award amount: $100
+
+## Solution
+
+!!! note
+ This is the proposed solution. The accepted PR may differ from this proposal.
+
+Add a new config item in argocd-cm:
+
+```yaml
+hide.secret.annotations: |
+- openshift.io/token-secret.value
+```
+
+This will hide the `openshift.io/token-secret.value` annotation from the UI. Behind the scenes, it would likely work the
+same way as the `last-applied-configuration` annotation hiding works: https://github.com/argoproj/gitops-engine/blob/b0fffe419a0f0a40f9f2c0b6346b752ed6537385/pkg/diff/diff.go#L897
+
+I considered whether we'd want to support hiding things besides annotations and in resources besides secrets, but
+having reviewed existing issues, I think this narrow feature is sufficient.
diff --git a/docs/proposals/native-ocp-support.md b/docs/proposals/native-oci-support.md
similarity index 99%
rename from docs/proposals/native-ocp-support.md
rename to docs/proposals/native-oci-support.md
index 64918fde8904e..7ec0053729c2e 100644
--- a/docs/proposals/native-ocp-support.md
+++ b/docs/proposals/native-oci-support.md
@@ -126,10 +126,10 @@ Consider the following in developing an upgrade/downgrade strategy for this enha
## Drawbacks
-* Sourcing content from an OCI registry may be perceived to be against GitOps principles as content is not sourced from a Git repository. This concern could be mitigated by attaching additional details related to the content (such as original Git source [URL, revision]). Though it should be noted that the GitOps principles only require a source of truth to be visioned and immutable which OCI registires support.
+* Sourcing content from an OCI registry may be perceived to be against GitOps principles as content is not sourced from a Git repository. This concern could be mitigated by attaching additional details related to the content (such as original Git source [URL, revision]). Though it should be noted that the GitOps principles only require a source of truth to be versioned and immutable, which OCI registries support.
## Alternatives
### Config Management Plugin
-Content stored within OCI artifacts could be sourced using a Config Management Plugin which would not require changes to the core capabilities provided by Argo CD. However, this would be hacky and not represent itself within the Argo CD UI.
\ No newline at end of file
+Content stored within OCI artifacts could be sourced using a Config Management Plugin which would not require changes to the core capabilities provided by Argo CD. However, this would be hacky and not represent itself within the Argo CD UI.
diff --git a/docs/proposals/parameterized-config-management-plugins.md b/docs/proposals/parameterized-config-management-plugins.md
index fa3061b2c3686..749f4efe63687 100644
--- a/docs/proposals/parameterized-config-management-plugins.md
+++ b/docs/proposals/parameterized-config-management-plugins.md
@@ -256,7 +256,7 @@ spec:
array: [values.yaml]
- name: helm-parameters
map:
- image.repository: my.company.com/gcr-proxy/heptio-images/ks-guestbook-demo
+ image.repository: my.example.com/gcr-proxy/heptio-images/ks-guestbook-demo
image.tag: "0.1"
```
@@ -283,7 +283,7 @@ That command, when run by a CMP with the above Application manifest, will print
{
"name": "helm-parameters",
"map": {
- "image.repository": "my.company.com/gcr-proxy/heptio-images/ks-guestbook-demo",
+ "image.repository": "my.example.com/gcr-proxy/heptio-images/ks-guestbook-demo",
"image.tag": "0.1"
}
}
@@ -398,7 +398,7 @@ like this:
"title": "Helm Parameters",
"tooltip": "Parameters to override when generating manifests with Helm",
"map": {
- "image.repository": "my.company.com/gcr-proxy/heptio-images/ks-guestbook-demo",
+ "image.repository": "my.example.com/gcr-proxy/heptio-images/ks-guestbook-demo",
"image.tag": "0.1"
}
}
@@ -423,7 +423,7 @@ readability.)
"title": "Helm Parameters",
"tooltip": "Parameters to override when generating manifests with Helm",
"map": {
- "image.repository": "my.company.com/gcr-proxy/heptio-images/ks-guestbook-demo",
+ "image.repository": "my.example.com/gcr-proxy/heptio-images/ks-guestbook-demo",
"image.tag": "0.1"
}
}
@@ -493,11 +493,11 @@ type ParametersAnnouncement []ParameterAnnouncement
- name: images
collectionType: map
array: # this gets ignored because collectionType is 'map'
- - ubuntu:latest=docker.company.com/proxy/ubuntu:latest
- - guestbook:v0.1=docker.company.com/proxy/guestbook:v0.1
+ - ubuntu:latest=docker.example.com/proxy/ubuntu:latest
+ - guestbook:v0.1=docker.example.com/proxy/guestbook:v0.1
map:
- ubuntu:latest: docker.company.com/proxy/ubuntu:latest
- guestbook:v0.1: docker.company.com/proxy/guestbook:v0.1
+ ubuntu:latest: docker.example.com/proxy/ubuntu:latest
+ guestbook:v0.1: docker.example.com/proxy/guestbook:v0.1
```
2. **Question**: What do we do if the CMP user sets more than one of `value`/`array`/`map` in the Application spec?
@@ -513,11 +513,11 @@ type ParametersAnnouncement []ParameterAnnouncement
parameters:
- name: images
array: # this gets sent to the CMP, but the CMP should ignore it
- - ubuntu:latest=docker.company.com/proxy/ubuntu:latest
- - guestbook:v0.1=docker.company.com/proxy/guestbook:v0.1
+ - ubuntu:latest=docker.example.com/proxy/ubuntu:latest
+ - guestbook:v0.1=docker.example.com/proxy/guestbook:v0.1
map:
- ubuntu:latest: docker.company.com/proxy/ubuntu:latest
- guestbook:v0.1: docker.company.com/proxy/guestbook:v0.1
+ ubuntu:latest: docker.example.com/proxy/ubuntu:latest
+ guestbook:v0.1: docker.example.com/proxy/guestbook:v0.1
```
3. **Question**: How will the UI know that adding more items to an array or a map is allowed?
@@ -528,17 +528,17 @@ type ParametersAnnouncement []ParameterAnnouncement
- name: images
collectionType: map # users will be allowed to add new items, because this is a map
map:
- ubuntu:latest: docker.company.com/proxy/ubuntu:latest
- guestbook:v0.1: docker.company.com/proxy/guestbook:v0.1
+ ubuntu:latest: docker.example.com/proxy/ubuntu:latest
+ guestbook:v0.1: docker.example.com/proxy/guestbook:v0.1
```
If the CMP author wants an immutable array or map, they should just break it into individual parameters.
```yaml
- name: ubuntu:latest
- string: docker.company.com/proxy/ubuntu:latest
+ string: docker.example.com/proxy/ubuntu:latest
- name: guestbook:v0.1
- string: docker.company.com/proxy/guestbook:v0.1
+ string: docker.example.com/proxy/guestbook:v0.1
```
4. **Question**: What do we do if a CMP announcement doesn't include a `collectionType`?
@@ -799,8 +799,8 @@ spec:
"title": "Image Overrides",
"collectionType": "map",
"map": {
- "quay.io/argoproj/argocd": "docker.company.com/proxy/argoproj/argocd",
- "ubuntu:latest": "docker.company.com/proxy/argoproj/argocd"
+ "quay.io/argoproj/argocd": "docker.example.com/proxy/argoproj/argocd",
+ "ubuntu:latest": "docker.example.com/proxy/argoproj/argocd"
}
}
]
diff --git a/docs/proposals/project-repos-and-clusters.md b/docs/proposals/project-repos-and-clusters.md
index 1f8258f47a72b..514c389048218 100644
--- a/docs/proposals/project-repos-and-clusters.md
+++ b/docs/proposals/project-repos-and-clusters.md
@@ -102,7 +102,7 @@ p, proj:my-project:admin, repositories, update, my-project/*, allow
This provides extra flexibility so that admin can have stricter rules. e.g.:
```
-p, proj:my-project:admin, repositories, update, my-project/"https://github.my-company.com/*", allow
+p, proj:my-project:admin, repositories, update, my-project/"https://github.example.com/*", allow
```
#### UI/CLI Changes
diff --git a/docs/snyk/index.md b/docs/snyk/index.md
index 0803b8ab69ef0..984cd3460c17d 100644
--- a/docs/snyk/index.md
+++ b/docs/snyk/index.md
@@ -13,38 +13,51 @@ recent minor releases.
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
-| [go.mod](master/argocd-test.html) | 0 | 0 | 5 | 0 |
+| [go.mod](master/argocd-test.html) | 0 | 0 | 6 | 0 |
| [ui/yarn.lock](master/argocd-test.html) | 0 | 0 | 0 | 0 |
-| [dex:v2.37.0](master/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 0 |
-| [haproxy:2.6.14-alpine](master/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 0 |
-| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 3 | 17 |
-| [redis:7.0.11-alpine](master/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 0 |
+| [dex:v2.37.0](master/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 1 |
+| [haproxy:2.6.14-alpine](master/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 1 |
+| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 4 | 16 |
+| [redis:7.0.11-alpine](master/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 1 |
| [install.yaml](master/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](master/argocd-iac-namespace-install.html) | - | - | - | - |
-### v2.8.4
+### v2.9.0-rc3
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
-| [go.mod](v2.8.4/argocd-test.html) | 0 | 0 | 5 | 0 |
-| [ui/yarn.lock](v2.8.4/argocd-test.html) | 0 | 0 | 0 | 0 |
-| [dex:v2.37.0](v2.8.4/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 0 |
-| [haproxy:2.6.14-alpine](v2.8.4/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 0 |
-| [argocd:v2.8.4](v2.8.4/quay.io_argoproj_argocd_v2.8.4.html) | 0 | 0 | 3 | 17 |
-| [redis:7.0.11-alpine](v2.8.4/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 0 |
-| [install.yaml](v2.8.4/argocd-iac-install.html) | - | - | - | - |
-| [namespace-install.yaml](v2.8.4/argocd-iac-namespace-install.html) | - | - | - | - |
+| [go.mod](v2.9.0-rc3/argocd-test.html) | 0 | 2 | 6 | 0 |
+| [ui/yarn.lock](v2.9.0-rc3/argocd-test.html) | 0 | 0 | 0 | 0 |
+| [dex:v2.37.0](v2.9.0-rc3/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 1 |
+| [haproxy:2.6.14-alpine](v2.9.0-rc3/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 1 |
+| [argocd:v2.9.0-rc3](v2.9.0-rc3/quay.io_argoproj_argocd_v2.9.0-rc3.html) | 0 | 0 | 4 | 16 |
+| [redis:7.0.11-alpine](v2.9.0-rc3/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 1 |
+| [install.yaml](v2.9.0-rc3/argocd-iac-install.html) | - | - | - | - |
+| [namespace-install.yaml](v2.9.0-rc3/argocd-iac-namespace-install.html) | - | - | - | - |
+
+### v2.8.5
+
+| | Critical | High | Medium | Low |
+|---:|:--------:|:----:|:------:|:---:|
+| [go.mod](v2.8.5/argocd-test.html) | 0 | 0 | 6 | 0 |
+| [ui/yarn.lock](v2.8.5/argocd-test.html) | 0 | 0 | 0 | 0 |
+| [dex:v2.37.0](v2.8.5/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 1 |
+| [haproxy:2.6.14-alpine](v2.8.5/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 1 |
+| [argocd:v2.8.5](v2.8.5/quay.io_argoproj_argocd_v2.8.5.html) | 0 | 0 | 4 | 16 |
+| [redis:7.0.11-alpine](v2.8.5/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 1 |
+| [install.yaml](v2.8.5/argocd-iac-install.html) | - | - | - | - |
+| [namespace-install.yaml](v2.8.5/argocd-iac-namespace-install.html) | - | - | - | - |
### v2.7.14
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
-| [go.mod](v2.7.14/argocd-test.html) | 0 | 1 | 5 | 0 |
+| [go.mod](v2.7.14/argocd-test.html) | 0 | 3 | 5 | 0 |
| [ui/yarn.lock](v2.7.14/argocd-test.html) | 0 | 1 | 0 | 0 |
-| [dex:v2.37.0](v2.7.14/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 0 |
-| [haproxy:2.6.14-alpine](v2.7.14/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 0 |
-| [argocd:v2.7.14](v2.7.14/quay.io_argoproj_argocd_v2.7.14.html) | 0 | 0 | 3 | 17 |
-| [redis:7.0.11-alpine](v2.7.14/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 0 |
+| [dex:v2.37.0](v2.7.14/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 1 |
+| [haproxy:2.6.14-alpine](v2.7.14/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 1 |
+| [argocd:v2.7.14](v2.7.14/quay.io_argoproj_argocd_v2.7.14.html) | 0 | 2 | 8 | 20 |
+| [redis:7.0.11-alpine](v2.7.14/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 1 |
| [install.yaml](v2.7.14/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v2.7.14/argocd-iac-namespace-install.html) | - | - | - | - |
@@ -52,11 +65,11 @@ recent minor releases.
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
-| [go.mod](v2.6.15/argocd-test.html) | 0 | 1 | 5 | 0 |
+| [go.mod](v2.6.15/argocd-test.html) | 0 | 3 | 5 | 0 |
| [ui/yarn.lock](v2.6.15/argocd-test.html) | 0 | 1 | 0 | 0 |
-| [dex:v2.37.0](v2.6.15/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 0 |
-| [haproxy:2.6.14-alpine](v2.6.15/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 0 |
-| [argocd:v2.6.15](v2.6.15/quay.io_argoproj_argocd_v2.6.15.html) | 0 | 0 | 3 | 17 |
-| [redis:7.0.11-alpine](v2.6.15/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 0 |
+| [dex:v2.37.0](v2.6.15/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 0 | 3 | 1 |
+| [haproxy:2.6.14-alpine](v2.6.15/haproxy_2.6.14-alpine.html) | 0 | 0 | 0 | 1 |
+| [argocd:v2.6.15](v2.6.15/quay.io_argoproj_argocd_v2.6.15.html) | 0 | 2 | 8 | 20 |
+| [redis:7.0.11-alpine](v2.6.15/redis_7.0.11-alpine.html) | 1 | 0 | 3 | 1 |
| [install.yaml](v2.6.15/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v2.6.15/argocd-iac-namespace-install.html) | - | - | - | - |
diff --git a/docs/snyk/master/argocd-iac-install.html b/docs/snyk/master/argocd-iac-install.html
index 3fb0b8186141a..28be7b9bb102b 100644
--- a/docs/snyk/master/argocd-iac-install.html
+++ b/docs/snyk/master/argocd-iac-install.html
@@ -456,7 +456,7 @@
Scanned the following path:
@@ -507,7 +507,7 @@
Role with dangerous permissions
- Line number: 18488
+ Line number: 20316
@@ -553,7 +553,7 @@
Role with dangerous permissions
- Line number: 18565
+ Line number: 20393
@@ -599,7 +599,7 @@
Role with dangerous permissions
- Line number: 18593
+ Line number: 20421
@@ -645,7 +645,7 @@
Role with dangerous permissions
- Line number: 18641
+ Line number: 20469
@@ -691,7 +691,7 @@
Role with dangerous permissions
- Line number: 18623
+ Line number: 20451
@@ -737,7 +737,7 @@
Role with dangerous permissions
- Line number: 18657
+ Line number: 20485
@@ -789,7 +789,7 @@
Container could be running with outdated image
- Line number: 19790
+ Line number: 21642
@@ -847,7 +847,7 @@
Container has no CPU limit
- Line number: 19141
+ Line number: 20969
@@ -905,7 +905,7 @@
Container has no CPU limit
- Line number: 19386
+ Line number: 21220
@@ -963,7 +963,7 @@
Container has no CPU limit
- Line number: 19352
+ Line number: 21186
@@ -1021,7 +1021,7 @@
Container has no CPU limit
- Line number: 19446
+ Line number: 21280
@@ -1079,7 +1079,7 @@
Container has no CPU limit
- Line number: 19533
+ Line number: 21373
@@ -1137,7 +1137,7 @@
Container has no CPU limit
- Line number: 19790
+ Line number: 21642
@@ -1195,7 +1195,7 @@
Container has no CPU limit
- Line number: 19590
+ Line number: 21430
@@ -1253,7 +1253,7 @@
Container has no CPU limit
- Line number: 19875
+ Line number: 21727
@@ -1311,7 +1311,7 @@
Container has no CPU limit
- Line number: 20191
+ Line number: 22043
@@ -1363,7 +1363,7 @@
Container is running with multiple open ports
- Line number: 19366
+ Line number: 21200
@@ -1415,7 +1415,7 @@
Container is running without liveness probe
- Line number: 19141
+ Line number: 20969
@@ -1460,14 +1460,14 @@
Container is running without liveness probe
›
spec
›
- containers[dex]
+ initContainers[copyutil]
›
livenessProbe
- Line number: 19352
+ Line number: 21220
@@ -1512,14 +1512,14 @@
Container is running without liveness probe
›
spec
›
- initContainers[copyutil]
+ containers[dex]
›
livenessProbe
- Line number: 19386
+ Line number: 21186
@@ -1571,7 +1571,7 @@
Container is running without liveness probe
- Line number: 19533
+ Line number: 21373
@@ -1623,7 +1623,7 @@
Container is running without liveness probe
- Line number: 19790
+ Line number: 21642
@@ -1681,7 +1681,7 @@
Container is running without memory limit
- Line number: 19141
+ Line number: 20969
@@ -1739,7 +1739,7 @@
Container is running without memory limit
- Line number: 19352
+ Line number: 21186
@@ -1797,7 +1797,7 @@
Container is running without memory limit
- Line number: 19386
+ Line number: 21220
@@ -1855,7 +1855,7 @@
Container is running without memory limit
- Line number: 19446
+ Line number: 21280
@@ -1913,7 +1913,7 @@
Container is running without memory limit
- Line number: 19533
+ Line number: 21373
@@ -1971,7 +1971,7 @@
Container is running without memory limit
- Line number: 19790
+ Line number: 21642
@@ -2029,7 +2029,7 @@
Container is running without memory limit
- Line number: 19590
+ Line number: 21430
@@ -2087,7 +2087,7 @@
Container is running without memory limit
- Line number: 19875
+ Line number: 21727
@@ -2145,7 +2145,7 @@
Container is running without memory limit
- Line number: 20191
+ Line number: 22043
@@ -2201,7 +2201,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19276
+ Line number: 21110
@@ -2257,7 +2257,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19394
+ Line number: 21228
@@ -2313,7 +2313,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19369
+ Line number: 21203
@@ -2369,7 +2369,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19467
+ Line number: 21307
@@ -2425,7 +2425,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19543
+ Line number: 21383
@@ -2481,7 +2481,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19797
+ Line number: 21649
@@ -2537,7 +2537,7 @@
Container's or Pod's UID could clash with hos
- Line number: 19763
+ Line number: 21615
@@ -2593,7 +2593,7 @@
Container's or Pod's UID could clash with hos
- Line number: 20101
+ Line number: 21953
@@ -2649,7 +2649,7 @@
Container's or Pod's UID could clash with hos
- Line number: 20339
+ Line number: 22191
diff --git a/docs/snyk/master/argocd-iac-namespace-install.html b/docs/snyk/master/argocd-iac-namespace-install.html
index 389ef692caaa1..e043d126f446c 100644
--- a/docs/snyk/master/argocd-iac-namespace-install.html
+++ b/docs/snyk/master/argocd-iac-namespace-install.html
@@ -456,7 +456,7 @@
Scanned the following path:
@@ -789,7 +789,7 @@
Container could be running with outdated image
- Line number: 1274
+ Line number: 1298
@@ -905,7 +905,7 @@
Container has no CPU limit
- Line number: 870
+ Line number: 876
@@ -963,7 +963,7 @@
Container has no CPU limit
- Line number: 836
+ Line number: 842
@@ -1021,7 +1021,7 @@
Container has no CPU limit
- Line number: 930
+ Line number: 936
@@ -1079,7 +1079,7 @@
Container has no CPU limit
- Line number: 1017
+ Line number: 1029
@@ -1137,7 +1137,7 @@
Container has no CPU limit
- Line number: 1274
+ Line number: 1298
@@ -1195,7 +1195,7 @@
Container has no CPU limit
- Line number: 1074
+ Line number: 1086
@@ -1253,7 +1253,7 @@
Container has no CPU limit
- Line number: 1359
+ Line number: 1383
@@ -1311,7 +1311,7 @@
Container has no CPU limit
- Line number: 1675
+ Line number: 1699
@@ -1363,7 +1363,7 @@
Container is running with multiple open ports
- Line number: 850
+ Line number: 856
@@ -1460,14 +1460,14 @@
Container is running without liveness probe
›
spec
›
- containers[dex]
+ initContainers[copyutil]
›
livenessProbe
- Line number: 836
+ Line number: 876
@@ -1512,14 +1512,14 @@
Container is running without liveness probe
›
spec
›
- initContainers[copyutil]
+ containers[dex]
›
livenessProbe
- Line number: 870
+ Line number: 842
@@ -1571,7 +1571,7 @@
Container is running without liveness probe
- Line number: 1017
+ Line number: 1029
@@ -1623,7 +1623,7 @@
Container is running without liveness probe
- Line number: 1274
+ Line number: 1298
@@ -1739,7 +1739,7 @@
Container is running without memory limit
- Line number: 836
+ Line number: 842
@@ -1797,7 +1797,7 @@
Container is running without memory limit
- Line number: 870
+ Line number: 876
@@ -1855,7 +1855,7 @@
Container is running without memory limit
- Line number: 930
+ Line number: 936
@@ -1913,7 +1913,7 @@
Container is running without memory limit
- Line number: 1017
+ Line number: 1029
@@ -1971,7 +1971,7 @@
Container is running without memory limit
- Line number: 1274
+ Line number: 1298
@@ -2029,7 +2029,7 @@
Container is running without memory limit
- Line number: 1074
+ Line number: 1086
@@ -2087,7 +2087,7 @@
Container is running without memory limit
- Line number: 1359
+ Line number: 1383
@@ -2145,7 +2145,7 @@
Container is running without memory limit
- Line number: 1675
+ Line number: 1699
@@ -2201,7 +2201,7 @@
Container's or Pod's UID could clash with hos
- Line number: 760
+ Line number: 766
@@ -2257,7 +2257,7 @@
Container's or Pod's UID could clash with hos
- Line number: 878
+ Line number: 884
@@ -2313,7 +2313,7 @@
Container's or Pod's UID could clash with hos
- Line number: 853
+ Line number: 859
@@ -2369,7 +2369,7 @@
Container's or Pod's UID could clash with hos
- Line number: 951
+ Line number: 963
@@ -2425,7 +2425,7 @@
Container's or Pod's UID could clash with hos
- Line number: 1027
+ Line number: 1039
@@ -2481,7 +2481,7 @@
Container's or Pod's UID could clash with hos
- Line number: 1281
+ Line number: 1305
@@ -2537,7 +2537,7 @@
Container's or Pod's UID could clash with hos
- Line number: 1247
+ Line number: 1271
@@ -2593,7 +2593,7 @@
Container's or Pod's UID could clash with hos
- Line number: 1585
+ Line number: 1609
@@ -2649,7 +2649,7 @@
Container's or Pod's UID could clash with hos
- Line number: 1823
+ Line number: 1847
diff --git a/docs/snyk/master/argocd-test.html b/docs/snyk/master/argocd-test.html
index 4f0797405d6bb..1b2486932df9e 100644
--- a/docs/snyk/master/argocd-test.html
+++ b/docs/snyk/master/argocd-test.html
@@ -7,7 +7,7 @@
Snyk test report
-
+
@@ -456,7 +456,7 @@
Scanned the following paths:
@@ -466,9 +466,9 @@
@@ -476,6 +476,65 @@
+
+
LGPL-3.0 license
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
+
Detailed paths
+
+
+
+
+
+
+
+
LGPL-3.0 license
+
+
+
+
+
+
MPL-2.0 license
@@ -662,7 +721,7 @@
Detailed paths
Introduced through :
github.com/argoproj/argo-cd/v2@0.0.0
›
- github.com/argoproj/notifications-engine/pkg/cmd@#9dcecdc3eebf
+ github.com/argoproj/notifications-engine/pkg/subscriptions@#9dcecdc3eebf
›
github.com/argoproj/notifications-engine/pkg/services@#9dcecdc3eebf
›
@@ -677,7 +736,7 @@ Detailed paths
Introduced through :
github.com/argoproj/argo-cd/v2@0.0.0
›
- github.com/argoproj/notifications-engine/pkg/subscriptions@#9dcecdc3eebf
+ github.com/argoproj/notifications-engine/pkg/cmd@#9dcecdc3eebf
›
github.com/argoproj/notifications-engine/pkg/services@#9dcecdc3eebf
›
@@ -824,7 +883,7 @@ Detailed paths
Introduced through :
github.com/argoproj/argo-cd/v2@0.0.0
›
- github.com/argoproj/notifications-engine/pkg/cmd@#9dcecdc3eebf
+ github.com/argoproj/notifications-engine/pkg/subscriptions@#9dcecdc3eebf
›
github.com/argoproj/notifications-engine/pkg/services@#9dcecdc3eebf
›
@@ -841,7 +900,7 @@ Detailed paths
Introduced through :
github.com/argoproj/argo-cd/v2@0.0.0
›
- github.com/argoproj/notifications-engine/pkg/subscriptions@#9dcecdc3eebf
+ github.com/argoproj/notifications-engine/pkg/cmd@#9dcecdc3eebf
›
github.com/argoproj/notifications-engine/pkg/services@#9dcecdc3eebf
›
diff --git a/docs/snyk/master/ghcr.io_dexidp_dex_v2.37.0.html b/docs/snyk/master/ghcr.io_dexidp_dex_v2.37.0.html
index 5362d9f1153db..167a203368fb3 100644
--- a/docs/snyk/master/ghcr.io_dexidp_dex_v2.37.0.html
+++ b/docs/snyk/master/ghcr.io_dexidp_dex_v2.37.0.html
@@ -7,7 +7,7 @@
Snyk test report
-
+
@@ -456,7 +456,7 @@
Scanned the following paths:
@@ -466,8 +466,8 @@
@@ -583,6 +583,178 @@
References
More about this vulnerability
+
+
+
Denial of Service (DoS)
+
+
+
+ high severity
+
+
+
+
+
+
+
+
+
+
Detailed paths
+
+
+
+
+
+
+
+
Overview
+
google.golang.org/grpc is a Go implementation of gRPC
+
Affected versions of this package are vulnerable to Denial of Service (DoS) in the implementation of the HTTP/2 protocol. An attacker can cause a denial of service (including via DDoS) by rapidly resetting many streams through request cancellation.
+
+
Upgrade google.golang.org/grpc
to version 1.56.3, 1.57.1, 1.58.3 or higher.
+
References
+
+
+
+
+
+
+
+
+
Denial of Service (DoS)
+
+
+
+ high severity
+
+
+
+
+
+
+
+
+
+
Detailed paths
+
+
+
+
+
+
+
+
Overview
+
golang.org/x/net/http2 is a work-in-progress HTTP/2 implementation for Go.
+
Affected versions of this package are vulnerable to Denial of Service (DoS) in the implementation of the HTTP/2 protocol. An attacker can cause a denial of service (including via DDoS) by rapidly resetting many streams through request cancellation.
+
+
Upgrade golang.org/x/net/http2
to version 0.17.0 or higher.
+
References
+
+
+
+
+
+
Improper Authentication
@@ -852,7 +1024,7 @@
Detailed paths
NVD Description
-
Note: Versions mentioned in the description apply only to the upstream openssl
package and not the openssl
package as distributed by Alpine:3.18
.
+
Note: Versions mentioned in the description apply only to the upstream openssl
package and not the openssl
package as distributed by Alpine
.
See How to fix?
for Alpine:3.18
relevant fixed versions and status.
Issue summary: Checking excessively long DH keys or parameters may be very slow.
Impact summary: Applications that use the functions DH_check(), DH_check_ex()
@@ -1015,7 +1187,7 @@
Detailed paths
NVD Description
-
Note: Versions mentioned in the description apply only to the upstream openssl
package and not the openssl
package as distributed by Alpine:3.18
.
+
Note: Versions mentioned in the description apply only to the upstream openssl
package and not the openssl
package as distributed by Alpine
.
See How to fix?
for Alpine:3.18
relevant fixed versions and status.
Issue summary: Checking excessively long DH keys or parameters may be very slow.
Impact summary: Applications that use the functions DH_check(), DH_check_ex()
@@ -1050,6 +1222,9 @@
References
openssl-security@openssl.org
openssl-security@openssl.org
openssl-security@openssl.org
+
openssl-security@openssl.org
+
openssl-security@openssl.org
+
openssl-security@openssl.org
@@ -2511,6 +2686,174 @@
Detailed paths
+
+
CVE-2023-5363
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
+
Detailed paths
+
+
+
+
+
+
+
+
NVD Description
+
Note: Versions mentioned in the description apply only to the upstream openssl
package and not the openssl
package as distributed by Alpine
.
+ See How to fix?
for Alpine:3.18
relevant fixed versions and status.
+
Issue summary: A bug has been identified in the processing of key and
+ initialisation vector (IV) lengths. This can lead to potential truncation
+ or overruns during the initialisation of some symmetric ciphers.
+
Impact summary: A truncation in the IV can result in non-uniqueness,
+ which could result in loss of confidentiality for some cipher modes.
+
When calling EVP_EncryptInit_ex2(), EVP_DecryptInit_ex2() or
+ EVP_CipherInit_ex2() the provided OSSL_PARAM array is processed after
+ the key and IV have been established. Any alterations to the key length,
+ via the "keylen" parameter or the IV length, via the "ivlen" parameter,
+ within the OSSL_PARAM array will not take effect as intended, potentially
+ causing truncation or overreading of these values. The following ciphers
+ and cipher modes are impacted: RC2, RC4, RC5, CCM, GCM and OCB.
+
For the CCM, GCM and OCB cipher modes, truncation of the IV can result in
+ loss of confidentiality. For example, when following NIST's SP 800-38D
+ section 8.2.1 guidance for constructing a deterministic IV for AES in
+ GCM mode, truncation of the counter portion could lead to IV reuse.
+
Both truncations and overruns of the key and overruns of the IV will
+ produce incorrect results and could, in some cases, trigger a memory
+ exception. However, these issues are not currently assessed as security
+ critical.
+
Changing the key and/or IV lengths is not considered to be a common operation
+ and the vulnerable API was recently introduced. Furthermore it is likely that
+ application developers will have spotted this problem during testing since
+ decryption would fail unless both peers in the communication were similarly
+ vulnerable. For these reasons we expect the probability of an application being
+ vulnerable to this to be quite low. However if an application is vulnerable then
+ this issue is considered very serious. For these reasons we have assessed this
+ issue as Moderate severity overall.
+
The OpenSSL SSL/TLS implementation is not affected by this issue.
+
The OpenSSL 3.0 and 3.1 FIPS providers are not affected by this because
+ the issue lies outside of the FIPS provider boundary.
+
OpenSSL 3.1 and 3.0 are vulnerable to this issue.
+
+
Upgrade Alpine:3.18
openssl
to version 3.1.4-r0 or higher.
+
References
+
+
+
+
+
+
+
diff --git a/docs/snyk/master/haproxy_2.6.14-alpine.html b/docs/snyk/master/haproxy_2.6.14-alpine.html
index 6ba6ea51ffc0a..19c8202ec7564 100644
--- a/docs/snyk/master/haproxy_2.6.14-alpine.html
+++ b/docs/snyk/master/haproxy_2.6.14-alpine.html
@@ -7,7 +7,7 @@
Snyk test report
-
+
@@ -456,7 +456,7 @@
Scanned the following path:
@@ -466,8 +466,8 @@
@@ -484,7 +484,198 @@
- No known vulnerabilities detected.
+
+
+
CVE-2023-5363
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
+
Detailed paths
+
+
+
+
+
+
+
+
NVD Description
+
Note: Versions mentioned in the description apply only to the upstream openssl
package and not the openssl
package as distributed by Alpine
.
+ See How to fix?
for Alpine:3.18
relevant fixed versions and status.
+
Issue summary: A bug has been identified in the processing of key and
+ initialisation vector (IV) lengths. This can lead to potential truncation
+ or overruns during the initialisation of some symmetric ciphers.
+
Impact summary: A truncation in the IV can result in non-uniqueness,
+ which could result in loss of confidentiality for some cipher modes.
+
When calling EVP_EncryptInit_ex2(), EVP_DecryptInit_ex2() or
+ EVP_CipherInit_ex2() the provided OSSL_PARAM array is processed after
+ the key and IV have been established. Any alterations to the key length,
+ via the "keylen" parameter or the IV length, via the "ivlen" parameter,
+ within the OSSL_PARAM array will not take effect as intended, potentially
+ causing truncation or overreading of these values. The following ciphers
+ and cipher modes are impacted: RC2, RC4, RC5, CCM, GCM and OCB.
+
For the CCM, GCM and OCB cipher modes, truncation of the IV can result in
+ loss of confidentiality. For example, when following NIST's SP 800-38D
+ section 8.2.1 guidance for constructing a deterministic IV for AES in
+ GCM mode, truncation of the counter portion could lead to IV reuse.
+
Both truncations and overruns of the key and overruns of the IV will
+ produce incorrect results and could, in some cases, trigger a memory
+ exception. However, these issues are not currently assessed as security
+ critical.
+
Changing the key and/or IV lengths is not considered to be a common operation
+ and the vulnerable API was recently introduced. Furthermore it is likely that
+ application developers will have spotted this problem during testing since
+ decryption would fail unless both peers in the communication were similarly
+ vulnerable. For these reasons we expect the probability of an application being
+ vulnerable to this to be quite low. However if an application is vulnerable then
+ this issue is considered very serious. For these reasons we have assessed this
+ issue as Moderate severity overall.
+
The OpenSSL SSL/TLS implementation is not affected by this issue.
+
The OpenSSL 3.0 and 3.1 FIPS providers are not affected by this because
+ the issue lies outside of the FIPS provider boundary.
+
OpenSSL 3.1 and 3.0 are vulnerable to this issue.
+
+
Upgrade Alpine:3.18
openssl
to version 3.1.4-r0 or higher.
+
References
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Role with dangerous permissions
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
Impact
+
Using this role grants dangerous permissions
+
+
Remediation
+
Consider removing this permissions
+
+
+
+
+
+
+
+
+
+
Role with dangerous permissions
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
Impact
+
Using this role grants dangerous permissions
+
+
Remediation
+
Consider removing this permissions
+
+
+
+
+
+
+
+
+
+
Role with dangerous permissions
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
Impact
+
Using this role grants dangerous permissions
+
+
Remediation
+
Consider removing this permissions
+
+
+
+
+
+
+
+
+
+
Role with dangerous permissions
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
Impact
+
Using this role grants dangerous permissions
+
+
Remediation
+
Consider removing this permissions
+
+
+
+
+
+
+
+
+
+
Role with dangerous permissions
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
Impact
+
Using this role grants dangerous permissions
+
+
Remediation
+
Consider removing this permissions
+
+
+
+
+
+
+
+
+
+
Role with dangerous permissions
+
+
+
+ medium severity
+
+
+
+
+
+
+
+
+
Impact
+
Using this role grants dangerous permissions
+
+
Remediation
+
Consider removing this permissions
+
+
+
+
+
+
+
+
+
+
Container could be running with outdated image
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
The container may run with outdated or unauthorized image
+
+
Remediation
+
Set `imagePullPolicy` attribute to `Always`
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container has no CPU limit
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
CPU limits can prevent containers from consuming valuable compute time for no benefit (e.g. inefficient code) that might lead to unnecessary costs. It is advisable to also configure CPU requests to ensure application stability.
+
+
Remediation
+
Add `resources.limits.cpu` field with required CPU limit value
+
+
+
+
+
+
+
+
+
+
Container is running with multiple open ports
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
Increases the attack surface of the application and the container.
+
+
Remediation
+
Reduce `ports` count to 2
+
+
+
+
+
+
+
+
+
+
Container is running without liveness probe
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
Kubernetes will not be able to detect if application is able to service requests, and will not restart unhealthy pods
+
+
Remediation
+
Add `livenessProbe` attribute
+
+
+
+
+
+
+
+
+
+
Container is running without liveness probe
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
Kubernetes will not be able to detect if application is able to service requests, and will not restart unhealthy pods
+
+
Remediation
+
Add `livenessProbe` attribute
+
+
+
+
+
+
+
+
+
+
Container is running without liveness probe
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
Kubernetes will not be able to detect if application is able to service requests, and will not restart unhealthy pods
+
+
Remediation
+
Add `livenessProbe` attribute
+
+
+
+
+
+
+
+
+
+
Container is running without liveness probe
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
Kubernetes will not be able to detect if application is able to service requests, and will not restart unhealthy pods
+
+
Remediation
+
Add `livenessProbe` attribute
+
+
+
+
+
+
+
+
+
+
Container is running without liveness probe
+
+
+
+ low severity
+
+
+
+
+
+
+
+
+
Impact
+
Kubernetes will not be able to detect if application is able to service requests, and will not restart unhealthy pods
+
+
Remediation
+
Add `livenessProbe` attribute
+
+
+
+
+
+
+
+
+
+
Container is running without memory limit

low severity

Impact

Containers without memory limits are more likely to be terminated when the node runs out of memory.

Remediation

Set `resources.limits.memory` value
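As a sketch only (the values are placeholders), a container's memory request and limit are set alongside the CPU settings under `resources`:

```yaml
# Illustrative sketch: values are placeholders.
resources:
  requests:
    memory: "128Mi"   # amount reserved for scheduling
  limits:
    memory: "256Mi"   # container is OOM-killed if it exceeds this
```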
Container's or Pod's UID could clash with host's UID

low severity

Impact

The UID of the container processes could clash with the host's UIDs and lead to unintentional authorization bypass.

Remediation

Set `securityContext.runAsUser` to a value greater than or equal to 10000. The `securityContext` can be set at both the `pod` and `container` level; if both are set, the container-level value takes precedence.
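A minimal sketch of such a security context (the UID below is a placeholder chosen above the 10000 threshold, not a value from the report):

```yaml
# Illustrative sketch: UID is a placeholder above the 10000 threshold.
securityContext:
  runAsNonRoot: true
  runAsUser: 10001    # container-level value overrides a pod-level setting
```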
diff --git a/docs/snyk/master/quay.io_argoproj_argocd_latest.html b/docs/snyk/master/quay.io_argoproj_argocd_latest.html
index dac774d0f0d30..c9b59ef5e997f 100644
--- a/docs/snyk/master/quay.io_argoproj_argocd_latest.html
+++ b/docs/snyk/master/quay.io_argoproj_argocd_latest.html
@@ -7,7 +7,7 @@
diff --git a/docs/snyk/v2.6.15/argocd-iac-install.html b/docs/snyk/v2.6.15/argocd-iac-install.html
index bf9ee4f20bde5..6867e68c4bd18 100644
--- a/docs/snyk/v2.6.15/argocd-iac-install.html
+++ b/docs/snyk/v2.6.15/argocd-iac-install.html
@@ -456,7 +456,7 @@
diff --git a/docs/snyk/v2.6.15/haproxy_2.6.14-alpine.html b/docs/snyk/v2.6.15/haproxy_2.6.14-alpine.html
index 4f717f2c05aab..605a7d8b7d5bd 100644
--- a/docs/snyk/v2.6.15/haproxy_2.6.14-alpine.html
+++ b/docs/snyk/v2.6.15/haproxy_2.6.14-alpine.html
@@ -7,7 +7,7 @@
diff --git a/docs/snyk/v2.6.15/quay.io_argoproj_argocd_v2.6.15.html b/docs/snyk/v2.6.15/quay.io_argoproj_argocd_v2.6.15.html
index 71e5552f26c97..759d3b81c634b 100644
--- a/docs/snyk/v2.6.15/quay.io_argoproj_argocd_v2.6.15.html
+++ b/docs/snyk/v2.6.15/quay.io_argoproj_argocd_v2.6.15.html
@@ -7,7 +7,7 @@
diff --git a/docs/snyk/v2.7.14/haproxy_2.6.14-alpine.html b/docs/snyk/v2.7.14/haproxy_2.6.14-alpine.html
index 09342f7d6f484..953bbbe0d1e05 100644
--- a/docs/snyk/v2.7.14/haproxy_2.6.14-alpine.html
+++ b/docs/snyk/v2.7.14/haproxy_2.6.14-alpine.html
@@ -7,7 +7,7 @@
diff --git a/docs/snyk/v2.7.14/quay.io_argoproj_argocd_v2.7.14.html b/docs/snyk/v2.7.14/quay.io_argoproj_argocd_v2.7.14.html
index 4c1cb8f1d8e16..5b4ea7a6ff4d0 100644
--- a/docs/snyk/v2.7.14/quay.io_argoproj_argocd_v2.7.14.html
+++ b/docs/snyk/v2.7.14/quay.io_argoproj_argocd_v2.7.14.html
@@ -7,7 +7,7 @@
diff --git a/docs/snyk/v2.9.0-rc3/argocd-iac-install.html b/docs/snyk/v2.9.0-rc3/argocd-iac-install.html
new file mode 100644
index 0000000000000..207acd982d50e
--- /dev/null
+++ b/docs/snyk/v2.9.0-rc3/argocd-iac-install.html
@@ -0,0 +1,2679 @@