diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 696e41272f..fd7d7aa972 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,15 +1,39 @@ --- name: Bug report about: Create a report to help us improve +title: '' labels: 'bug' +assignees: '' --- -## Summary -What happened/what you expected to happen? + -## Diagnostics +Checklist: -What version of Argo Rollouts are you running? +* [ ] I've included steps to reproduce the bug. +* [ ] I've included the version of Argo Rollouts. + +**Describe the bug** + + + +**To Reproduce** + + + +**Expected behavior** + + + +**Screenshots** + + + +**Version** + + + +**Logs** ``` # Paste the logs from the rollout controller @@ -18,7 +42,7 @@ What version of Argo Rollouts are you running? kubectl logs -n argo-rollouts deployment/argo-rollouts # Logs for a specific rollout: -kubectl logs -n argo-rollouts deployment/argo-rollouts | grep rollout= +kubectl logs -n argo-rollouts deployment/argo-rollouts | grep rollout= "$name.zip" + unzip -d "$name" "$name.zip" + done + + - name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + check_name: "${{ github.event.workflow.name }} Published Test Results" + commit: ${{ github.event.workflow_run.head_sha }} + event_file: artifacts/Event File/event.json + event_name: ${{ github.event.workflow_run.event }} + files: "artifacts/**/*.xml" + compare_to_earlier_commit: false + test_changes_limit: 0 + fail_on: "errors" diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index d59956ceb7..8f933b5bbd 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -16,11 +16,28 @@ on: required: false default: false +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: + event_file: + name: "Event File" + runs-on: ubuntu-latest + steps: + - name: Upload + uses: actions/upload-artifact@v2 + with: + name: Event File + path: ${{ github.event_path }} test-e2e: name: Run end-to-end tests runs-on: ubuntu-latest steps: + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 - uses: actions/checkout@v2 - name: Setup k3s run: | sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config sudo chmod 755 ~/.kube/config kubectl version + kubectl create ns argo-rollouts - uses: actions/cache@v2 with: path: ~/go/pkg/mod - name: Run e2e tests run: make test-e2e if: ${{ !(github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled) }} + - name: Output Rerun Overview + run: | + [[ -f rerunreport.txt ]] && cat rerunreport.txt || echo "No rerun report found" + - name: Upload E2E Test Results + if: always() + uses: actions/upload-artifact@v2 + with: + name: E2E Test Results + path: | + junit.xml - name: Upload e2e-controller logs uses: actions/upload-artifact@v2 with: diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml index 6b706407f7..3b054e9d82 100644 --- a/.github/workflows/gh-pages.yaml +++ b/.github/workflows/gh-pages.yaml @@ -4,6 +4,11 @@ on: push: branches: - master + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: deploy: runs-on: ubuntu-latest @@ -16,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.16 + go-version: 1.18 - name: build run: | pip install mkdocs mkdocs_material diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 6ba4e33578..66745e9b95
100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -7,26 +7,43 @@ on: pull_request: branches: - "master" +env: + # Golang version to use across CI steps + GOLANG_VERSION: '1.18' + jobs: + event_file: + name: "Event File" + runs-on: ubuntu-latest + steps: + - name: Upload + uses: actions/upload-artifact@v2 + with: + name: Event File + path: ${{ github.event_path }} lint-go: name: Lint Go code runs-on: ubuntu-latest steps: + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GOLANG_VERSION }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: - version: v1.30 + version: v1.47.2 args: --timeout 5m build: name: Build runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: - go-version: 1.16 + go-version: ${{ env.GOLANG_VERSION }} id: go - name: Check out code into the Go module directory @@ -46,7 +63,15 @@ jobs: run: make controller plugin - name: Test - run: go test -failfast -covermode=count -coverprofile=coverage.out ./... + run: make test-unit + + - name: Upload Unit Test Results + if: always() + uses: actions/upload-artifact@v2 + with: + name: Unit Test Results + path: | + junit.xml - name: Generate code coverage artifacts uses: actions/upload-artifact@v2 @@ -55,7 +80,7 @@ jobs: path: coverage.out - name: Upload code coverage information to codecov.io - uses: codecov/codecov-action@v2.0.3 + uses: codecov/codecov-action@v2.1.0 with: file: coverage.out @@ -64,14 +89,19 @@ jobs: runs-on: ubuntu-latest env: GOPATH: /home/runner/go - PROTOC_ZIP: protoc-3.12.3-linux-x86_64.zip steps: - name: Checkout code uses: actions/checkout@v2 - name: Setup Golang uses: actions/setup-go@v1 with: - go-version: 1.16.2 + go-version: ${{ env.GOLANG_VERSION }} + # k8s codegen generates files into the GOPATH location instead of the GitHub git checkout location + # This symlink is necessary to ensure that `git diff` detects changes + - name: Create symlink in GOPATH + run: | + mkdir -p ~/go/src/github.com/argoproj + ln -s $(pwd) ~/go/src/github.com/argoproj/argo-rollouts - uses: actions/cache@v2 with: path: /home/runner/.cache/go-build @@ -84,21 +114,9 @@ jobs: with: path: /home/runner/go/bin key: go-bin-v1-${{ hashFiles('**/go.mod') }} - - uses: actions/cache@v2 - with: - path: protoc-3.12.3-linux-x86_64.zip - key: protoc-3.12.3-linux-x86_64.zip - name: Install protoc run: | - set -eux -o pipefail - curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.12.3/$PROTOC_ZIP - sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc - sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*' - sudo chmod +x /usr/local/bin/protoc - sudo find /usr/local/include -type f | xargs sudo chmod a+r - sudo find /usr/local/include -type d | xargs sudo chmod a+rx - ls /usr/local/include/google/protobuf/ - + make install-toolchain - name: Add ~/go/bin to PATH run: | echo "/home/runner/go/bin" >> $GITHUB_PATH - name: Add /usr/local/bin to PATH run: | echo "/usr/local/bin" >> $GITHUB_PATH - - name: Create links - run: | - mkdir -p ~/go/src/github.com/argoproj - cp -a ../argo-rollouts ~/go/src/github.com/argoproj - - - name: Vendor and Download - run: | - go mod vendor -v - go mod download - - - name: Install UI code generator - run: | - wget https://repo1.maven.org/maven2/io/swagger/codegen/v3/swagger-codegen-cli/3.0.25/swagger-codegen-cli-3.0.25.jar -O swagger-codegen-cli.jar - echo
"#!/usr/bin/java -jar" > swagger-codegen - cat swagger-codegen-cli.jar >> swagger-codegen - chmod +x swagger-codegen - sudo mv swagger-codegen /usr/local/bin/swagger-codegen - rm swagger-codegen-cli.jar - - - uses: actions/setup-java@v1 - with: - java-version: "9.0.4" - - name: Run codegen run: | + make go-mod-vendor make codegen make manifests make docs diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 677476ed8f..a5f37f1f8e 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -6,7 +6,6 @@ on: tag: description: Git tag to build release from required: true - jobs: release-images: runs-on: ubuntu-latest @@ -26,12 +25,10 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 - - - name: Cache Docker layers - uses: actions/cache@v2 with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ steps.get-sha.outputs.sha }} + config-inline: | + [worker.oci] + gc = false - name: Print Disk Usage run: | @@ -85,8 +82,6 @@ jobs: platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.controller-meta.outputs.tags }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - name: Build and push (plugin-image) uses: docker/build-push-action@v2 @@ -96,19 +91,11 @@ jobs: platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.plugin-meta.outputs.tags }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - # Temp fix - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache release-artifacts: runs-on: ubuntu-latest + needs: release-images steps: - name: Checkout @@ -116,11 +103,47 @@ jobs: with: ref: ${{ github.event.inputs.tag }} + - name: Setup Golang + uses: actions/setup-go@v2 + with: + go-version: 1.18 + - name: Generate release artifacts run: | make release-plugins make manifests IMAGE_TAG=${{ github.event.inputs.tag }} + - name: Generate SBOM (spdx) + id: spdx-builder + env: + # defines the spdx/spdx-sbom-generator version to use. + SPDX_GEN_VERSION: v0.0.13 + # defines the sigs.k8s.io/bom version to use. + SIGS_BOM_VERSION: v0.2.1 + # comma delimited list of project relative folders to inspect for package + # managers (gomod, yarn, npm). + PROJECT_FOLDERS: ".,./ui" + # full qualified name of the docker image to be inspected + DOCKER_IMAGE: quay.io/argoproj/argo-rollouts:${{ github.event.inputs.tag }} + + run: | + yarn install --cwd ./ui + go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION + go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION + + # Generate SPDX for project dependencies analyzing package managers + for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g") + do + generator -p $folder -o /tmp + done + + # Generate SPDX for binaries analyzing the docker image + if [[ ! 
-z $DOCKER_IMAGE ]]; then + bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE + fi + + cd /tmp && tar -zcf sbom.tar.gz *.spdx + - name: Draft release uses: softprops/action-gh-release@v1 with: @@ -128,6 +151,7 @@ jobs: draft: true files: | dist/kubectl-argo-rollouts-linux-amd64 + dist/kubectl-argo-rollouts-linux-arm64 dist/kubectl-argo-rollouts-darwin-amd64 dist/kubectl-argo-rollouts-windows-amd64 manifests/dashboard-install.yaml @@ -135,5 +159,6 @@ jobs: manifests/namespace-install.yaml manifests/notifications-install.yaml docs/features/kustomize/rollout_cr_schema.json + /tmp/sbom.tar.gz env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/Dockerfile b/Dockerfile index 7212a9c8df..19553e355e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image # Also used as the image in CI jobs so needs all dependencies #################################################################################################### -FROM golang:1.16.3 as builder +FROM --platform=$BUILDPLATFORM golang:1.18 as builder RUN apt-get update && apt-get install -y \ wget \ @@ -12,9 +12,7 @@ RUN apt-get update && apt-get install -y \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Install golangci-lint -RUN wget https://install.goreleaser.com/github.com/golangci/golangci-lint.sh && \ - chmod +x ./golangci-lint.sh && \ - ./golangci-lint.sh -b $GOPATH/bin && \ +RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.47.2 && \ golangci-lint linters COPY .golangci.yml ${GOPATH}/src/dummy/.golangci.yml @@ -26,7 +24,7 @@ RUN cd ${GOPATH}/src/dummy && \ #################################################################################################### # UI build stage #################################################################################################### -FROM docker.io/library/node:12.18.4 as argo-rollouts-ui +FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 as argo-rollouts-ui WORKDIR /src ADD ["ui/package.json", "ui/yarn.lock", "./"] @@ -42,7 +40,7 @@ RUN NODE_ENV='production' yarn build #################################################################################################### # Rollout Controller Build stage which performs the actual build of argo-rollouts binaries #################################################################################################### -FROM golang:1.16.3 as argo-rollouts-build +FROM --platform=$BUILDPLATFORM golang:1.18 as argo-rollouts-build WORKDIR /go/src/github.com/argoproj/argo-rollouts @@ -63,15 +61,17 @@ RUN touch ui/dist/node_modules.marker && \ touch ui/dist/app/index.html && \ find ui/dist -ARG MAKE_TARGET="controller plugin-linux plugin-darwin plugin-windows" -RUN make ${MAKE_TARGET} +ARG TARGETOS +ARG TARGETARCH +ARG MAKE_TARGET="controller plugin" +RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make ${MAKE_TARGET} #################################################################################################### # Kubectl plugin image #################################################################################################### FROM docker.io/library/ubuntu:20.10 as kubectl-argo-rollouts -COPY --from=argo-rollouts-build /go/src/github.com/argoproj/argo-rollouts/dist/kubectl-argo-rollouts-linux-amd64 /bin/kubectl-argo-rollouts +COPY --from=argo-rollouts-build /go/src/github.com/argoproj/argo-rollouts/dist/kubectl-argo-rollouts /bin/kubectl-argo-rollouts USER 999 
diff --git a/Dockerfile.dev b/Dockerfile.dev index 4794d2b4df..4694dda836 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -1,7 +1,7 @@ #################################################################################################### # argo-rollouts-dev #################################################################################################### -FROM golang:1.16.3 as builder +FROM golang:1.17 as builder RUN apt-get update && apt-get install -y \ ca-certificates && \ diff --git a/Makefile b/Makefile index ea5b1f9115..df2542169b 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,8 @@ DEV_IMAGE=false # E2E variables E2E_INSTANCE_ID ?= argo-rollouts-e2e E2E_TEST_OPTIONS ?= -E2E_PARALLEL ?= 4 +E2E_PARALLEL ?= 1 +E2E_WAIT_TIMEOUT ?= 120 override LDFLAGS += \ -X ${PACKAGE}/utils/version.version=${VERSION} \ @@ -49,10 +50,11 @@ define protoc # protoc $(1) PATH=${DIST_DIR}:$$PATH protoc \ -I /usr/local/include \ + -I ${DIST_DIR}/protoc-include \ -I . \ -I ./vendor \ -I ${GOPATH}/src \ - -I ${GOPATH}/pkg/mod/github.com/gogo/protobuf@v1.3.1/gogoproto \ + -I ${GOPATH}/pkg/mod/github.com/gogo/protobuf@v1.3.2/gogoproto \ -I ${GOPATH}/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \ --gogofast_out=plugins=grpc:${GOPATH}/src \ --grpc-gateway_out=logtostderr=true:${GOPATH}/src \ @@ -69,63 +71,31 @@ go-mod-vendor: go mod tidy go mod vendor -# go_get,path -# use go_get to install a toolchain binary for a package which is *not* vendored in go.mod -define go_get - cd /tmp && GOBIN=${DIST_DIR} go get $(1) -endef - -# go_install,path -# use go_install to install a toolchain binary for a package which *is* vendored in go.mod -define go_install - GOBIN=${DIST_DIR} go install -mod=vendor ./vendor/$(1) -endef - -.PHONY: $(DIST_DIR)/controller-gen -$(DIST_DIR)/controller-gen: - $(call go_get,sigs.k8s.io/controller-tools/cmd/controller-gen@v0.5.0) - -.PHONY: $(DIST_DIR)/bin/goimports -$(DIST_DIR)/bin/goimports: - $(call go_get,golang.org/x/tools/cmd/goimports) - -.PHONY: $(DIST_DIR)/go-to-protobuf -$(DIST_DIR)/go-to-protobuf: go-mod-vendor - $(call go_install,k8s.io/code-generator/cmd/go-to-protobuf) +.PHONY: install-go-tools-local +install-go-tools-local: go-mod-vendor + ./hack/installers/install-codegen-go-tools.sh -.PHONY: $(DIST_DIR)/protoc-gen-gogo -$(DIST_DIR)/protoc-gen-gogo: go-mod-vendor - $(call go_install,github.com/gogo/protobuf/protoc-gen-gogo) +.PHONY: install-protoc-local +install-protoc-local: + ./hack/installers/install-protoc.sh -.PHONY: $(DIST_DIR)/protoc-gen-gogofast -$(DIST_DIR)/protoc-gen-gogofast: - $(call go_install,github.com/gogo/protobuf/protoc-gen-gogofast) +.PHONY: install-devtools-local +install-devtools-local: + ./hack/installers/install-dev-tools.sh -.PHONY: $(DIST_DIR)/protoc-gen-grpc-gateway -$(DIST_DIR)/protoc-gen-grpc-gateway: go-mod-vendor - $(call go_install,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway) - -.PHONY: $(DIST_DIR)/protoc-gen-swagger -$(DIST_DIR)/protoc-gen-swagger: go-mod-vendor - $(call go_install,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger) - -.PHONY: $(DIST_DIR)/openapi-gen -$(DIST_DIR)/openapi-gen: go-mod-vendor - $(call go_install,k8s.io/kube-openapi/cmd/openapi-gen) - -.PHONY: $(DIST_DIR)/mockery -$(DIST_DIR)/mockery: - $(call go_get,github.com/vektra/mockery/v2@v2.6.0) +# Installs all tools required to build and test locally +.PHONY: install-tools-local +install-tools-local: install-go-tools-local install-protoc-local install-devtools-local TYPES := $(shell find 
pkg/apis/rollouts/v1alpha1 -type f -name '*.go' -not -name openapi_generated.go -not -name '*generated*' -not -name '*test.go') APIMACHINERY_PKGS=k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,+k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/batch/v1 .PHONY: install-toolchain -install-toolchain: go-mod-vendor $(DIST_DIR)/controller-gen $(DIST_DIR)/bin/goimports $(DIST_DIR)/go-to-protobuf $(DIST_DIR)/protoc-gen-gogo $(DIST_DIR)/protoc-gen-gogofast $(DIST_DIR)/protoc-gen-grpc-gateway $(DIST_DIR)/protoc-gen-swagger $(DIST_DIR)/openapi-gen $(DIST_DIR)/mockery +install-toolchain: install-go-tools-local install-protoc-local # generates all auto-generated code .PHONY: codegen -codegen: gen-proto gen-k8scodegen gen-openapi gen-mocks gen-crd manifests +codegen: go-mod-vendor gen-proto gen-k8scodegen gen-openapi gen-mocks gen-crd manifests # generates all files related to proto files .PHONY: gen-proto @@ -133,21 +103,23 @@ gen-proto: k8s-proto api-proto ui-proto # generates the .proto files affected by changes to types.go .PHONY: k8s-proto -k8s-proto: go-mod-vendor install-toolchain $(TYPES) +k8s-proto: go-mod-vendor $(TYPES) PATH=${DIST_DIR}:$$PATH go-to-protobuf \ --go-header-file=./hack/custom-boilerplate.go.txt \ --packages=github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1 \ --apimachinery-packages=${APIMACHINERY_PKGS} \ - --proto-import $(CURDIR)/vendor + --proto-import $(CURDIR)/vendor \ + --proto-import=${DIST_DIR}/protoc-include touch pkg/apis/rollouts/v1alpha1/generated.proto # generates *.pb.go, *.pb.gw.go, swagger from .proto files .PHONY: api-proto -api-proto: go-mod-vendor install-toolchain k8s-proto +api-proto: go-mod-vendor k8s-proto $(call protoc,pkg/apiclient/rollout/rollout.proto) # generates ui related proto files .PHONY: ui-proto +ui-proto: yarn --cwd ui run protogen # generates k8s client, informer, lister, deepcopy from types.go @@ -157,12 +129,12 @@ gen-k8scodegen: go-mod-vendor # generates ./manifests/crds/ .PHONY: gen-crd -gen-crd: $(DIST_DIR)/controller-gen +gen-crd: install-go-tools-local go run ./hack/gen-crd-spec/main.go # generates mock files from interfaces .PHONY: gen-mocks -gen-mocks: $(DIST_DIR)/mockery +gen-mocks: install-go-tools-local ./hack/update-mocks.sh # generates openapi_generated.go @@ -191,6 +163,7 @@ ui/dist: plugin-linux: ui/dist cp -r ui/dist/app/* server/static CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${PLUGIN_CLI_NAME}-linux-amd64 ./cmd/kubectl-argo-rollouts + CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${PLUGIN_CLI_NAME}-linux-arm64 ./cmd/kubectl-argo-rollouts .PHONY: plugin-darwin plugin-darwin: ui/dist @@ -208,22 +181,22 @@ docs: .PHONY: builder-image builder-image: - docker build -t $(IMAGE_PREFIX)argo-rollouts-ci-builder:$(IMAGE_TAG) --target builder . + DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argo-rollouts-ci-builder:$(IMAGE_TAG) --target builder . 
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-rollouts:$(IMAGE_TAG) ; fi .PHONY: image image: ifeq ($(DEV_IMAGE), true) CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/rollouts-controller-linux-amd64 ./cmd/rollouts-controller - docker build -t $(IMAGE_PREFIX)argo-rollouts:$(IMAGE_TAG) -f Dockerfile.dev ${DIST_DIR} + DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argo-rollouts:$(IMAGE_TAG) -f Dockerfile.dev ${DIST_DIR} else - docker build -t $(IMAGE_PREFIX)argo-rollouts:$(IMAGE_TAG) . + DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argo-rollouts:$(IMAGE_TAG) . endif @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-rollouts:$(IMAGE_TAG) ; fi .PHONY: plugin-image plugin-image: - docker build --target kubectl-argo-rollouts -t $(IMAGE_PREFIX)kubectl-argo-rollouts:$(IMAGE_TAG) . + DOCKER_BUILDKIT=1 docker build --target kubectl-argo-rollouts -t $(IMAGE_PREFIX)kubectl-argo-rollouts:$(IMAGE_TAG) . if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)kubectl-argo-rollouts:$(IMAGE_TAG) ; fi .PHONY: lint @@ -240,11 +213,16 @@ test-kustomize: .PHONY: start-e2e start-e2e: - go run ./cmd/rollouts-controller/main.go --instance-id ${E2E_INSTANCE_ID} --loglevel debug + go run ./cmd/rollouts-controller/main.go --instance-id ${E2E_INSTANCE_ID} --loglevel debug --kloglevel 6 .PHONY: test-e2e -test-e2e: - go test -timeout 30m -v -count 1 --tags e2e -p ${E2E_PARALLEL} --short ./test/e2e ${E2E_TEST_OPTIONS} +test-e2e: install-devtools-local + ${DIST_DIR}/gotestsum --rerun-fails-report=rerunreport.txt --junitfile=junit.xml --format=testname --packages="./test/e2e" --rerun-fails=5 -- -timeout 60m -count 1 --tags e2e -p ${E2E_PARALLEL} -parallel ${E2E_PARALLEL} -v --short ./test/e2e ${E2E_TEST_OPTIONS} + +.PHONY: test-unit + test-unit: install-devtools-local + ${DIST_DIR}/gotestsum --junitfile=junit.xml --format=testname --packages="./..." -- -covermode=count -coverprofile=coverage.out ./... + .PHONY: coverage coverage: test @@ -288,3 +266,8 @@ release-plugins: .PHONY: release release: release-precheck precheckin image plugin-image release-plugins + +.PHONY: trivy +trivy: + @trivy fs --clear-cache + @trivy fs . diff --git a/OWNERS b/OWNERS index ef7133eca0..eba5228bfd 100644 --- a/OWNERS +++ b/OWNERS @@ -4,9 +4,15 @@ owners: approvers: - alexmt +- huikang - jessesuen - khhirani +- leoluz reviewers: +- agrawroh - dthomson25 -- huikang \ No newline at end of file +- harikrongali +- kostis-codefresh +- perenesenko +- zachaller diff --git a/README.md b/README.md index 4e08ea0499..f044877ab3 100644 --- a/README.md +++ b/README.md @@ -39,17 +39,27 @@ For these reasons, in large scale high-volume production environments, a rolling * Customizable metric queries and analysis of business KPIs * Ingress controller integration: NGINX, ALB * Service Mesh integration: Istio, Linkerd, SMI -* Metric provider integration: Prometheus, Wavefront, Kayenta, Web, Kubernetes Jobs, Datadog, New Relic +* Metric provider integration: Prometheus, Wavefront, Kayenta, Web, Kubernetes Jobs, Datadog, New Relic, InfluxDB ## Documentation To learn more about Argo Rollouts go to the [complete documentation](https://argoproj.github.io/argo-rollouts/). 
+## Community + +You can reach the Argo Rollouts community and developers via the following channels: + +* Q & A: [Github Discussions](https://github.com/argoproj/argo-rollouts/discussions) +* Chat: [The #argo-rollouts Slack channel](https://argoproj.github.io/community/join-slack) +* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) +* User Community meeting: [First Wednesday of each month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ) + ## Who uses Argo Rollouts? [Official Argo Rollouts User List](https://github.com/argoproj/argo-rollouts/blob/master/USERS.md) ## Community Blogs and Presentations +* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo) * [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY) * [Argo Rollouts - Canary Deployments Made Easy In Kubernetes](https://youtu.be/84Ky0aPbHvY) * [How Intuit Does Canary and Blue Green Deployments](https://www.youtube.com/watch?v=yeVkTTO9nOA) @@ -61,5 +71,6 @@ To learn more about Argo Rollouts go to the [complete documentation](https://arg * [GitOps with Argo CD and an Argo Rollouts canary release](https://www.youtube.com/watch?v=35Qimb_AZ8U) * [Multi-Stage Delivery with Keptn and Argo Rollouts](https://www.youtube.com/watch?v=w-E8FzTbN3g&t=1s) * [Gradual Code Releases Using an In-House Kubernetes Canary Controller on top of Argo Rollouts](https://doordash.engineering/2021/04/14/gradual-code-releases-using-an-in-house-kubernetes-canary-controller/) +* [How Scalable is Argo-Rollouts: A Cloud Operator’s Perspective](https://www.youtube.com/watch?v=rCEhxJ2NSTI) diff --git a/USERS.md b/USERS.md index 97897944aa..be3ec11e5f 100644 --- a/USERS.md +++ b/USERS.md @@ -13,15 +13,20 @@ Organizations below are **officially** using Argo Rollouts. Please send a PR wit 1. [Databricks](https://github.com/databricks) 1. [Devtron Labs](https://github.com/devtron-labs/devtron) 1. [Farfetch](https://www.farfetch.com/) +1. [Flipkart](https://flipkart.com) +1. [Gllue](https://gllue.com) +1. [Ibotta](https://home.ibotta.com/) 1. [Intuit](https://www.intuit.com/) 1. [New Relic](https://newrelic.com/) 1. [Nitro](https://www.gonitro.com) 1. [Nozzle](https://nozzle.io) +1. [PagerDuty](https://www.pagerduty.com/) 1. [PayPal](https://www.paypal.com/) 1. [PayPay](https://paypay.ne.jp/) 1. [Quipper](https://www.quipper.com/) 1. [Quizlet](https://quizlet.com) 1. [Salesforce](https://www.salesforce.com/) +1. [SAP Concur](https://www.concur.com/) 1. [Shipt](https://www.shipt.com/) 1. [Skillz](https://www.skillz.com) 1. 
[Spotify](https://www.spotify.com/) diff --git a/VERSION b/VERSION index 9084fa2f71..26aaba0e86 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.1.0 +1.2.0 diff --git a/analysis/analysis.go b/analysis/analysis.go index 046e3899b8..53d5358647 100644 --- a/analysis/analysis.go +++ b/analysis/analysis.go @@ -7,17 +7,17 @@ import ( "sync" "time" - "k8s.io/utils/pointer" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" "github.com/argoproj/argo-rollouts/utils/defaults" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -26,7 +26,10 @@ const ( DefaultMeasurementHistoryLimit = 10 // DefaultErrorRetryInterval is the default interval to retry a measurement upon error, in the // event an interval was not specified - DefaultErrorRetryInterval time.Duration = 10 * time.Second + DefaultErrorRetryInterval = 10 * time.Second + // SuccessfulAssessmentRunTerminatedResult is used for logging purposes when the metrics evaluation + // is successful and the run is terminated. + SuccessfulAssessmentRunTerminatedResult = "Metric Assessment Result - Successful: Run Terminated" ) // metricTask holds the metric which need to be measured during this reconciliation along with @@ -40,7 +43,7 @@ func (c *Controller) reconcileAnalysisRun(origRun *v1alpha1.AnalysisRun) *v1alph if origRun.Status.Phase.Completed() { return origRun } - log := logutil.WithAnalysisRun(origRun) + logger := logutil.WithAnalysisRun(origRun) run := origRun.DeepCopy() if run.Status.MetricResults == nil { @@ -49,8 +52,8 @@ func (c *Controller) reconcileAnalysisRun(origRun *v1alpha1.AnalysisRun) *v1alph resolvedMetrics, err := getResolvedMetricsWithoutSecrets(run.Spec.Metrics, run.Spec.Args) if err != nil { - message := fmt.Sprintf("unable to resolve metric arguments: %v", err) - log.Warn(message) + message := fmt.Sprintf("Unable to resolve metric arguments: %v", err) + logger.Warn(message) run.Status.Phase = v1alpha1.AnalysisPhaseError run.Status.Message = message c.recordAnalysisRunCompletionEvent(run) @@ -59,8 +62,28 @@ func (c *Controller) reconcileAnalysisRun(origRun *v1alpha1.AnalysisRun) *v1alph err = analysisutil.ValidateMetrics(resolvedMetrics) if err != nil { - message := fmt.Sprintf("analysis spec invalid: %v", err) - log.Warn(message) + message := fmt.Sprintf("Analysis spec invalid: %v", err) + logger.Warn(message) + run.Status.Phase = v1alpha1.AnalysisPhaseError + run.Status.Message = message + c.recordAnalysisRunCompletionEvent(run) + return run + } + + dryRunMetricsMap, err := analysisutil.GetDryRunMetrics(run.Spec.DryRun, resolvedMetrics) + if err != nil { + message := fmt.Sprintf("Analysis spec invalid: %v", err) + logger.Warn(message) + run.Status.Phase = v1alpha1.AnalysisPhaseError + run.Status.Message = message + c.recordAnalysisRunCompletionEvent(run) + return run + } + + measurementRetentionMetricsMap, err := analysisutil.GetMeasurementRetentionMetrics(run.Spec.MeasurementRetention, resolvedMetrics) + if err != nil { + message := fmt.Sprintf("Analysis spec invalid: %v", err) + logger.Warn(message) run.Status.Phase = v1alpha1.AnalysisPhaseError run.Status.Message = message c.recordAnalysisRunCompletionEvent(run) @@ -68,18 +91,18 @@ func (c *Controller) reconcileAnalysisRun(origRun 
*v1alpha1.AnalysisRun) *v1alph } tasks := generateMetricTasks(run, resolvedMetrics) - log.Infof("taking %d measurements", len(tasks)) - err = c.runMeasurements(run, tasks) + logger.Infof("Taking %d Measurement(s)...", len(tasks)) + err = c.runMeasurements(run, tasks, dryRunMetricsMap) if err != nil { - message := fmt.Sprintf("unable to resolve metric arguments: %v", err) - log.Warn(message) + message := fmt.Sprintf("Unable to resolve metric arguments: %v", err) + logger.Warn(message) run.Status.Phase = v1alpha1.AnalysisPhaseError run.Status.Message = message c.recordAnalysisRunCompletionEvent(run) return run } - newStatus, newMessage := c.assessRunStatus(run, resolvedMetrics) + newStatus, newMessage := c.assessRunStatus(run, resolvedMetrics, dryRunMetricsMap) if newStatus != run.Status.Phase { run.Status.Phase = newStatus run.Status.Message = newMessage @@ -88,19 +111,19 @@ func (c *Controller) reconcileAnalysisRun(origRun *v1alpha1.AnalysisRun) *v1alph } } - err = c.garbageCollectMeasurements(run, DefaultMeasurementHistoryLimit) + err = c.garbageCollectMeasurements(run, measurementRetentionMetricsMap, DefaultMeasurementHistoryLimit) if err != nil { // TODO(jessesuen): surface errors to controller so they can be retried - log.Warnf("Failed to garbage collect measurements: %v", err) + logger.Warnf("Failed to garbage collect measurements: %v", err) } nextReconcileTime := calculateNextReconcileTime(run, resolvedMetrics) if nextReconcileTime != nil { - enqueueSeconds := nextReconcileTime.Sub(time.Now()) + enqueueSeconds := nextReconcileTime.Sub(timeutil.Now()) if enqueueSeconds < 0 { enqueueSeconds = 0 } - log.Infof("enqueueing analysis after %v", enqueueSeconds) + logger.Infof("Enqueueing analysis after %v", enqueueSeconds) c.enqueueAnalysisAfter(run, enqueueSeconds) } return run @@ -133,7 +156,7 @@ func (c *Controller) recordAnalysisRunCompletionEvent(run *v1alpha1.AnalysisRun) case v1alpha1.AnalysisPhaseError, v1alpha1.AnalysisPhaseFailed: eventType = corev1.EventTypeWarning } - c.recorder.Eventf(run, record.EventOptions{EventType: eventType, EventReason: "AnalysisRun" + string(run.Status.Phase)}, "analysis completed %s", run.Status.Phase) + c.recorder.Eventf(run, record.EventOptions{EventType: eventType, EventReason: "AnalysisRun" + string(run.Status.Phase)}, "Analysis Completed. Result: %s", run.Status.Phase) } // generateMetricTasks generates a list of metrics tasks needed to be measured as part of this @@ -141,7 +164,7 @@ func (c *Controller) recordAnalysisRunCompletionEvent(run *v1alpha1.AnalysisRun) // terminating (e.g. due to manual termination or failing metric), will not schedule further // measurements other than to resume any in-flight measurements. func generateMetricTasks(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric) []metricTask { - log := logutil.WithAnalysisRun(run) + logger := logutil.WithAnalysisRun(run) var tasks []metricTask terminating := analysisutil.IsTerminating(run) @@ -149,15 +172,15 @@ func generateMetricTasks(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric) [ if analysisutil.MetricCompleted(run, metric.Name) { continue } - logCtx := log.WithField("metric", metric.Name) + logCtx := logger.WithField("metric", metric.Name) lastMeasurement := analysisutil.LastMeasurement(run, metric.Name) if lastMeasurement != nil && lastMeasurement.FinishedAt == nil { - now := metav1.Now() + now := timeutil.MetaNow() if lastMeasurement.ResumeAt != nil && lastMeasurement.ResumeAt.After(now.Time) { continue } // last measurement is still in-progress. 
need to complete it - logCtx.Infof("resuming in-progress measurement") + logCtx.Infof("Resuming in-progress measurement") tasks = append(tasks, metricTask{ metric: run.Spec.Metrics[i], incompleteMeasurement: lastMeasurement, @@ -165,7 +188,7 @@ func generateMetricTasks(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric) [ continue } if terminating { - logCtx.Infof("skipping measurement: run is terminating") + logCtx.Infof("Skipping measurement: run is terminating") continue } if lastMeasurement == nil { @@ -178,14 +201,14 @@ func generateMetricTasks(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric) [ logCtx.Warnf("failed to parse duration: %v", err) continue } - if run.Status.StartedAt.Add(duration).After(time.Now()) { - logCtx.Infof("waiting until start delay duration passes") + if run.Status.StartedAt.Add(duration).After(timeutil.Now()) { + logCtx.Infof("Waiting until start delay duration passes") continue } } // measurement never taken tasks = append(tasks, metricTask{metric: run.Spec.Metrics[i]}) - logCtx.Infof("running initial measurement") + logCtx.Infof("Running initial measurement") continue } metricResult := analysisutil.GetResult(run, metric.Name) @@ -201,22 +224,32 @@ func generateMetricTasks(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric) [ if lastMeasurement.Phase == v1alpha1.AnalysisPhaseError { interval = DefaultErrorRetryInterval } else if metric.Interval != "" { - metricInterval, err := metric.Interval.Duration() + parsedInterval, err := parseMetricInterval(*logCtx, metric.Interval) if err != nil { - logCtx.Warnf("failed to parse interval: %v", err) continue } - interval = metricInterval + interval = parsedInterval } - if time.Now().After(lastMeasurement.FinishedAt.Add(interval)) { + if timeutil.Now().After(lastMeasurement.FinishedAt.Add(interval)) { tasks = append(tasks, metricTask{metric: run.Spec.Metrics[i]}) - logCtx.Infof("running overdue measurement") + logCtx.Infof("Running overdue measurement") continue } } return tasks } +// parseMetricInterval is a helper method to parse the given metric interval and return the +// parsed duration or error (if any) +func parseMetricInterval(logCtx log.Entry, metricDurationString v1alpha1.DurationString) (time.Duration, error) { + metricInterval, err := metricDurationString.Duration() + if err != nil { + logCtx.Warnf("Failed to parse interval: %v", err) + return -1, err + } + return metricInterval, nil +} + // resolveArgs resolves args for metricTasks, including secret references // returns resolved metricTasks and secrets for log redaction func (c *Controller) resolveArgs(tasks []metricTask, args []v1alpha1.Argument, namespace string) ([]metricTask, []string, error) { @@ -266,7 +299,7 @@ func (c *Controller) resolveArgs(tasks []metricTask, args []v1alpha1.Argument, n } // runMeasurements iterates a list of metric tasks, and runs, resumes, or terminates measurements -func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTask) error { +func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTask, dryRunMetricsMap map[string]bool) error { var wg sync.WaitGroup // resultsLock should be held whenever we are accessing or setting status.metricResults since // we are performing queries in parallel @@ -286,26 +319,35 @@ func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTa go func(t metricTask) { defer wg.Done() //redact secret values from logs - log := logutil.WithRedactor(*logutil.WithAnalysisRun(run).WithField("metric", t.metric.Name), secrets) + 
logger := logutil.WithRedactor(*logutil.WithAnalysisRun(run).WithField("metric", t.metric.Name), secrets) resultsLock.Lock() metricResult := analysisutil.GetResult(run, t.metric.Name) resultsLock.Unlock() + provider, err := c.newProvider(*logger, t.metric) + // Fix for https://github.com/argoproj/argo-rollouts/issues/2024: this error is not bubbled up to the runMeasurements caller; + // it just stops the goroutine to prevent a nil pointer dereference. Keeping this simple since it is a patch for a bug. + // We probably want to handle errors in this goroutine differently in master, but for now just prevent crashing. + if err != nil { + log.Errorf("Error getting provider: %v", err) + return + } if metricResult == nil { metricResult = &v1alpha1.MetricResult{ - Name: t.metric.Name, - Phase: v1alpha1.AnalysisPhaseRunning, + Name: t.metric.Name, + Phase: v1alpha1.AnalysisPhaseRunning, + DryRun: dryRunMetricsMap[t.metric.Name], + Metadata: provider.GetMetadata(t.metric), } } var newMeasurement v1alpha1.Measurement - provider, err := c.newProvider(*log, t.metric) if err != nil { if t.incompleteMeasurement != nil { newMeasurement = *t.incompleteMeasurement } else { - startedAt := metav1.Now() + startedAt := timeutil.MetaNow() newMeasurement.StartedAt = &startedAt } newMeasurement.Phase = v1alpha1.AnalysisPhaseError @@ -316,10 +358,10 @@ func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTa } else { // metric is incomplete. either terminate or resume it if terminating { - log.Infof("terminating in-progress measurement") + logger.Infof("Terminating in-progress measurement") newMeasurement = provider.Terminate(run, t.metric, *t.incompleteMeasurement) if newMeasurement.Phase == v1alpha1.AnalysisPhaseSuccessful { - newMeasurement.Message = "metric terminated" + newMeasurement.Message = "Metric Terminated" } } else { newMeasurement = provider.Resume(run, t.metric, *t.incompleteMeasurement) @@ -328,9 +370,9 @@ func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTa } if newMeasurement.Phase.Completed() { - log.Infof("measurement completed %s", newMeasurement.Phase) + logger.Infof("Measurement Completed.
Result: %s", newMeasurement.Phase) if newMeasurement.FinishedAt == nil { - finishedAt := metav1.Now() + finishedAt := timeutil.MetaNow() newMeasurement.FinishedAt = &finishedAt } switch newMeasurement.Phase { @@ -349,7 +391,7 @@ func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTa case v1alpha1.AnalysisPhaseError: metricResult.Error++ metricResult.ConsecutiveError++ - log.Warnf("measurement had error: %s", newMeasurement.Message) + logger.Warnf("Measurement had error: %s", newMeasurement.Message) } } @@ -380,34 +422,56 @@ func (c *Controller) runMeasurements(run *v1alpha1.AnalysisRun, tasks []metricTa // assessRunStatus assesses the overall status of this AnalysisRun // If any metric is not yet completed, the AnalysisRun is still considered Running // Once all metrics are complete, the worst status is used as the overall AnalysisRun status -func (c *Controller) assessRunStatus(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric) (v1alpha1.AnalysisPhase, string) { +func (c *Controller) assessRunStatus(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Metric, dryRunMetricsMap map[string]bool) (v1alpha1.AnalysisPhase, string) { var worstStatus v1alpha1.AnalysisPhase var worstMessage string terminating := analysisutil.IsTerminating(run) everythingCompleted := true if run.Status.StartedAt == nil { - now := metav1.Now() + now := timeutil.MetaNow() run.Status.StartedAt = &now } if run.Spec.Terminate { - worstMessage = "run terminated" + worstMessage = "Run Terminated" } - // Iterate all metrics and update MetricResult.Phase fields based on latest measurement(s) + // Initialize Run & Dry-Run summary object + runSummary := v1alpha1.RunSummary{ + Count: 0, + Successful: 0, + Failed: 0, + Inconclusive: 0, + Error: 0, + } + dryRunSummary := v1alpha1.RunSummary{ + Count: 0, + Successful: 0, + Failed: 0, + Inconclusive: 0, + Error: 0, + } + + // Iterate all metrics and update `MetricResult.Phase` fields based on latest measurement(s) for _, metric := range metrics { + if dryRunMetricsMap[metric.Name] { + log.Infof("Metric '%s' is running in the Dry-Run mode.", metric.Name) + dryRunSummary.Count++ + } else { + runSummary.Count++ + } if result := analysisutil.GetResult(run, metric.Name); result != nil { - log := logutil.WithAnalysisRun(run).WithField("metric", metric.Name) + logger := logutil.WithAnalysisRun(run).WithField("metric", metric.Name) metricStatus := assessMetricStatus(metric, *result, terminating) if result.Phase != metricStatus { - log.Infof("metric transitioned from %s -> %s", result.Phase, metricStatus) + logger.Infof("Metric '%s' transitioned from %s -> %s", metric.Name, result.Phase, metricStatus) if metricStatus.Completed() { eventType := corev1.EventTypeNormal switch metricStatus { case v1alpha1.AnalysisPhaseError, v1alpha1.AnalysisPhaseFailed: eventType = corev1.EventTypeWarning } - c.recorder.Eventf(run, record.EventOptions{EventType: eventType, EventReason: "Metric" + string(metricStatus)}, "metric '%s' completed %s", metric.Name, metricStatus) + c.recorder.Eventf(run, record.EventOptions{EventType: eventType, EventReason: "Metric" + string(metricStatus)}, "Metric '%s' Completed. 
Result: %s", metric.Name, metricStatus) } if lastMeasurement := analysisutil.LastMeasurement(run, metric.Name); lastMeasurement != nil { result.Message = lastMeasurement.Message @@ -419,31 +483,76 @@ func (c *Controller) assessRunStatus(run *v1alpha1.AnalysisRun, metrics []v1alph // if any metric is in-progress, then entire analysis run will be considered running everythingCompleted = false } else { + phase, message := assessMetricFailureInconclusiveOrError(metric, *result) + // NOTE: We don't care about the status if the metric is marked as a Dry-Run // otherwise, remember the worst status of all completed metric results - if worstStatus == "" || analysisutil.IsWorse(worstStatus, metricStatus) { - worstStatus = metricStatus - _, message := assessMetricFailureInconclusiveOrError(metric, *result) + if !dryRunMetricsMap[metric.Name] { + if worstStatus == "" || analysisutil.IsWorse(worstStatus, metricStatus) { + worstStatus = metricStatus + if message != "" { + worstMessage = fmt.Sprintf("Metric \"%s\" assessed %s due to %s", metric.Name, metricStatus, message) + if result.Message != "" { + worstMessage += fmt.Sprintf(": \"Error Message: %s\"", result.Message) + } + } + } + // Update Run Summary + switch phase { + case v1alpha1.AnalysisPhaseError: + runSummary.Error++ + case v1alpha1.AnalysisPhaseFailed: + runSummary.Failed++ + case v1alpha1.AnalysisPhaseInconclusive: + runSummary.Inconclusive++ + case v1alpha1.AnalysisPhaseSuccessful: + runSummary.Successful++ + default: + // We'll mark the status as success by default if it doesn't match anything. + runSummary.Successful++ + } + } else { + // Update metric result message if message != "" { - worstMessage = fmt.Sprintf("metric \"%s\" assessed %s due to %s", metric.Name, metricStatus, message) + failureMessage := fmt.Sprintf("Metric assessed %s due to %s", metricStatus, message) if result.Message != "" { - worstMessage += fmt.Sprintf(": \"Error Message: %s\"", result.Message) + result.Message = fmt.Sprintf("%s: \"Error Message: %s\"", failureMessage, result.Message) + } else { + result.Message = failureMessage } + analysisutil.SetResult(run, *result) + } + // Update DryRun Summary + switch phase { + case v1alpha1.AnalysisPhaseError: + dryRunSummary.Error++ + case v1alpha1.AnalysisPhaseFailed: + dryRunSummary.Failed++ + case v1alpha1.AnalysisPhaseInconclusive: + dryRunSummary.Inconclusive++ + case v1alpha1.AnalysisPhaseSuccessful: + dryRunSummary.Successful++ + default: + // We'll mark the status as success by default if it doesn't match anything. + dryRunSummary.Successful++ } } } } else { - // metric hasn't started running. possible cases where some of the metrics starts with delay + // metric hasn't started running. possible cases where some metrics starts with delay everythingCompleted = false } } - + // Append Dry-Run metrics results if any. 
+ worstMessage = strings.TrimSpace(worstMessage) + run.Status.RunSummary = runSummary + run.Status.DryRunSummary = &dryRunSummary if terminating { if worstStatus == "" { // we have yet to take a single measurement, but have already been instructed to stop - log.Infof("metric assessed %s: run terminated", v1alpha1.AnalysisPhaseSuccessful) + log.Infof(SuccessfulAssessmentRunTerminatedResult) return v1alpha1.AnalysisPhaseSuccessful, worstMessage } - log.Infof("metric assessed %s: run terminated", worstStatus) + log.Infof("Metric Assessment Result - %s: Run Terminated", worstStatus) return worstStatus, worstMessage } if !everythingCompleted || worstStatus == "" { @@ -453,25 +562,25 @@ func (c *Controller) assessRunStatus(run *v1alpha1.AnalysisRun, metrics []v1alph } // assessMetricStatus assesses the status of a single metric based on: -// * current/latest measurement status +// * current or latest measurement status // * parameters given by the metric (failureLimit, count, etc...) -// * whether or not we are terminating (e.g. due to failing run, or termination request) +// * whether we are terminating (e.g. due to failing run, or termination request) func assessMetricStatus(metric v1alpha1.Metric, result v1alpha1.MetricResult, terminating bool) v1alpha1.AnalysisPhase { if result.Phase.Completed() { return result.Phase } - log := log.WithField("metric", metric.Name) + logger := log.WithField("metric", metric.Name) if len(result.Measurements) == 0 { if terminating { // we have yet to take a single measurement, but have already been instructed to stop - log.Infof("metric assessed %s: run terminated", v1alpha1.AnalysisPhaseSuccessful) + logger.Infof(SuccessfulAssessmentRunTerminatedResult) return v1alpha1.AnalysisPhaseSuccessful } return v1alpha1.AnalysisPhasePending } lastMeasurement := result.Measurements[len(result.Measurements)-1] if !lastMeasurement.Phase.Completed() { - // we still have a in-flight measurement + // we still have an in-flight measurement return v1alpha1.AnalysisPhaseRunning } @@ -479,7 +588,7 @@ func assessMetricStatus(metric v1alpha1.Metric, result v1alpha1.MetricResult, te // If true, then return AnalysisRunPhase as Failed, Inconclusive, or Error respectively phaseFailureInconclusiveOrError, message := assessMetricFailureInconclusiveOrError(metric, result) if phaseFailureInconclusiveOrError != "" { - log.Infof("metric assessed %s: %s", phaseFailureInconclusiveOrError, message) + logger.Infof("Metric Assessment Result - %s: %s", phaseFailureInconclusiveOrError, message) return phaseFailureInconclusiveOrError } @@ -488,12 +597,12 @@ func assessMetricStatus(metric v1alpha1.Metric, result v1alpha1.MetricResult, te // taken into consideration above, and we do not want to fail if failures < failureLimit. 
effectiveCount := metric.EffectiveCount() if effectiveCount != nil && result.Count >= int32(effectiveCount.IntValue()) { - log.Infof("metric assessed %s: count (%s) reached", v1alpha1.AnalysisPhaseSuccessful, effectiveCount.String()) + logger.Infof("Metric Assessment Result - %s: Count (%s) Reached", v1alpha1.AnalysisPhaseSuccessful, effectiveCount.String()) return v1alpha1.AnalysisPhaseSuccessful } // if we get here, this metric runs indefinitely if terminating { - log.Infof("metric assessed %s: run terminated", v1alpha1.AnalysisPhaseSuccessful) + logger.Infof(SuccessfulAssessmentRunTerminatedResult) return v1alpha1.AnalysisPhaseSuccessful } return v1alpha1.AnalysisPhaseRunning @@ -542,23 +651,22 @@ func calculateNextReconcileTime(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Me lastMeasurement := analysisutil.LastMeasurement(run, metric.Name) if lastMeasurement == nil { if metric.InitialDelay != "" { - startTime := metav1.Now() + startTime := timeutil.MetaNow() if run.Status.StartedAt != nil { startTime = *run.Status.StartedAt } - duration, err := metric.InitialDelay.Duration() + parsedInterval, err := parseMetricInterval(*logCtx, metric.InitialDelay) if err != nil { - logCtx.Warnf("failed to parse interval: %v", err) continue } - endInitialDelay := startTime.Add(duration) + endInitialDelay := startTime.Add(parsedInterval) if reconcileTime == nil || reconcileTime.After(endInitialDelay) { reconcileTime = &endInitialDelay } continue } // no measurement was started . we should never get here - logCtx.Warnf("metric never started. not factored into enqueue time") + logCtx.Warnf("Metric never started. Not factored into enqueue time.") continue } if lastMeasurement.FinishedAt == nil { @@ -580,18 +688,17 @@ func calculateNextReconcileTime(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Me if lastMeasurement.Phase == v1alpha1.AnalysisPhaseError { interval = DefaultErrorRetryInterval } else if metric.Interval != "" { - metricInterval, err := metric.Interval.Duration() + parsedInterval, err := parseMetricInterval(*logCtx, metric.Interval) if err != nil { - logCtx.Warnf("failed to parse interval: %v", err) continue } - interval = metricInterval + interval = parsedInterval } else { // if we get here, an interval was not set (meaning reoccurrence was not desired), and // there was no error (meaning we don't need to retry). no need to requeue this metric. // NOTE: we shouldn't ever get here since it means we are not doing proper bookkeeping // of count. - logCtx.Warnf("skipping requeue. no interval or error (count: %d, effectiveCount: %s)", metricResult.Count, metric.EffectiveCount().String()) + logCtx.Warnf("Skipping requeue. 
No interval or error (count: %d, effectiveCount: %s)", metricResult.Count, metric.EffectiveCount().String()) continue } // Take the earliest time of all metrics @@ -604,7 +711,7 @@ func calculateNextReconcileTime(run *v1alpha1.AnalysisRun, metrics []v1alpha1.Me } // garbageCollectMeasurements trims the measurement history to the specified limit and GCs old measurements -func (c *Controller) garbageCollectMeasurements(run *v1alpha1.AnalysisRun, limit int) error { +func (c *Controller) garbageCollectMeasurements(run *v1alpha1.AnalysisRun, measurementRetentionMetricNamesMap map[string]*v1alpha1.MeasurementRetention, limit int) error { var errors []error metricsByName := make(map[string]v1alpha1.Metric) @@ -614,22 +721,27 @@ func (c *Controller) garbageCollectMeasurements(run *v1alpha1.AnalysisRun, limit for i, result := range run.Status.MetricResults { length := len(result.Measurements) - if length > limit { + measurementRetentionObject := measurementRetentionMetricNamesMap[result.Name] + measurementsLimit := limit + if measurementRetentionObject != nil && measurementRetentionObject.Limit > 0 { + measurementsLimit = int(measurementRetentionObject.Limit) + } + if length > measurementsLimit { metric, ok := metricsByName[result.Name] if !ok { continue } - log := logutil.WithAnalysisRun(run).WithField("metric", metric.Name) - provider, err := c.newProvider(*log, metric) + logger := logutil.WithAnalysisRun(run).WithField("metric", metric.Name) + provider, err := c.newProvider(*logger, metric) if err != nil { errors = append(errors, err) continue } - err = provider.GarbageCollect(run, metric, limit) + err = provider.GarbageCollect(run, metric, measurementsLimit) if err != nil { return err } - result.Measurements = result.Measurements[length-limit : length] + result.Measurements = result.Measurements[length-measurementsLimit : length] } run.Status.MetricResults[i] = result } diff --git a/analysis/analysis_test.go b/analysis/analysis_test.go index 5b1fa56062..30e3b7adb9 100644 --- a/analysis/analysis_test.go +++ b/analysis/analysis_test.go @@ -8,13 +8,12 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/util/intstr" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -91,7 +90,12 @@ func newRun() *v1alpha1.AnalysisRun { } // newTerminatingRun returns a run which is terminating because of the given status -func newTerminatingRun(status v1alpha1.AnalysisPhase) *v1alpha1.AnalysisRun { +func newTerminatingRun(status v1alpha1.AnalysisPhase, isDryRun bool) *v1alpha1.AnalysisRun { + var dryRunArray []v1alpha1.DryRun + if isDryRun { + dryRunArray = append(dryRunArray, v1alpha1.DryRun{MetricName: "run-forever"}) + dryRunArray = append(dryRunArray, v1alpha1.DryRun{MetricName: "failed-metric"}) + } run := v1alpha1.AnalysisRun{ Spec: v1alpha1.AnalysisRunSpec{ Metrics: []v1alpha1.Metric{ @@ -108,21 +112,24 @@ func newTerminatingRun(status v1alpha1.AnalysisPhase) *v1alpha1.AnalysisRun { }, }, }, + DryRun: dryRunArray, }, Status: v1alpha1.AnalysisRunStatus{ Phase: v1alpha1.AnalysisPhaseRunning, MetricResults: []v1alpha1.MetricResult{ { - Name: "run-forever", - Phase: v1alpha1.AnalysisPhaseRunning, + Name: "run-forever", + DryRun: isDryRun, + Phase: v1alpha1.AnalysisPhaseRunning, Measurements: []v1alpha1.Measurement{{ Phase: v1alpha1.AnalysisPhaseRunning, 
StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))), }}, }, { - Name: "failed-metric", - Count: 1, + Name: "failed-metric", + Count: 1, + DryRun: isDryRun, Measurements: []v1alpha1.Measurement{{ Phase: status, StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))), @@ -439,7 +446,7 @@ func TestAssessRunStatus(t *testing.T) { }, }, } - status, message := c.assessRunStatus(run, run.Spec.Metrics) + status, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{}) assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status) assert.Equal(t, "", message) } @@ -458,7 +465,7 @@ func TestAssessRunStatus(t *testing.T) { }, }, } - status, message := c.assessRunStatus(run, run.Spec.Metrics) + status, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{}) assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status) assert.Equal(t, "", message) } @@ -512,7 +519,7 @@ func TestAssessRunStatusUpdateResult(t *testing.T) { }, }, } - status, message := c.assessRunStatus(run, run.Spec.Metrics) + status, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{}) assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status) assert.Equal(t, "", message) assert.Equal(t, v1alpha1.AnalysisPhaseFailed, run.Status.MetricResults[1].Phase) @@ -885,6 +892,7 @@ func TestReconcileAnalysisRunInitial(t *testing.T) { }, } f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) + f.provider.On("GetMetadata", mock.Anything, mock.Anything).Return(map[string]string{}, nil) { newRun := c.reconcileAnalysisRun(run) assert.Equal(t, v1alpha1.AnalysisPhaseRunning, newRun.Status.MetricResults[0].Phase) @@ -941,7 +949,7 @@ func TestReconcileAnalysisRunTerminateSiblingAfterFail(t *testing.T) { f.provider.On("Terminate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) for _, status := range []v1alpha1.AnalysisPhase{v1alpha1.AnalysisPhaseFailed, v1alpha1.AnalysisPhaseInconclusive, v1alpha1.AnalysisPhaseError} { - run := newTerminatingRun(status) + run := newTerminatingRun(status, false) newRun := c.reconcileAnalysisRun(run) assert.Equal(t, status, newRun.Status.Phase) @@ -950,8 +958,8 @@ func TestReconcileAnalysisRunTerminateSiblingAfterFail(t *testing.T) { // ensure the in-progress measurement is now terminated assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.MetricResults[0].Measurements[0].Phase) assert.NotNil(t, newRun.Status.MetricResults[0].Measurements[0].FinishedAt) - assert.Equal(t, "metric terminated", newRun.Status.MetricResults[0].Message) - assert.Equal(t, "metric terminated", newRun.Status.MetricResults[0].Measurements[0].Message) + assert.Equal(t, "Metric Terminated", newRun.Status.MetricResults[0].Message) + assert.Equal(t, "Metric Terminated", newRun.Status.MetricResults[0].Measurements[0].Message) } } @@ -1048,7 +1056,8 @@ func TestTrimMeasurementHistory(t *testing.T) { { run := newRun() - c.garbageCollectMeasurements(run, 2) + err := c.garbageCollectMeasurements(run, map[string]*v1alpha1.MeasurementRetention{}, 2) + assert.Nil(t, err) assert.Len(t, run.Status.MetricResults[0].Measurements, 1) assert.Equal(t, "1", run.Status.MetricResults[0].Measurements[0].Value) assert.Len(t, run.Status.MetricResults[1].Measurements, 2) @@ -1057,23 +1066,85 @@ func TestTrimMeasurementHistory(t *testing.T) { } { run := newRun() - c.garbageCollectMeasurements(run, 1) + err := c.garbageCollectMeasurements(run, 
map[string]*v1alpha1.MeasurementRetention{}, 1) + assert.Nil(t, err) assert.Len(t, run.Status.MetricResults[0].Measurements, 1) assert.Equal(t, "1", run.Status.MetricResults[0].Measurements[0].Value) assert.Len(t, run.Status.MetricResults[1].Measurements, 1) assert.Equal(t, "3", run.Status.MetricResults[1].Measurements[0].Value) } + { + run := newRun() + var measurementRetentionMetricsMap = map[string]*v1alpha1.MeasurementRetention{} + measurementRetentionMetricsMap["metric2"] = &v1alpha1.MeasurementRetention{MetricName: "*", Limit: 2} + err := c.garbageCollectMeasurements(run, measurementRetentionMetricsMap, 1) + assert.Nil(t, err) + assert.Len(t, run.Status.MetricResults[0].Measurements, 1) + assert.Equal(t, "1", run.Status.MetricResults[0].Measurements[0].Value) + assert.Len(t, run.Status.MetricResults[1].Measurements, 2) + assert.Equal(t, "2", run.Status.MetricResults[1].Measurements[0].Value) + assert.Equal(t, "3", run.Status.MetricResults[1].Measurements[1].Value) + } + { + run := newRun() + var measurementRetentionMetricsMap = map[string]*v1alpha1.MeasurementRetention{} + measurementRetentionMetricsMap["metric2"] = &v1alpha1.MeasurementRetention{MetricName: "metric2", Limit: 2} + err := c.garbageCollectMeasurements(run, measurementRetentionMetricsMap, 1) + assert.Nil(t, err) + assert.Len(t, run.Status.MetricResults[0].Measurements, 1) + assert.Equal(t, "1", run.Status.MetricResults[0].Measurements[0].Value) + assert.Len(t, run.Status.MetricResults[1].Measurements, 2) + assert.Equal(t, "2", run.Status.MetricResults[1].Measurements[0].Value) + assert.Equal(t, "3", run.Status.MetricResults[1].Measurements[1].Value) + } } func TestResolveMetricArgsUnableToSubstitute(t *testing.T) { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) + // Dry-Run or not if the args resolution fails then we should fail the analysis + for _, isDryRun := range [3]bool{false, true, false} { + var dryRunArray []v1alpha1.DryRun + if isDryRun { + dryRunArray = append(dryRunArray, v1alpha1.DryRun{MetricName: "*"}) + } + run := &v1alpha1.AnalysisRun{ + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: []v1alpha1.Metric{{ + Name: "rate", + SuccessCondition: "{{args.does-not-exist}}", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Query: "{{args.metric-name}}", + }, + }, + }}, + DryRun: dryRunArray, + }, + } + newRun := c.reconcileAnalysisRun(run) + assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase) + assert.Equal(t, "Unable to resolve metric arguments: failed to resolve {{args.metric-name}}", newRun.Status.Message) + } +} + +func TestGetMetadataIsCalled(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + arg := "success-rate" run := &v1alpha1.AnalysisRun{ Spec: v1alpha1.AnalysisRunSpec{ + Args: []v1alpha1.Argument{ + { + Name: "metric-name", + Value: &arg, + }, + }, Metrics: []v1alpha1.Metric{{ Name: "rate", - SuccessCondition: "{{args.does-not-exist}}", + SuccessCondition: "result[0] > 0", Provider: v1alpha1.MetricProvider{ Prometheus: &v1alpha1.PrometheusMetric{ Query: "{{args.metric-name}}", @@ -1082,9 +1153,12 @@ func TestResolveMetricArgsUnableToSubstitute(t *testing.T) { }}, }, } + metricMetadata := map[string]string{"foo": "bar"} + f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) + f.provider.On("GetMetadata", mock.Anything, mock.Anything).Return(metricMetadata, nil) newRun := c.reconcileAnalysisRun(run) - 
assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase) - assert.Equal(t, "unable to resolve metric arguments: failed to resolve {{args.metric-name}}", newRun.Status.Message) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) + assert.Equal(t, metricMetadata, newRun.Status.MetricResults[0].Metadata) } // TestSecretContentReferenceSuccess verifies that secret arguments are properly resolved @@ -1131,6 +1205,7 @@ func TestSecretContentReferenceSuccess(t *testing.T) { }, } f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) + f.provider.On("GetMetadata", mock.Anything, mock.Anything).Return(map[string]string{}, nil) newRun := c.reconcileAnalysisRun(run) assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) } @@ -1195,6 +1270,7 @@ func TestSecretContentReferenceProviderError(t *testing.T) { measurement.Message = error.Error() f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(measurement) + f.provider.On("GetMetadata", mock.Anything, mock.Anything).Return(map[string]string{}, nil) newRun := c.reconcileAnalysisRun(run) logMessage := buf.String() @@ -1256,6 +1332,7 @@ func TestSecretContentReferenceAndMultipleArgResolutionSuccess(t *testing.T) { } f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) + f.provider.On("GetMetadata", mock.Anything, mock.Anything).Return(map[string]string{}, nil) newRun := c.reconcileAnalysisRun(run) assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) } @@ -1396,78 +1473,281 @@ func TestAssessMetricFailureInconclusiveOrError(t *testing.T) { assert.Equal(t, phase, assessMetricStatus(metric, result, true)) } -func TestAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T) { +func StartAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T, isDryRun bool) (v1alpha1.AnalysisPhase, string, *v1alpha1.RunSummary) { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) - run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed) + run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun) run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseSuccessful - status, message := c.assessRunStatus(run, run.Spec.Metrics) + status, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{"run-forever": isDryRun, "failed-metric": isDryRun}) + return status, message, run.Status.DryRunSummary +} + +func TestAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T) { + status, message, dryRunSummary := StartAssessRunStatusErrorMessageAnalysisPhaseFail(t, false) assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status) - assert.Equal(t, "metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0)", message) + assert.Equal(t, "Metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0)", message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 0, + Successful: 0, + Failed: 0, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, dryRunSummary) } -// TestAssessRunStatusErrorMessageFromProvider verifies that the message returned by assessRunStatus -// includes the error message from the provider -func TestAssessRunStatusErrorMessageFromProvider(t *testing.T) { +func TestAssessRunStatusErrorMessageAnalysisPhaseFailInDryRunMode(t *testing.T) { + status, message, dryRunSummary := StartAssessRunStatusErrorMessageAnalysisPhaseFail(t, true) + 
assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status) + assert.Equal(t, "", message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 2, + Successful: 1, + Failed: 1, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, dryRunSummary) +} + +func StartAssessRunStatusErrorMessageFromProvider(t *testing.T, providerMessage string, isDryRun bool) (v1alpha1.AnalysisPhase, string, *v1alpha1.RunSummary) { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) - run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed) + run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun) run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseSuccessful // All metrics must complete, or assessRunStatus will not return message - - providerMessage := "Provider error" run.Status.MetricResults[1].Message = providerMessage + status, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{"run-forever": isDryRun, "failed-metric": isDryRun}) + return status, message, run.Status.DryRunSummary +} - status, message := c.assessRunStatus(run, run.Spec.Metrics) - expectedMessage := fmt.Sprintf("metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0): \"Error Message: %s\"", providerMessage) +// TestAssessRunStatusErrorMessageFromProvider verifies that the message returned by assessRunStatus +// includes the error message from the provider +func TestAssessRunStatusErrorMessageFromProvider(t *testing.T) { + providerMessage := "Provider Error" + status, message, dryRunSummary := StartAssessRunStatusErrorMessageFromProvider(t, providerMessage, false) + expectedMessage := fmt.Sprintf("Metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0): \"Error Message: %s\"", providerMessage) assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status) assert.Equal(t, expectedMessage, message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 0, + Successful: 0, + Failed: 0, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, dryRunSummary) +} + +func TestAssessRunStatusErrorMessageFromProviderInDryRunMode(t *testing.T) { + providerMessage := "Provider Error" + status, message, dryRunSummary := StartAssessRunStatusErrorMessageFromProvider(t, providerMessage, true) + assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status) + assert.Equal(t, "", message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 2, + Successful: 1, + Failed: 1, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, dryRunSummary) +} + +func StartAssessRunStatusMultipleFailures(t *testing.T, isDryRun bool) (v1alpha1.AnalysisPhase, string, *v1alpha1.RunSummary) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun) + run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseFailed + run.Status.MetricResults[0].Failed = 1 + status, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{"run-forever": isDryRun, "failed-metric": isDryRun}) + return status, message, run.Status.DryRunSummary } // TestAssessRunStatusMultipleFailures verifies that if there are multiple failed metrics, assessRunStatus returns the message // from the first failed metric func TestAssessRunStatusMultipleFailures(t *testing.T) { + status, message, dryRunSummary := StartAssessRunStatusMultipleFailures(t, false) + assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status) + assert.Equal(t, "Metric \"run-forever\" 
assessed Failed due to failed (1) > failureLimit (0)", message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 0, + Successful: 0, + Failed: 0, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, dryRunSummary) +} + +func TestAssessRunStatusMultipleFailuresInDryRunMode(t *testing.T) { + status, message, dryRunSummary := StartAssessRunStatusMultipleFailures(t, true) + assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status) + assert.Equal(t, "", message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 2, + Successful: 0, + Failed: 2, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, dryRunSummary) +} + +func StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T, isDryRun bool) *v1alpha1.AnalysisRun { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) - run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed) + run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun) run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseFailed run.Status.MetricResults[0].Failed = 1 - status, message := c.assessRunStatus(run, run.Spec.Metrics) - assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status) - assert.Equal(t, "metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", message) + f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseFailed), nil) + + return c.reconcileAnalysisRun(run) } // TestAssessRunStatusWorstMessageInReconcileAnalysisRun verifies that the worstMessage returned by assessRunStatus is set as the // status of the AnalysisRun returned by reconcileAnalysisRun func TestAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T) { + newRun := StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t, false) + assert.Equal(t, v1alpha1.AnalysisPhaseFailed, newRun.Status.Phase) + assert.Equal(t, "Metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", newRun.Status.Message) +} + +func TestAssessRunStatusWorstMessageInReconcileAnalysisRunInDryRunMode(t *testing.T) { + newRun := StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t, true) + assert.Equal(t, v1alpha1.AnalysisPhaseRunning, newRun.Status.Phase) + assert.Equal(t, "", newRun.Status.Message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 2, + Successful: 0, + Failed: 2, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, newRun.Status.DryRunSummary) + assert.Equal(t, "Metric assessed Failed due to failed (1) > failureLimit (0)", newRun.Status.MetricResults[0].Message) + assert.Equal(t, "Metric assessed Failed due to failed (1) > failureLimit (0)", newRun.Status.MetricResults[1].Message) +} + +func StartTerminatingAnalysisRun(t *testing.T, isDryRun bool) *v1alpha1.AnalysisRun { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) - run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed) - run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseFailed - run.Status.MetricResults[0].Failed = 1 + f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseError), nil) - f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseFailed), nil) + now := metav1.Now() + var dryRunArray []v1alpha1.DryRun + if isDryRun { + dryRunArray = append(dryRunArray, v1alpha1.DryRun{MetricName: "success-rate"}) + } + run := &v1alpha1.AnalysisRun{ + Spec: 
v1alpha1.AnalysisRunSpec{ + Terminate: true, + Args: []v1alpha1.Argument{ + { + Name: "service", + Value: pointer.StringPtr("rollouts-demo-canary.default.svc.cluster.local"), + }, + }, + Metrics: []v1alpha1.Metric{{ + Name: "success-rate", + InitialDelay: "20s", + Interval: "20s", + SuccessCondition: "result[0] > 0.90", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{}, + }, + }}, + DryRun: dryRunArray, + }, + Status: v1alpha1.AnalysisRunStatus{ + StartedAt: &now, + Phase: v1alpha1.AnalysisPhaseRunning, + }, + } + return c.reconcileAnalysisRun(run) +} + +func TestTerminateAnalysisRun(t *testing.T) { + newRun := StartTerminatingAnalysisRun(t, false) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) + assert.Equal(t, "Run Terminated", newRun.Status.Message) +} +func TestTerminateAnalysisRunInDryRunMode(t *testing.T) { + newRun := StartTerminatingAnalysisRun(t, true) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) + assert.Equal(t, "Run Terminated", newRun.Status.Message) + expectedDryRunSummary := v1alpha1.RunSummary{ + Count: 1, + Successful: 0, + Failed: 0, + Inconclusive: 0, + Error: 0, + } + assert.Equal(t, &expectedDryRunSummary, newRun.Status.DryRunSummary) +} + +func TestInvalidDryRunConfigThrowsError(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + // Mocks terminate to cancel the in-progress measurement + f.provider.On("Terminate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) + + var dryRunArray []v1alpha1.DryRun + dryRunArray = append(dryRunArray, v1alpha1.DryRun{MetricName: "error-rate"}) + now := metav1.Now() + run := &v1alpha1.AnalysisRun{ + Spec: v1alpha1.AnalysisRunSpec{ + Terminate: true, + Args: []v1alpha1.Argument{ + { + Name: "service", + Value: pointer.StringPtr("rollouts-demo-canary.default.svc.cluster.local"), + }, + }, + Metrics: []v1alpha1.Metric{{ + Name: "success-rate", + InitialDelay: "20s", + Interval: "20s", + SuccessCondition: "result[0] > 0.90", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{}, + }, + }}, + DryRun: dryRunArray, + }, + Status: v1alpha1.AnalysisRunStatus{ + StartedAt: &now, + Phase: v1alpha1.AnalysisPhaseRunning, + }, + } newRun := c.reconcileAnalysisRun(run) - assert.Equal(t, v1alpha1.AnalysisPhaseFailed, newRun.Status.Phase) - assert.Equal(t, "metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", newRun.Status.Message) + assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase) + assert.Equal(t, "Analysis spec invalid: dryRun[0]: Rule didn't match any metric name(s)", newRun.Status.Message) } -func TestTerminateAnalysisRun(t *testing.T) { +func TestInvalidMeasurementsRetentionConfigThrowsError(t *testing.T) { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) - f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseError), nil) + // Mocks terminate to cancel the in-progress measurement + f.provider.On("Terminate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil) + var measurementsRetentionArray []v1alpha1.MeasurementRetention + measurementsRetentionArray = append(measurementsRetentionArray, v1alpha1.MeasurementRetention{MetricName: "error-rate"}) now := metav1.Now() run := &v1alpha1.AnalysisRun{ Spec: v1alpha1.AnalysisRunSpec{ @@ -1487,6 
+1767,7 @@ func TestTerminateAnalysisRun(t *testing.T) { Web: &v1alpha1.WebMetric{}, }, }}, + MeasurementRetention: measurementsRetentionArray, }, Status: v1alpha1.AnalysisRunStatus{ StartedAt: &now, @@ -1494,6 +1775,6 @@ func TestTerminateAnalysisRun(t *testing.T) { }, } newRun := c.reconcileAnalysisRun(run) - assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) - assert.Equal(t, "run terminated", newRun.Status.Message) + assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase) + assert.Equal(t, "Analysis spec invalid: measurementRetention[0]: Rule didn't match any metric name(s)", newRun.Status.Message) } diff --git a/analysis/controller.go b/analysis/controller.go index 98a9144d6e..285cd1ff13 100644 --- a/analysis/controller.go +++ b/analysis/controller.go @@ -3,6 +3,8 @@ package analysis import ( "time" + unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" + log "github.com/sirupsen/logrus" batchv1 "k8s.io/api/batch/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,6 +24,7 @@ import ( controllerutil "github.com/argoproj/argo-rollouts/utils/controller" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) // Controller is the controller implementation for Analysis resources @@ -116,7 +119,14 @@ func NewController(cfg ControllerConfig) *Controller { UpdateFunc: func(old, new interface{}) { controller.enqueueAnalysis(new) }, - DeleteFunc: controller.enqueueAnalysis, + DeleteFunc: func(obj interface{}) { + controller.enqueueAnalysis(obj) + if ar := unstructuredutil.ObjectToAnalysisRun(obj); ar != nil { + logCtx := logutil.WithAnalysisRun(ar) + logCtx.Info("analysis run deleted") + controller.metricsServer.Remove(ar.Namespace, ar.Name, logutil.AnalysisRunKey) + } + }, }) return controller } @@ -136,7 +146,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { } func (c *Controller) syncHandler(key string) error { - startTime := time.Now() + startTime := timeutil.Now() namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err diff --git a/analysis/controller_test.go b/analysis/controller_test.go index 4ef0872c65..4e3f571b8e 100644 --- a/analysis/controller_test.go +++ b/analysis/controller_test.go @@ -6,11 +6,12 @@ import ( "testing" "time" + timeutil "github.com/argoproj/argo-rollouts/utils/time" + "github.com/argoproj/argo-rollouts/utils/queue" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/undefinedlabs/go-mpatch" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -59,11 +60,13 @@ func newFixture(t *testing.T) *fixture { f.objects = []runtime.Object{} f.enqueuedObjects = make(map[string]int) now := time.Now() - patch, err := mpatch.PatchMethod(time.Now, func() time.Time { + timeutil.Now = func() time.Time { return now - }) - assert.NoError(t, err) - f.unfreezeTime = patch.Unpatch + } + f.unfreezeTime = func() error { + timeutil.Now = time.Now + return nil + } return f } diff --git a/cmd/rollouts-controller/main.go b/cmd/rollouts-controller/main.go index ce7c18e7c9..65a1374542 100644 --- a/cmd/rollouts-controller/main.go +++ b/cmd/rollouts-controller/main.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os" + "strings" "time" smiclientset "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/split/clientset/versioned" @@ -36,7 +37,9 @@ import ( const ( // CLIName is the 
name of the CLI - cliName = "argo-rollouts" + cliName = "argo-rollouts" + jsonFormat = "json" + textFormat = "text" ) func newCommand() *cobra.Command { @@ -44,10 +47,13 @@ func newCommand() *cobra.Command { clientConfig clientcmd.ClientConfig rolloutResyncPeriod int64 logLevel string + logFormat string klogLevel int metricsPort int healthzPort int instanceID string + qps float32 + burst int rolloutThreads int experimentThreads int analysisThreads int @@ -57,6 +63,7 @@ func newCommand() *cobra.Command { trafficSplitVersion string ambassadorVersion string ingressVersion string + appmeshCRDVersion string albIngressClasses []string nginxIngressClasses []string awsVerifyTargetGroup bool @@ -73,10 +80,9 @@ func newCommand() *cobra.Command { return nil } setLogLevel(logLevel) - formatter := &log.TextFormatter{ - FullTimestamp: true, + if logFormat != "" { + log.SetFormatter(createFormatter(logFormat)) } - log.SetFormatter(formatter) logutil.SetKLogLevel(klogLevel) log.WithField("version", version.GetVersion()).Info("Argo Rollouts starting") @@ -87,9 +93,12 @@ func newCommand() *cobra.Command { defaults.SetIstioAPIVersion(istioVersion) defaults.SetAmbassadorAPIVersion(ambassadorVersion) defaults.SetSMIAPIVersion(trafficSplitVersion) + defaults.SetAppMeshCRDVersion(appmeshCRDVersion) config, err := clientConfig.ClientConfig() checkError(err) + config.QPS = qps + config.Burst = burst namespace := metav1.NamespaceAll configNS, _, err := clientConfig.Namespace() checkError(err) @@ -98,6 +107,9 @@ func newCommand() *cobra.Command { log.Infof("Using namespace %s", namespace) } + k8sRequestProvider := &metrics.K8sRequestsCountProvider{} + kubeclientmetrics.AddMetricsTransportWrapper(config, k8sRequestProvider.IncKubernetesRequest) + kubeClient, err := kubernetes.NewForConfig(config) checkError(err) argoprojClient, err := clientset.NewForConfig(config) @@ -145,8 +157,6 @@ func newCommand() *cobra.Command { configMapInformer := controllerNamespaceInformerFactory.Core().V1().ConfigMaps() secretInformer := controllerNamespaceInformerFactory.Core().V1().Secrets() - k8sRequestProvider := &metrics.K8sRequestsCountProvider{} - kubeclientmetrics.AddMetricsTransportWrapper(config, k8sRequestProvider.IncKubernetesRequest) mode, err := ingressutil.DetermineIngressMode(ingressVersion, kubeClient.DiscoveryClient) checkError(err) ingressWrapper, err := ingressutil.NewIngressWrapper(mode, kubeClient, kubeInformerFactory) @@ -209,10 +219,13 @@ func newCommand() *cobra.Command { command.Flags().Int64Var(&rolloutResyncPeriod, "rollout-resync", controller.DefaultRolloutResyncPeriod, "Time period in seconds for rollouts resync.") command.Flags().BoolVar(&namespaced, "namespaced", false, "runs controller in namespaced mode (does not require cluster RBAC)") command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error") + command.Flags().StringVar(&logFormat, "logformat", "", "Set the logging format. 
One of: text|json") command.Flags().IntVar(&klogLevel, "kloglevel", 0, "Set the klog logging level") command.Flags().IntVar(&metricsPort, "metricsport", controller.DefaultMetricsPort, "Set the port the metrics endpoint should be exposed over") command.Flags().IntVar(&healthzPort, "healthzPort", controller.DefaultHealthzPort, "Set the port the healthz endpoint should be exposed over") command.Flags().StringVar(&instanceID, "instance-id", "", "Indicates which argo rollout objects the controller should operate on") + command.Flags().Float32Var(&qps, "qps", defaults.DefaultQPS, "Maximum QPS (queries per second) to the K8s API server") + command.Flags().IntVar(&burst, "burst", defaults.DefaultBurst, "Maximum burst for throttle.") command.Flags().IntVar(&rolloutThreads, "rollout-threads", controller.DefaultRolloutThreads, "Set the number of worker threads for the Rollout controller") command.Flags().IntVar(&experimentThreads, "experiment-threads", controller.DefaultExperimentThreads, "Set the number of worker threads for the Experiment controller") command.Flags().IntVar(&analysisThreads, "analysis-threads", controller.DefaultAnalysisThreads, "Set the number of worker threads for the Experiment controller") @@ -222,6 +235,7 @@ func newCommand() *cobra.Command { command.Flags().StringVar(&ambassadorVersion, "ambassador-api-version", defaults.DefaultAmbassadorVersion, "Set the Ambassador apiVersion that controller should look when manipulating Ambassador Mappings.") command.Flags().StringVar(&trafficSplitVersion, "traffic-split-api-version", defaults.DefaultSMITrafficSplitVersion, "Set the default TrafficSplit apiVersion that controller uses when creating TrafficSplits.") command.Flags().StringVar(&ingressVersion, "ingress-api-version", "", "Set the Ingress apiVersion that the controller should use.") + command.Flags().StringVar(&appmeshCRDVersion, "appmesh-crd-version", defaults.DefaultAppMeshCRDVersion, "Set the default AppMesh CRD Version that controller uses when manipulating resources.") command.Flags().StringArrayVar(&albIngressClasses, "alb-ingress-classes", defaultALBIngressClass, "Defines all the ingress class annotations that the alb ingress controller operates on. Defaults to alb") command.Flags().StringArrayVar(&nginxIngressClasses, "nginx-ingress-classes", defaultNGINXIngressClass, "Defines all the ingress class annotations that the nginx ingress controller operates on. Defaults to nginx") command.Flags().BoolVar(&awsVerifyTargetGroup, "alb-verify-weight", false, "Verify ALB target group weights before progressing through steps (requires AWS privileges)") @@ -229,7 +243,6 @@ func newCommand() *cobra.Command { command.Flags().BoolVar(&awsVerifyTargetGroup, "aws-verify-target-group", false, "Verify ALB target group before progressing through steps (requires AWS privileges)") command.Flags().BoolVar(&printVersion, "version", false, "Print version") command.Flags().BoolVar(&electOpts.LeaderElect, "leader-elect", controller.DefaultLeaderElect, "If true, controller will perform leader election between instances to ensure no more than one instance of controller operates at a time") - command.Flags().StringVar(&electOpts.LeaderElectionNamespace, "leader-election-namespace", controller.DefaultLeaderElectionNamespace, "Namespace used to perform leader election. 
Only used if leader election is enabled") command.Flags().DurationVar(&electOpts.LeaderElectionLeaseDuration, "leader-election-lease-duration", controller.DefaultLeaderElectionLeaseDuration, "The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.") command.Flags().DurationVar(&electOpts.LeaderElectionRenewDeadline, "leader-election-renew-deadline", controller.DefaultLeaderElectionRenewDeadline, "The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.") command.Flags().DurationVar(&electOpts.LeaderElectionRetryPeriod, "leader-election-retry-period", controller.DefaultLeaderElectionRetryPeriod, "The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.") @@ -262,6 +275,25 @@ func setLogLevel(logLevel string) { log.SetLevel(level) } +func createFormatter(logFormat string) log.Formatter { + var formatType log.Formatter + switch strings.ToLower(logFormat) { + case jsonFormat: + formatType = &log.JSONFormatter{} + case textFormat: + formatType = &log.TextFormatter{ + FullTimestamp: true, + } + default: + log.Infof("Unknown format: %s. Using text logformat", logFormat) + formatType = &log.TextFormatter{ + FullTimestamp: true, + } + } + + return formatType +} + func checkError(err error) { if err != nil { log.Fatal(err) diff --git a/controller/controller.go b/controller/controller.go index fb4b2b068f..f12b268fbf 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -76,9 +76,6 @@ const ( // DefaultLeaderElect is the default true leader election should be enabled DefaultLeaderElect = true - // DefaultLeaderElectionNamespace is the default namespace used to perform leader election. 
Only used if leader election is enabled - DefaultLeaderElectionNamespace = "kube-system" - // DefaultLeaderElectionLeaseDuration is the default time in seconds that non-leader candidates will wait to force acquire leadership DefaultLeaderElectionLeaseDuration = 15 * time.Second @@ -103,7 +100,7 @@ type LeaderElectionOptions struct { func NewLeaderElectionOptions() *LeaderElectionOptions { return &LeaderElectionOptions{ LeaderElect: DefaultLeaderElect, - LeaderElectionNamespace: DefaultLeaderElectionNamespace, + LeaderElectionNamespace: defaults.Namespace(), LeaderElectionLeaseDuration: DefaultLeaderElectionLeaseDuration, LeaderElectionRenewDeadline: DefaultLeaderElectionRenewDeadline, LeaderElectionRetryPeriod: DefaultLeaderElectionRetryPeriod, @@ -202,7 +199,7 @@ func NewManager( refResolver := rollout.NewInformerBasedWorkloadRefResolver(namespace, dynamicclientset, discoveryClient, argoprojclientset, rolloutsInformer.Informer()) apiFactory := notificationapi.NewFactory(record.NewAPIFactorySettings(), defaults.Namespace(), secretInformer.Informer(), configMapInformer.Informer()) - recorder := record.NewEventRecorder(kubeclientset, metrics.MetricRolloutEventsTotal, apiFactory) + recorder := record.NewEventRecorder(kubeclientset, metrics.MetricRolloutEventsTotal, metrics.MetricNotificationFailedTotal, metrics.MetricNotificationSuccessTotal, metrics.MetricNotificationSend, apiFactory) notificationsController := notificationcontroller.NewController(dynamicclientset.Resource(v1alpha1.RolloutGVR), rolloutsInformer.Informer(), apiFactory, notificationcontroller.WithToUnstructured(func(obj metav1.Object) (*unstructured.Unstructured, error) { data, err := json.Marshal(obj) diff --git a/controller/metrics/analysis.go b/controller/metrics/analysis.go index d8634b92c6..12480b76c8 100644 --- a/controller/metrics/analysis.go +++ b/controller/metrics/analysis.go @@ -1,6 +1,8 @@ package metrics import ( + "fmt" + "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/labels" @@ -8,7 +10,7 @@ import ( "github.com/argoproj/argo-rollouts/metricproviders" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" rolloutlister "github.com/argoproj/argo-rollouts/pkg/client/listers/rollouts/v1alpha1" - "github.com/argoproj/argo-rollouts/utils/analysis" + analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" ) type analysisRunCollector struct { @@ -80,20 +82,21 @@ func collectAnalysisRuns(ch chan<- prometheus.Metric, ar *v1alpha1.AnalysisRun) addGauge(MetricAnalysisRunPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseRunning), string(v1alpha1.AnalysisPhaseRunning)) addGauge(MetricAnalysisRunPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseInconclusive), string(v1alpha1.AnalysisPhaseInconclusive)) + dryRunMetricsMap, _ := analysisutil.GetDryRunMetrics(ar.Spec.DryRun, ar.Spec.Metrics) for _, metric := range ar.Spec.Metrics { metricType := metricproviders.Type(metric) - metricResult := analysis.GetResult(ar, metric.Name) + metricResult := analysisutil.GetResult(ar, metric.Name) addGauge(MetricAnalysisRunMetricType, 1, metric.Name, metricType) calculatedPhase := v1alpha1.AnalysisPhase("") if metricResult != nil { calculatedPhase = metricResult.Phase } - addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhasePending || calculatedPhase == ""), metric.Name, metricType, string(v1alpha1.AnalysisPhasePending)) - addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == 
v1alpha1.AnalysisPhaseError), metric.Name, metricType, string(v1alpha1.AnalysisPhaseError)) - addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseFailed), metric.Name, metricType, string(v1alpha1.AnalysisPhaseFailed)) - addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseSuccessful), metric.Name, metricType, string(v1alpha1.AnalysisPhaseSuccessful)) - addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseRunning), metric.Name, metricType, string(v1alpha1.AnalysisPhaseRunning)) - addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseInconclusive), metric.Name, metricType, string(v1alpha1.AnalysisPhaseInconclusive)) + addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhasePending || calculatedPhase == ""), metric.Name, metricType, fmt.Sprint(dryRunMetricsMap[metric.Name]), string(v1alpha1.AnalysisPhasePending)) + addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseError), metric.Name, metricType, fmt.Sprint(dryRunMetricsMap[metric.Name]), string(v1alpha1.AnalysisPhaseError)) + addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseFailed), metric.Name, metricType, fmt.Sprint(dryRunMetricsMap[metric.Name]), string(v1alpha1.AnalysisPhaseFailed)) + addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseSuccessful), metric.Name, metricType, fmt.Sprint(dryRunMetricsMap[metric.Name]), string(v1alpha1.AnalysisPhaseSuccessful)) + addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseRunning), metric.Name, metricType, fmt.Sprint(dryRunMetricsMap[metric.Name]), string(v1alpha1.AnalysisPhaseRunning)) + addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseInconclusive), metric.Name, metricType, fmt.Sprint(dryRunMetricsMap[metric.Name]), string(v1alpha1.AnalysisPhaseInconclusive)) } } diff --git a/controller/metrics/analysis_test.go b/controller/metrics/analysis_test.go index 4c8a6f70fd..61ca87a80f 100644 --- a/controller/metrics/analysis_test.go +++ b/controller/metrics/analysis_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/ghodss/yaml" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -54,12 +56,19 @@ metadata: namespace: jesse-test spec: metrics: - - name: webmetric + - name: web-metric-1 provider: web: jsonPath: . url: https://www.google.com successCondition: "true" + - name: web-metric-2 + dryRun: true + provider: + web: + jsonPath: . + url: https://www.msn.com + successCondition: "false" ` fakeClusterAnalysisTemplate = ` @@ -67,15 +76,22 @@ apiVersion: argoproj.io/v1alpha1 kind: ClusterAnalysisTemplate metadata: creationTimestamp: "2020-03-16T20:01:13Z" - name: http-benchmark-test + name: http-benchmark-cluster-test spec: metrics: - - name: webmetric + - name: web-metric-1 provider: web: jsonPath: . url: https://www.google.com successCondition: "true" + - name: web-metric-2 + dryRun: true + provider: + web: + jsonPath: . + url: https://www.msn.com + successCondition: "false" ` ) const expectedAnalysisRunResponse = `# HELP analysis_run_info Information about analysis run. 
@@ -83,12 +99,12 @@ const expectedAnalysisRunResponse = `# HELP analysis_run_info Information about analysis_run_info{name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Error"} 1 # HELP analysis_run_metric_phase Information on the duration of a specific metric in the Analysis Run # TYPE analysis_run_metric_phase gauge -analysis_run_metric_phase{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Error",type="Web"} 1 -analysis_run_metric_phase{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Failed",type="Web"} 0 -analysis_run_metric_phase{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Inconclusive",type="Web"} 0 -analysis_run_metric_phase{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Pending",type="Web"} 0 -analysis_run_metric_phase{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Running",type="Web"} 0 -analysis_run_metric_phase{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Successful",type="Web"} 0 +analysis_run_metric_phase{dry_run="false",metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Error",type="Web"} 1 +analysis_run_metric_phase{dry_run="false",metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Failed",type="Web"} 0 +analysis_run_metric_phase{dry_run="false",metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Inconclusive",type="Web"} 0 +analysis_run_metric_phase{dry_run="false",metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Pending",type="Web"} 0 +analysis_run_metric_phase{dry_run="false",metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Running",type="Web"} 0 +analysis_run_metric_phase{dry_run="false",metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",phase="Successful",type="Web"} 0 # HELP analysis_run_metric_type Information on the type of a specific metric in the Analysis Runs # TYPE analysis_run_metric_type gauge analysis_run_metric_type{metric="webmetric",name="http-benchmark-test-tr8rn",namespace="jesse-test",type="Web"} 1 @@ -148,7 +164,7 @@ func testAnalysisRunDescribe(t *testing.T, fakeAnalysisRun string, expectedRespo registry.MustRegister(NewAnalysisRunCollector(serverCfg.AnalysisRunLister, serverCfg.AnalysisTemplateLister, serverCfg.ClusterAnalysisTemplateLister)) mux := http.NewServeMux() mux.Handle(MetricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - testHttpResponse(t, mux, expectedResponse) + testHttpResponse(t, mux, expectedResponse, assert.Contains) } func TestIncAnalysisRunReconcile(t *testing.T) { @@ -170,17 +186,20 @@ analysis_run_reconcile_count{name="ar-test",namespace="ar-namespace"} 1` }, } metricsServ.IncAnalysisRunReconcile(ar, time.Millisecond) - testHttpResponse(t, metricsServ.Handler, expectedResponse) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) } func TestAnalysisTemplateDescribe(t *testing.T) { - expectedResponse := `# TYPE analysis_template_info gauge -analysis_template_info{name="http-benchmark-test",namespace=""} 1 + expectedResponse := `# HELP analysis_template_info Information about analysis templates. 
+# TYPE analysis_template_info gauge +analysis_template_info{name="http-benchmark-cluster-test",namespace=""} 1 analysis_template_info{name="http-benchmark-test",namespace="jesse-test"} 1 # HELP analysis_template_metric_info Information on metrics in analysis templates. # TYPE analysis_template_metric_info gauge -analysis_template_metric_info{metric="webmetric",name="http-benchmark-test",namespace="",type="Web"} 1 -analysis_template_metric_info{metric="webmetric",name="http-benchmark-test",namespace="jesse-test",type="Web"} 1 +analysis_template_metric_info{metric="web-metric-1",name="http-benchmark-cluster-test",namespace="",type="Web"} 1 +analysis_template_metric_info{metric="web-metric-1",name="http-benchmark-test",namespace="jesse-test",type="Web"} 1 +analysis_template_metric_info{metric="web-metric-2",name="http-benchmark-cluster-test",namespace="",type="Web"} 1 +analysis_template_metric_info{metric="web-metric-2",name="http-benchmark-test",namespace="jesse-test",type="Web"} 1 ` registry := prometheus.NewRegistry() at := newFakeAnalysisTemplate(fakeAnalysisTemplate) @@ -189,5 +208,5 @@ analysis_template_metric_info{metric="webmetric",name="http-benchmark-test",name registry.MustRegister(NewAnalysisRunCollector(serverCfg.AnalysisRunLister, serverCfg.AnalysisTemplateLister, serverCfg.ClusterAnalysisTemplateLister)) mux := http.NewServeMux() mux.Handle(MetricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - testHttpResponse(t, mux, expectedResponse) + testHttpResponse(t, mux, expectedResponse, assert.Contains) } diff --git a/controller/metrics/client.go b/controller/metrics/client.go index f2e2624d20..01367745ef 100644 --- a/controller/metrics/client.go +++ b/controller/metrics/client.go @@ -35,7 +35,8 @@ func (m *K8sRequestsCountProvider) IncKubernetesRequest(resourceInfo kubeclientm name = "Unknown" kind = "Unknown" } - - m.k8sRequestsCount.WithLabelValues(kind, namespace, name, string(resourceInfo.Verb), statusCode).Inc() + if m.k8sRequestsCount != nil { + m.k8sRequestsCount.WithLabelValues(kind, namespace, name, string(resourceInfo.Verb), statusCode).Inc() + } return nil } diff --git a/controller/metrics/client_test.go b/controller/metrics/client_test.go index 9aedd570d1..0ebf470a68 100644 --- a/controller/metrics/client_test.go +++ b/controller/metrics/client_test.go @@ -3,6 +3,8 @@ package metrics import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/argoproj/pkg/kubeclientmetrics" ) @@ -24,5 +26,5 @@ func TestIncKubernetesRequest(t *testing.T) { Verb: kubeclientmetrics.Unknown, StatusCode: 200, }) - testHttpResponse(t, metricsServ.Handler, expectedKubernetesRequest) + testHttpResponse(t, metricsServ.Handler, expectedKubernetesRequest, assert.Contains) } diff --git a/controller/metrics/experiment_test.go b/controller/metrics/experiment_test.go index fa4482bf87..da050e9377 100644 --- a/controller/metrics/experiment_test.go +++ b/controller/metrics/experiment_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/ghodss/yaml" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -133,7 +135,7 @@ func testExperimentDescribe(t *testing.T, fakeExperiment string, expectedRespons registry.MustRegister(NewExperimentCollector(config.ExperimentLister)) mux := http.NewServeMux() mux.Handle(MetricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - testHttpResponse(t, mux, expectedResponse) + testHttpResponse(t, mux, expectedResponse, assert.Contains) } func TestIncExperimentReconcile(t *testing.T) { @@ -156,5 
+158,5 @@ experiment_reconcile_count{name="ex-test",namespace="ex-namespace"} 1` }, } metricsServ.IncExperimentReconcile(ex, time.Millisecond) - testHttpResponse(t, metricsServ.Handler, expectedResponse) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) } diff --git a/controller/metrics/metrics.go b/controller/metrics/metrics.go index 75e73063e8..3c3bbfe246 100644 --- a/controller/metrics/metrics.go +++ b/controller/metrics/metrics.go @@ -4,6 +4,8 @@ import ( "net/http" "time" + "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" registry "k8s.io/component-base/metrics/legacyregistry" @@ -26,8 +28,10 @@ type MetricsServer struct { reconcileAnalysisRunHistogram *prometheus.HistogramVec errorAnalysisRunCounter *prometheus.CounterVec - - k8sRequestsCounter *K8sRequestsCountProvider + successNotificationCounter *prometheus.CounterVec + errorNotificationCounter *prometheus.CounterVec + sendNotificationRunHistogram *prometheus.HistogramVec + k8sRequestsCounter *K8sRequestsCountProvider } const ( @@ -75,6 +79,10 @@ func NewMetricsServer(cfg ServerConfig, isPrimary bool) *MetricsServer { reg.MustRegister(MetricExperimentReconcileError) reg.MustRegister(MetricAnalysisRunReconcile) reg.MustRegister(MetricAnalysisRunReconcileError) + reg.MustRegister(MetricNotificationSuccessTotal) + reg.MustRegister(MetricNotificationFailedTotal) + reg.MustRegister(MetricNotificationSend) + reg.MustRegister(MetricVersionGauge) mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{ // contains app controller specific metrics @@ -95,6 +103,9 @@ func NewMetricsServer(cfg ServerConfig, isPrimary bool) *MetricsServer { reconcileAnalysisRunHistogram: MetricAnalysisRunReconcile, errorAnalysisRunCounter: MetricAnalysisRunReconcileError, + successNotificationCounter: MetricNotificationSuccessTotal, + errorNotificationCounter: MetricNotificationFailedTotal, + sendNotificationRunHistogram: MetricNotificationSend, k8sRequestsCounter: cfg.K8SRequestProvider, } @@ -127,6 +138,51 @@ func (m *MetricsServer) IncError(namespace, name string, kind string) { } } +// Remove removes the metrics server from the registry +func (m *MetricsServer) Remove(namespace string, name string, kind string) { + go func(namespace string, name string, kind string) { + // wait for the metrics to be collected, prometheus scrape interval is 60 seconds by default + time.Sleep(defaults.GetMetricCleanupDelaySeconds()) + switch kind { + case log.RolloutKey: + m.reconcileRolloutHistogram.Delete(map[string]string{"namespace": namespace, "name": name}) + m.errorRolloutCounter.Delete(map[string]string{"namespace": namespace, "name": name}) + + m.successNotificationCounter.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + m.errorNotificationCounter.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + m.sendNotificationRunHistogram.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + + MetricRolloutReconcile.Delete(map[string]string{"namespace": namespace, "name": name}) + + MetricRolloutReconcileError.Delete(map[string]string{"namespace": namespace, "name": name}) + + MetricRolloutEventsTotal.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + case log.AnalysisRunKey: + m.reconcileAnalysisRunHistogram.Delete(map[string]string{"namespace": namespace, "name": name}) + 
m.errorAnalysisRunCounter.Delete(map[string]string{"namespace": namespace, "name": name}) + + m.successNotificationCounter.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + m.errorNotificationCounter.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + m.sendNotificationRunHistogram.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + + MetricAnalysisRunReconcile.Delete(map[string]string{"namespace": namespace, "name": name}) + MetricAnalysisRunReconcileError.Delete(map[string]string{"namespace": namespace, "name": name}) + + case log.ExperimentKey: + m.reconcileExperimentHistogram.Delete(map[string]string{"namespace": namespace, "name": name}) + m.errorExperimentCounter.Delete(map[string]string{"namespace": namespace, "name": name}) + + m.successNotificationCounter.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + m.errorNotificationCounter.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + m.sendNotificationRunHistogram.DeletePartialMatch(map[string]string{"namespace": namespace, "name": name}) + + MetricExperimentReconcile.Delete(map[string]string{"namespace": namespace, "name": name}) + MetricExperimentReconcileError.Delete(map[string]string{"namespace": namespace, "name": name}) + } + }(namespace, name, kind) + +} + func boolFloat64(b bool) float64 { if b { return 1 diff --git a/controller/metrics/metrics_test.go b/controller/metrics/metrics_test.go index de4619bd17..22d671cee1 100644 --- a/controller/metrics/metrics_test.go +++ b/controller/metrics/metrics_test.go @@ -7,7 +7,9 @@ import ( "net/http/httptest" "strings" "testing" + "time" + "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" @@ -52,7 +54,7 @@ func newFakeServerConfig(objs ...runtime.Object) ServerConfig { } } -func testHttpResponse(t *testing.T, handler http.Handler, expectedResponse string) { +func testHttpResponse(t *testing.T, handler http.Handler, expectedResponse string, testFunc func(t assert.TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) bool) { t.Helper() req, err := http.NewRequest("GET", "/metrics", nil) assert.NoError(t, err) @@ -62,7 +64,7 @@ func testHttpResponse(t *testing.T, handler http.Handler, expectedResponse strin body := rr.Body.String() log.Println(body) for _, line := range strings.Split(expectedResponse, "\n") { - assert.Contains(t, body, line) + testFunc(t, body, line) } } @@ -77,6 +79,7 @@ func TestIncError(t *testing.T) { analysis_run_reconcile_error{name="name",namespace="ns"} 1 # HELP experiment_reconcile_error Error occurring during the experiment # TYPE experiment_reconcile_error counter +experiment_reconcile_error{name="name",namespace="ns"} 1 # HELP rollout_reconcile_error Error occurring during the rollout # TYPE rollout_reconcile_error counter rollout_reconcile_error{name="name",namespace="ns"} 1` @@ -86,12 +89,42 @@ rollout_reconcile_error{name="name",namespace="ns"} 1` metricsServ.IncError("ns", "name", logutil.AnalysisRunKey) metricsServ.IncError("ns", "name", logutil.ExperimentKey) metricsServ.IncError("ns", "name", logutil.RolloutKey) - testHttpResponse(t, metricsServ.Handler, expectedResponse) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) +} + +func TestVersionInfo(t *testing.T) { + expectedResponse := `# HELP argo_rollouts_controller_info Running Argo-rollouts version +# TYPE 
argo_rollouts_controller_info gauge` + metricsServ := NewMetricsServer(newFakeServerConfig(), true) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) } func TestSecondaryMetricsServer(t *testing.T) { expectedResponse := `` metricsServ := NewMetricsServer(newFakeServerConfig(), false) - testHttpResponse(t, metricsServ.Handler, expectedResponse) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) +} + +func TestRemove(t *testing.T) { + defaults.SetMetricCleanupDelaySeconds(1) + + expectedResponse := `analysis_run_reconcile_error{name="name1",namespace="ns"} 1 +experiment_reconcile_error{name="name1",namespace="ns"} 1 +rollout_reconcile_error{name="name1",namespace="ns"} 1` + + metricsServ := NewMetricsServer(newFakeServerConfig(), true) + + metricsServ.IncError("ns", "name1", logutil.RolloutKey) + metricsServ.IncError("ns", "name1", logutil.AnalysisRunKey) + metricsServ.IncError("ns", "name1", logutil.ExperimentKey) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) + + metricsServ.Remove("ns", "name1", logutil.AnalysisRunKey) + metricsServ.Remove("ns", "name1", logutil.ExperimentKey) + metricsServ.Remove("ns", "name1", logutil.RolloutKey) + + //Sleep for 2x the cleanup delay to allow metrics to be removed + time.Sleep(defaults.GetMetricCleanupDelaySeconds() * 2) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.NotContains) } diff --git a/controller/metrics/prommetrics.go b/controller/metrics/prommetrics.go index ff2017765e..02ed83af92 100644 --- a/controller/metrics/prommetrics.go +++ b/controller/metrics/prommetrics.go @@ -1,6 +1,9 @@ package metrics -import "github.com/prometheus/client_golang/prometheus" +import ( + "github.com/argoproj/argo-rollouts/utils/version" + "github.com/prometheus/client_golang/prometheus" +) // Follow Prometheus naming practices // https://prometheus.io/docs/practices/naming/ @@ -115,7 +118,7 @@ var ( MetricAnalysisRunMetricPhase = prometheus.NewDesc( "analysis_run_metric_phase", "Information on the duration of a specific metric in the Analysis Run", - append(namespaceNameLabels, "metric", "type", "phase"), + append(namespaceNameLabels, "metric", "type", "dry_run", "phase"), nil, ) ) @@ -172,6 +175,34 @@ var ( ) ) +// Notification metrics +var ( + MetricNotificationSuccessTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "notification_send_success", + Help: "Notification send success.", + }, + append(namespaceNameLabels, "type", "reason"), + ) + + MetricNotificationFailedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "notification_send_error", + Help: "Error sending the notification", + }, + append(namespaceNameLabels, "type", "reason"), + ) + + MetricNotificationSend = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "notification_send", + Help: "Notification send performance.", + Buckets: []float64{0.01, 0.15, .25, .5, 1}, + }, + namespaceNameLabels, + ) +) + // K8s Client metrics var ( // Custom events metric @@ -184,3 +215,17 @@ var ( []string{"kind", "namespace", "name", "verb", "status_code"}, ) ) + +// MetricVersionGauge version info +var ( + MetricVersionGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Name: "argo_rollouts_controller_info", + Help: "Running Argo-rollouts version", + ConstLabels: prometheus.Labels{"version": version.GetVersion().Version}, + }, + func() float64 { + return float64(1) + }, + ) +) diff --git a/controller/metrics/rollout_test.go b/controller/metrics/rollout_test.go 
index 1ee7ac9776..b10af8b184 100644 --- a/controller/metrics/rollout_test.go +++ b/controller/metrics/rollout_test.go @@ -163,7 +163,7 @@ func testRolloutDescribe(t *testing.T, fakeRollout string, cond *v1alpha1.Rollou registry.MustRegister(NewRolloutCollector(config.RolloutLister)) mux := http.NewServeMux() mux.Handle(MetricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - testHttpResponse(t, mux, expectedResponse) + testHttpResponse(t, mux, expectedResponse, assert.Contains) } func TestIncRolloutReconcile(t *testing.T) { @@ -188,7 +188,7 @@ rollout_reconcile_count{name="ro-test",namespace="ro-namespace"} 1 }, } metricsServ.IncRolloutReconcile(ro, time.Millisecond) - testHttpResponse(t, metricsServ.Handler, expectedResponse) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) } func TestGetStrategyAndTrafficRouter(t *testing.T) { @@ -271,6 +271,17 @@ func TestGetStrategyAndTrafficRouter(t *testing.T) { expectedStrategy: "canary", expectedTrafficRouter: "Nginx", }, + { + strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{}, + }, + }, + }, + expectedStrategy: "canary", + expectedTrafficRouter: "AppMesh", + }, } for _, test := range tests { @@ -299,5 +310,5 @@ rollout_events_total{name="ro-test-2",namespace="ro-namespace",reason="BazEvent" MetricRolloutEventsTotal.WithLabelValues("ro-namespace", "ro-test-1", corev1.EventTypeNormal, "BarEvent").Inc() MetricRolloutEventsTotal.WithLabelValues("ro-namespace", "ro-test-2", corev1.EventTypeWarning, "BazEvent").Inc() MetricRolloutEventsTotal.WithLabelValues("ro-namespace", "ro-test-2", corev1.EventTypeWarning, "BazEvent").Inc() - testHttpResponse(t, metricsServ.Handler, expectedResponse) + testHttpResponse(t, metricsServ.Handler, expectedResponse, assert.Contains) } diff --git a/controller/metrics/rollouts.go b/controller/metrics/rollouts.go index 1d2de7d5b9..54a0c8af01 100644 --- a/controller/metrics/rollouts.go +++ b/controller/metrics/rollouts.go @@ -111,6 +111,9 @@ func getStrategyAndTrafficRouter(rollout *v1alpha1.Rollout) (string, string) { if rollout.Spec.Strategy.Canary.TrafficRouting.SMI != nil { trafficRouter = "SMI" } + if rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh != nil { + trafficRouter = "AppMesh" + } } } return strategy, trafficRouter diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 029f8a8a5e..d4e4d89b6d 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -56,6 +56,8 @@ cd ~/go/src/github.com/argoproj/argo-rollouts The `make controller` command will build the controller. +* `make install-tools-local` - Runs scripts to install codegen utility CLIs necessary for codegen. + * `make codegen` - Runs the code generator that creates the informers, client, lister, and deepcopies from the types.go and modifies the open-api spec. @@ -91,6 +93,21 @@ Then run the e2e tests: make test-e2e ``` +## Controller architecture + +Argo Rollouts is actually a collection of individual controllers +that handle a specific aspect of Progressive Delivery. 
+ +[![Internal Architecture](architecture-assets/internal-architecture.png)](architecture-assets/internal-architecture.png) + +The controllers are: + +* [Rollout Controller](https://github.com/argoproj/argo-rollouts/blob/master/rollout/controller.go) +* [Service Controller](https://github.com/argoproj/argo-rollouts/blob/master/service/service.go) +* [Ingress Controller](https://github.com/argoproj/argo-rollouts/blob/master/ingress/ingress.go) +* [Experiment Controller](https://github.com/argoproj/argo-rollouts/blob/master/experiments/controller.go) +* [AnalysisRun Controller](https://github.com/argoproj/argo-rollouts/blob/master/analysis/controller.go) + ### Tips 1. You can run the tests using a different kubeconfig by setting the `KUBECONFIG` environment variable: diff --git a/docs/FAQ.md b/docs/FAQ.md index 7d870eddfb..62bf91145c 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -5,7 +5,7 @@ ### Does Argo Rollouts depend on Argo CD or any other Argo project? Argo Rollouts is a standalone project. Even though it works great with Argo CD and other Argo projects, it can be used -on its own for Progressive Delivery scenarios. More specifically, argo Rollouts does **NOT** require that you also have installed Argo CD on the same cluster. +on its own for Progressive Delivery scenarios. More specifically, Argo Rollouts does **NOT** require that you also have installed Argo CD on the same cluster. ### How does Argo Rollouts integrate with Argo CD? Argo CD understands the health of Argo Rollouts resources via Argo CD’s [Lua health check](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/health.md). These Health checks understand when the Argo Rollout objects are Progressing, Suspended, Degraded, or Healthy. Additionally, Argo CD has Lua based Resource Actions that can mutate an Argo Rollouts resource (i.e. unpause a Rollout). @@ -47,7 +47,7 @@ Yes. A k8s cluster can run multiple replicas of Argo-rollouts controllers to ach Argo Rollouts supports BlueGreen, Canary, and Rolling Update. Additionally, Progressive Delivery features can be enabled on top of the blue-green/canary update, which further provides advanced deployment such as automated analysis and rollback. ### Does the Rollout object follow the provided strategy when it is first created? -As with Deployments, Rollouts does not follow the strategy parameters on the initial deploy. The controller tries to get the Rollout into a steady state as fast as possible. The controller tries to get the Rollout into a steady state as fast as possible by creating a fully scaled up ReplicaSet from the provided `.spec.template`. Once the Rollout has a stable ReplicaSet to transition from, the controller starts using the provided strategy to transition the previous ReplicaSet to the desired ReplicaSet. +As with Deployments, Rollouts does not follow the strategy parameters on the initial deploy. The controller tries to get the Rollout into a steady state as fast as possible by creating a fully scaled up ReplicaSet from the provided `.spec.template`. Once the Rollout has a stable ReplicaSet to transition from, the controller starts using the provided strategy to transition the previous ReplicaSet to the desired ReplicaSet. ### How does BlueGreen rollback work? A BlueGreen Rollout keeps the old ReplicaSet up and running for 30 seconds or the value of the scaleDownDelaySeconds. The controller tracks the remaining time before scaling down by adding an annotation called `argo-rollouts.argoproj.io/scale-down-deadline` to the old ReplicaSet. 
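For reference, here is a minimal sketch (not part of this diff) of where `scaleDownDelaySeconds` sits in a BlueGreen Rollout spec; the resource names, image, and delay value are illustrative assumptions:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: example-rollout            # hypothetical name
spec:
  replicas: 3
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: example
        image: example/app:1.0     # hypothetical image
  strategy:
    blueGreen:
      activeService: example-active     # assumed Service names
      previewService: example-preview
      # keep the previous ReplicaSet running for 10 minutes instead of the default 30 seconds
      scaleDownDelaySeconds: 600
```

Until that delay expires, the old ReplicaSet still carries the `argo-rollouts.argoproj.io/scale-down-deadline` annotation described above, which is what makes the fast rollback described next possible.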
If the user applies the old Rollout manifest before the old ReplicaSet scales down, the controller does something called a fast rollback. The controller immediately switches the active service’s selector back to the old ReplicaSet’s rollout-pod-template-hash and removes the scaled down annotation from that ReplicaSet. The controller does not do any of the normal operations when trying to introduce a new version since it is trying to revert as fast as possible. A non-fast-track rollback occurs when the scale down annotation has past and the old ReplicaSet has been scaled down. In this case, the Rollout treats the ReplicaSet like any other new ReplicaSet and follows the usual procedure for deploying a new ReplicaSet. @@ -55,6 +55,12 @@ A BlueGreen Rollout keeps the old ReplicaSet up and running for 30 seconds or th ### What is the `argo-rollouts.argoproj.io/managed-by-rollouts` annotation? Argo Rollouts adds an `argo-rollouts.argoproj.io/managed-by-rollouts` annotation to Services and Ingresses that the controller modifies. They are used when the Rollout managing these resources is deleted and the controller tries to revert them back into their previous state. +### How can I deploy multiple services in a single step and roll them back according to their dependencies? + +The Rollout specification focuses on a single application/deployment. Argo Rollouts knows nothing about application dependencies. If you want to deploy multiple applications together in a smart way (e.g. automatically rollback a frontend if backend deployment fails) you need to write your own solution +on top of Argo Rollouts. In most cases, you would need one Rollout resource for each application that you +are deploying. Ideally you should also make your services backwards and forwards compatible (i.e. frontend should be able to work with both backend-preview and backend-active). + ## Experiments ### Why doesn't my Experiment end? diff --git a/docs/analysis/cloudwatch.md b/docs/analysis/cloudwatch.md index 145fad6f8b..7bccbc2072 100644 --- a/docs/analysis/cloudwatch.md +++ b/docs/analysis/cloudwatch.md @@ -11,7 +11,7 @@ You can use CloudWatch Metrics if you have used to EKS or not. This analysis is ### EKS -If you create new cluster on EKS, you can attach [cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) or attach [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). +If you create new cluster on EKS, you can attach [cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) or attach [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). If you have already cluster on EKS, you can attach [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). ### not EKS diff --git a/docs/analysis/influxdb.md b/docs/analysis/influxdb.md new file mode 100644 index 0000000000..f4bd95345e --- /dev/null +++ b/docs/analysis/influxdb.md @@ -0,0 +1,41 @@ +# InfluxDB Metrics + +An [InfluxDB](https://www.influxdata.com/) query using [Flux](https://docs.influxdata.com/influxdb/cloud/query-data/get-started/query-influxdb/) can be used to obtain measurements for analysis. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: error-rate +spec: + args: + - name: application-name + metrics: + - name: error-rate + # NOTE: To be consistent with the prometheus metrics provider InfluxDB query results are returned as an array. + # In the example we're looking at index 0 of the returned array to obtain the value we're using for the success condition + successCondition: result[0] <= 0.01 + provider: + influxdb: + profile: my-influxdb-secret # optional, defaults to 'influxdb' + query: | + from(bucket: "app_istio") + |> range(start: -15m) + |> filter(fn: (r) => r["destination_workload"] == "{{ args.application-name }}") + |> filter(fn: (r) => r["_measurement"] == "istio:istio_requests_errors_percentage:rate1m:5xx") + +``` + +An InfluxDB access profile can be configured using a Kubernetes secret in the `argo-rollouts` namespace. Alternate accounts can be used by creating more secrets of the same format and specifying which secret to use in the metric provider configuration using the `profile` field. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: influxdb +type: Opaque +data: + address: + authToken: + org: +``` diff --git a/docs/analysis/kayenta.md b/docs/analysis/kayenta.md index 907ed9196d..06cc91b02a 100644 --- a/docs/analysis/kayenta.md +++ b/docs/analysis/kayenta.md @@ -1,6 +1,6 @@ ## Kayenta (e.g. Mann-Whitney Analysis) -Analysis can also be done as part of an [Experiment](../features/experiment.md). +Analysis can also be done as part of an [Experiment](../features/experiment.md). This example starts both a canary and baseline ReplicaSet. The ReplicaSets run for 1 hour, then scale down to zero. Call out to Kayenta to perform Mann-Whintney analysis against the two pods. Demonstrates ability to start a @@ -23,7 +23,7 @@ This example demonstrates: app: guestbook spec: strategy: - canary: + canary: steps: - experiment: duration: 1h @@ -45,7 +45,7 @@ This example demonstrates: === "AnalysisTemplate" - ```yaml + ```yaml apiVersion: argoproj.io/v1alpha1 kind: AnalysisTemplate metadata: diff --git a/docs/analysis/newrelic.md b/docs/analysis/newrelic.md index de1153505d..b71b661f84 100644 --- a/docs/analysis/newrelic.md +++ b/docs/analysis/newrelic.md @@ -3,7 +3,7 @@ !!! important Available since v0.10.0 -A [New Relic](https://newrelic.com/) query using [NRQL](https://docs.newrelic.com/docs/query-your-data/nrql-new-relic-query-language/get-started/introduction-nrql-new-relics-query-language) can be used to obtain measurements for analysis. +A [New Relic](https://newrelic.com/) query using [NRQL](https://docs.newrelic.com/docs/query-your-data/nrql-new-relic-query-language/get-started/introduction-nrql-new-relics-query-language) can be used to obtain measurements for analysis. ```yaml apiVersion: argoproj.io/v1alpha1 diff --git a/docs/analysis/prometheus.md b/docs/analysis/prometheus.md index cbb70e87a9..a698626d10 100644 --- a/docs/analysis/prometheus.md +++ b/docs/analysis/prometheus.md @@ -23,7 +23,7 @@ spec: query: | sum(irate( istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] - )) / + )) / sum(irate( istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] )) @@ -34,3 +34,7 @@ you validate your [PromQL expression](https://prometheus.io/docs/prometheus/late See the [Analysis Overview page](../../features/analysis) for more details on the available options. 
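To make explicit how a query like the one above is typically consumed, here is a minimal sketch (an illustration, not part of this change) of a `successCondition` over the returned result; the template name, server address, and threshold are assumptions:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: AnalysisTemplate
metadata:
  name: success-rate                 # hypothetical template name
spec:
  args:
  - name: service-name
  metrics:
  - name: success-rate
    interval: 5m
    # query results are returned as an array; index 0 holds the ratio computed below
    successCondition: result[0] >= 0.95
    provider:
      prometheus:
        address: http://prometheus.example.com:9090   # assumed address, matching the docs' other examples
        query: |
          sum(irate(
            istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m]
          )) /
          sum(irate(
            istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m]
          ))
```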
+# Additional Metadata + +Any additional metadata from the Prometheus controller, like the resolved queries after substituting the template's +arguments, etc. will appear under the `Metadata` map in the `MetricsResult` object of `AnalysisRun`. diff --git a/docs/analysis/wavefront.md b/docs/analysis/wavefront.md index 7eb58863e2..f7c8f57d91 100644 --- a/docs/analysis/wavefront.md +++ b/docs/analysis/wavefront.md @@ -39,4 +39,3 @@ data: example1.wavefront.com: example2.wavefront.com: ``` - diff --git a/docs/analysis/web.md b/docs/analysis/web.md index e3e133cec0..285403ffe7 100644 --- a/docs/analysis/web.md +++ b/docs/analysis/web.md @@ -17,7 +17,7 @@ of the as the result variable. headers: - key: Authorization value: "Bearer {{ args.api-token }}" - jsonPath: "{$.data.ok}" + jsonPath: "{$.data.ok}" ``` In the following example, given the payload, the measurement will be Successful if the `data.ok` field was `true`, and the `data.successPercent` @@ -42,7 +42,7 @@ was greater than `0.90` headers: - key: Authorization value: "Bearer {{ args.api-token }}" - jsonPath: "{$.data}" + jsonPath: "{$.data}" ``` NOTE: if the result is a string, two convenience functions `asInt` and `asFloat` are provided @@ -67,7 +67,7 @@ It is possible to use a POST or PUT requests, by specifying the `method` and `bo - key: Content-Type # if body is a json, it is recommended to set the Content-Type value: "application/json" body: "{\"key\": \"string value\"}" - jsonPath: "{$.data.ok}" + jsonPath: "{$.data.ok}" ``` !!! tip In order to send in JSON, you have to encode it yourself, and send the correct Content-Type as well. diff --git a/docs/architecture-assets/internal-architecture.png b/docs/architecture-assets/internal-architecture.png new file mode 100644 index 0000000000..56502807b5 Binary files /dev/null and b/docs/architecture-assets/internal-architecture.png differ diff --git a/docs/assets/versions.css b/docs/assets/versions.css new file mode 100644 index 0000000000..b8bb066929 --- /dev/null +++ b/docs/assets/versions.css @@ -0,0 +1,175 @@ +.md-header__title { + display: flex; +} + +.dropdown-caret { + display: inline-block !important; + position: absolute; + right: 4px; +} + +.fa .fa-caret-down { + display: none !important; +} + +.rst-other-versions { + text-align: right; +} + +.rst-other-versions > dl, .rst-other-versions dt, .rst-other-versions small { + display: none; +} + +.rst-other-versions > dl:first-child { + display: flex !important; + flex-direction: column; + line-height: 0px !important; +} + +.rst-versions.shift-up .rst-other-versions { + display: flex !important; +} + +.rst-versions .rst-other-versions { + display: none; +} + +/* Version Warning */ +div[data-md-component=announce] { + background-color: rgb(248, 243, 236); + position: sticky; + top: 0; + z-index: 2; +} +div[data-md-component=announce]>div#announce-msg{ + color: var(--md-code-hl-number-color); + font-size: .8rem; + text-align: center; + margin: 15px; +} +div[data-md-component=announce]>div#announce-msg>a{ + color: var(--md-typeset-a-color); + text-decoration: underline; +} + +/* from https://assets.readthedocs.org/static/css/badge_only.css, +most styles have to be overriden here */ +.rst-versions{ + position: relative !important; + bottom: 0; + left: 0; + width: 100px !important; + background: hsla(173, 100%, 24%, 1) !important; + font-family: inherit !important; + z-index: 0 !important; +} +.rst-versions a{ + color:#2980B9; + text-decoration:none +} +.rst-versions .rst-badge-small{ + display:none +} +.rst-versions 
.rst-current-version{ + padding:12px; + background: hsla(173, 100%, 24%, 1) !important; + display:block; + text-align:right; + font-size:90%; + cursor:pointer; + color: white !important; + *zoom:1 +} +.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{ + display:table;content:"" +} +.rst-versions .rst-current-version:after{ + clear:both +} +.rst-versions .rst-current-version .fa{ + color:#fcfcfc +} +.rst-versions .rst-current-version .fa-caret-down{ + display: none; +} +.rst-versions.shift-up .rst-other-versions{ + display:block +} +.rst-versions .rst-other-versions{ + font-size:90%; + padding:12px; + color:gray; + display:none +} +.rst-versions .rst-other-versions hr{ + display: none !important; + height: 0px !important; + border: 0px; + margin: 0px !important; + padding: 0px; + border-top: none !important; +} +.rst-versions .rst-other-versions dd{ + display:inline-block; + margin:0 +} +.rst-versions .rst-other-versions dd a{ + display:inline-block; + padding: 1em 0em !important; + color:#fcfcfc; + font-size: .6rem !important; + white-space: nowrap; + text-overflow: ellipsis; + overflow: hidden; + width: 80px; +} +.rst-versions .rst-other-versions dd a:hover{ + font-size: .7rem !important; + font-weight: bold; +} +.rst-versions.rst-badge{ + display: block !important; + width: 100px !important; + bottom: 0px !important; + right: 0px !important; + left:auto; + border:none; + text-align: center !important; + line-height: 0; +} +.rst-versions.rst-badge .icon-book{ + display: none; +} +.rst-versions.rst-badge .fa-book{ + display: none !important; +} +.rst-versions.rst-badge.shift-up .rst-current-version{ + text-align: left !important; +} +.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{ + display: none !important; +} +.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{ + display: none !important; +} +.rst-versions.rst-badge .rst-current-version{ + width: 70px !important; + height: 2.4rem !important; + line-height:2.4rem !important; + padding: 0px 5px !important; + display: inline-block !important; + font-size: .6rem !important; + overflow: hidden !important; + text-overflow: ellipsis !important; + white-space: nowrap !important; + text-align: left !important; +} +@media screen and (max-width: 768px){ + .rst-versions{ + width:85%; + display:none + } + .rst-versions.shift{ + display:block + } +} \ No newline at end of file diff --git a/docs/assets/versions.js b/docs/assets/versions.js new file mode 100644 index 0000000000..057ce03158 --- /dev/null +++ b/docs/assets/versions.js @@ -0,0 +1,35 @@ +setTimeout(function() { + const callbackName = 'callback_' + new Date().getTime(); + window[callbackName] = function (response) { + const div = document.createElement('div'); + div.innerHTML = response.html; + document.querySelector(".md-header__inner > .md-header__title").appendChild(div); + const container = div.querySelector('.rst-versions'); + var caret = document.createElement('div'); + caret.innerHTML = "" + caret.classList.add('dropdown-caret') + div.querySelector('.rst-current-version').appendChild(caret); + div.querySelector('.rst-current-version').addEventListener('click', function() { + const classes = container.className.split(' '); + const index = classes.indexOf('shift-up'); + if (index === -1) { + classes.push('shift-up'); + } else { + classes.splice(index, 1); + } + container.className = classes.join(' '); + }); + } + + var CSSLink = document.createElement('link'); + CSSLink.rel='stylesheet'; + CSSLink.href = 
'/assets/versions.css'; + document.getElementsByTagName('head')[0].appendChild(CSSLink); + + var script = document.createElement('script'); + script.src = 'https://argo-rollouts.readthedocs.io/_/api/v2/footer_html/?'+ + 'callback=' + callbackName + '&project=argo-rollouts&page=&theme=mkdocs&format=jsonp&docroot=docs&source_suffix=.md&version=' + (window['READTHEDOCS_DATA'] || { version: 'latest' }).version; + document.getElementsByTagName('head')[0].appendChild(script); +}, 0); + + diff --git a/docs/features/analysis.md b/docs/features/analysis.md index 0c563591ad..478a7280f3 100644 --- a/docs/features/analysis.md +++ b/docs/features/analysis.md @@ -356,7 +356,7 @@ templates together. The controller combines the `metrics` and `args` fields of a The controller will error when merging the templates if: * Multiple metrics in the templates have the same name - * Two arguments with the same name both have values + * Two arguments with the same name have different default values no matter the argument value in Rollout ## Analysis Template Arguments @@ -370,11 +370,11 @@ metadata: name: args-example spec: args: - # required + # required in Rollout due to no default value - name: service-name - name: stable-hash - name: latest-hash - # optional + # optional in Rollout given the default value - name: api-url value: http://example/measure # from secret @@ -428,6 +428,7 @@ spec: ``` Analysis arguments also support valueFrom for reading metadata fields and passing them as arguments to AnalysisTemplate. An example would be to reference metadata labels like env and region and passing them along to AnalysisTemplate. + ```yaml apiVersion: argoproj.io/v1alpha1 kind: Rollout @@ -459,6 +460,38 @@ spec: fieldPath: metadata.labels['region'] ``` +!!! important + Available since v1.2 +Analysis arguments also support valueFrom for reading any field from Rollout status and passing them as arguments to AnalysisTemplate. +Following example references Rollout status field like aws canaryTargetGroup name and passing them along to AnalysisTemplate + +from the Rollout status +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: guestbook + labels: + appType: demo-app + buildType: nginx-app + ... + env: dev + region: us-west-2 +spec: +... + strategy: + canary: + analysis: + templates: + - templateName: args-example + args: + ... + - name: canary-targetgroup-name + valueFrom: + fieldRef: + fieldPath: status.alb.canaryTargetGroup.name +``` + ## BlueGreen Pre Promotion Analysis A Rollout using the BlueGreen strategy can launch an AnalysisRun *before* it switches traffic to the new version using @@ -520,15 +553,45 @@ spec: value: preview-svc.default.svc.cluster.local ``` -## Failure Conditions +## Failure Conditions and Failure Limit -`failureCondition` can be used to cause an analysis run to fail. The following example continually polls a prometheus -server to get the total number of errors every 5 minutes, causing the analysis run to fail if 10 or more errors were -encountered. +`failureCondition` can be used to cause an analysis run to fail. +`failureLimit` is the maximum number of failed run an analysis is allowed. +The following example continually polls the defined Prometheus server to get the total number of errors(i.e., HTTP response code >= 500) every 5 minutes, causing the measurement to fail if ten or more errors are encountered. +The entire analysis run is considered as Failed after three failed measurements. 
-```yaml hl_lines="4" +```yaml hl_lines="4 5" metrics: - name: total-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code=~"5.*"}[5m] + )) +``` + +## Dry-Run Mode + +!!! important + Available since v1.2 + +`dryRun` can be used on a metric to control whether or not to evaluate that metric in a dry-run mode. A metric running +in the dry-run mode won't impact the final state of the rollout or experiment even if it fails or the evaluation comes +out as inconclusive. + +The following example queries prometheus every 5 minutes to get the total number of 4XX and 5XX errors, and even if the +evaluation of the metric to monitor the 5XX error-rate fail, the analysis run will pass. + +```yaml hl_lines="1 2" + dryRun: + - metricName: total-5xx-errors + metrics: + - name: total-5xx-errors interval: 5m failureCondition: result[0] >= 10 failureLimit: 3 @@ -539,6 +602,236 @@ encountered. sum(irate( istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"5.*"}[5m] )) + - name: total-4xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"4.*"}[5m] + )) +``` + +RegEx matches are also supported. `.*` can be used to make all the metrics run in the dry-run mode. In the following +example, even if one or both metrics fail, the analysis run will pass. + +```yaml hl_lines="1 2" + dryRun: + - metricName: .* + metrics: + - name: total-5xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"5.*"}[5m] + )) + - name: total-4xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"4.*"}[5m] + )) +``` + +### Dry-Run Summary + +If one or more metrics are running in the dry-run mode, the summary of the dry-run results gets appended to the analysis +run message. Assuming that the `total-4xx-errors` metric fails in the above example but, the `total-5xx-errors` +succeeds, the final dry-run summary will look like this. + +```yaml hl_lines="4 5 6 7" +Message: Run Terminated +Run Summary: + ... +Dry Run Summary: + Count: 2 + Successful: 1 + Failed: 1 +Metric Results: +... +``` + +### Dry-Run Rollouts + +If a rollout wants to dry run its analysis, it simply needs to specify the `dryRun` field to its `analysis` stanza. In the +following example, all the metrics from `random-fail` and `always-pass` get merged and executed in the dry-run mode. + +```yaml hl_lines="9 10" +kind: Rollout +spec: +... + steps: + - analysis: + templates: + - templateName: random-fail + - templateName: always-pass + dryRun: + - metricName: .* +``` + +### Dry-Run Experiments + +If an experiment wants to dry run its analysis, it simply needs to specify the `dryRun` field under its specs. 
In the +following example, all the metrics from `analyze-job` matching the RegEx rule `test.*` will be executed in the dry-run +mode. + +```yaml hl_lines="20 21" +kind: Experiment +spec: + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - name: rollouts-demo + image: argoproj/rollouts-demo:blue + analyses: + - name: analyze-job + templateName: analyze-job + dryRun: + - metricName: test.* +``` + +## Measurements Retention + +!!! important + Available since v1.2 + +`measurementRetention` can be used to retain other than the latest ten results for the metrics running in any mode +(dry/non-dry). Setting this option to `0` would disable it and, the controller will revert to the existing behavior of +retaining the latest ten measurements. + +The following example queries Prometheus every 5 minutes to get the total number of 4XX and 5XX errors and retains the +latest twenty measurements for the 5XX metric run results instead of the default ten. + +```yaml hl_lines="1 2 3" + measurementRetention: + - metricName: total-5xx-errors + limit: 20 + metrics: + - name: total-5xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"5.*"}[5m] + )) + - name: total-4xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"4.*"}[5m] + )) +``` + +RegEx matches are also supported. `.*` can be used to apply the same retention rule to all the metrics. In the following +example, the controller will retain the latest twenty run results for all the metrics instead of the default ten results. + +```yaml hl_lines="1 2 3" + measurementRetention: + - metricName: .* + limit: 20 + metrics: + - name: total-5xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"5.*"}[5m] + )) + - name: total-4xx-errors + interval: 5m + failureCondition: result[0] >= 10 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code~"4.*"}[5m] + )) +``` + +### Measurements Retention for Rollouts Analysis + +If a rollout wants to retain more results of its analysis metrics, it simply needs to specify the `measurementRetention` +field to its `analysis` stanza. In the following example, all the metrics from `random-fail` and `always-pass` get +merged, and their latest twenty measurements get retained instead of the default ten. + +```yaml hl_lines="9 10 11" +kind: Rollout +spec: +... + steps: + - analysis: + templates: + - templateName: random-fail + - templateName: always-pass + measurementRetention: + - metricName: .* + limit: 20 +``` + +### Measurements Retention for Experiments + +If an experiment wants to retain more results of its analysis metrics, it simply needs to specify the +`measurementRetention` field under its specs. 
In the following example, all the metrics from `analyze-job` matching the +RegEx rule `test.*` will have their latest twenty measurements retained instead of the default ten. + +```yaml hl_lines="20 21 22" +kind: Experiment +spec: + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - name: rollouts-demo + image: argoproj/rollouts-demo:blue + analyses: + - name: analyze-job + templateName: analyze-job + measurementRetention: + - metricName: test.* + limit: 20 ``` ## Inconclusive Runs diff --git a/docs/features/canary.md b/docs/features/canary.md index 5cc4fd11b8..5770673b68 100644 --- a/docs/features/canary.md +++ b/docs/features/canary.md @@ -83,15 +83,14 @@ match the traffic weight. Some use cases for this: !!! important - Setting canary scale is only available when using the canary strategy with a traffic router, since - the basic canary needs to control canary scale in order to approximate canary weight. + Setting canary scale is only available when using the canary strategy with a traffic router, since the basic canary needs to control canary scale in order to approximate canary weight. -To control canary weights during steps, use the `setCanaryScale` step and indicate which scale the +To control canary scales and weights during steps, use the `setCanaryScale` step and indicate which scale the the canary should use: -* explicit replica count -* explicit weight percentage of total spec.replicas -* to match current canary setWeight +* explicit replica count without changing traffic weight (`replicas`) +* explicit weight percentage of total spec.replicas without changing traffic weight (`weight`) +* whether or not to match the current canary `setWeight` step (`matchTrafficWeight: true or false`) ```yaml spec: @@ -120,12 +119,16 @@ spec: strategy: canary: steps: + # 1 canary pod (10% of spec.replicas) - setCanaryScale: weight: 10 + # 90% of traffic to the 1 canary pod - setWeight: 90 - pause: {} ``` +The above situation is caused by the changed behavior of `setWeight` after `setCanaryScale`. To reset, set `matchTrafficWeight: true` and the `setWeight` behavior will be restored, i.e., subsequent `setWeight` steps will create canary replicas matching the traffic weight. + ## Dynamic Stable Scale (with Traffic Routing) !!!
important diff --git a/docs/features/kustomize/rollout_cr_schema.json b/docs/features/kustomize/rollout_cr_schema.json index c228c79733..4ec83fccff 100644 --- a/docs/features/kustomize/rollout_cr_schema.json +++ b/docs/features/kustomize/rollout_cr_schema.json @@ -1,6 +1,16647 @@ { "definitions": { - "v1alpha1.Rollout": { + "io.argoproj.v1alpha1.AnalysisRun": { + "properties": { + "spec": { + "properties": { + "args": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "fieldRef": { + "properties": { + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "key", + "name" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "metrics": { + "items": { + "properties": { + "consecutiveErrorLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "count": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "failureCondition": { + "type": "string" + }, + "failureLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "inconclusiveLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "initialDelay": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "name": { + "type": "string" + }, + "provider": { + "properties": { + "cloudWatch": { + "properties": { + "interval": { + "type": "string" + }, + "metricDataQueries": { + "items": { + "properties": { + "expression": { + "type": "string" + }, + "id": { + "type": "string" + }, + "label": { + "type": "string" + }, + "metricStat": { + "properties": { + "metric": { + "properties": { + "dimensions": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "metricName": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "type": "object" + }, + "period": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "stat": { + "type": "string" + }, + "unit": { + "type": "string" + } + }, + "type": "object" + }, + "period": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "returnData": { + "type": "boolean" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + 
"metricDataQueries" + ], + "type": "object" + }, + "datadog": { + "properties": { + "interval": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "graphite": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "influxdb": { + "properties": { + "profile": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "job": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + "activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "backoffLimit": { + "format": "int32", + "type": "integer" + }, + "completionMode": { + "type": "string" + }, + "completions": { + "format": "int32", + "type": "integer" + }, + "manualSelector": { + "type": "boolean" + }, + "parallelism": { + "format": "int32", + "type": "integer" + }, + "selector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "suspend": { + "type": "boolean" + }, + "template": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + "activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "affinity": { + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "preference": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "preference", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "properties": { + "nodeSelectorTerms": { + "items": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": 
"string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object" + } + }, + "type": "object" + }, + "podAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + 
"additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "automountServiceAccountToken": { + "type": "boolean" + }, + "containers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + 
"optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": 
"integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + 
"type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "dnsConfig": { + "properties": { 
+ "nameservers": { + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "searches": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "ephemeralContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + 
"properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" 
+ }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + 
"x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "targetContainerName": { + "type": "string" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "hostAliases": { + "items": { + "properties": { + "hostnames": { + "items": { + "type": "string" + }, + "type": "array" + }, + "ip": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "hostIPC": { + "type": "boolean" + }, + "hostNetwork": { + "type": "boolean" + }, + "hostPID": { + "type": "boolean" + }, + "hostname": { + "type": "string" + }, + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "initContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": 
{ + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + 
"path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": 
"object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + 
"type": "object" + }, + "type": "array" + }, + "nodeName": { + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "os": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "overhead": { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "type": "object" + }, + "preemptionPolicy": { + "type": "string" + }, + "priority": { + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "type": "string" + }, + "readinessGates": { + "items": { + "properties": { + "conditionType": { + "type": "string" + } + }, + "required": [ + "conditionType" + ], + "type": "object" + }, + "type": "array" + }, + "restartPolicy": { + "type": "string" + }, + "runtimeClassName": { + "type": "string" + }, + "schedulerName": { + "type": "string" + }, + "securityContext": { + "properties": { + "fsGroup": { + "format": "int64", + "type": "integer" + }, + "fsGroupChangePolicy": { + "type": "string" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "supplementalGroups": { + "items": { + "format": "int64", + "type": "integer" + }, + "type": "array" + }, + "sysctls": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serviceAccount": { + "type": "string" + }, + "serviceAccountName": { + "type": "string" + }, + "setHostnameAsFQDN": { + "type": "boolean" + }, + "shareProcessNamespace": { + "type": "boolean" + }, + "subdomain": { + "type": "string" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "tolerations": { + "items": { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "tolerationSeconds": { + "format": "int64", + "type": "integer" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "topologySpreadConstraints": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + 
"additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "maxSkew": { + "format": "int32", + "type": "integer" + }, + "minDomains": { + "format": "int32", + "type": "integer" + }, + "topologyKey": { + "type": "string" + }, + "whenUnsatisfiable": { + "type": "string" + } + }, + "required": [ + "maxSkew", + "topologyKey", + "whenUnsatisfiable" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "topologyKey", + "whenUnsatisfiable" + ], + "x-kubernetes-list-type": "map" + }, + "volumes": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "required": [ + "containers" + ], + "type": "object" + } + }, + "type": "object" + }, + "ttlSecondsAfterFinished": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "template" + ], + "type": "object" + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "kayenta": { + "properties": { + "address": { + "type": "string" + }, + "application": { + "type": "string" + }, + "canaryConfigName": { + "type": "string" + }, + "configurationAccountName": { + "type": "string" + }, + "metricsAccountName": { + "type": "string" + }, + "scopes": { + "items": { + "properties": { + "controlScope": { + "properties": { + "end": { + "type": "string" + }, + "region": { + "type": "string" + }, + "scope": { + "type": "string" + }, + "start": { + "type": "string" + }, + "step": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "end", + "region", + "scope", + "start", + "step" + ], + "type": "object" + }, + "experimentScope": { + "properties": { + "end": { + "type": "string" + }, + "region": { + "type": "string" + }, + "scope": { + "type": "string" + }, + "start": { + "type": "string" + }, + "step": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "end", + "region", + "scope", + "start", + "step" + ], + "type": "object" + }, + "name": { + "type": "string" + } + }, + "required": [ + "controlScope", + "experimentScope", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "storageAccountName": { + "type": "string" + }, + "threshold": { + "properties": { + "marginal": { + "format": "int64", + "type": "integer" + }, + "pass": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "marginal", + "pass" + ], + "type": "object" + } + }, + "required": [ + "address", + "application", + "canaryConfigName", + "configurationAccountName", + "metricsAccountName", + "scopes", + "storageAccountName", + "threshold" + ], + "type": "object" + }, + "newRelic": { + "properties": { + "profile": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "prometheus": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "wavefront": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "web": { + "properties": { + "body": { + "type": "string" + }, + "headers": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "key", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "insecure": { + "type": "boolean" + }, + "jsonPath": { + "type": "string" + }, + "method": { + "type": "string" + }, + "timeoutSeconds": { + "format": "int64", + "type": "integer" + }, + "url": { + "type": "string" + } + }, + "required": [ + "url" + ], + "type": "object" + } 
+ }, + "type": "object" + }, + "successCondition": { + "type": "string" + } + }, + "required": [ + "name", + "provider" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "required": [ + "metrics" + ], + "type": "object" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "AnalysisRun", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.v1alpha1.AnalysisTemplate": { + "properties": { + "spec": { + "properties": { + "args": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "fieldRef": { + "properties": { + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "key", + "name" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "metrics": { + "items": { + "properties": { + "consecutiveErrorLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "count": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "failureCondition": { + "type": "string" + }, + "failureLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "inconclusiveLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "initialDelay": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "name": { + "type": "string" + }, + "provider": { + "properties": { + "cloudWatch": { + "properties": { + "interval": { + "type": "string" + }, + "metricDataQueries": { + "items": { + "properties": { + "expression": { + "type": "string" + }, + "id": { + "type": "string" + }, + "label": { + "type": "string" + }, + "metricStat": { + "properties": { + "metric": { + "properties": { + "dimensions": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "metricName": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "type": "object" + }, + "period": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "stat": { + "type": "string" + }, + "unit": { + "type": "string" + } + }, + "type": "object" + }, + "period": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + 
"x-kubernetes-int-or-string": true + }, + "returnData": { + "type": "boolean" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "metricDataQueries" + ], + "type": "object" + }, + "datadog": { + "properties": { + "interval": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "graphite": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "influxdb": { + "properties": { + "profile": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "job": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + "activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "backoffLimit": { + "format": "int32", + "type": "integer" + }, + "completionMode": { + "type": "string" + }, + "completions": { + "format": "int32", + "type": "integer" + }, + "manualSelector": { + "type": "boolean" + }, + "parallelism": { + "format": "int32", + "type": "integer" + }, + "selector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "suspend": { + "type": "boolean" + }, + "template": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + "activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "affinity": { + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "preference": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "preference", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "properties": { + "nodeSelectorTerms": { + "items": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + 
"type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object" + } + }, + "type": "object" + }, + "podAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" 
+ }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "automountServiceAccountToken": { + "type": "boolean" + }, + "containers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + 
"resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + 
"name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + 
"items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + 
"workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "dnsConfig": { + "properties": { + "nameservers": { + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "searches": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "ephemeralContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + 
"x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + 
"grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" 
+ }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "targetContainerName": { + "type": "string" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "hostAliases": { + "items": { + "properties": { + "hostnames": { + "items": { + "type": "string" + }, + "type": "array" + }, + "ip": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "hostIPC": { + "type": "boolean" + }, + "hostNetwork": { + "type": "boolean" + }, + "hostPID": { + "type": "boolean" + }, + "hostname": { + "type": "string" + }, + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "initContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + 
"x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + 
"properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + 
"capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + 
"type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "nodeName": { + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "os": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "overhead": { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "type": "object" + }, + "preemptionPolicy": { + "type": "string" + }, + "priority": { + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "type": "string" + }, + "readinessGates": { + "items": { + "properties": { + "conditionType": { + "type": "string" + } + }, + "required": [ + "conditionType" + ], + "type": "object" + }, + "type": "array" + }, + "restartPolicy": { + "type": "string" + }, + "runtimeClassName": { + "type": "string" + }, + "schedulerName": { + "type": "string" + }, + "securityContext": { + "properties": { + "fsGroup": { + "format": "int64", + "type": "integer" + }, + "fsGroupChangePolicy": { + "type": "string" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "supplementalGroups": { + "items": { + "format": "int64", + "type": "integer" + }, + "type": "array" + }, + "sysctls": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serviceAccount": { + "type": "string" + }, + "serviceAccountName": { + "type": "string" + }, + "setHostnameAsFQDN": { + "type": "boolean" + }, + "shareProcessNamespace": { + "type": "boolean" + }, + "subdomain": { + "type": "string" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "tolerations": { + "items": { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "tolerationSeconds": { + "format": "int64", + "type": "integer" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "topologySpreadConstraints": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + 
"values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "maxSkew": { + "format": "int32", + "type": "integer" + }, + "minDomains": { + "format": "int32", + "type": "integer" + }, + "topologyKey": { + "type": "string" + }, + "whenUnsatisfiable": { + "type": "string" + } + }, + "required": [ + "maxSkew", + "topologyKey", + "whenUnsatisfiable" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "topologyKey", + "whenUnsatisfiable" + ], + "x-kubernetes-list-type": "map" + }, + "volumes": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "required": [ + "containers" + ], + "type": "object" + } + }, + "type": "object" + }, + "ttlSecondsAfterFinished": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "template" + ], + "type": "object" + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "kayenta": { + "properties": { + "address": { + "type": "string" + }, + "application": { + "type": "string" + }, + "canaryConfigName": { + "type": "string" + }, + "configurationAccountName": { + "type": "string" + }, + "metricsAccountName": { + "type": "string" + }, + "scopes": { + "items": { + "properties": { + "controlScope": { + "properties": { + "end": { + "type": "string" + }, + "region": { + "type": "string" + }, + "scope": { + "type": "string" + }, + "start": { + "type": "string" + }, + "step": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "end", + "region", + "scope", + "start", + "step" + ], + "type": "object" + }, + "experimentScope": { + "properties": { + "end": { + "type": "string" + }, + "region": { + "type": "string" + }, + "scope": { + "type": "string" + }, + "start": { + "type": "string" + }, + "step": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "end", + "region", + "scope", + "start", + "step" + ], + "type": "object" + }, + "name": { + "type": "string" + } + }, + "required": [ + "controlScope", + "experimentScope", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "storageAccountName": { + "type": "string" + }, + "threshold": { + "properties": { + "marginal": { + "format": "int64", + "type": "integer" + }, + "pass": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "marginal", + "pass" + ], + "type": "object" + } + }, + "required": [ + "address", + "application", + "canaryConfigName", + "configurationAccountName", + "metricsAccountName", + "scopes", + "storageAccountName", + "threshold" + ], + "type": "object" + }, + "newRelic": { + "properties": { + "profile": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "prometheus": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "wavefront": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "web": { + "properties": { + "body": { + "type": "string" + }, + "headers": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "key", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "insecure": { + "type": "boolean" + }, + "jsonPath": { + "type": "string" + }, + 
"method": { + "type": "string" + }, + "timeoutSeconds": { + "format": "int64", + "type": "integer" + }, + "url": { + "type": "string" + } + }, + "required": [ + "url" + ], + "type": "object" + } + }, + "type": "object" + }, + "successCondition": { + "type": "string" + } + }, + "required": [ + "name", + "provider" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "required": [ + "metrics" + ], + "type": "object" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "AnalysisTemplate", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.v1alpha1.ClusterAnalysisTemplate": { + "properties": { + "spec": { + "properties": { + "args": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "fieldRef": { + "properties": { + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "key", + "name" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "metrics": { + "items": { + "properties": { + "consecutiveErrorLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "count": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "failureCondition": { + "type": "string" + }, + "failureLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "inconclusiveLimit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "initialDelay": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "name": { + "type": "string" + }, + "provider": { + "properties": { + "cloudWatch": { + "properties": { + "interval": { + "type": "string" + }, + "metricDataQueries": { + "items": { + "properties": { + "expression": { + "type": "string" + }, + "id": { + "type": "string" + }, + "label": { + "type": "string" + }, + "metricStat": { + "properties": { + "metric": { + "properties": { + "dimensions": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "metricName": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "type": "object" + }, + "period": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + 
"stat": { + "type": "string" + }, + "unit": { + "type": "string" + } + }, + "type": "object" + }, + "period": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "returnData": { + "type": "boolean" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "metricDataQueries" + ], + "type": "object" + }, + "datadog": { + "properties": { + "interval": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "graphite": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "influxdb": { + "properties": { + "profile": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "job": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + "activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "backoffLimit": { + "format": "int32", + "type": "integer" + }, + "completionMode": { + "type": "string" + }, + "completions": { + "format": "int32", + "type": "integer" + }, + "manualSelector": { + "type": "boolean" + }, + "parallelism": { + "format": "int32", + "type": "integer" + }, + "selector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "suspend": { + "type": "boolean" + }, + "template": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + "activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "affinity": { + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "preference": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "preference", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "properties": { + "nodeSelectorTerms": { + "items": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { 
+ "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object" + } + }, + "type": "object" + }, + "podAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + 
"properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "automountServiceAccountToken": { + "type": "boolean" + }, + "containers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": 
"^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": 
"object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + 
}, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + 
"readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "dnsConfig": { + "properties": { + "nameservers": { + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "searches": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "ephemeralContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + 
"port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + 
"properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + 
}, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "targetContainerName": { + "type": "string" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "hostAliases": { + "items": { + "properties": { + "hostnames": { + "items": { + "type": "string" + }, + "type": "array" + }, + "ip": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "hostIPC": { + "type": "boolean" + }, + "hostNetwork": { + "type": "boolean" + }, + "hostPID": { + "type": "boolean" + }, + "hostname": { + "type": "string" + }, + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "initContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + 
"type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + 
"required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + 
"x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + 
"mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "nodeName": { + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "os": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "overhead": { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "type": "object" + }, + "preemptionPolicy": { + "type": "string" + }, + "priority": { + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "type": "string" + }, + "readinessGates": { + "items": { + "properties": { + "conditionType": { + "type": "string" + } + }, + "required": [ + "conditionType" + ], + "type": "object" + }, + "type": "array" + }, + "restartPolicy": { + "type": "string" + }, + "runtimeClassName": { + "type": "string" + }, + "schedulerName": { + "type": "string" + }, + "securityContext": { + "properties": { + "fsGroup": { + "format": "int64", + "type": "integer" + }, + "fsGroupChangePolicy": { + "type": "string" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "supplementalGroups": { + "items": { + "format": "int64", + "type": "integer" + }, + "type": "array" + }, + "sysctls": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serviceAccount": { + "type": "string" + }, + "serviceAccountName": { + "type": "string" + }, + "setHostnameAsFQDN": { + "type": "boolean" + }, + "shareProcessNamespace": { + "type": "boolean" + }, + "subdomain": { + "type": "string" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "tolerations": { + "items": { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "tolerationSeconds": { + "format": "int64", + "type": "integer" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "topologySpreadConstraints": { + "items": { + 
"properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "maxSkew": { + "format": "int32", + "type": "integer" + }, + "minDomains": { + "format": "int32", + "type": "integer" + }, + "topologyKey": { + "type": "string" + }, + "whenUnsatisfiable": { + "type": "string" + } + }, + "required": [ + "maxSkew", + "topologyKey", + "whenUnsatisfiable" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "topologyKey", + "whenUnsatisfiable" + ], + "x-kubernetes-list-type": "map" + }, + "volumes": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "required": [ + "containers" + ], + "type": "object" + } + }, + "type": "object" + }, + "ttlSecondsAfterFinished": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "template" + ], + "type": "object" + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "kayenta": { + "properties": { + "address": { + "type": "string" + }, + "application": { + "type": "string" + }, + "canaryConfigName": { + "type": "string" + }, + "configurationAccountName": { + "type": "string" + }, + "metricsAccountName": { + "type": "string" + }, + "scopes": { + "items": { + "properties": { + "controlScope": { + "properties": { + "end": { + "type": "string" + }, + "region": { + "type": "string" + }, + "scope": { + "type": "string" + }, + "start": { + "type": "string" + }, + "step": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "end", + "region", + "scope", + "start", + "step" + ], + "type": "object" + }, + "experimentScope": { + "properties": { + "end": { + "type": "string" + }, + "region": { + "type": "string" + }, + "scope": { + "type": "string" + }, + "start": { + "type": "string" + }, + "step": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "end", + "region", + "scope", + "start", + "step" + ], + "type": "object" + }, + "name": { + "type": "string" + } + }, + "required": [ + "controlScope", + "experimentScope", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "storageAccountName": { + "type": "string" + }, + "threshold": { + "properties": { + "marginal": { + "format": "int64", + "type": "integer" + }, + "pass": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "marginal", + "pass" + ], + "type": "object" + } + }, + "required": [ + "address", + "application", + "canaryConfigName", + "configurationAccountName", + "metricsAccountName", + "scopes", + "storageAccountName", + "threshold" + ], + "type": "object" + }, + "newRelic": { + "properties": { + "profile": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "prometheus": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "wavefront": { + "properties": { + "address": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "type": "object" + }, + "web": { + "properties": { + "body": { + "type": "string" + }, + "headers": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" 
+ } + }, + "required": [ + "key", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "insecure": { + "type": "boolean" + }, + "jsonPath": { + "type": "string" + }, + "method": { + "type": "string" + }, + "timeoutSeconds": { + "format": "int64", + "type": "integer" + }, + "url": { + "type": "string" + } + }, + "required": [ + "url" + ], + "type": "object" + } + }, + "type": "object" + }, + "successCondition": { + "type": "string" + } + }, + "required": [ + "name", + "provider" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "required": [ + "metrics" + ], + "type": "object" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "ClusterAnalysisTemplate", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.v1alpha1.Experiment": { + "properties": { + "spec": { + "properties": { + "analyses": { + "items": { + "properties": { + "args": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "fieldRef": { + "properties": { + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "key", + "name" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "clusterScope": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "requiredForCompletion": { + "type": "boolean" + }, + "templateName": { + "type": "string" + } + }, + "required": [ + "name", + "templateName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "templates": { + "items": { + "properties": { + "minReadySeconds": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "replicas": { + "format": "int32", + "type": "integer" + }, + "selector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "service": { + "type": "object" + }, + "template": { + "properties": { + "metadata": { + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "spec": { + "properties": { + 
"activeDeadlineSeconds": { + "format": "int64", + "type": "integer" + }, + "affinity": { + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "preference": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "preference", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "properties": { + "nodeSelectorTerms": { + "items": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchFields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object" + } + }, + "type": "object" + }, + "podAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": 
"string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "weight": { + "format": "int32", + "type": "integer" + } + }, + "required": [ + "podAffinityTerm", + "weight" + ], + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaceSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "namespaces": { + "items": { + "type": "string" + }, + 
"type": "array" + }, + "topologyKey": { + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "automountServiceAccountToken": { + "type": "boolean" + }, + "containers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + 
"httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": 
"string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, 
+ "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "dnsConfig": { + "properties": { + "nameservers": { + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "searches": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "ephemeralContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": 
{ + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": 
"int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", 
+ "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "targetContainerName": { + "type": "string" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "hostAliases": { + "items": { + "properties": { + "hostnames": { + "items": { + "type": "string" + }, + "type": "array" + }, + "ip": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" 
+ }, + "hostIPC": { + "type": "boolean" + }, + "hostNetwork": { + "type": "boolean" + }, + "hostPID": { + "type": "boolean" + }, + "hostname": { + "type": "string" + }, + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "initContainers": { + "items": { + "properties": { + "args": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "properties": { + "configMapKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + }, + "fieldRef": { + "properties": { + "apiVersion": { + "type": "string" + }, + "fieldPath": { + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object" + }, + "resourceFieldRef": { + "properties": { + "containerName": { + "type": "string" + }, + "divisor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "resource": { + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "secretKeyRef": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "envFrom": { + "items": { + "properties": { + "configMapRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + }, + "prefix": { + "type": "string" + }, + "secretRef": { + "properties": { + "name": { + "type": "string" + }, + "optional": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "lifecycle": { + "properties": { + "postStart": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + }, + "preStop": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpGet": { + "properties": 
{ + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "ports": { + "items": { + "properties": { + "containerPort": { + "format": "int32", + "type": "integer" + }, + "hostIP": { + "type": "string" + }, + "hostPort": { + "format": "int32", + "type": "integer" + }, + "name": { + "type": "string" + }, + "protocol": { + "default": "TCP", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" + }, + "readinessProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": 
"string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "limits": { + "x-kubernetes-preserve-unknown-fields": true + }, + "requests": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "add": { + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "procMount": { + "type": "string" + }, + "readOnlyRootFilesystem": { + "type": "boolean" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "exec": { + "properties": { + "command": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "failureThreshold": { + "format": "int32", + "type": "integer" + }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "httpGet": { + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + }, + "scheme": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "initialDelaySeconds": { + 
"format": "int32", + "type": "integer" + }, + "periodSeconds": { + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "properties": { + "host": { + "type": "string" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "x-kubernetes-int-or-string": true + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "stdin": { + "type": "boolean" + }, + "stdinOnce": { + "type": "boolean" + }, + "terminationMessagePath": { + "type": "string" + }, + "terminationMessagePolicy": { + "type": "string" + }, + "tty": { + "type": "boolean" + }, + "volumeDevices": { + "items": { + "properties": { + "devicePath": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "devicePath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "volumeMounts": { + "items": { + "properties": { + "mountPath": { + "type": "string" + }, + "mountPropagation": { + "type": "string" + }, + "name": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "subPath": { + "type": "string" + }, + "subPathExpr": { + "type": "string" + } + }, + "required": [ + "mountPath", + "name" + ], + "type": "object" + }, + "type": "array" + }, + "workingDir": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "nodeName": { + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "os": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "overhead": { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + "x-kubernetes-int-or-string": true + }, + "type": "object" + }, + "preemptionPolicy": { + "type": "string" + }, + "priority": { + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "type": "string" + }, + "readinessGates": { + "items": { + "properties": { + "conditionType": { + "type": "string" + } + }, + "required": [ + "conditionType" + ], + "type": "object" + }, + "type": "array" + }, + "restartPolicy": { + "type": "string" + }, + "runtimeClassName": { + "type": "string" + }, + "schedulerName": { + "type": "string" + }, + "securityContext": { + "properties": { + "fsGroup": { + "format": "int64", + "type": "integer" + }, + "fsGroupChangePolicy": { + "type": "string" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "role": { + "type": "string" + }, + "type": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "type": "object" + }, + "seccompProfile": { + "properties": { + "localhostProfile": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "supplementalGroups": { + "items": { + "format": "int64", + "type": "integer" + }, + "type": "array" + }, + "sysctls": { + "items": { + 
"properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "type": "array" + }, + "windowsOptions": { + "properties": { + "gmsaCredentialSpec": { + "type": "string" + }, + "gmsaCredentialSpecName": { + "type": "string" + }, + "hostProcess": { + "type": "boolean" + }, + "runAsUserName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serviceAccount": { + "type": "string" + }, + "serviceAccountName": { + "type": "string" + }, + "setHostnameAsFQDN": { + "type": "boolean" + }, + "shareProcessNamespace": { + "type": "boolean" + }, + "subdomain": { + "type": "string" + }, + "terminationGracePeriodSeconds": { + "format": "int64", + "type": "integer" + }, + "tolerations": { + "items": { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "tolerationSeconds": { + "format": "int64", + "type": "integer" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "topologySpreadConstraints": { + "items": { + "properties": { + "labelSelector": { + "properties": { + "matchExpressions": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "maxSkew": { + "format": "int32", + "type": "integer" + }, + "minDomains": { + "format": "int32", + "type": "integer" + }, + "topologyKey": { + "type": "string" + }, + "whenUnsatisfiable": { + "type": "string" + } + }, + "required": [ + "maxSkew", + "topologyKey", + "whenUnsatisfiable" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "topologyKey", + "whenUnsatisfiable" + ], + "x-kubernetes-list-type": "map" + }, + "volumes": { + "x-kubernetes-preserve-unknown-fields": true + } + }, + "required": [ + "containers" + ], + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "name", + "selector", + "template" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "required": [ + "templates" + ], + "type": "object" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "Experiment", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.v1alpha1.Rollout": { "properties": { "spec": { "properties": { @@ -47,6 +16688,43 @@ "type": "array", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" } }, "type": "object" @@ -90,6 +16768,43 @@ "type": "array", "x-kubernetes-patch-merge-key": "name", 
"x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" } }, "type": "object" @@ -141,6 +16856,43 @@ "type": "array", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" + }, + "dryRun": { + "items": { + "properties": { + "metricName": { + "type": "string" + } + }, + "required": [ + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" + }, + "measurementRetention": { + "items": { + "properties": { + "limit": { + "format": "int32", + "type": "integer" + }, + "metricName": { + "type": "string" + } + }, + "required": [ + "limit", + "metricName" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge" } }, "type": "object" @@ -495,6 +17247,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -640,6 +17407,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -823,6 +17605,9 @@ "gmsaCredentialSpecName": { "type": "string" }, + "hostProcess": { + "type": "boolean" + }, "runAsUserName": { "type": "string" } @@ -849,6 +17634,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -1356,6 +18156,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -1477,7 +18292,12 @@ ], "type": "object" }, - "type": "array" + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map" }, "readinessProbe": { "properties": { @@ -1496,6 +18316,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -1679,6 +18514,9 @@ "gmsaCredentialSpecName": { "type": "string" }, + "hostProcess": { + "type": "boolean" + }, "runAsUserName": { "type": "string" } @@ -1705,6 +18543,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + 
"service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -2247,6 +19100,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -2392,6 +19260,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -2575,6 +19458,9 @@ "gmsaCredentialSpecName": { "type": "string" }, + "hostProcess": { + "type": "boolean" + }, "runAsUserName": { "type": "string" } @@ -2601,6 +19487,21 @@ "format": "int32", "type": "integer" }, + "grpc": { + "properties": { + "port": { + "format": "int32", + "type": "integer" + }, + "service": { + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, "httpGet": { "properties": { "host": { @@ -2810,6 +19711,10 @@ "format": "int32", "type": "integer" }, + "minDomains": { + "format": "int32", + "type": "integer" + }, "topologyKey": { "type": "string" }, @@ -2834,9 +19739,12 @@ "x-kubernetes-patch-strategy": "merge" }, "volumes": { + "items": { + "x-kubernetes-preserve-unknown-fields": true + }, + "type": "array", "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - "x-kubernetes-preserve-unknown-fields": true + "x-kubernetes-patch-strategy": "merge,retainKeys" } }, "required": [ diff --git a/docs/features/notifications.md b/docs/features/notifications.md index b0a14187ff..3a7dfb2e4e 100644 --- a/docs/features/notifications.md +++ b/docs/features/notifications.md @@ -50,6 +50,15 @@ stringData: Learn more about supported services and configuration settings in services [documentation](../generated/notification-services/overview.md). +## Default Trigger templates + +Currently the following triggers have [built-in templates](https://github.com/argoproj/argo-rollouts/tree/master/manifests/notifications). + +* `on-rollout-completed` when a rollout is finished and all its steps are completed +* `on-rollout-step-completed` when an individual step inside a rollout definition is completed +* `on-rollout-updated` when a rollout definition is changed +* `on-scaling-replica-set` when the number of replicas in a rollout is changed + ## Subscriptions The end-users can start leveraging notifications using `notifications.argoproj.io/subscribe..: ` annotation. @@ -134,3 +143,10 @@ data: ``` Each condition might use several templates. Typically each template is responsible for generating a service-specific notification part. + +### Notification Metrics + +The following prometheus metrics are emitted when notifications are enabled in argo-rollouts. +- notification_send_success is a counter that measures how many times the notification is sent successfully. +- notification_send_error is a counter that measures how many times the notification failed to send. +- notification_send is a histogram that measures performance of sending notification. 
\ No newline at end of file diff --git a/docs/features/specification.md b/docs/features/specification.md index 75c09fcfd6..100ddbb430 100644 --- a/docs/features/specification.md +++ b/docs/features/specification.md @@ -27,7 +27,15 @@ spec: matchLabels: app: guestbook - # Template describes the pods that will be created. Same as deployment + # WorkloadRef holds a reference to a workload that provides the Pod template + # (e.g. Deployment). If used, then do not use the Rollout template property. + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment + + # Template describes the pods that will be created. Same as deployment. + # If used, then do not use the Rollout workloadRef property. template: spec: containers: @@ -60,8 +68,7 @@ spec: # Defaults to 600s progressDeadlineSeconds: 600 - # Whether to abort the update when ProgressDeadlineSeconds - # is exceeded if analysis or experiment is not used. + # Whether to abort the update when ProgressDeadlineSeconds is exceeded. + # Optional and default is false. progressDeadlineAbort: false @@ -197,11 +204,11 @@ spec: # in order to give time for traffic providers to re-target the new pods. # This value is ignored with basic, replica-weighted canary without # traffic routing. - ScaleDownDelaySeconds: 30 + scaleDownDelaySeconds: 30 # Limits the number of old RS that can run at one time before getting # scaled down. Defaults to nil - ScaleDownDelayRevisionLimit: 2 + scaleDownDelayRevisionLimit: 2 # Background analysis to run during a rollout update. Skipped upon # initial deploy of a rollout. +optional @@ -257,6 +264,57 @@ spec: - setCanaryScale: matchTrafficWeight: true + # Sets a header based route with the specified header values. + # Setting a header based route will send 100% of traffic to the canary for requests + # with the specified header, in this case the request header "version":"2" + # (supported only with trafficRouting, for Istio only at the moment) + - setHeaderRoute: + # Name of the route that will be created by Argo Rollouts; this must also be configured + # in spec.strategy.canary.trafficRouting.managedRoutes + name: "header-route-1" + # The matching rules for the header route; if this is missing it acts as a removal of the route. + match: + # headerName is the name of the header to apply the match rules to. + - headerName: "version" + # headerValue must contain exactly one field of exact, regex, or prefix. Not all traffic routers support + # all types + headerValue: + # Exact will only match if the header value is exactly the same + exact: "2" + # Will match the rule if the regular expression matches + regex: "2.0.(.*)" + # prefix will be a prefix match of the header value + prefix: "2.0" + + # Sets up a mirror/shadow based route with the specified match rules. + # The traffic will be mirrored at the configured percentage to the canary service + # during the rollout + # (supported only with trafficRouting, for Istio only at the moment) + - setMirrorRoute: + # Name of the route that will be created by Argo Rollouts; this must also be configured + # in spec.strategy.canary.trafficRouting.managedRoutes + name: "header-route-1" + # The percentage of the matched traffic to mirror to the canary + percentage: 100 + # The matching rules for the mirror route; if this is missing it acts as a removal of the route. + # All conditions inside a single match block have AND semantics, while the list of match blocks have OR semantics.
+ # Each type within a match (method, path, headers) must have one and only one match type (exact, regex, prefix). + # Not all match types (exact, regex, prefix) will be supported by all traffic routers. + match: + - method: # What HTTP method to match + exact: "GET" + regex: "P.*" + prefix: "POST" + path: # What HTTP url paths to match. + exact: "/test" + regex: "/test/.*" + prefix: "/" + headers: + agent-1b: # What HTTP header name to use in the match. + exact: "firefox" + regex: "firefox2(.*)" + prefix: "firefox" + # an inline analysis step - analysis: templates: @@ -286,7 +344,14 @@ spec: # will achieve traffic split via a weighted replica counts between # the canary and stable ReplicaSet. trafficRouting: - + # This is a list of routes that Argo Rollouts has the rights to manage. It is currently only required for + # setMirrorRoute and setHeaderRoute. The order of the managedRoutes array also sets the precedence of the route + # in the traffic router. Argo Rollouts will place these routes, in the order specified, above any routes already + # defined in the traffic router. The names here must match the names from the + # setHeaderRoute and setMirrorRoute steps. + managedRoutes: + - name: set-header + - name: mirror-route # Istio traffic routing configuration istio: # Either virtualService or virtualServices can be configured. @@ -341,4 +406,4 @@ status: You can find examples of Rollouts at: * The [example directory](https://github.com/argoproj/argo-rollouts/tree/master/examples) - * The [Argo Rollouts Demo application](https://github.com/argoproj/rollouts-demo) \ No newline at end of file + * The [Argo Rollouts Demo application](https://github.com/argoproj/rollouts-demo) diff --git a/docs/features/traffic-management/alb.md b/docs/features/traffic-management/alb.md index b315514c30..8255b74852 100644 --- a/docs/features/traffic-management/alb.md +++ b/docs/features/traffic-management/alb.md @@ -5,7 +5,7 @@ ## Overview -[AWS Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) +[AWS Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) (also known as AWS ALB Ingress Controller) enables traffic management through an Ingress object, which configures an AWS Application Load Balancer (ALB) to route traffic to one or more Kubernetes services. ALBs provide advanced traffic splitting capability through the concept of @@ -31,7 +31,7 @@ the desired traffic weights. ## Usage -To configure a Rollout to use the ALB integration and split traffic between the canary and stable +To configure a Rollout to use the ALB integration and split traffic between the canary and stable services during updates, the Rollout should be configured with the following fields: ```yaml @@ -83,9 +83,9 @@ spec: During an update, the rollout controller injects the `alb.ingress.kubernetes.io/actions.` annotation, containing a JSON payload understood by the AWS Load Balancer Controller, directing it -to split traffic between the `canaryService` and `stableService` according to the current canary weight.
-The following is an example of our example Ingress after the rollout has injected the custom action +The following is an example of our example Ingress after the rollout has injected the custom action annotation that splits traffic between the canary-service and stable-service, with a traffic weight of 10 and 90 respectively: @@ -97,16 +97,16 @@ metadata: annotations: kubernetes.io/ingress.class: alb alb.ingress.kubernetes.io/actions.root-service: | - { + { "Type":"forward", - "ForwardConfig":{ - "TargetGroups":[ - { + "ForwardConfig":{ + "TargetGroups":[ + { "Weight":10, "ServiceName":"canary-service", "ServicePort":"80" }, - { + { "Weight":90, "ServiceName":"stable-service", "ServicePort":"80" @@ -158,9 +158,31 @@ spec: ... ``` +### Sticky session + +Because at least two target groups (canary and stable) are used, target group stickiness requires additional configuration: +Sticky session must be activated on the target group via + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +spec: + strategy: + canary: +... + trafficRouting: + alb: + stickinessConfig: + enabled: true + durationSeconds: 3600 +... +``` + +More information can be found in the [AWS ALB API](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/sticky-sessions.html) + ### Zero-Downtime Updates with AWS TargetGroup Verification -Argo Rollouts contains two features to help ensure zero-downtime updates when used with the AWS +Argo Rollouts contains two features to help ensure zero-downtime updates when used with the AWS LoadBalancer controller: TargetGroup IP verification and TargetGroup weight verification. Both features involve the Rollout controller performing additional safety checks to AWS, to verify the changes made to the Ingress object are reflected in the underlying AWS TargetGroup. @@ -185,12 +207,12 @@ errors when the TargetGroup points to pods which have already been scaled down. To mitigate this risk, AWS recommends the use of [pod readiness gate injection](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/deploy/pod_readiness_gate/) -when running the AWS LoadBalancer in IP mode. Readiness gates allow for the AWS LoadBalancer +when running the AWS LoadBalancer in IP mode. Readiness gates allow for the AWS LoadBalancer controller to verify that TargetGroups are accurate before marking newly created Pods as "ready", preventing premature scale down of the older ReplicaSet. Pod readiness gate injection uses a mutating webhook which decides to inject readiness gates when a -pod is created based on the following conditions: +pod is created based on the following conditions: * There exists a service matching the pod labels in the same namespace * There exists at least one target group binding that refers to the matching service @@ -218,7 +240,7 @@ downtime in the following problematic scenario during an update from V1 to V2: 5. V1 ReplicaSet is scaled down to complete the update After step 5, when the V1 ReplicaSet is scaled down, the outdated TargetGroup would still be pointing -to the V1 Pods IPs which no longer exist, causing downtime. +to the V1 Pods IPs which no longer exist, causing downtime. To allow for zero-downtime updates, Argo Rollouts has the ability to perform TargetGroup IP verification as an additional safety measure during an update. 
When this feature is enabled, whenever @@ -309,6 +331,55 @@ include: * [kube2iam](https://github.com/jtblin/kube2iam) * [EKS ServiceAccount IAM Roles](https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) +### Zero-Downtime Updates with Ping-Pong feature + +The AWS-recommended way to achieve zero-downtime updates, described above, is to use [pod readiness gate injection](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/deploy/pod_readiness_gate/) +when running the AWS LoadBalancer in IP mode. The challenge with that approach is that modifying the Service selector labels +(`spec.selector`) does not allow the AWS LoadBalancer controller to mutate the readiness gates. +The Ping-Pong feature helps to deal with that challenge. At any given moment one of the services (e.g. ping) is "wearing the +hat" of the stable service while the other one (e.g. pong) is "wearing the hat" of the canary. At the end of the promotion step 100% of traffic is sent +to the "canary" (e.g. pong), and the Rollout then swaps the hats of the ping and pong services so that pong becomes the stable one. +The Rollout status object records which of ping or pong is currently the stable one (`status.canary.currentPingPong`). +This allows the rollout to use pod readiness gate injection, since the +services do not change their selector labels at the end of the rollout. + +!!! important + + The Ping-Pong feature is available since Argo Rollouts v1.2 + +## Example +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: example-rollout +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.15.4 + ports: + - containerPort: 80 + strategy: + canary: + pingPong: # Indicates that the ping-pong services are enabled + pingService: ping-service + pongService: pong-service + trafficRouting: + alb: + ingress: alb-ingress + servicePort: 80 + steps: + - setWeight: 20 + - pause: {} +``` ### Custom annotations-prefix @@ -330,7 +401,7 @@ spec: annotationPrefix: custom.alb.ingress.kubernetes.io ``` -### Custom kubernetes.io/ingress.class +### Custom Ingress Class By default, Argo Rollout will operate on Ingresses with the annotation: @@ -342,14 +413,22 @@ metadata: kubernetes.io/ingress.class: alb ``` -To configure the controller to operate on Ingresses with different `kubernetes.io/ingress.class` -values, the controller can specify a different value through the `--alb-ingress-classes` flag in +Or with the `ingressClassName`: +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +spec: + ingressClassName: alb +``` + +To configure the controller to operate on Ingresses with a different class name, +you can specify a different value through the `--alb-ingress-classes` flag in the controller command line arguments. Note that the `--alb-ingress-classes` flag can be specified multiple times if the Argo Rollouts controller should operate on multiple values. This may be desired when a cluster has multiple -Ingress controllers that operate on different `kubernetes.io/ingress.class` values. +Ingress controllers that operate on different `kubernetes.io/ingress.class` or `spec.ingressClassName` values. If the controller needs to operate on any Ingress without the `kubernetes.io/ingress.class` -annotation, the flag can be specified with an empty string (e.g.
`--alb-ingress-classes ''`). diff --git a/docs/features/traffic-management/index.md b/docs/features/traffic-management/index.md index 0c3aecca63..8c2e97d152 100644 --- a/docs/features/traffic-management/index.md +++ b/docs/features/traffic-management/index.md @@ -23,6 +23,7 @@ Argo Rollouts enables traffic management by manipulating the Service Mesh resour - [Istio](istio.md) - [Nginx Ingress Controller](nginx.md) - [Service Mesh Interface (SMI)](smi.md) +- [Traefik Proxy](traefik.md) - [Multiple Providers](mixed.md) - File a ticket [here](https://github.com/argoproj/argo-rollouts/issues) if you would like another implementation (or thumbs up it if that issue already exists) @@ -47,3 +48,137 @@ Additionally, the Argo Rollouts controller needs to treat the Rollout object dif Since the traffic is controlled independently by the Service Mesh resources, the controller needs to make a best effort to ensure that the Stable and New ReplicaSets are not overwhelmed by the traffic sent to them. By leaving the Stable ReplicaSet scaled up, the controller is ensuring that the Stable ReplicaSet can handle 100% of the traffic at any time[^1]. The New ReplicaSet follows the same behavior as without traffic management. The new ReplicaSet's replica count is equal to the latest SetWeight step percentage multiplied by the total replica count of the Rollout. This calculation ensures that the canary version does not receive more traffic than it can handle. [^1]: The Rollout has to assume that the application can handle 100% of traffic if it is fully scaled up. It should outsource to the HPA to detect if the Rollout needs more replicas if 100% isn't enough. + +## Traffic routing with managed routes and route precedence +##### Traffic router support: (Istio) + +When traffic routing is enabled, you can also let Argo Rollouts add and manage other routes besides just +controlling the traffic weight to the canary. Two such routing rules are header and mirror based routes. When using these +routes we also have to set a route precedence with the upstream traffic router. We do this using the `spec.strategy.canary.trafficRouting.managedRoutes` +field, which is an array; the order of the items in the array determines the precedence. This set of routes will also be placed, +in the order specified, on top of any other manually defined routes. + +#### WARNING: All routes listed in managed routes will be removed at the end of a rollout or on an abort. Do not put any manually created routes in the list. + + +Here is an example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +spec: + ... + strategy: + canary: + ... + trafficRouting: + managedRoutes: + - name: priority-route-1 + - name: priority-route-2 + - name: priority-route-3 +``` + + +## Traffic routing based on header values for Canary +##### Traffic router support: (Istio) + +Argo Rollouts has the ability to send all traffic to the canary-service based on an HTTP request header value. +The step for the header based traffic routing is `setHeaderRoute` and has a list of matchers for the header. + +`name` - name of the header route. + +`match` - the header matching rules, an array of `headerName, headerValue` pairs. + +`headerName` - name of the header to match. + +`headerValue` - contains exactly one of `exact` (the exact header value), +`regex` (a value in regex format), or `prefix` (a prefix of the value). Not all traffic routers will support +all match types.
+ +To disable header based traffic routing, you just need to specify an empty `setHeaderRoute` with only the name of the route. + +Example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +spec: + ... + strategy: + canary: + canaryService: canary-service + stableService: stable-service + trafficRouting: + managedRoutes: + - name: set-header-1 + istio: + virtualService: + name: rollouts-demo-vsvc + steps: + - setWeight: 20 + - setHeaderRoute: # enable header based traffic routing where + name: "set-header-1" + match: + - headerName: Custom-Header1 # Custom-Header1=Mozilla + headerValue: + exact: Mozilla + - headerName: Custom-Header2 # or Custom-Header2 has a prefix Mozilla + headerValue: + prefix: Mozilla + - headerName: Custom-Header3 # or Custom-Header3 value matches regex: Mozilla(.*) + headerValue: + regex: Mozilla(.*) + - pause: {} + - setHeaderRoute: + name: "set-header-1" # disable header based traffic routing +``` + +## Traffic routing mirroring traffic to canary +##### Traffic router support: (Istio) + +Argo Rollouts has the ability to mirror traffic to the canary-service based on various matching rules. +The step for the mirror based traffic routing is `setMirrorRoute` and has a list of matchers for the request. + +`name` - name of the mirror route. + +`percentage` - what percentage of the matched traffic to mirror. + +`match` - the matching rules for the mirror route; if this is missing it acts as a removal of the route. +All conditions inside a single match block have AND semantics, while the list of match blocks have OR semantics. +Each type within a match (method, path, headers) must have one and only one match type (exact, regex, prefix). +Not all match types (exact, regex, prefix) will be supported by all traffic routers. + +To disable a mirror based traffic route, you just need to specify a `setMirrorRoute` with only the name of the route. + +This example will mirror 35% of HTTP traffic that matches `GET` requests with the URL prefix `/`: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +spec: + ... + strategy: + canary: + canaryService: canary-service + stableService: stable-service + trafficRouting: + managedRoutes: + - name: mirror-route + istio: + virtualService: + name: rollouts-demo-vsvc + steps: + - setCanaryScale: + weight: 25 + - setMirrorRoute: + name: mirror-route + percentage: 35 + match: + - method: + exact: GET + path: + prefix: / + - pause: + duration: 10m + - setMirrorRoute: + name: "mirror-route" # removes mirror based traffic route +``` \ No newline at end of file diff --git a/docs/features/traffic-management/nginx.md b/docs/features/traffic-management/nginx.md index ae87db0d30..414c91cc46 100644 --- a/docs/features/traffic-management/nginx.md +++ b/docs/features/traffic-management/nginx.md @@ -39,6 +39,6 @@ Since the Nginx Ingress controller allows users to configure the annotation pref ## Using Argo Rollouts with multiple NGINX ingress controllers -As a default, the Argo Rollouts controller only operates on ingresses with the `kubernetes.io/ingress.class` annotation set to `nginx`. A user can configure the controller to operate on Ingresses with different `kubernetes.io/ingress.class` values by specifying the `--nginx-ingress-classes` flag. A user can list the `--nginx-ingress-classes` flag multiple times if the Argo Rollouts controller should operate on multiple values. This solves the case where a cluster has multiple Ingress controllers operating on different `kubernetes.io/ingress.class` values.
+As a default, the Argo Rollouts controller only operates on ingresses with the `kubernetes.io/ingress.class` annotation or `spec.ingressClassName` set to `nginx`. A user can configure the controller to operate on Ingresses with different class name by specifying the `--nginx-ingress-classes` flag. A user can list the `--nginx-ingress-classes` flag multiple times if the Argo Rollouts controller should operate on multiple values. This solves the case where a cluster has multiple Ingress controllers operating on different class values. -If the user would like the controller to operate on any Ingress without the `kubernetes.io/ingress.class` annotation, a user should add the following `--nginx-ingress-classes ''`. \ No newline at end of file +If the user would like the controller to operate on any Ingress without the `kubernetes.io/ingress.class` annotation or `spec.ingressClassName`, a user should add the following `--nginx-ingress-classes ''`. diff --git a/docs/features/traffic-management/traefik.md b/docs/features/traffic-management/traefik.md new file mode 100644 index 0000000000..dfba805c4f --- /dev/null +++ b/docs/features/traffic-management/traefik.md @@ -0,0 +1,103 @@ +# Traefik + +You can use the [Traefik Proxy](https://traefik.io/traefik/) for traffic management with Argo Rollouts. + +The [TraefikService](https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-traefikservice) is the object that supports the ability for [weighted round robin load balancing](https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#weighted-round-robin) and [traffic mirroring](https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#mirroring) when using Traefik as ingress. + +## How to integrate TraefikService with Argo Rollouts using it as weighted round robin load balancer + +First, we need to create the TraefikService object using its ability for weighted round robin load balancing. + +```yaml +apiVersion: traefik.containo.us/v1alpha1 +kind: TraefikService +metadata: + name: traefik-service +spec: + weighted: + services: + - name: stable-rollout # k8s service name that you need to create for stable application version + port: 80 + - name: canary-rollout # k8s service name that you need to create for new application version + port: 80 +``` + +Notice, we don't specify the `weight` field. It is necessary to be synced with ArgoCD. If we specify this field and Argo Rollouts controller changes it, then the ArgoCD controller will notice it and will show that this resource is out of sync (if you are using Argo CD to manage your Rollout). + +Secondly, we need to create the Argo Rollouts object. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + replicas: 5 + strategy: + canary: + canaryService: canary-rollout + stableService: stable-rollout + trafficRouting: + traefik: + weightedTraefikServiceName: traefik-service # specify traefikService resource name that we have created before + steps: + - setWeight: 30 + - pause: {} + - setWeight: 40 + - pause: {duration: 10} + - setWeight: 60 + - pause: {duration: 10} + - setWeight: 80 + - pause: {duration: 10} + ... +``` + +## How to integrate TraefikService with Argo Rollouts using it as traffic mirror + +First, we also need to create the TraefikService object but using its ability for traffic mirroring. 
+ +```yaml +apiVersion: traefik.containo.us/v1alpha1 +kind: TraefikService +metadata: + name: traefik-service +spec: + mirroring: + name: some-service + port: 80 + mirrors: + - name: stable-rollout # k8s service name that you need to create for stable application version + port: 80 + - name: canary-rollout # k8s service name that you need to create for new application version + port: 80 +``` + +Notice, we don't specify the `percent` field. It is necessary to be synced with ArgoCD. If we specify this field and Argo Rollouts controller changes it, then the ArgoCD controller will notice it and will show that this resource is out of sync (if you are using Argo CD to manage your Rollout). + +Secondly, we need to create the Argo Rollouts object. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + replicas: 5 + strategy: + canary: + canaryService: canary-rollout + stableService: stable-rollout + trafficRouting: + traefik: + mirrorTraefikServiceName: traefik-service # specify traefikService resource name that we have created before + steps: + - setWeight: 30 + - pause: {} + - setWeight: 40 + - pause: {duration: 10} + - setWeight: 60 + - pause: {duration: 10} + - setWeight: 80 + - pause: {duration: 10} + ... +``` diff --git a/docs/features/vpa-support.md b/docs/features/vpa-support.md new file mode 100644 index 0000000000..48733cd1f8 --- /dev/null +++ b/docs/features/vpa-support.md @@ -0,0 +1,356 @@ + +# Vertical Pod Autoscaling + +Vertical Pod Autoscaling (VPA) reduces the maintenance cost and improve utilization of cluster resources by automating configuration of resource requirements. + +## VPA modes + +There are four modes in which VPAs operate + +1. "Auto": VPA assigns resource requests on pod creation as well as updates them on existing pods using the preferred update mechanism. Currently this is equivalent to "Recreate" (see below). Once restart free ("in-place") update of pod requests is available, it may be used as the preferred update mechanism by the "Auto" mode. +NOTE: This feature of VPA is experimental and may cause downtime for your applications. + +1. "Recreate": VPA assigns resource requests on pod creation as well as updates them on existing pods by evicting them when the requested resources differ significantly from the new recommendation (respecting the Pod Disruption Budget, if defined). This mode should be used rarely, only if you need to ensure that the pods are restarted whenever the resource request changes. Otherwise prefer the "Auto" mode which may take advantage of restart free updates once they are available. +NOTE: This feature of VPA is experimental and may cause downtime for your applications. + +1. "Initial": VPA only assigns resource requests on pod creation and never changes them later. + +1. "Off": VPA does not automatically change resource requirements of the pods. The recommendations are calculated and can be inspected in the VPA object. + + +## Example + +Below is an example of a Vertical Pod Autoscaler with Argo-Rollouts. 
+ +Rollout sample app: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: vpa-demo-rollout + namespace: test-vpa +spec: + replicas: 5 + strategy: + canary: + steps: + - setWeight: 20 + - pause: {duration: 10} + - setWeight: 40 + - pause: {duration: 10} + - setWeight: 60 + - pause: {duration: 10} + - setWeight: 80 + - pause: {duration: 10} + revisionHistoryLimit: 10 + selector: + matchLabels: + app: vpa-demo-rollout + template: + metadata: + labels: + app: vpa-demo-rollout + spec: + containers: + - name: vpa-demo-rollout + image: ravihari/nginx:v1 + ports: + - containerPort: 80 + resources: + requests: + cpu: "5m" + memory: "5Mi" +``` + +VPA configuration for Rollout sample app: + +```yaml +apiVersion: "autoscaling.k8s.io/v1beta2" +kind: VerticalPodAutoscaler +metadata: + name: vpa-rollout-example + namespace: test-vpa +spec: + targetRef: + apiVersion: "argoproj.io/v1alpha1" + kind: Rollout + name: vpa-demo-rollout + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: '*' + minAllowed: + cpu: 5m + memory: 5Mi + maxAllowed: + cpu: 1 + memory: 500Mi + controlledResources: ["cpu", "memory"] +``` + +Describe VPA when initially deployed we donot see recommendations as it will take few mins. + +```yaml +Name: kubengix-vpa +Namespace: test-vpa +Labels: +Annotations: +API Version: autoscaling.k8s.io/v1 +Kind: VerticalPodAutoscaler +Metadata: + Creation Timestamp: 2022-03-14T12:54:06Z + Generation: 1 + Managed Fields: + API Version: autoscaling.k8s.io/v1beta2 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:resourcePolicy: + .: + f:containerPolicies: + f:targetRef: + .: + f:apiVersion: + f:kind: + f:name: + f:updatePolicy: + .: + f:updateMode: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-14T12:54:06Z + Resource Version: 3886 + UID: 4ac64e4c-c84b-478e-92e4-5f072f985971 +Spec: + Resource Policy: + Container Policies: + Container Name: * + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 1 + Memory: 500Mi + Min Allowed: + Cpu: 5m + Memory: 5Mi + Target Ref: + API Version: argoproj.io/v1alpha1 + Kind: Rollout + Name: vpa-demo-rollout + Update Policy: + Update Mode: Auto +Events: +``` + +After few minutes when VPA starts to process and provide recommendation: + +```yaml +Name: kubengix-vpa +Namespace: test-vpa +Labels: +Annotations: +API Version: autoscaling.k8s.io/v1 +Kind: VerticalPodAutoscaler +Metadata: + Creation Timestamp: 2022-03-14T12:54:06Z + Generation: 2 + Managed Fields: + API Version: autoscaling.k8s.io/v1beta2 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:resourcePolicy: + .: + f:containerPolicies: + f:targetRef: + .: + f:apiVersion: + f:kind: + f:name: + f:updatePolicy: + .: + f:updateMode: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-14T12:54:06Z + API Version: autoscaling.k8s.io/v1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:recommendation: + .: + f:containerRecommendations: + Manager: recommender + Operation: Update + Time: 2022-03-14T12:54:52Z + Resource Version: 3950 + UID: 4ac64e4c-c84b-478e-92e4-5f072f985971 +Spec: + Resource Policy: + Container Policies: + Container Name: * + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 1 + Memory: 500Mi + Min Allowed: + Cpu: 5m + Memory: 5Mi + Target Ref: + API Version: 
argoproj.io/v1alpha1 + Kind: Rollout + Name: vpa-demo-rollout + Update Policy: + Update Mode: Auto +Status: + Conditions: + Last Transition Time: 2022-03-14T12:54:52Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: vpa-demo-rollout + Lower Bound: + Cpu: 25m + Memory: 262144k + Target: + Cpu: 25m + Memory: 262144k + Uncapped Target: + Cpu: 25m + Memory: 262144k + Upper Bound: + Cpu: 1 + Memory: 500Mi +Events: +``` + +Here we see the recommendation for cpu, memory with lowerbound, upper bound, Target etc., are provided. If we check the status of the pods.. the older pods with initial configuration would get terminated and newer pods get created. + +```yaml +# kubectl get po -n test-vpa -w +NAME READY STATUS RESTARTS AGE +vpa-demo-rollout-f5df6d577-65f26 1/1 Running 0 17m +vpa-demo-rollout-f5df6d577-d55cx 1/1 Running 0 17m +vpa-demo-rollout-f5df6d577-fdpn2 1/1 Running 0 17m +vpa-demo-rollout-f5df6d577-jg2pw 1/1 Running 0 17m +vpa-demo-rollout-f5df6d577-vlx5x 1/1 Running 0 17m +... + +vpa-demo-rollout-f5df6d577-jg2pw 1/1 Terminating 0 17m +vpa-demo-rollout-f5df6d577-vlx5x 1/1 Terminating 0 17m +vpa-demo-rollout-f5df6d577-jg2pw 0/1 Terminating 0 18m +vpa-demo-rollout-f5df6d577-vlx5x 0/1 Terminating 0 18m +vpa-demo-rollout-f5df6d577-w7tx4 0/1 Pending 0 0s +vpa-demo-rollout-f5df6d577-w7tx4 0/1 Pending 0 0s +vpa-demo-rollout-f5df6d577-w7tx4 0/1 ContainerCreating 0 0s +vpa-demo-rollout-f5df6d577-vdlqq 0/1 Pending 0 0s +vpa-demo-rollout-f5df6d577-vdlqq 0/1 Pending 0 1s +vpa-demo-rollout-f5df6d577-jg2pw 0/1 Terminating 0 18m +vpa-demo-rollout-f5df6d577-jg2pw 0/1 Terminating 0 18m +vpa-demo-rollout-f5df6d577-vdlqq 0/1 ContainerCreating 0 1s +vpa-demo-rollout-f5df6d577-w7tx4 1/1 Running 0 6s +vpa-demo-rollout-f5df6d577-vdlqq 1/1 Running 0 7s +vpa-demo-rollout-f5df6d577-vlx5x 0/1 Terminating 0 18m +vpa-demo-rollout-f5df6d577-vlx5x 0/1 Terminating 0 18m +``` + +If we check the new pod cpu and memory they would be updated as per VPA recommendation: + + +```yaml +# kubectl describe po vpa-demo-rollout-f5df6d577-vdlqq -n test-vpa +Name: vpa-demo-rollout-f5df6d577-vdlqq +Namespace: test-vpa +Priority: 0 +Node: argo-rollouts-control-plane/172.18.0.2 +Start Time: Mon, 14 Mar 2022 12:55:06 +0000 +Labels: app=vpa-demo-rollout + rollouts-pod-template-hash=f5df6d577 +Annotations: vpaObservedContainers: vpa-demo-rollout + vpaUpdates: Pod resources updated by kubengix-vpa: container 0: cpu request, memory request +Status: Running +IP: 10.244.0.17 +IPs: + IP: 10.244.0.17 +Controlled By: ReplicaSet/vpa-demo-rollout-f5df6d577 +Containers: + vpa-demo-rollout: + Container ID: containerd://b79bd88851fe0622d33bc90a1560ca54ef2c27405a3bc9a4fc3a333eef5f9733 + Image: ravihari/nginx:v1 + Image ID: docker.io/ravihari/nginx@sha256:205961b09a80476af4c2379841bf6abec0022101a7e6c5585a88316f7115d17a + Port: 80/TCP + Host Port: 0/TCP + State: Running + Started: Mon, 14 Mar 2022 12:55:11 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 25m + memory: 262144k + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mk4fz (ro) +Conditions: + Type Status + Initialized True + Ready True + ContainersReady True + PodScheduled True +Volumes: + kube-api-access-mk4fz: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true +QoS Class: Burstable +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute 
op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 38s default-scheduler Successfully assigned test-vpa/vpa-demo-rollout-f5df6d577-vdlqq to argo-rollouts-control-plane + Normal Pulled 35s kubelet Container image "ravihari/nginx:v1" already present on machine + Normal Created 35s kubelet Created container vpa-demo-rollout + Normal Started 33s kubelet Started container vpa-demo-rollout +``` + +## Requirements +In order for the VPA to manipulate the rollout, the Kubernetes cluster hosting the rollout CRD needs subresource support for CRDs. This feature was introduced as alpha in Kubernetes version 1.10 and transitioned to beta in Kubernetes version 1.11. If a user wants to use VPA on v1.10, the Kubernetes Cluster operator will need to add a custom feature flag to the API server. After 1.10, the flag is turned on by default. Check out the following [link](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/) for more information on setting the custom feature flag. + +When installing VPA you may need to add the following to the RBAC configuration for the `system:vpa-target-reader` cluster role, as by default VPA may not support rollouts in all versions. + +```yaml + - apiGroups: + - argoproj.io + resources: + - rollouts + - rollouts/scale + - rollouts/status + - replicasets + verbs: + - get + - list + - watch +``` + +Make sure Metrics-Server is installed in the cluster and OpenSSL is up to date so that the latest VPA version can apply recommendations to the pods properly. diff --git a/docs/getting-started/appmesh/index.md b/docs/getting-started/appmesh/index.md new file mode 100644 index 0000000000..7378f85820 --- /dev/null +++ b/docs/getting-started/appmesh/index.md @@ -0,0 +1,123 @@ +# Getting Started - App Mesh + +This guide covers how Argo Rollouts integrates with service meshes managed by [AWS App Mesh](https://docs.aws.amazon.com/app-mesh/latest/userguide/what-is-app-mesh.html). This guide builds upon the concepts of the [basic getting started guide](../../getting-started.md). + +## Requirements +- Kubernetes cluster with AWS App Mesh Controller for K8s installed + +!!! tip + + See the [App Mesh Controller Installation instructions](https://docs.aws.amazon.com/app-mesh/latest/userguide/getting-started-kubernetes.html) on how to get started using App Mesh with Kubernetes. + +## 1. Deploy the Rollout, Services, App Mesh CRD + +When App Mesh is used as the traffic router, the Rollout canary strategy must define the following mandatory fields: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: my-rollout +spec: + strategy: + canary: + # canaryService and stableService are references to Services which the Rollout will modify + # to target the canary ReplicaSet and stable ReplicaSet respectively (required). + canaryService: my-svc-canary + stableService: my-svc-stable + trafficRouting: + appMesh: + # The referenced virtual-service will be used to determine the virtual-router that is + # manipulated to update canary weights. + virtualService: + # name of the virtual-service App Mesh CR + name: my-svc + # Optional set of routes to update. If empty, all routes associated with the virtual-service are updated.
+ routes: + - http-primary + # virtualNodeGroup is a structure to refer App Mesh virtual-node CR corresponding to Canary and Stable versions + virtualNodeGroup: + # canaryVirtualNodeRef refers to virtual-node corresponding to canary version. Rollouts controller will + # update the podSelector of this virtual-node to latest canary pod-hash generated by controller. + canaryVirtualNodeRef: + name: my-vn-canary + # stableVirtualNodeRef refers to virtual-node corresponding to stable version. Rollouts controller will + # update the podSelector of this virtual-node to latest stable pod-hash generated by controller. + stableVirtualNodeRef: + name: my-vn-stable + steps: + - setWeight: 25 + - pause: {} + ... +``` + +In this guide, the two services are: `my-svc-canary` and `my-svc-stable` respectively. There are two +virtual-node CRs corresponding to these services named `my-vn-canary` and `my-vn-stable` +respectively. In addition, there is a virtual-service named `rollout-demo-vsvc` that is provided by a +virtual-router CR named `rollout-demo-vrouter`. This virtual-router need have at least one route with action to forward +traffic to the canary and stable virtual-nodes. Initially weight for canary is set to 0% while for stable it is 100%. +During rollout, controller will modify the weights on route(s) based on the configuraiton defined in +`steps[N].setWeight`. + +To summarize, run the following commands to deploy a service: + +* Two services (stable and canary) +* One service (for VIP and DNS lookup) +* Two App Mesh virtual-nodes (stable and canary) +* One App Mesh virtual-router with routes to virtual-nodes +* One App Mesh virtual-service corresponding to VIP service +* A rollout + +```shell +kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-rollouts/master/docs/getting-started/appmesh/canary-service.yaml +kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-rollouts/master/docs/getting-started/appmesh/canary-rollout.yaml +``` +## 2. Verify service + +First make sure that rollout is stable. + +```shell +kubectl argo rollouts get rollout my-rollout -n argo-examples -w +``` + +Then make sure the service is functional. + +```shell +kubectl -n argo-examples port-forward svc/my-svc 8181:80 +``` + +## 3. Rollout new version + +Now its time to deploy new version. Update the rollout with new image. + +```shell +kubectl argo rollouts set image my-rollout demo=argoproj/rollouts-demo:green -n argo-examples +``` + +Rollout should deploy a new canary revision and update the weights under virtual-router. 
+ +```shell +kubectl get -n argo-examples virtualrouter my-vrouter -o json | jq ".spec.routes[0].httpRoute.action.weightedTargets" +[ + { + "virtualNodeRef": { + "name": "my-vn-canary" + }, + "weight": 25 + }, + { + "virtualNodeRef": { + "name": "my-vn-stable" + }, + "weight": 75 + } +] +``` + +Now manually approve the rollout that is paused indefinitely, and continue watching the routes get updated + +```shell +kubectl argo rollouts promote my-rollout -n argo-examples + +watch -d 'kubectl get -n argo-examples virtualrouter my-vrouter -o json | jq ".spec.routes[0].httpRoute.action.weightedTargets"' +``` diff --git a/docs/getting-started/setup/index.md b/docs/getting-started/setup/index.md index 8066de72a1..7b72bbc14b 100644 --- a/docs/getting-started/setup/index.md +++ b/docs/getting-started/setup/index.md @@ -27,7 +27,7 @@ minikube addons enable ingress Optionally, Prometheus and Grafana can be installed to utilize progressive delivery functionality: -``` +```shell # Install Prometheus kubectl create ns monitoring helm install prometheus prometheus-community/prometheus -n monitoring -f docs/getting-started/setup/values-prometheus.yaml @@ -35,8 +35,14 @@ helm install prometheus prometheus-community/prometheus -n monitoring -f docs/ge # Patch the ingress-nginx-controller pod so that it has the required # prometheus annotations. This allows the pod to be scraped by the # prometheus server. -kubectl patch deploy ingress-nginx-controller -n kube-system -p "$(cat docs/getting-started/setup/ingress-nginx-controller-metrics-scrape.yaml)" +kubectl patch deploy ingress-nginx-controller -n ingress-nginx -p "$(cat docs/getting-started/setup/ingress-nginx-controller-metrics-scrape.yaml)" +``` +!!! note + [For Minikube version 1.18.1 or earlier](https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/#enable-the-ingress-controller), + change the `-n` parameter value (namespace) to `kube-system`. + +```shell # Install grafana along with nginx ingress dashboards helm install grafana grafana/grafana -n monitoring -f docs/getting-started/setup/values-grafana-nginx.yaml diff --git a/docs/index.md b/docs/index.md index 0174ed9f48..1ac7a815ba 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,7 +1,7 @@ # Argo Rollouts - Kubernetes Progressive Delivery Controller ## What is Argo Rollouts? -Argo Rollouts is a [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) and set of [CRDs](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes. +Argo Rollouts is a [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) and set of [CRDs](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes. Argo Rollouts (optionally) integrates with [ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress/) and service meshes, leveraging their traffic shaping abilities to gradually shift traffic to the new version during an update. Additionally, Rollouts can query and interpret metrics from various providers to verify key KPIs and drive automated promotion or rollback during an update. 
@@ -30,7 +30,7 @@ For these reasons, in large scale high-volume production environments, a rolling * Ingress controller integration: NGINX, ALB * Service Mesh integration: Istio, Linkerd, SMI * Simultaneous usage of multiple providers: SMI + NGINX, Istio + ALB, etc. -* Metric provider integration: Prometheus, Wavefront, Kayenta, Web, Kubernetes Jobs, Datadog, New Relic, Graphite +* Metric provider integration: Prometheus, Wavefront, Kayenta, Web, Kubernetes Jobs, Datadog, New Relic, Graphite, InfluxDB ## Quick Start @@ -39,12 +39,12 @@ kubectl create namespace argo-rollouts kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/install.yaml ``` -Follow the full [getting started guide](getting-started.md) to walk through creating and then updating a rollout object. +Follow the full [getting started guide](getting-started.md) to walk through creating and then updating a rollout object. ## How does it work? -Similar to the [deployment object](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), the Argo Rollouts controller will manage the creation, scaling, and deletion of [ReplicaSets](https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/). These ReplicaSets are defined by the `spec.template` field inside the Rollout resource, which uses the same pod template as the deployment object. +Similar to the [deployment object](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), the Argo Rollouts controller will manage the creation, scaling, and deletion of [ReplicaSets](https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/). These ReplicaSets are defined by the `spec.template` field inside the Rollout resource, which uses the same pod template as the deployment object. -When the `spec.template` is changed, that signals to the Argo Rollouts controller that a new ReplicaSet will be introduced. The controller will use the strategy set within the `spec.strategy` field in order to determine how the rollout will progress from the old ReplicaSet to the new ReplicaSet. Once that new ReplicaSet is scaled up (and optionally passes an [Analysis](features/analysis/)), the controller will mark it as "stable". +When the `spec.template` is changed, that signals to the Argo Rollouts controller that a new ReplicaSet will be introduced. The controller will use the strategy set within the `spec.strategy` field in order to determine how the rollout will progress from the old ReplicaSet to the new ReplicaSet. Once that new ReplicaSet is scaled up (and optionally passes an [Analysis](features/analysis/)), the controller will mark it as "stable". If another change occurs in the `spec.template` during a transition from a stable ReplicaSet to a new ReplicaSet (i.e. you change the application version in the middle of a rollout), then the previously new ReplicaSet will be scaled down, and the controller will try to progress the ReplicasSet that reflects the updated `spec.template` field. There is more information on the behaviors of each strategy in the [spec](features/specification/) section. @@ -60,7 +60,7 @@ If another change occurs in the `spec.template` during a transition from a stabl - A user wants to use the normal Rolling Update strategy from the deployment. If a user uses the canary strategy with no steps, the rollout will use the max surge and max unavailable values to roll to the new version. 
([example](https://github.com/argoproj/argo-rollouts/blob/master/examples/rollout-rolling-update.yaml)) -## Examples +## Examples You can see more examples of Rollouts at: diff --git a/docs/migrating.md b/docs/migrating.md index 47a2861e3e..ee603a2999 100644 --- a/docs/migrating.md +++ b/docs/migrating.md @@ -66,6 +66,9 @@ metadata: name: rollout-ref-deployment spec: replicas: 5 + selector: + matchLabels: + app: rollout-ref-deployment workloadRef: # Reference an existing Deployment using workloadRef field apiVersion: apps/v1 kind: Deployment @@ -102,7 +105,7 @@ spec: Consider following if your Deployment runs in production: -**Running Rollout and Deployment side-by-side** +### Running Rollout and Deployment side-by-side After creation Rollout will spinup required number of Pods side-by-side with the Deployment Pods. Rollout won't try to manage existing Deployment Pods. That means you can safely update add Rollout @@ -110,7 +113,7 @@ to the production environment without any interruption but you are going to run Argo-rollouts controller patches the spec of rollout object with an annotation of `rollout.argoproj.io/workload-generation`, which equals the generation of referenced deployment. Users can detect if the rollout matches desired generation of deployment by checking the `workloadObservedGeneration` in the rollout status. -**Traffic Management During Migration** +### Traffic Management During Migration The Rollout offers traffic management functionality that manages routing rules and flows the traffic to different versions of an application. For example [Blue-Green](features/bluegreen.md) deployment strategy manipulates @@ -119,4 +122,35 @@ Kubernetes Service selector and direct production traffic to "green" instances o If you are using this feature then Rollout switches production traffic to Pods that it manages. The switch happens only when the required number of Pod is running and healthy so it is safe in production as well. However, if you want to be extra careful then consider creating a temporal Service or Ingress object to validate Rollout behavior. -Once testing is done delete temporal Service/Ingress and switch rollout to production one. \ No newline at end of file +Once testing is done delete temporal Service/Ingress and switch rollout to production one. + +# Migrating to Deployments + +In case users want to rollback to the deployment kinds from rollouts, there are two scenarios aligned with those in [Migrating to Rollouts](#migrating-to-rollouts). + +* Convert a Rollout resource to a Deployment resource. +* Reference an existing Deployment from a Rollout using `workloadRef` field. + +## Convert Rollout to Deployment + +When converting a Rollout to a Deployment, it involves changing three fields: + +1. Changing the apiVersion from argoproj.io/v1alpha1 to apps/v1 +1. Changing the kind from Rollout to Deployment +1. Remove the rollout strategy in `spec.strategy.canary` or ``spec.strategy.blueGreen`` + + +!!! warning + When migrating a Rollout which is already serving live production traffic, a Deployment should + run next to the rollout before deleting the rollout or scaling down the rollout. + **Not following this approach might result in downtime**. It also allows for the Deployment to be + tested before deleting the original Rollout. + +## Reference Deployment From Rollout + +When a rollout is referencing to a deployment: + +1. Scale-up the Deployment by changing `replicas` field of an existing Rollout to zero. +1. 
Scale-down existing Rollout by changing `replicas` field of an existing Rollout to zero. + +Please refer to [Running Rollout and Deployment side-by-side](#running-rollout-and-deployment-side-by-side) and [Traffic Management During Migration](#traffic-management-during-migration) for caveats. diff --git a/docs/releasing.md b/docs/releasing.md index 4e5b527302..a72f1308e6 100644 --- a/docs/releasing.md +++ b/docs/releasing.md @@ -19,14 +19,15 @@ 1. Update Brew formula: + * Fork the repo https://github.com/argoproj/homebrew-tap + * Run the following commands to update the brew formula: ```bash - git clone git@github.com:argoproj/homebrew-tap.git cd homebrew-tap - git pull ./update.sh kubectl-argo-rollouts $VERSION git commit -am "Update kubectl-argo-rollouts to $VERSION" - git push ``` + * Create a PR with the modified files pointing to upstream/master + * Once the PR is approved by a maintainer, it can be merged. ### Verify diff --git a/docs/requirements.txt b/docs/requirements.txt index f3fda1b012..fa8fc0cf4c 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -mkdocs==1.1.2 -mkdocs-material==7.1.7 +mkdocs==1.2.3 +mkdocs-material==8.1.9 markdown_include==0.6.0 -pygments==2.7.4 \ No newline at end of file +pygments==2.11.2 diff --git a/docs/security.md b/docs/security.md index 38bd8a771b..e4b28f7385 100644 --- a/docs/security.md +++ b/docs/security.md @@ -1,10 +1,21 @@ -# Security +# Security Policy for Argo Rollouts -## Reporting Vulnerabilities +## Reporting a Vulnerability -Please report security vulnerabilities by e-mailing: +If you find a security related bug in Argo Rollouts, we kindly ask you for responsible +disclosure and for giving us appropriate time to react, analyze and develop a +fix to mitigate the found security vulnerability. -* [Jesse_Suen@intuit.com](mailto:Jesse_Suen@intuit.com) -* [Alexander_Matyushentsev@intuit.com](mailto:Alexander_Matyushentsev@intuit.com) -* [Edward_Lee@intuit.com](mailto:Edward_Lee@intuit.com) \ No newline at end of file +Please report vulnerabilities by e-mail to the following address: + +* cncf-argo-security@lists.cncf.io + +All vulnerabilites and associated information will be treated with full confidentiality. + +## Public Disclosure + +We will publish security advisiories using the +[GitHub Security Advisories](https://github.com/argoproj/argo-rollouts/security/advisories) +feature to keep our community well informed, and will credit you for your +findings (unless you prefer to stay anonymous, of course). 
diff --git a/examples/appmesh/canary-rollout.yaml b/examples/appmesh/canary-rollout.yaml new file mode 100644 index 0000000000..5f6f79cdd1 --- /dev/null +++ b/examples/appmesh/canary-rollout.yaml @@ -0,0 +1,71 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: success-rate + namespace: argo-examples +spec: + args: + - name: envoy_cluster_name + metrics: + - name: success-rate + interval: 5m + successCondition: result[0] >= 0.99 + failureLimit: 3 + provider: + prometheus: + address: http://appmesh-prometheus.appmesh-system:9090 + query: | + sum(irate(envoy_cluster_upstream_rq_xx{app="wrk-tester",envoy_cluster_name="{{args.envoy_cluster_name}}",envoy_response_code_class!~"5.*"}[5m])) / + sum(irate(envoy_cluster_upstream_rq_xx{app="wrk-tester",envoy_cluster_name="{{args.envoy_cluster_name}}"}[5m])) + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: my-rollout + namespace: argo-examples +spec: + replicas: 4 + selector: + matchLabels: + app: my-app + template: + metadata: + labels: + app: my-app + spec: + containers: + - name: demo + image: argoproj/rollouts-demo:blue + imagePullPolicy: Always + ports: + - name: http + containerPort: 8080 + strategy: + canary: + canaryService: my-svc-canary + stableService: my-svc-stable + trafficRouting: + appMesh: + virtualService: + name: my-svc + virtualNodeGroup: + canaryVirtualNodeRef: + name: my-vn-canary + stableVirtualNodeRef: + name: my-vn-stable + steps: + - setWeight: 25 + - pause: {} + - setWeight: 50 + - pause: {duration: 10m} + - setWeight: 75 + - pause: {duration: 10m} + # Uncomment below to enable analysis +# analysis: +# templates: +# - templateName: success-rate +# startingStep: 2 +# args: +# - name: envoy_cluster_name +# value: cds_egress_argo-examples_my-vn-canary_argo-examples_http_80 diff --git a/examples/appmesh/canary-service.yaml b/examples/appmesh/canary-service.yaml new file mode 100644 index 0000000000..1523266471 --- /dev/null +++ b/examples/appmesh/canary-service.yaml @@ -0,0 +1,183 @@ +apiVersion: appmesh.k8s.aws/v1beta2 +kind: Mesh +metadata: + name: argo-examples +spec: + namespaceSelector: + matchLabels: + mesh: argo-examples + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: argo-examples + labels: + mesh: argo-examples + appmesh.k8s.aws/sidecarInjectorWebhook: enabled + +--- +# This service is used by virtual-service to resolve initial dns requests done by app container +apiVersion: v1 +kind: Service +metadata: + name: my-svc + namespace: argo-examples +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: my-app + +--- +apiVersion: v1 +kind: Service +metadata: + name: my-svc-canary + namespace: argo-examples +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + # This selector will be updated with the pod-template-hash of the canary ReplicaSet. + app: my-app + +--- +apiVersion: v1 +kind: Service +metadata: + name: my-svc-stable + namespace: argo-examples +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + # This selector will be updated with the pod-template-hash of the stable ReplicaSet. 
+ app: my-app + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + name: my-svc + namespace: argo-examples +spec: + provider: + virtualRouter: + virtualRouterRef: + name: my-vrouter + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + name: my-vrouter + namespace: argo-examples +spec: + listeners: + - portMapping: + port: 80 + protocol: http + routes: + - name: primary + httpRoute: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: my-vn-canary + weight: 0 + - virtualNodeRef: + name: my-vn-stable + weight: 100 + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + name: my-vn-canary + namespace: argo-examples +spec: + podSelector: + matchLabels: + app: my-app + rollouts-pod-template-hash: canary-tbd + listeners: + - portMapping: + port: 80 + protocol: http + serviceDiscovery: + dns: + hostname: my-svc-canary.argo-examples.svc.cluster.local + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + name: my-vn-stable + namespace: argo-examples +spec: + podSelector: + matchLabels: + app: my-app + rollouts-pod-template-hash: stable-tbd + listeners: + - portMapping: + port: 80 + protocol: http + serviceDiscovery: + dns: + hostname: my-svc-stable.argo-examples.svc.cluster.local + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + name: wrk-tester + namespace: argo-examples +spec: + podSelector: + matchLabels: + app: wrk-tester + backends: + - virtualService: + virtualServiceRef: + name: my-svc + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: wrk-tester + namespace: argo-examples + labels: + app: wrk-tester +spec: + replicas: 1 + selector: + matchLabels: + app: wrk-tester + template: + metadata: + name: wrk-tester + labels: + app: wrk-tester + spec: + containers: + - name: wrk + image: argoproj/load-tester:latest + command: + - /bin/sh + - -c + - -x + - "while true; do wrk -t10 -c40 -d2m -s report.lua http://my-svc.argo-examples/color; jq -e '.errors_ratio <= 0.35 and .latency_avg_ms < 100' report.json; done" diff --git a/examples/dashboard.json b/examples/dashboard.json index 05f60be4f6..b73151f669 100644 --- a/examples/dashboard.json +++ b/examples/dashboard.json @@ -28,7 +28,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "custom": {} @@ -107,7 +107,7 @@ }, { "cacheTimeout": null, - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -185,7 +185,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "custom": {} @@ -279,7 +279,7 @@ }, { "collapsed": true, - "datasource": null, + "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, @@ -290,7 +290,7 @@ "panels": [ { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -370,7 +370,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -450,7 +450,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -530,7 +530,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -607,7 +607,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + 
"datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -687,7 +687,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -764,7 +764,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -846,7 +846,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "custom": {} @@ -952,7 +952,7 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "custom": {} @@ -1014,7 +1014,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "custom": {} @@ -1112,7 +1112,7 @@ }, { "collapsed": true, - "datasource": null, + "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, @@ -1122,7 +1122,7 @@ "id": 32, "panels": [ { - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -1186,7 +1186,7 @@ "type": "table" }, { - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -1246,7 +1246,7 @@ "type": "stat" }, { - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -1309,7 +1309,7 @@ "type": "stat" }, { - "datasource": null, + "datasource": "$datasource", "fieldConfig": { "defaults": { "color": { @@ -1382,6 +1382,25 @@ "tags": [], "templating": { "list": [ + { + "current": { + "selected": false, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "definition": "datasource(prometheus)", + "description": "rollout datasource", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, { "allValue": null, "current": { @@ -1389,7 +1408,7 @@ "text": "default", "value": "default" }, - "datasource": null, + "datasource": "$datasource", "definition": "label_values(rollout_info,namespace)", "description": "rollout namespace", "error": null, @@ -1420,7 +1439,7 @@ "text": "", "value": "" }, - "datasource": null, + "datasource": "$datasource", "definition": "label_values(rollout_info{namespace=\"$rollout_namespace\"},name)", "description": "rollout name", "error": null, diff --git a/examples/traffic-routing/istio-mirror.yaml b/examples/traffic-routing/istio-mirror.yaml new file mode 100644 index 0000000000..d56c36add4 --- /dev/null +++ b/examples/traffic-routing/istio-mirror.yaml @@ -0,0 +1,117 @@ +## This examples sets up istio mirroring if running locally using docker for destkop you can add +## istio-host-split.com to your /etc/hosts and point it to 127.0.0.1 to view demo. 
+apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split.com + gateways: + - istio-host-split-gateway + http: + - name: primary + route: + - destination: + host: istio-host-split-stable + weight: 100 + - destination: + host: istio-host-split-canary + weight: 0 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-host-split +spec: + replicas: 4 + strategy: + canary: + canaryService: istio-host-split-canary + stableService: istio-host-split-stable + trafficRouting: + managedRoutes: + - name: mirror-route + istio: + virtualService: + name: istio-host-split-vsvc + routes: + - primary + steps: + - setCanaryScale: + weight: 50 + - setMirrorRoute: + name: mirror-route + percentage: 50 + match: + - method: + exact: POST + path: + prefix: /color + - pause: {} + selector: + matchLabels: + app: istio-host-split + template: + metadata: + labels: + app: istio-host-split + spec: + containers: + - name: istio-host-split + image: argoproj/rollouts-demo:green + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + +--- + +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: istio-host-split-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "istio-host-split.com" + diff --git a/experiments/conditions_test.go b/experiments/conditions_test.go index 71b9f59922..cefbc1c2e7 100644 --- a/experiments/conditions_test.go +++ b/experiments/conditions_test.go @@ -9,6 +9,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/conditions" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func TestUpdateProgressingLastUpdateTime(t *testing.T) { @@ -20,7 +21,7 @@ func TestUpdateProgressingLastUpdateTime(t *testing.T) { Name: "bar", }} prevCond := newCondition(conditions.ReplicaSetUpdatedReason, e) - prevTime := metav1.NewTime(metav1.Now().Add(-10 * time.Second)) + prevTime := metav1.NewTime(timeutil.Now().Add(-10 * time.Second)) prevCond.LastUpdateTime = prevTime prevCond.LastTransitionTime = prevTime e.Status.Conditions = []v1alpha1.ExperimentCondition{ @@ -53,7 +54,7 @@ func TestEnterTimeoutDegradedState(t *testing.T) { Status: v1alpha1.TemplateStatusProgressing, }} e.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(30) - prevTime := metav1.NewTime(metav1.Now().Add(-1 * time.Minute).Truncate(time.Second)) + prevTime := metav1.NewTime(timeutil.Now().Add(-1 * time.Minute).Truncate(time.Second)) e.Status.TemplateStatuses[0].LastTransitionTime = &prevTime rs := templateToRS(e, templates[0], 0) diff --git a/experiments/controller.go b/experiments/controller.go index 59de00f3aa..2b26005971 100644 --- a/experiments/controller.go +++ b/experiments/controller.go @@ -4,9 +4,6 @@ import ( "context" "time" - informersv1 "k8s.io/client-go/informers/core/v1" - listersv1 "k8s.io/client-go/listers/core/v1" - log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" k8serrors 
"k8s.io/apimachinery/pkg/api/errors" @@ -15,8 +12,10 @@ import ( patchtypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" + informersv1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" appslisters "k8s.io/client-go/listers/apps/v1" + listersv1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller" @@ -33,6 +32,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/diff" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" + timeutil "github.com/argoproj/argo-rollouts/utils/time" unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" ) @@ -174,6 +174,11 @@ func NewController(cfg ControllerConfig) *Controller { controllerutil.Enqueue(obj, cfg.RolloutWorkQueue) } controllerutil.EnqueueParentObject(obj, register.RolloutKind, enqueueRollout) + if ex := unstructuredutil.ObjectToExperiment(obj); ex != nil { + logCtx := logutil.WithExperiment(ex) + logCtx.Info("experiment deleted") + controller.metricsServer.Remove(ex.Namespace, ex.Name, logutil.ExperimentKey) + } }, }) @@ -233,7 +238,7 @@ func (ec *Controller) Run(threadiness int, stopCh <-chan struct{}) error { } func (ec *Controller) syncHandler(key string) error { - startTime := time.Now() + startTime := timeutil.Now() namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err diff --git a/experiments/controller_test.go b/experiments/controller_test.go index 960397c2b8..d86889c550 100644 --- a/experiments/controller_test.go +++ b/experiments/controller_test.go @@ -8,10 +8,11 @@ import ( "testing" "time" + timeutil "github.com/argoproj/argo-rollouts/utils/time" + "k8s.io/apimachinery/pkg/util/intstr" "github.com/stretchr/testify/assert" - "github.com/undefinedlabs/go-mpatch" "github.com/argoproj/argo-rollouts/utils/queue" @@ -52,12 +53,12 @@ const ( ) func now() *metav1.Time { - now := metav1.Time{Time: time.Now().Truncate(time.Second)} + now := metav1.Time{Time: timeutil.Now().Truncate(time.Second)} return &now } func secondsAgo(seconds int) *metav1.Time { - ago := metav1.Time{Time: time.Now().Add(-1 * time.Second * time.Duration(seconds)).Truncate(time.Second)} + ago := metav1.Time{Time: timeutil.Now().Add(-1 * time.Second * time.Duration(seconds)).Truncate(time.Second)} return &ago } @@ -114,11 +115,13 @@ func newFixture(t *testing.T, objects ...runtime.Object) *fixture { f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) 
f.enqueuedObjects = make(map[string]int) now := time.Now() - patch, err := mpatch.PatchMethod(time.Now, func() time.Time { + timeutil.Now = func() time.Time { return now - }) - assert.NoError(t, err) - f.unfreezeTime = patch.Unpatch + } + f.unfreezeTime = func() error { + timeutil.Now = time.Now + return nil + } return f } @@ -200,8 +203,8 @@ func newCondition(reason string, experiment *v1alpha1.Experiment) *v1alpha1.Expe return &v1alpha1.ExperimentCondition{ Type: v1alpha1.ExperimentProgressing, Status: corev1.ConditionTrue, - LastUpdateTime: metav1.Now().Rfc3339Copy(), - LastTransitionTime: metav1.Now().Rfc3339Copy(), + LastUpdateTime: timeutil.MetaNow().Rfc3339Copy(), + LastTransitionTime: timeutil.MetaNow().Rfc3339Copy(), Reason: reason, Message: fmt.Sprintf(conditions.ExperimentProgressingMessage, experiment.Name), } @@ -210,8 +213,8 @@ func newCondition(reason string, experiment *v1alpha1.Experiment) *v1alpha1.Expe return &v1alpha1.ExperimentCondition{ Type: v1alpha1.ExperimentProgressing, Status: corev1.ConditionFalse, - LastUpdateTime: metav1.Now().Rfc3339Copy(), - LastTransitionTime: metav1.Now().Rfc3339Copy(), + LastUpdateTime: timeutil.MetaNow().Rfc3339Copy(), + LastTransitionTime: timeutil.MetaNow().Rfc3339Copy(), Reason: reason, Message: fmt.Sprintf(conditions.ExperimentCompletedMessage, experiment.Name), } @@ -220,8 +223,8 @@ func newCondition(reason string, experiment *v1alpha1.Experiment) *v1alpha1.Expe return &v1alpha1.ExperimentCondition{ Type: v1alpha1.ExperimentProgressing, Status: corev1.ConditionFalse, - LastUpdateTime: metav1.Now().Rfc3339Copy(), - LastTransitionTime: metav1.Now().Rfc3339Copy(), + LastUpdateTime: timeutil.MetaNow().Rfc3339Copy(), + LastTransitionTime: timeutil.MetaNow().Rfc3339Copy(), Reason: reason, Message: fmt.Sprintf(conditions.ExperimentRunningMessage, experiment.Name), } @@ -230,8 +233,8 @@ func newCondition(reason string, experiment *v1alpha1.Experiment) *v1alpha1.Expe return &v1alpha1.ExperimentCondition{ Type: v1alpha1.InvalidExperimentSpec, Status: corev1.ConditionTrue, - LastUpdateTime: metav1.Now().Rfc3339Copy(), - LastTransitionTime: metav1.Now().Rfc3339Copy(), + LastUpdateTime: timeutil.MetaNow().Rfc3339Copy(), + LastTransitionTime: timeutil.MetaNow().Rfc3339Copy(), Reason: reason, Message: fmt.Sprintf(conditions.ExperimentTemplateNameEmpty, experiment.Name, 0), } @@ -643,7 +646,7 @@ func (f *fixture) verifyPatchedReplicaSetAddScaleDownDelay(index int, scaleDownD if !ok { assert.Fail(f.t, "Expected Patch action, not %s", action.GetVerb()) } - now := metav1.Now().Add(time.Duration(scaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) + now := timeutil.Now().Add(time.Duration(scaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, now) assert.Equal(f.t, string(patchAction.GetPatch()), patch) } @@ -728,7 +731,7 @@ func TestNoReconcileForDeletedExperiment(t *testing.T) { defer f.Close() e := newExperiment("foo", nil, "10s") - now := metav1.Now() + now := timeutil.MetaNow() e.DeletionTimestamp = &now f.experimentLister = append(f.experimentLister, e) diff --git a/experiments/experiment.go b/experiments/experiment.go index 17e19b7762..aa154d03d9 100644 --- a/experiments/experiment.go +++ b/experiments/experiment.go @@ -5,8 +5,6 @@ import ( "fmt" "time" - "k8s.io/utils/pointer" - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" clientset 
"github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" rolloutslisters "github.com/argoproj/argo-rollouts/pkg/client/listers/rollouts/v1alpha1" @@ -17,6 +15,8 @@ import ( "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" templateutil "github.com/argoproj/argo-rollouts/utils/template" + timeutil "github.com/argoproj/argo-rollouts/utils/time" + log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/kubernetes" appslisters "k8s.io/client-go/listers/apps/v1" v1 "k8s.io/client-go/listers/core/v1" + "k8s.io/utils/pointer" ) const ( @@ -99,7 +100,7 @@ func (ec *experimentContext) reconcile() *v1alpha1.ExperimentStatus { } for _, analysis := range ec.ex.Spec.Analyses { - ec.reconcileAnalysisRun(analysis) + ec.reconcileAnalysisRun(analysis, ec.ex.Spec.DryRun, ec.ex.Spec.MeasurementRetention) } newStatus := ec.calculateStatus() @@ -123,7 +124,7 @@ func (ec *experimentContext) reconcileTemplate(template v1alpha1.TemplateSpec) { } prevStatus := templateStatus.DeepCopy() desiredReplicaCount := experimentutil.CalculateTemplateReplicasCount(ec.ex, template) - now := metav1.Now() + now := timeutil.MetaNow() rs := ec.templateRSs[template.Name] @@ -348,7 +349,7 @@ func calculateEnqueueDuration(ex *v1alpha1.Experiment, newStatus *v1alpha1.Exper } } deadlineSeconds := defaults.GetExperimentProgressDeadlineSecondsOrDefault(ex) - now := time.Now() + now := timeutil.Now() for _, template := range ex.Spec.Templates { // Set candidate to the earliest of LastTransitionTime + progressDeadlineSeconds ts := experimentutil.GetTemplateStatus(ex.Status, template.Name) @@ -370,7 +371,7 @@ func calculateEnqueueDuration(ex *v1alpha1.Experiment, newStatus *v1alpha1.Exper // reconcileAnalysisRun reconciles a single analysis run, creating or terminating it as necessary. // Updates the analysis run statuses, which may subsequently fail the experiment. -func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef) { +func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) { logCtx := ec.log.WithField("analysis", analysis.Name) logCtx.Infof("Reconciling analysis") prevStatus := experimentutil.GetAnalysisRunStatus(ec.ex.Status, analysis.Name) @@ -426,7 +427,7 @@ func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAn logCtx.Warnf("Skipping AnalysisRun creation for analysis %s: experiment is terminating", analysis.Name) return } - run, err := ec.createAnalysisRun(analysis) + run, err := ec.createAnalysisRun(analysis, dryRunMetrics, measurementRetentionMetrics) if err != nil { msg := fmt.Sprintf("Failed to create AnalysisRun for analysis '%s': %v", analysis.Name, err.Error()) newStatus.Phase = v1alpha1.AnalysisPhaseError @@ -473,13 +474,13 @@ func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAn // createAnalysisRun creates the analysis run. If an existing runs exists with same name, is // semantically equal, and is not complete, returns the existing one, otherwise creates a new // run with a collision counter increase. 
-func (ec *experimentContext) createAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef) (*v1alpha1.AnalysisRun, error) { +func (ec *experimentContext) createAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) (*v1alpha1.AnalysisRun, error) { analysisRunIf := ec.argoProjClientset.ArgoprojV1alpha1().AnalysisRuns(ec.ex.Namespace) args, err := ec.ResolveAnalysisRunArgs(analysis.Args) if err != nil { return nil, err } - run, err := ec.newAnalysisRun(analysis, args) + run, err := ec.newAnalysisRun(analysis, args, dryRunMetrics, measurementRetentionMetrics) if err != nil { return nil, err } @@ -511,7 +512,7 @@ func (ec *experimentContext) calculateStatus() *v1alpha1.ExperimentStatus { templateStatus, templateMessage := ec.assessTemplates() analysesStatus, analysesMessage := ec.assessAnalysisRuns() if templateStatus == v1alpha1.AnalysisPhaseRunning && ec.newStatus.AvailableAt == nil { - now := metav1.Now() + now := timeutil.MetaNow() ec.newStatus.AvailableAt = &now ec.log.Infof("Marked AvailableAt: %v", now) } @@ -615,7 +616,7 @@ func (ec *experimentContext) assessAnalysisRuns() (v1alpha1.AnalysisPhase, strin } // newAnalysisRun generates an AnalysisRun from the experiment and template -func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, args []v1alpha1.Argument) (*v1alpha1.AnalysisRun, error) { +func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) (*v1alpha1.AnalysisRun, error) { if analysis.ClusterScope { clusterTemplate, err := ec.clusterAnalysisTemplateLister.Get(analysis.TemplateName) @@ -624,7 +625,8 @@ func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysis } name := fmt.Sprintf("%s-%s", ec.ex.Name, analysis.Name) - run, err := analysisutil.NewAnalysisRunFromClusterTemplate(clusterTemplate, args, name, "", ec.ex.Namespace) + clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{clusterTemplate} + run, err := analysisutil.NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, dryRunMetrics, measurementRetentionMetrics, name, "", ec.ex.Namespace) if err != nil { return nil, err } @@ -641,7 +643,8 @@ func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysis } name := fmt.Sprintf("%s-%s", ec.ex.Name, analysis.Name) - run, err := analysisutil.NewAnalysisRunFromTemplate(template, args, name, "", ec.ex.Namespace) + analysisTemplates := []*v1alpha1.AnalysisTemplate{template} + run, err := analysisutil.NewAnalysisRunFromTemplates(analysisTemplates, nil, args, dryRunMetrics, measurementRetentionMetrics, name, "", ec.ex.Namespace) if err != nil { return nil, err } diff --git a/experiments/experiment_test.go b/experiments/experiment_test.go index ebb7e7bea3..60c04e0723 100644 --- a/experiments/experiment_test.go +++ b/experiments/experiment_test.go @@ -7,23 +7,24 @@ import ( "testing" "time" - "k8s.io/utils/pointer" - "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" kubeinformers "k8s.io/client-go/informers" k8sfake "k8s.io/client-go/kubernetes/fake" kubetesting "k8s.io/client-go/testing" + "k8s.io/utils/pointer" 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" - informers "github.com/argoproj/argo-rollouts/pkg/client/informers/externalversions" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/record" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func newTestContext(ex *v1alpha1.Experiment, objects ...runtime.Object) *experimentContext { @@ -123,7 +124,7 @@ func TestRemoveScaleDownDelayFromRS(t *testing.T) { cond := conditions.NewExperimentConditions(v1alpha1.ExperimentProgressing, corev1.ConditionTrue, conditions.NewRSAvailableReason, "Experiment \"foo\" is running.") e.Status.Conditions = append(e.Status.Conditions, *cond) rs := templateToRS(e, templates[0], 1) - rs.ObjectMeta.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = metav1.Now().Add(600 * time.Second).UTC().Format(time.RFC3339) + rs.ObjectMeta.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = timeutil.Now().Add(600 * time.Second).UTC().Format(time.RFC3339) e.Status.TemplateStatuses = []v1alpha1.TemplateStatus{ generateTemplatesStatus("bar", 1, 1, v1alpha1.TemplateStatusSuccessful, now()), } @@ -160,7 +161,7 @@ func TestScaleDownRSAfterFinish(t *testing.T) { cond := conditions.NewExperimentConditions(v1alpha1.ExperimentProgressing, corev1.ConditionTrue, conditions.NewRSAvailableReason, "Experiment \"foo\" is running.") e.Status.Conditions = append(e.Status.Conditions, *cond) - inThePast := metav1.Now().Add(-10 * time.Second).UTC().Format(time.RFC3339) + inThePast := timeutil.Now().Add(-10 * time.Second).UTC().Format(time.RFC3339) rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inThePast rs2.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inThePast @@ -219,9 +220,9 @@ func TestNoPatch(t *testing.T) { Type: v1alpha1.ExperimentProgressing, Reason: conditions.NewRSAvailableReason, Message: fmt.Sprintf(conditions.ExperimentRunningMessage, e.Name), - LastTransitionTime: metav1.Now(), + LastTransitionTime: timeutil.MetaNow(), Status: corev1.ConditionTrue, - LastUpdateTime: metav1.Now(), + LastUpdateTime: timeutil.MetaNow(), }} e.Status.AvailableAt = now() @@ -243,7 +244,7 @@ func TestSuccessAfterDurationPasses(t *testing.T) { templates := generateTemplates("bar", "baz") e := newExperiment("foo", templates, "5s") - tenSecondsAgo := metav1.Now().Add(-10 * time.Second) + tenSecondsAgo := timeutil.Now().Add(-10 * time.Second) e.Status.AvailableAt = &metav1.Time{Time: tenSecondsAgo} e.Status.Phase = v1alpha1.AnalysisPhaseRunning e.Status.TemplateStatuses = []v1alpha1.TemplateStatus{ @@ -278,7 +279,7 @@ func TestSuccessAfterDurationPasses(t *testing.T) { func TestDontRequeueWithoutDuration(t *testing.T) { templates := generateTemplates("bar") ex := newExperiment("foo", templates, "") - ex.Status.AvailableAt = &metav1.Time{Time: metav1.Now().Add(-10 * time.Second)} + ex.Status.AvailableAt = &metav1.Time{Time: timeutil.MetaNow().Add(-10 * time.Second)} ex.Status.TemplateStatuses = []v1alpha1.TemplateStatus{ generateTemplatesStatus("bar", 1, 1, v1alpha1.TemplateStatusRunning, now()), } @@ -303,7 +304,7 @@ func TestRequeueAfterDuration(t *testing.T) { templates := generateTemplates("bar") ex := newExperiment("foo", templates, "") ex.Spec.Duration = "30s" - ex.Status.AvailableAt = &metav1.Time{Time: metav1.Now().Add(-10 * time.Second)} + ex.Status.AvailableAt = &metav1.Time{Time: timeutil.MetaNow().Add(-10 * 
time.Second)} ex.Status.TemplateStatuses = []v1alpha1.TemplateStatus{ generateTemplatesStatus("bar", 1, 1, v1alpha1.TemplateStatusRunning, now()), } @@ -318,7 +319,7 @@ func TestRequeueAfterDuration(t *testing.T) { // ensures we are enqueued around ~20 seconds twentySeconds := time.Second * time.Duration(20) delta := math.Abs(float64(twentySeconds - duration)) - assert.True(t, delta < float64(100*time.Millisecond), "") + assert.True(t, delta < float64(150*time.Millisecond), "") } exCtx.reconcile() assert.True(t, enqueueCalled) @@ -332,7 +333,7 @@ func TestRequeueAfterProgressDeadlineSeconds(t *testing.T) { ex.Status.TemplateStatuses = []v1alpha1.TemplateStatus{ generateTemplatesStatus("bar", 0, 0, v1alpha1.TemplateStatusProgressing, now()), } - now := metav1.Now() + now := timeutil.MetaNow() ex.Status.TemplateStatuses[0].LastTransitionTime = &now exCtx := newTestContext(ex) rs1 := templateToRS(ex, ex.Spec.Templates[0], 0) @@ -345,7 +346,7 @@ func TestRequeueAfterProgressDeadlineSeconds(t *testing.T) { // ensures we are enqueued around 10 minutes tenMinutes := time.Second * time.Duration(600) delta := math.Abs(float64(tenMinutes - duration)) - assert.True(t, delta < float64(100*time.Millisecond)) + assert.True(t, delta < float64(150*time.Millisecond)) } exCtx.reconcile() assert.True(t, enqueueCalled) @@ -414,6 +415,32 @@ func TestFailAddScaleDownDelay(t *testing.T) { assert.Equal(t, newStatus.Phase, v1alpha1.AnalysisPhaseError) } +func TestFailAddScaleDownDelayIsConflict(t *testing.T) { + templates := generateTemplates("bar") + ex := newExperiment("foo", templates, "") + ex.Spec.ScaleDownDelaySeconds = pointer.Int32Ptr(0) + ex.Status.TemplateStatuses = []v1alpha1.TemplateStatus{ + generateTemplatesStatus("bar", 1, 1, v1alpha1.TemplateStatusRunning, now()), + } + rs := templateToRS(ex, templates[0], 1) + rs.Spec.Replicas = pointer.Int32(0) + + exCtx := newTestContext(ex, rs) + exCtx.templateRSs["bar"] = rs + + fakeClient := exCtx.kubeclientset.(*k8sfake.Clientset) + updateCalled := false + fakeClient.PrependReactor("update", "replicasets", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + updateCalled = true + return true, nil, k8serrors.NewConflict(schema.GroupResource{}, "guestbook", errors.New("intentional-error")) + }) + newStatus := exCtx.reconcile() + assert.True(t, updateCalled) + assert.Equal(t, v1alpha1.TemplateStatusRunning, newStatus.TemplateStatuses[0].Status) + assert.Equal(t, "", newStatus.TemplateStatuses[0].Message) + assert.Equal(t, newStatus.Phase, v1alpha1.AnalysisPhaseRunning) +} + // TestDeleteOutdatedService verifies that outdated service for Template in templateServices map is deleted and new service is created func TestDeleteOutdatedService(t *testing.T) { templates := generateTemplates("bar") diff --git a/experiments/replicaset.go b/experiments/replicaset.go index 5d97eb15c1..d91843a03a 100644 --- a/experiments/replicaset.go +++ b/experiments/replicaset.go @@ -6,23 +6,24 @@ import ( "fmt" "time" - "github.com/argoproj/argo-rollouts/utils/defaults" - + log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" patchtypes "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/controller" labelsutil "k8s.io/kubernetes/pkg/util/labels" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/conditions" + 
"github.com/argoproj/argo-rollouts/utils/defaults" experimentutil "github.com/argoproj/argo-rollouts/utils/experiment" + "github.com/argoproj/argo-rollouts/utils/hash" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" - log "github.com/sirupsen/logrus" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -160,7 +161,7 @@ func newReplicaSetFromTemplate(experiment *v1alpha1.Experiment, template v1alpha delete(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey) } } - podHash := controller.ComputeHash(&newRSTemplate, collisionCount) + podHash := hash.ComputePodTemplateHash(&newRSTemplate, collisionCount) newRSTemplate.Labels = labelsutil.CloneAndAddLabel(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey, podHash) // Add podTemplateHash label to selector. @@ -227,7 +228,7 @@ func (ec *experimentContext) addScaleDownDelay(rs *appsv1.ReplicaSet) (bool, err } } - deadline := metav1.Now().Add(scaleDownDelaySeconds * time.Second).UTC().Format(time.RFC3339) + deadline := timeutil.MetaNow().Add(scaleDownDelaySeconds * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, deadline) _, err := ec.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{}) if err == nil { @@ -267,7 +268,11 @@ func (ec *experimentContext) scaleReplicaSetAndRecordEvent(rs *appsv1.ReplicaSet } scaled, newRS, err := ec.scaleReplicaSet(rs, newScale, scalingOperation) if err != nil { - // TODO(jessesuen): gracefully handle conflict issues + if k8serrors.IsConflict(err) { + ec.log.Warnf("Retrying scaling of ReplicaSet '%s': %s", rs.Name, err) + ec.enqueueExperimentAfter(ec.ex, time.Second) + return false, nil, nil + } msg := fmt.Sprintf("Failed to scale %s %s: %v", rs.Name, scalingOperation, err) ec.recorder.Warnf(ec.ex, record.EventOptions{EventReason: "ReplicaSetUpdateError"}, msg) } else { diff --git a/go.mod b/go.mod index 4e5eb688da..ca2591fc13 100644 --- a/go.mod +++ b/go.mod @@ -1,89 +1,205 @@ module github.com/argoproj/argo-rollouts -go 1.16 +go 1.18 require ( - github.com/antonmedv/expr v1.8.9 - github.com/argoproj/notifications-engine v0.2.1-0.20210525191332-e8e293898477 - github.com/argoproj/pkg v0.9.0 - github.com/aws/aws-sdk-go-v2/config v1.8.1 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.6.1 + github.com/antonmedv/expr v1.9.0 + github.com/argoproj/notifications-engine v0.3.1-0.20220129012210-32519f8f68ec + github.com/argoproj/pkg v0.13.6 + github.com/aws/aws-sdk-go-v2 v1.16.7 + github.com/aws/aws-sdk-go-v2/config v1.15.14 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.6 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.18.8 github.com/blang/semver v3.5.1+incompatible - github.com/evanphx/json-patch/v5 v5.2.0 + github.com/evanphx/json-patch/v5 v5.6.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-openapi/spec v0.19.5 github.com/gogo/protobuf v1.3.2 - github.com/golang/mock v1.4.4 - github.com/golang/protobuf v1.4.3 - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/golang/mock v1.6.0 + github.com/golang/protobuf v1.5.2 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/golang-lru v0.5.4 // indirect - 
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a - github.com/lunixbochs/vtclean v1.0.0 // indirect - github.com/mitchellh/mapstructure v1.3.3 - github.com/newrelic/newrelic-client-go v0.49.0 + github.com/influxdata/influxdb-client-go/v2 v2.9.1 + github.com/juju/ansiterm v1.0.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/newrelic/newrelic-client-go v0.86.5 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.10.0 + github.com/prometheus/client_golang v1.12.2-0.20220620141757-4ad265f1b4ee github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.21.0 - github.com/servicemeshinterface/smi-sdk-go v0.4.1 - github.com/sirupsen/logrus v1.7.0 - github.com/soheilhy/cmux v0.1.4 + github.com/prometheus/common v0.36.0 + github.com/servicemeshinterface/smi-sdk-go v0.5.0 + github.com/sirupsen/logrus v1.8.1 + github.com/soheilhy/cmux v0.1.5 github.com/spaceapegames/go-wavefront v1.8.1 - github.com/spf13/cobra v1.1.3 - github.com/stretchr/testify v1.7.0 + github.com/spf13/cobra v1.5.0 + github.com/stretchr/testify v1.8.0 github.com/tj/assert v0.0.3 - github.com/undefinedlabs/go-mpatch v1.0.6 github.com/valyala/fasttemplate v1.2.1 - google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a - google.golang.org/grpc v1.33.1 - google.golang.org/grpc/examples v0.0.0-20210331235824-f6bb3972ed15 // indirect - google.golang.org/protobuf v1.25.0 + google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d + google.golang.org/grpc v1.47.0 + google.golang.org/protobuf v1.28.0 gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/api v0.21.0 - k8s.io/apiextensions-apiserver v0.21.0 - k8s.io/apimachinery v0.21.0 - k8s.io/apiserver v0.21.0 - k8s.io/cli-runtime v0.21.0 - k8s.io/client-go v0.21.0 - k8s.io/code-generator v0.21.0 - k8s.io/component-base v0.21.0 - k8s.io/klog/v2 v2.8.0 - k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 - k8s.io/kubectl v0.21.0 - k8s.io/kubernetes v1.21.0 - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + k8s.io/api v0.24.2 + k8s.io/apiextensions-apiserver v0.24.2 + k8s.io/apimachinery v0.24.2 + k8s.io/apiserver v0.24.2 + k8s.io/cli-runtime v0.24.2 + k8s.io/client-go v0.24.2 + k8s.io/code-generator v0.24.2 + k8s.io/component-base v0.24.2 + k8s.io/klog/v2 v2.70.1 + k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 + k8s.io/kubectl v0.24.2 + k8s.io/kubernetes v1.24.2 + k8s.io/utils v0.0.0-20220706174534-f6158b442e7c + +) + +require ( + cloud.google.com/go/compute v1.7.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.27 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.12.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 // indirect + 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 // indirect + github.com/aws/smithy-go v1.12.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/deepmap/oapi-codegen v1.11.0 // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.21.1 // indirect + github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 // indirect + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-github/v41 v41.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gregdel/pushover v1.1.0 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/lunixbochs/vtclean v1.0.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 
// indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo v1.16.4 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.13 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/russross/blackfriday v1.6.0 // indirect + github.com/slack-go/slack v0.11.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fastjson v1.6.3 // indirect + github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 // indirect + github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9 // indirect + golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect + golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect + golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect + golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + golang.org/x/tools v0.1.10 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect + gomodules.xyz/notify v0.1.1 // indirect + google.golang.org/appengine v1.6.7 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/cluster-bootstrap v0.24.2 // indirect + k8s.io/component-helpers v0.24.2 // indirect + k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect + sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect + sigs.k8s.io/kustomize/api v0.11.5 // indirect + sigs.k8s.io/kustomize/kyaml v0.13.7 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) replace ( github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 - github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0 - k8s.io/api => k8s.io/api v0.21.0 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.0 - k8s.io/apimachinery => k8s.io/apimachinery v0.21.0 - k8s.io/apiserver => k8s.io/apiserver v0.21.0 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.0 - k8s.io/client-go => k8s.io/client-go v0.21.0 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.0 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.0 - k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0 - k8s.io/component-base => k8s.io/component-base v0.21.0 - k8s.io/component-helpers => k8s.io/component-helpers v0.21.0 - k8s.io/controller-manager => k8s.io/controller-manager v0.21.0 - k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.0 - k8s.io/kube-controller-manager => 
k8s.io/kube-controller-manager v0.21.0 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.0 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.0 - k8s.io/kubectl => k8s.io/kubectl v0.21.0 - k8s.io/kubelet => k8s.io/kubelet v0.21.0 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.0 - k8s.io/metrics => k8s.io/metrics v0.21.0 - k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0 - k8s.io/sample-controller => k8s.io/sample-controller v0.21.0 + k8s.io/api v0.0.0 => k8s.io/api v0.24.2 + k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.24.2 + k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.24.2 + k8s.io/apiserver v0.0.0 => k8s.io/apiserver v0.24.2 + k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.24.2 + k8s.io/client-go v0.0.0 => k8s.io/client-go v0.24.2 + k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2 + k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.24.2 + k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.24.2 + k8s.io/component-base v0.0.0 => k8s.io/component-base v0.24.2 + k8s.io/component-helpers v0.0.0 => k8s.io/component-helpers v0.24.2 + k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2 + k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.24.2 + k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2 + k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2 + k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2 + k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2 + k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2 + k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.24.2 + k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2 + k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2 + k8s.io/metrics v0.0.0 => k8s.io/metrics v0.24.2 + k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.24.2 + k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.24.2 + k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2 ) diff --git a/go.sum b/go.sum index 3784ded5aa..a2f53996ca 100644 --- a/go.sum +++ b/go.sum @@ -1,35 +1,54 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod 
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.55.0/go.mod h1:ZHmoY+/lIMNkN2+fBmuTiqZ4inFhvQad8ft7MT8IV5Y= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.58.0 h1:vtAfVc723K3xKq1BQydk/FyCldnaNFhGhpJxaJzgRMQ= -cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.2.0/go.mod h1:iISCjWnTpnoJT1R287xRdjvQHJrxQOpeah4phb5D3h0= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -38,460 +57,399 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU= -code.gitea.io/sdk/gitea v0.12.1/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= -code.gitea.io/sdk/gitea v0.13.1/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= -contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= -contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= -contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-service-bus-go v0.10.1/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to= -github.com/Azure/azure-storage-blob-go v0.9.0/go.mod h1:8UBPbiOhrMQ4pLPi3gA1tXnpjrS76UYE/fo5A40vf4g= -github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= -github.com/Azure/go-amqp v0.12.7/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm 
v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= +github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= +github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= 
+github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= 
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 h1:prBTRx78AQnXzivNT9Crhu564W/zPPr3ibSlpT9xKcE= github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60/go.mod h1:rjP7sIipbZcagro/6TCk6X0ZeFT2eyudH5+fve/cbBA= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214 h1:MdZskg1II+YVe+9ss935i8+paqqf4KEuYcTYUWSwABI= +github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214/go.mod h1:rjP7sIipbZcagro/6TCk6X0ZeFT2eyudH5+fve/cbBA= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod 
h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.8.9 h1:O9stiHmHHww9b4ozhPx7T6BK7fXfOCHJ8ybxf0833zw= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= -github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= -github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/antonmedv/expr v1.9.0 h1:j4HI3NHEdgDnN9p6oI6Ndr0G5QryMY0FNxT4ONrFDGU= +github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/appscode/go v0.0.0-20190808133642-1d4ef1f1c1e0/go.mod h1:iy07dV61Z7QQdCKJCIvUoDL21u6AIceRhZzyleh2ymc= -github.com/argoproj/notifications-engine v0.2.1-0.20210525191332-e8e293898477 h1:mcfUn59PKafSlbkZ39+aQAIzoAsYzjVR61O6Ns/Dfzo= -github.com/argoproj/notifications-engine v0.2.1-0.20210525191332-e8e293898477/go.mod h1:rKhm9LtebGKgLA/UtPtBeRUrrS/CT0U5az1jSfUiipw= -github.com/argoproj/pkg v0.9.0 h1:PfWWYykfcEQdN0g41XLbVh/aonTjD+dPkvDp3hwpLYM= -github.com/argoproj/pkg v0.9.0/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA= +github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE= +github.com/argoproj/notifications-engine v0.3.1-0.20220129012210-32519f8f68ec h1:ulv8ieYQZLyQrTVR4za1ucLFnemS0Dksz8y5e91xxak= +github.com/argoproj/notifications-engine v0.3.1-0.20220129012210-32519f8f68ec/go.mod h1:QF4tr3wfWOnhkKSaRpx7k/KEErQAh8iwKQ2pYFu/SfA= +github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM= +github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api 
v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.31.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.33.16/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= -github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2 v1.9.0 h1:+S+dSqQCN3MSU5vJRu1HqHrq00cJn6heIMU7X9hcsoo= -github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.8.1 h1:AcAenV2NVwOViG+3ts73uT08L1olN4NBNNz7lUlHSUo= -github.com/aws/aws-sdk-go-v2/config v1.8.1/go.mod h1:AQtpYfVYjuuft4Dgh0jGSkPQJ9MvmK9vXfSub7oSXlI= -github.com/aws/aws-sdk-go-v2/credentials v1.4.1 h1:oDiUP50hKRwC6xAgESAj46lgL2prJRZQWnCBzn+TU/c= -github.com/aws/aws-sdk-go-v2/credentials v1.4.1/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0 h1:OxTAgH8Y4BXHD6PGCJ8DHx2kaZPCQfSTqmDsdRZFezE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2 h1:d95cddM3yTm4qffj3P6EnP+TzX1SSkWaQypXSgT/hpA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0 h1:XO1uX7dQKWfD0WzycEfz+bL/7rl0SsQ05VJwLPWGzGM= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= 
-github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.6.1 h1:mGc8UvJS4XJv8Tp7Doxlx2p3vfwPx46K9zg+9s9szPE= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.6.1/go.mod h1:lGKz4aJbqGX+pgyXG47ZBAJPjwrlA5+TJsAuJ2+aE2g= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0 h1:VNJ5NLBteVXEwE2F1zEXVmyIH58mZ6kIQGJoC7C+vkg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.0 h1:sHXMIKYS6YiLPzmKSvDpPmOpJDHxmAUgbiF49YNVztg= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.0 h1:1at4e5P+lvHNl2nUktdM2/v+rpICg/QSEr9TO/uW9vU= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM= -github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.39/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go-v2 v1.16.7 h1:zfBwXus3u14OszRxGcqCDS4MfMCv10e8SMJ2r8Xm0Ns= +github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw= +github.com/aws/aws-sdk-go-v2/config v1.15.14 h1:+BqpqlydTq4c2et9Daury7gE+o67P4lbk7eybiCBNc4= +github.com/aws/aws-sdk-go-v2/config v1.15.14/go.mod h1:CQBv+VVv8rR5z2xE+Chdh5m+rFfsqeY4k0veEZeq6QM= +github.com/aws/aws-sdk-go-v2/credentials v1.12.9 h1:DloAJr0/jbvm0iVRFDFh8GlWxrOd9XKyX82U+dfVeZs= +github.com/aws/aws-sdk-go-v2/credentials v1.12.9/go.mod h1:2Vavxl1qqQXJ8MUcQZTsIEW8cwenFCWYXtLRPba3L/o= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8 h1:VfBdn2AxwMbFyJN/lF/xuT3SakomJ86PZu3rCxb5K0s= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8/go.mod h1:oL1Q3KuCq1D4NykQnIvtRiBGLUXhcpY5pl6QZB2XEPU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14 h1:2C0pYHcUBmdzPj+EKNC4qj97oK6yjrUhc1KoSodglvk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8 h1:2J+jdlBJWEmTyAwC82Ym68xCykIvnSnIN18b8xHGlcc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15 h1:QquxR7NH3ULBsKC+NoTpilzbKKS+5AELfNREInbhvas= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.15/go.mod h1:Tkrthp/0sNBShQQsamR7j/zY4p19tVTAs+nnqhH6R3c= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.6 h1:3FtKgndLdv919p3V4VStk8y3agcC9yEu9vrhhe+rvfQ= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.6/go.mod h1:A9gdtslk61CskUB2nDcY2fuvJ1RNl5bskr1eTJrcUJU= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.18.8 h1:D6Sc+XyjK++NhkJJLvZNcf0xyzNhhC+GVn/MYDeONS4= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.18.8/go.mod h1:8OyausC7+VUBNJFOEDjvSrowuefSkEoJaUzsGLNRXZQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8 h1:oKnAXxSF2FUvfgw8uzU/v9OTYorJJZ8eBmWhr9TWVVQ= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.12 h1:760bUnTX/+d693FT6T6Oa7PZHfEQT9XMFZeM5IQIB0A= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.12/go.mod h1:MO4qguFjs3wPGcCSpQ7kOFTwRvb+eu+fn+1vKleGHUk= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 h1:yOfILxyjmtr2ubRkRJldlHDFBhf5vw4CzhbwWIBmimQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.9/go.mod h1:O1IvkYxr+39hRf960Us6j0x1P8pDqhTX+oXM5kQNl/Y= +github.com/aws/smithy-go v1.12.0 h1:gXpeZel/jPoWQ7OEmLIgCUnhkFftqNfwWUwAHSlp1v0= +github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/bradleyfalzon/ghinstallation v1.1.1 h1:pmBXkxgM1WeF8QYvDLT5kuQiHMcmf+X015GI0KM/E3I= -github.com/bradleyfalzon/ghinstallation v1.1.1/go.mod h1:vyCmHTciHx/uuyN82Zc3rXN3X2KTK8nUTCrTMwAhcug= +github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4kz6OXd0PKPlFqf81M= +github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bwmarrin/discordgo v0.19.0/go.mod h1:O9S4p+ofTFwB02em7jkpkV8M3R0/PUVOwN61zSZ0r4Q= -github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= -github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= -github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cavaliercoder/go-cpio 
v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod 
h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= -github.com/container-storage-interface/spec v1.3.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= 
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.11/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= +github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/daixiang0/gci v0.0.0-20200727065011-66f1df783cb2/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4= -github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0= -github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/deepmap/oapi-codegen v1.11.0 h1:f/X2NdIkaBKsSdpeuwLnY/vDI0AtPUrmB5LMgc7YD+A= +github.com/deepmap/oapi-codegen v1.11.0/go.mod h1:k+ujhoQGxmQYBZBbxhOZNZf4j08qv5mC+OH+fFTnKxM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20220417044921-416226498f94 h1:VIy7cdK7ufs7ctpTFkXJHm1uP3dJSnCGSPysEICB1so= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.2.0 h1:8ozOH5xxoMYDt5/u+yMTsVXydVCbTORFnOOoq2lumco= -github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d 
h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flosch/pongo2 v0.0.0-20181225140029-79872a7b2769/go.mod h1:tbAXHifHQWNSpWbiJHpJTZH5fi3XHhDMdP//vuz9WS4= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= 
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373/go.mod h1:Dcsy1kii/xFyNad5JqY/d0GO5mu91sungp5xotbm3Yk= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-critic/go-critic v0.5.0/go.mod h1:4jeRh3ZAVnRYhuWdOEvwzVqLUpxMSoAT0xZ74JsTPlo= -github.com/go-critic/go-critic v0.5.2/go.mod h1:cc0+HvdE3lFpqLecgqMaJcvWWH77sLdBp+wLGPM1Yyo= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= -github.com/go-git/go-git/v5 v5.2.0/go.mod 
h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod 
h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= -github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag 
v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible h1:2cauKuaELYAEARXRkq2LrJ0yDDv1rW7+wrTEdVL3uaU= -github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible/go.mod h1:qf9acutJ8cwBUhm1bqgz6Bei9/C/c93FPDljKWwsOgM= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.4.0/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= +github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc= +github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp 
v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -505,31 +463,25 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod 
h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.30.0/go.mod h1:5t0i3wHlqQc9deBBvZsP+a/4xz7cfjV+zhp5U0Mzp14= -github.com/golangci/golangci-lint v1.31.0/go.mod h1:aMQuNCA+NDU5+4jLL5pEuFHoue0IznKE2+/GsFvvs8A= -github.com/golangci/golangci-lint v1.32.2/go.mod h1:ydr+IqtIVyAh72L16aK0bNdNg/YGa+AEgdbKj9MluzI= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.39.0/go.mod h1:rjQFmK4jPCpxeUdLq9bYhNFFsjgGOtpnDmDeap0+nsw= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cadvisor v0.44.1/go.mod h1:GQ9KQfz0iNHQk3D6ftzJWK4TXabfIgM10Oy3FkR+Gzg= +github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -538,168 +490,172 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= -github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts= -github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= -github.com/google/go-github/v33 v33.0.0 h1:qAf9yP0qc54ufQxzwv+u9H0tiVOnPJxo0lI/JXqw3ZM= -github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg= +github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= -github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/rpmpack v0.0.0-20200731134257-3685799e8fdf/go.mod h1:+y9lKiqDhR4zkLl+V9h4q0rdyrYVsWWm6LLCQP33DIk= -github.com/google/rpmpack v0.0.0-20200919095143-1c1eea455332/go.mod h1:+y9lKiqDhR4zkLl+V9h4q0rdyrYVsWWm6LLCQP33DIk= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= -github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 
+github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= -github.com/gookit/color v1.3.1/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gopackage/ddp v0.0.0-20170117053602-652027933df4 h1:4EZlYQIiyecYJlUbVkFXCXHz1QPhVXcHnQKAzBTPfQo= github.com/gopackage/ddp v0.0.0-20170117053602-652027933df4/go.mod h1:lEO7XoHJ/xNRBCxrn4h/CEB67h0kW1B0t4ooP2yrjUA= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/goreleaser/chglog v0.1.1/go.mod h1:xSDa/73C0TxBcLvoT2JHh47QyXpCx5rrNVzJKyeFGPw= -github.com/goreleaser/chglog v0.1.2/go.mod h1:tTZsFuSZK4epDXfjMkxzcGbrIOXprf0JFp47BjIr3B8= -github.com/goreleaser/fileglob v0.3.0/go.mod h1:kNcPrPzjCp+Ox3jmXLU5QEsjhqrtLBm6OnXAif8KRl8= -github.com/goreleaser/goreleaser v0.143.0/go.mod h1:/zq84GQ8WZFnspGTONdZO0Kgf5BzOD3CzufXyw+ut4A= -github.com/goreleaser/goreleaser v0.147.0/go.mod h1:AkI3X+mBaAEc99RDZgUGvjbUEqh/+t+EjNMaqUbd+3w= -github.com/goreleaser/nfpm v1.7.0/go.mod h1:V6xp021JRvYdBYpGFoP6m6YsuedzgB6IO2ub2NNBohs= -github.com/goreleaser/nfpm v1.10.1/go.mod h1:G0vvOjif+gnnTTWBtvYqMBh8nMGM7eNkrZU/W2gdM6o= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregdel/pushover v1.1.0 
h1:dwHyvrcpZCOS9V1fAnKPaGRRI5OC55cVaKhMybqNsKQ= +github.com/gregdel/pushover v1.1.0/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER1PthX7to= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-retryablehttp v0.5.1/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= 
-github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= -github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/heketi/heketi v10.2.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb-client-go/v2 v2.9.1 h1:5kbH226fmmiV0MMTs7a8L7/ECCKdJWBi1QZNNv4/TkI= +github.com/influxdata/influxdb-client-go/v2 v2.9.1/go.mod h1:x7Jo5UHHl+w8wu8UnGiNobDDHygojXwJX4mx7rXGKMk= +github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU= +github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= -github.com/jarcoal/httpmock v1.0.6/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jaytaylor/html2text v0.0.0-20190408195923-01ec452cbe43/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= -github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU= -github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/juju/ansiterm v1.0.0 h1:gmMvnZRq7JZJx6jkfSq9/+2LMrVEwGwt7UR6G+lmDEg= +github.com/juju/ansiterm v1.0.0/go.mod h1:PyXUpnI3olx3bsPcHt98FGPX/KCFZ1Fi+hw1XLI6384= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= @@ -707,21 +663,13 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/karrick/godirwalk v1.16.1/go.mod 
h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -729,443 +677,352 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kyoh86/exportloopref v0.1.7/go.mod h1:h1rDl2Kdj97+Kwh4gdz3ujE7XHmH51Q0lUiZ1z4NLj8= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/labstack/echo/v4 v4.7.2/go.mod 
h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks= +github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= +github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx v1.2.24/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= +github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/llorllale/go-gitlint v0.0.0-20190914155841-58c0b8cef0e5/go.mod h1:omoASPlaaf3ECEhTMfLZVS6o550eBWI2YsM/saGEbVA= -github.com/llorllale/go-gitlint v0.0.0-20200802191503-5984945d4b80/go.mod h1:omoASPlaaf3ECEhTMfLZVS6o550eBWI2YsM/saGEbVA= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5/go.mod h1:c2mYKRyMb1BPkO5St0c/ps62L4S0W2NAkaTXj9qEI+0= github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018/go.mod h1:sFlOUpQL1YcjhFVXhg1CG8ZASEs/Mf1oVb6H75JL/zg= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailgun/mailgun-go v2.0.0+incompatible/go.mod h1:NWTyU+O4aczg/nsGhQnvHL6v2n5Gy6Sv5tNDVvC6FbU= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.10/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-zglob v0.0.3/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.1.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns= +github.com/minio/minio-go/v7 v7.0.29/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure 
v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 
h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= -github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod 
h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/newrelic/newrelic-client-go v0.49.0 h1:MnAK89AcdOqapfPGcsUzLdzTUmeIojBz1W9VbC4Llag= -github.com/newrelic/newrelic-client-go v0.49.0/go.mod h1://vEwOJWDi1nsSnmmdZrB8Kab9ibSfGcF0UmnwzoSNQ= -github.com/newrelic/tutone v0.2.5/go.mod h1:Jv8miaLyP2pjx4wqvAdx1nLXMx/Cl7kmsNiKZlCeIds= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/newrelic/newrelic-client-go v0.86.5 h1:RxjhA/xdjcnMTxl1oq+ms6tGTuZOOL+h8IcfBCD1PVY= +github.com/newrelic/newrelic-client-go v0.86.5/go.mod h1:RYMXt7hgYw7nzuXIGd2BH0F1AivgWw7WrBhNBQZEB4k= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.0.0-20200708172631-8866003e3856/go.mod h1:wBEpHwM2OdmeNpdCvRPUlkEbBuaFmcK4Wv8Q7FuGW3c= -github.com/nishanths/exhaustive v0.0.0-20200811152831-6cf413ae40e0/go.mod h1:wBEpHwM2OdmeNpdCvRPUlkEbBuaFmcK4Wv8Q7FuGW3c= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= github.com/nlopes/slack v0.5.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852/go.mod h1:eqOVx5Vwu4gd2mmMZvVZsgIqNSaW3xxRThUJ0k/TPk4= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 
+github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/opsgenie/opsgenie-go-sdk-v2 v1.0.5 h1:AnS8ZCC5dle8P4X4FZ+IOlX9v0jAkCMiZDIzRnYwBbs= github.com/opsgenie/opsgenie-go-sdk-v2 v1.0.5/go.mod h1:f0ezb0R/mrB9Hpm5RrIS6EX3ydjsR2nAB88nYYXZcNY= -github.com/pact-foundation/pact-go v1.0.4/go.mod 
h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.13 h1:nV98dkBpqaYbDnhefmOQ+Rn4hE+jD6AtjYHXaU5WyJI= +github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.13/go.mod h1:4OjcxgwdXzezqytxN534MooNmrxRD50geWZxTD7845s= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang 
v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2-0.20220620141757-4ad265f1b4ee h1:8dyWwbEbKRZ13K6VEueQsOH2Ywu58j9YM/mn3bp50ww= +github.com/prometheus/client_golang v1.12.2-0.20220620141757-4ad265f1b4ee/go.mod h1:nDOYPpTKRWyFSHGWY5QbDUvjSMBusROfFzxhmDKUNWo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.21.0 h1:SMvI2JVldvfUvRVlP64jkIJEC6WiGHJcN2e5tB+ztF8= -github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.36.0 h1:78hJTing+BLYLjhXE+Z2BubeEymH5Lr0/Mt8FKkxxYo= +github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/psampaz/go-mod-outdated v0.7.0/go.mod h1:r78NYWd1z+F9Zdsfy70svgXOz363B08BWnTyFSgEESs= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1:CGFX09Ci3pq9QZdj86B+VGIdNj4VyCo2iPOGS9esB/k= -github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/securego/gosec/v2 v2.4.0/go.mod h1:0/Q4cjmlFDfDUj1+Fib61sc+U5IQb2w+Iv9/C3wPVko= -github.com/securego/gosec/v2 v2.5.0/go.mod h1:L/CDXVntIff5ypVHIkqPXbtRpJiNCh6c6Amn68jXDjo= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/servicemeshinterface/smi-sdk-go v0.4.1 h1:L8nS7WtVlGoEJF7RdCbwh0Oj/JheGY+5fa3R+cA2ReY= -github.com/servicemeshinterface/smi-sdk-go v0.4.1/go.mod h1:9rsLPBNcqfDNmEgyYwpopn93aE9yz46d2EHFBNOYj/w= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/servicemeshinterface/smi-sdk-go v0.5.0 h1:9cZdhvGbGDlmnp9qqmcQL+RL6KZ3IzHfDLoA5Axg8n0= +github.com/servicemeshinterface/smi-sdk-go v0.5.0/go.mod h1:nm1Slf3pfaZPP3g2tE/K5wDmQ1uWVSP0p3uu5rQAQLc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/slack-go/slack v0.6.6 h1:ln0fO794CudStSJEfhZ08Ok5JanMjvW6/k2xBuHqedU= -github.com/slack-go/slack v0.6.6/go.mod h1:FGqNzJBmxIsZURAxh2a8D21AnOVvvXZvGligs4npPUM= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/slack-go/slack v0.10.1/go.mod h1:wWL//kk0ho+FcQXcBTmEafUI5dz4qz5f4mMk8oIkioQ= +github.com/slack-go/slack v0.11.0 h1:sBBjQz8LY++6eeWhGJNZpRm5jvLRNnWBFZ/cAq58a6k= +github.com/slack-go/slack v0.11.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/sonyflake v1.0.0 h1:MpU6Ro7tfXwgn2l5eluf9xQvQJDROTBImNCfRXn/YeM= github.com/sony/sonyflake v1.0.0/go.mod h1:Jv3cfhf/UFtolOTTRd3q4Nl6ENqM+KfyZ5PseKfZGF4= -github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= -github.com/sourcegraph/go-diff v0.6.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaceapegames/go-wavefront v1.8.1 h1:Xuby0uBfw1WVxD9d+l8Gh+zINqnBfd0RJT8e/3i3vBM= github.com/spaceapegames/go-wavefront v1.8.1/go.mod h1:GtdIjtJ0URkfPmaKx0+7vMSDvT/MON9v+4pbdagA8As= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.4.1 h1:asw9sl74539yqavKaglDM5hFpdJVK0Y5Dr/JOgQ89nQ= -github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod 
h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= -github.com/ssgreg/nlreturn/v2 v2.0.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 
h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/technoweenie/multipartstreamer v1.0.1 h1:XRztA5MXiR1TIRHxH2uNxXxaIkKQDeX7m2XsSOlQEnM= -github.com/technoweenie/multipartstreamer v1.0.1/go.mod h1:jNVxdtShOxzAsukZwTSw6MDx5eUJoiEBsSvzDU9uzog= -github.com/tetafro/godot v0.4.8/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= -github.com/tetafro/godot v0.4.9/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= -github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= -github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= -github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= -github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= -github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= -github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tmc/grpc-websocket-proxy 
v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE= -github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df/go.mod h1:pnyouUty/nBr/zm3GYwTIt+qFTLWbdjeLjZmJdzJOu8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/undefinedlabs/go-mpatch v1.0.6 h1:h8q5ORH/GaOE1Se1DMhrOyljXZEhRcROO7agMqWXCOY= -github.com/undefinedlabs/go-mpatch v1.0.6/go.mod h1:TyJZDQ/5AgyN7FSLiBJ8RO9u2c6wbtRvK827b6AVqY4= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.12.0/go.mod h1:229t1eWu9UXTPmoUkbpN/fctKPBY4IJoFXQnxHGXy6E= -github.com/valyala/fasthttp v1.15.1/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= +github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= +github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/quicktemplate v1.5.1/go.mod h1:v7yYWpBEiutDyNfVaph6oC/yKwejzVyTX/2cwwHxyok= -github.com/valyala/quicktemplate v1.6.2/go.mod h1:mtEJpQtUiBV0SHhMX6RtiJtqxncgrfmjcUy5T68X8TM= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod 
h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 h1:qqllXPzXh+So+mmANlX/gCJrgo+1kQyshMoQ+NASzm0= github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0/go.mod h1:2rx5KE5FLD0HRfkkpyn8JwbVLBdhgeiOb2D2D9LLKM4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/xanzy/go-gitlab v0.37.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/go-gitlab v0.39.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= @@ -1174,60 +1031,73 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod 
h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= 
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9 h1:NUzdAbFtCJSXU20AOXgeqaUwg8Ypg4MPYmL+d+rsB5c= +golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1257,6 +1127,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= @@ -1267,22 +1139,24 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod 
h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1290,14 +1164,12 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190607181551-461777fb6f67/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1310,7 +1182,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1318,17 +1189,54 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1339,24 +1247,21 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1370,17 +1275,17 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1393,7 +1298,6 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1401,78 +1305,116 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201109165425-215b40eba54c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d h1:Zu/JngovGLVi6t2J3nmAf3AoTDwuzw85YZ3b9o4yU7s= +golang.org/x/sys 
v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1480,13 +1422,10 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1496,89 +1435,108 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200321224714-0d839f3cf2ed/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200519015757-0d0afa43d58a/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200608174601-1b747fd94509/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200701041122-1837592efa10/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 h1:juzzlx91nWAOsHuOVfXZPMXHtJEKouZvY9bBbwlOeYs= gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45/go.mod h1:41y72mzHT7+jFNgyBpJRrZWuZJcLmLrTpq6iGgOFJMQ= -gomodules.xyz/notify v0.1.0 h1:lN7CAFKIWxaXJXm3F/7KTbgw3lUy9peh6iyjgj1skvA= gomodules.xyz/notify v0.1.0/go.mod h1:wGy0vLXGpabCg0j9WbjzXf7pM7Khz11FqCLtBbTujP0= +gomodules.xyz/notify v0.1.1 h1:1tTuoyswmPvzqPCTEDQK8SZ3ukCxLsonAAwst2+y1a0= +gomodules.xyz/notify v0.1.1/go.mod h1:QgQyU4xEA/plJcDeT66J2Go2V7U4c0pD9wjo7HfFil4= +gomodules.xyz/version v0.1.0/go.mod h1:Y8xuV02mL/45psyPKG3NCVOwvAOy6T5Kx0l3rCjKSjU= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod 
h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api 
v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1595,39 +1553,110 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200325114520-5b2d0af7952b/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw= +google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc/examples v0.0.0-20210331235824-f6bb3972ed15 h1:5zzARWGVJhfHEHNuN5Irypt6oKD506IgclKOta6InM0= -google.golang.org/grpc/examples v0.0.0-20210331235824-f6bb3972ed15/go.mod 
h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1637,22 +1666,24 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/AlecAivazis/survey.v1 v1.8.7/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 
v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE= gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1660,39 +1691,34 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/kyokomi/emoji.v1 v1.5.1/go.mod h1:N9AZ6hi1jHOPn34PsbpufQZUcKftSD7WgS2pgpmH4Lg= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= -gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= -gopkg.in/src-d/go-git.v4 v4.10.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.6.0/go.mod h1:LEX+ioCVdeWhZc8GYfiBRag360eBhwixWJ62R9eDQtI= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1700,89 +1726,116 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY= -k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/apiextensions-apiserver v0.21.0 h1:Nd4uBuweg6ImzbxkC1W7xUNZcCV/8Vt10iTdTIVF3hw= -k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= -k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apiserver v0.21.0 h1:1hWMfsz+cXxB77k6/y0XxWxwl6l9OF26PC9QneUVn1Q= -k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= -k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= -k8s.io/cli-runtime v0.21.0/go.mod 
h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= -k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/cloud-provider v0.21.0/go.mod h1:z17TQgu3JgUFjcgby8sj5X86YdVK5Pbt+jm/eYMZU9M= -k8s.io/cluster-bootstrap v0.21.0 h1:9CfnWrvXm12k6fP3WR3ist76rrqGq6H5pRVEUvEc4Ws= -k8s.io/cluster-bootstrap v0.21.0/go.mod h1:rs7i1JpBCa56YNmnYxFJuoUghIwpMzDidY8ZmqiRnrQ= -k8s.io/code-generator v0.20.5-rc.0 h1:t1T/nu5/kZtZp61BSgj+TucDp5hBg0/JcbaIZufxRMg= -k8s.io/code-generator v0.20.5-rc.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/component-base v0.21.0 h1:tLLGp4BBjQaCpS/KiuWh7m2xqvAdsxLm4ATxHSe5Zpg= -k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= -k8s.io/component-helpers v0.21.0 h1:SoWLsd63LI5uwofcHVSO4jtlmZEJRycfwNBKU4eAGPQ= -k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= -k8s.io/controller-manager v0.21.0/go.mod h1:Ohy0GRNRKPVjB8C8G+dV+4aPn26m8HYUI6ejloUBvUA= -k8s.io/cri-api v0.20.5-rc.0/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/csi-translation-lib v0.21.0/go.mod h1:edq+UMpgqEx3roTuGF/03uIuSOsI986jtu65+ytLlkA= +k8s.io/api v0.17.8/go.mod h1:N++Llhs8kCixMUoCaXXAyMMPbo8dDVnh+IQ36xZV2/0= +k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= +k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= +k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= +k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= +k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= +k8s.io/apimachinery v0.17.8/go.mod h1:Lg8zZ5iC/O8UjCqW6DNhcQG2m4TdjF9kwG3891OWbbA= +k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= +k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.24.2 h1:orxipm5elPJSkkFNlwH9ClqaKEDJJA3yR2cAAlCnyj4= +k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= +k8s.io/cli-runtime v0.24.2 h1:KxY6tSgPGsahA6c1/dmR3uF5jOxXPx2QQY6C5ZrLmtE= +k8s.io/cli-runtime v0.24.2/go.mod h1:1LIhKL2RblkhfG4v5lZEt7FtgFG5mVb8wqv5lE9m5qY= +k8s.io/client-go v0.17.8/go.mod h1:SJsDS64AAtt9VZyeaQMb4Ck5etCitZ/FwajWdzua5eY= +k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= +k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= +k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= +k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= +k8s.io/cloud-provider v0.24.2/go.mod h1:a7jyWjizk+IKbcIf8+mX2cj3NvpRv9ZyGdXDyb8UEkI= +k8s.io/cluster-bootstrap v0.24.2 h1:p177dIhDst4INUWBZgTnqSad8oJiUdKo0cLLVU24AzE= +k8s.io/cluster-bootstrap v0.24.2/go.mod h1:eIHV338K03vBm3u/ROZiNXxWJ4AJRoTR9PEUhcTvYkg= +k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/code-generator v0.24.2 h1:EGeRWzJrpwi6T6CvoNl0spM6fnAnOdCr0rz7H4NU1rk= +k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= +k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= +k8s.io/component-base v0.24.2/go.mod 
h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= +k8s.io/component-helpers v0.24.2 h1:gtXmI/TjVINtkAdZn7m5p8+Vd0Mk4d1q8kwJMMLBdwY= +k8s.io/component-helpers v0.24.2/go.mod h1:TRQPBQKfmqkmV6c0HAmUs8cXVNYYYLsXy4zu8eODi9g= +k8s.io/controller-manager v0.24.2/go.mod h1:hpwCof4KxP4vrw/M5QiVxU6Zmmggmr1keGXtjGHF+vc= +k8s.io/cri-api v0.24.2/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig= +k8s.io/csi-translation-lib v0.24.2/go.mod h1:pdHc2CYLViQYYsOqOp79hjKYi8J4NZ7vpiVzn1SqBrg= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-aggregator v0.21.0/go.mod h1:sIaa9L4QCBo9gjPyoGJns4cBjYVLq3s49FxF7m/1A0A= -k8s.io/kube-controller-manager v0.21.0/go.mod h1:QGJ1P7eU4FQq8evpCHN5e4QwPpcr2sbWFJBO/DKBUrw= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-proxy v0.21.0/go.mod h1:36jW3e6+5iQql9tHrLjVrmwpPsbhTywoI6OCFL7MWRs= -k8s.io/kube-scheduler v0.21.0/go.mod h1:wf1oi1NHSsFYfG7lKwxJVmnQNBnhL9vOMXztcKQu5IU= -k8s.io/kubectl v0.21.0 h1:WZXlnG/yjcE4LWO2g6ULjFxtzK6H1TKzsfaBFuVIhNg= -k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= -k8s.io/kubelet v0.21.0/go.mod h1:G5ZxMTVev9t4bhmsSxDAWhH6wXDYEVHVVFyYsw4laR4= -k8s.io/kubernetes v1.21.0 h1:LUUQgdFsKB+wVgKPUapmXjkvvJHSLN53CuQwre4c+mM= -k8s.io/kubernetes v1.21.0/go.mod h1:Yx6XZ8zalyqEk7but+j4+5SvLzdyH1eeqZ4cwO+5dD4= -k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4= -k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= -k8s.io/mount-utils v0.20.5-rc.0/go.mod h1:Jv9NRZ5L2LF87A17GaGlArD+r3JAJdZFvo4XD1cG4Kc= -k8s.io/sample-apiserver 
v0.21.0/go.mod h1:yMffYq14yQZtuVPVBGaBJ+3Scb2xHT6QeqFfk3v+AEY= -k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-aggregator v0.24.2/go.mod h1:Ju2jNDixn+vqeeKEBfjfpc204bO1pbdXX0N9knCxeMQ= +k8s.io/kube-controller-manager v0.24.2/go.mod h1:KDE0yqiEvxYiO0WRpPA4rVx8AcK1vsWydUF37AJ9lTI= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M= +k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 h1:yEQKdMCjzAOvGeiTwG4hO/hNVNtDOuUFvMUZ0OlaIzs= +k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8/go.mod h1:mbJ+NSUoAhuR14N0S63bPkh8MGVSo3VYSGZtH/mfMe0= +k8s.io/kube-proxy v0.24.2/go.mod h1:bozS2ufl/Ns6s40Ue34eV7rqyLVygi5usSmCgW7rFU8= +k8s.io/kube-scheduler v0.24.2/go.mod h1:DRa+aeXKSYUUOHHIc/9EcaO9+FW5FydaOfPSvaSW5Ko= +k8s.io/kubectl v0.24.2 h1:+RfQVhth8akUmIc2Ge8krMl/pt66V7210ka3RE/p0J4= +k8s.io/kubectl v0.24.2/go.mod h1:+HIFJc0bA6Tzu5O/YcuUt45APAxnNL8LeMuXwoiGsPg= +k8s.io/kubelet v0.24.2/go.mod h1:Xm9DkWQjwOs+uGOUIIGIPMvvmenvj0lDVOErvIKOOt0= +k8s.io/kubernetes v1.24.2 h1:AyjtHzSysliKR04Km91njmk2yaKmOa3ZISQZCIGUnVI= +k8s.io/kubernetes v1.24.2/go.mod h1:8e8maMiZzBR2/8Po5Uulx+MXZUYJuN3vtKwD4Ct1Xi0= +k8s.io/legacy-cloud-providers v0.24.2/go.mod h1:sgkasgIP2ZOew8fzoOq0mQLVXJ4AmB57IUbFUjzPWEo= +k8s.io/metrics v0.24.2/go.mod h1:5NWURxZ6Lz5gj8TFU83+vdWIVASx7W8lwPpHYCqopMo= +k8s.io/mount-utils v0.24.2/go.mod h1:XrSqB3a2e8sq+aU+rlbcBtQ3EgcuDk5RP9ZsGxjoDrI= +k8s.io/pod-security-admission v0.24.2/go.mod h1:znnuDHWWWvh/tpbYYPwTsd4y//qHi3cOX+wGxET/mMI= +k8s.io/sample-apiserver v0.24.2/go.mod h1:mf8qgDdu450wqpCJOkSAmoTgU4PIMAcfa5uTBwmJekE= +k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220706174534-f6158b442e7c h1:hFZO68mv/0xe8+V0gRT9BAq3/31cKjjeVv4nScriuBk= +k8s.io/utils v0.0.0-20220706174534-f6158b442e7c/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod 
h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/gofumpt v0.0.0-20200709182408-4fd085cb6d5f/go.mod h1:9VQ397fNXEnF84t90W4r4TRCQK+pg9f8ugVfyj+S26w= -mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d/go.mod h1:bzrjFmaD6+xqohD3KYP0H2FEuxknnBmyyOxdhLdaIws= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= -mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ= -sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= -sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= -sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk= -sigs.k8s.io/kustomize/kyaml v0.10.15 h1:dSLgG78KyaxN4HylPXdK+7zB3k7sW6q3IcCmcfKA+aI= -sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= +sigs.k8s.io/kustomize/api v0.11.5 h1:vLDp++YAX7iy2y2CVPJNy9pk9CY8XaUKgHkjbVtnWag= +sigs.k8s.io/kustomize/api v0.11.5/go.mod h1:2UDpxS6AonWXow2ZbySd4AjUxmdXLeTlvGBC46uSiq8= +sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= +sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= +sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= +sigs.k8s.io/kustomize/kyaml v0.13.7 h1:/EZ/nPaLUzeJKF/BuJ4QCuMVJWiEVoI8iftOHY3g3tk= +sigs.k8s.io/kustomize/kyaml v0.13.7/go.mod h1:6K+IUOuir3Y7nucPRAjw9yth04KSWBnP5pqUTGwj/qU= +sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod 
h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/build-release-plugins.sh b/hack/build-release-plugins.sh index 07f9b66ddc..00f86b7b98 100755 --- a/hack/build-release-plugins.sh +++ b/hack/build-release-plugins.sh @@ -6,11 +6,12 @@ SRCROOT="$( CDPATH='' cd -- "$(dirname "$0")/.." && pwd -P )" mkdir -p ${SRCROOT}/dist rollout_iid_file=$(mktemp -d "${SRCROOT}/dist/rollout_iid.XXXXXXXXX") -docker build --iidfile ${rollout_iid_file} --target argo-rollouts-build . +DOCKER_BUILDKIT=1 docker build --iidfile ${rollout_iid_file} --build-arg MAKE_TARGET="plugin-linux plugin-darwin plugin-windows" \ +--target argo-rollouts-build . rollout_iid=$(cat ${rollout_iid_file}) container_id=$(docker create ${rollout_iid}) -for plat in linux-amd64 darwin-amd64 windows-amd64; do +for plat in linux-amd64 linux-arm64 darwin-amd64 windows-amd64; do docker cp ${container_id}:/go/src/github.com/argoproj/argo-rollouts/dist/kubectl-argo-rollouts-${plat} ${SRCROOT}/dist done docker rm -v ${container_id} diff --git a/hack/gen-crd-spec/main.go b/hack/gen-crd-spec/main.go index 42f611d009..607f8ddd53 100644 --- a/hack/gen-crd-spec/main.go +++ b/hack/gen-crd-spec/main.go @@ -14,9 +14,10 @@ import ( "github.com/blang/semver" "github.com/ghodss/yaml" - "github.com/go-openapi/spec" extensionsobj "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kubeopenapiutil "k8s.io/kube-openapi/pkg/util" + spec "k8s.io/kube-openapi/pkg/validation/spec" ) const metadataValidation = `properties: @@ -79,12 +80,6 @@ func NewCustomResourceDefinition() []*extensionsobj.CustomResourceDefinition { crdYamlBytes, err := exec.Command( "controller-gen", "paths=./pkg/apis/rollouts/...", - "crd:trivialVersions=true", - // The only possible value is 'false' since 'apiextensions.k8s.io/v1' - // https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning - // It is possible though to opt-out of pruning for specifc sub-trees of fields by adding x-kubernetes-preserve-unknown-fields: true - // by using the 'setValidationOverride' function in this file. 
- "crd:preserveUnknownFields=false", "crd:crdVersions=v1", "crd:maxDescLen=0", "output:crd:stdout", @@ -118,12 +113,6 @@ func NewCustomResourceDefinition() []*extensionsobj.CustomResourceDefinition { removeK8S118Fields(obj) createMetadataValidation(obj) crd := toCRD(obj) - - if crd.Name == "clusteranalysistemplates.argoproj.io" { - crd.Spec.Scope = "Cluster" - } else { - crd.Spec.Scope = "Namespaced" - } crds = append(crds, crd) } @@ -244,7 +233,7 @@ func removeK8S118Fields(un *unstructured.Unstructured) { setValidationOverride(un, preserveUnknownFields, "spec.template.spec.ephemeralContainers[].resources.requests") // Replace this with "spec.template.spec.volumes[].ephemeral.volumeClaimTemplate.spec.resources.{limits/requests}" // when it's ok to only support k8s 1.17+ - setValidationOverride(un, preserveUnknownFields, "spec.template.spec.volumes") + setValidationOverride(un, preserveUnknownFields, "spec.template.spec.volumes[]") case "Experiment": setValidationOverride(un, preserveUnknownFields, "spec.templates[].template.spec.containers[].resources.limits") setValidationOverride(un, preserveUnknownFields, "spec.templates[].template.spec.containers[].resources.requests") @@ -404,9 +393,6 @@ func generateKustomizeSchema(crds []*extensionsobj.CustomResourceDefinition, out definitions := map[string]interface{}{} for _, crd := range crds { - if crd.Spec.Names.Kind != "Rollout" { - continue - } var version string var props map[string]extensionsobj.JSONSchemaProps for _, v := range crd.Spec.Versions { @@ -440,7 +426,8 @@ func generateKustomizeSchema(crds []*extensionsobj.CustomResourceDefinition, out } } - definitions[fmt.Sprintf("%s.%s", version, crd.Spec.Names.Kind)] = map[string]interface{}{ + definitionName := kubeopenapiutil.ToRESTFriendlyName(fmt.Sprintf("%s/%s.%s", crd.Spec.Group, version, crd.Spec.Names.Kind)) + definitions[definitionName] = map[string]interface{}{ "properties": propsMap, "x-kubernetes-group-version-kind": []map[string]string{{ "group": crd.Spec.Group, diff --git a/hack/installers/install-codegen-go-tools.sh b/hack/installers/install-codegen-go-tools.sh new file mode 100755 index 0000000000..8a08efd826 --- /dev/null +++ b/hack/installers/install-codegen-go-tools.sh @@ -0,0 +1,57 @@ +#!/bin/bash +set -eux -o pipefail + +SRCROOT="$( CDPATH='' cd -- "$(dirname "$0")/../.." && pwd -P )" + +# This script installs all our golang-based codegen utility CLIs necessary for codegen. +# Some dependencies are vendored in go.mod (ones which are actually imported in our codebase). +# Other dependencies are only used as a CLI and do not need vendoring in go.mod (doing so adds +# unecessary dependencies to go.mod). We want to maintain a single source of truth for versioning +# our binaries (either go.mod or go install @), so we use two techniques to install +# our CLIs: +# 1. For CLIs which are NOT vendored in go.mod, we can run `go install @` with an explicit version +# 2. 
For packages which we *do* vendor in go.mod, we determine version from go.mod followed by `go install` with that version +go_mod_install() { + module=$(go list -f '{{.Module}}' $1 | awk '{print $1}') + module_version=$(go list -m $module | awk '{print $NF}' | head -1) + go install $1@$module_version +} + +# All binaries are compiled into the argo-cd/dist directory, which is added to the PATH during codegen +export GOBIN="${SRCROOT}/dist" +mkdir -p $GOBIN + +# protoc-gen-go* is used to generate .pb.go from .proto files +#go_mod_install github.com/golang/protobuf/protoc-gen-go +#go_mod_install github.com/gogo/protobuf/protoc-gen-gogo +go_mod_install github.com/gogo/protobuf/protoc-gen-gogofast + +# protoc-gen-grpc-gateway is used to build .pb.gw.go files from from .proto files +go_mod_install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway + +# # protoc-gen-swagger is used to build swagger.json +go_mod_install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger + +# k8s tools to codegen .proto files, client libraries, and helpers from types.go +go_mod_install k8s.io/code-generator/cmd/go-to-protobuf +go_mod_install k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo +go_mod_install k8s.io/code-generator/cmd/client-gen +go_mod_install k8s.io/code-generator/cmd/deepcopy-gen +go_mod_install k8s.io/code-generator/cmd/defaulter-gen +go_mod_install k8s.io/code-generator/cmd/informer-gen +go_mod_install k8s.io/code-generator/cmd/lister-gen + +# We still install openapi-gen from go.mod since upstream does not utilize release tags +go_mod_install k8s.io/kube-openapi/cmd/openapi-gen + +# controller-gen is run by ./hack/gen-crd-spec to generate the CRDs +go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 + +# swagger cli is used to generate swagger docs +go install github.com/go-swagger/go-swagger/cmd/swagger@v0.28.0 + +# goimports is used to auto-format generated code +go install golang.org/x/tools/cmd/goimports@v0.1.8 + +# mockery is used for generating mock +go install github.com/vektra/mockery/v2@v2.14.0 diff --git a/hack/installers/install-dev-tools.sh b/hack/installers/install-dev-tools.sh new file mode 100755 index 0000000000..0e5f67f00a --- /dev/null +++ b/hack/installers/install-dev-tools.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -eux -o pipefail + +PROJECT_ROOT=$(cd $(dirname ${BASH_SOURCE})/../..; pwd) +DIST_PATH="${PROJECT_ROOT}/dist" +PATH="${DIST_PATH}:${PATH}" + +mkdir -p ${DIST_PATH} + +gotestsum_version=1.8.1 + +OS=$(go env GOOS) +ARCH=$(go env GOARCH) + +export TARGET_FILE=gotestsum_${gotestsum_version}_${OS}_${ARCH}.tar.gz +temp_path="/tmp/${TARGET_FILE}" +url=https://github.com/gotestyourself/gotestsum/releases/download/v${gotestsum_version}/gotestsum_${gotestsum_version}_${OS}_${ARCH}.tar.gz +[ -e ${temp_path} ] || curl -sLf --retry 3 -o ${temp_path} ${url} + +mkdir -p /tmp/gotestsum-${gotestsum_version} +tar -xvzf ${temp_path} -C /tmp/gotestsum-${gotestsum_version} +cp /tmp/gotestsum-${gotestsum_version}/gotestsum ${DIST_PATH}/gotestsum +chmod +x ${DIST_PATH}/gotestsum +gotestsum --version diff --git a/hack/installers/install-protoc.sh b/hack/installers/install-protoc.sh new file mode 100755 index 0000000000..1f3dd8e7bc --- /dev/null +++ b/hack/installers/install-protoc.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -eux -o pipefail + +PROJECT_ROOT=$(cd $(dirname ${BASH_SOURCE})/../..; pwd) +DIST_PATH="${PROJECT_ROOT}/dist" +PATH="${DIST_PATH}:${PATH}" + +protoc_version=3.17.3 + +OS=$(go env GOOS) +ARCH=$(go env GOARCH) +case $OS in + darwin) + # For 
macOS, the x86_64 binary is used even on Apple Silicon (it is run through rosetta), so + # we download and install the x86_64 version. See: https://github.com/protocolbuffers/protobuf/pull/8557 + protoc_os=osx + protoc_arch=x86_64 + ;; + *) + protoc_os=linux + case $ARCH in + arm64|arm) + protoc_arch=aarch_64 + ;; + *) + protoc_arch=x86_64 + ;; + esac + ;; +esac + +export TARGET_FILE=protoc_${protoc_version}_${OS}_${ARCH}.zip +temp_path="/tmp/${TARGET_FILE}" +url=https://github.com/protocolbuffers/protobuf/releases/download/v${protoc_version}/protoc-${protoc_version}-${protoc_os}-${protoc_arch}.zip +[ -e ${temp_path} ] || curl -sLf --retry 3 -o ${temp_path} ${url} + +mkdir -p /tmp/protoc-${protoc_version} +unzip -o ${temp_path} -d /tmp/protoc-${protoc_version} +mkdir -p ${DIST_PATH}/protoc-include +cp /tmp/protoc-${protoc_version}/bin/protoc ${DIST_PATH}/protoc +chmod +x ${DIST_PATH}/protoc +cp -a /tmp/protoc-${protoc_version}/include/* ${DIST_PATH}/protoc-include +chmod -R +rx ${DIST_PATH}/protoc-include +protoc --version diff --git a/hack/swagger-codegen.sh b/hack/swagger-codegen.sh new file mode 100755 index 0000000000..8b5dc9c3d2 --- /dev/null +++ b/hack/swagger-codegen.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +export SWAGGER_CODEGEN_VERSION=3.0.25 +PROJECT_ROOT=$(cd $(dirname ${BASH_SOURCE})/..; pwd) + +test -f "/tmp/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar" || \ + curl https://repo1.maven.org/maven2/io/swagger/codegen/v3/swagger-codegen-cli/${SWAGGER_CODEGEN_VERSION}/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar -o "/tmp/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar" + +docker run --rm -v /tmp:/tmp -v $PROJECT_ROOT:/src -w /src/ui -t maven:3-jdk-8 java -jar /tmp/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar $@ \ No newline at end of file diff --git a/hack/tools.go b/hack/tools.go index 22abdb37c9..ac3872df7f 100644 --- a/hack/tools.go +++ b/hack/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools package tools @@ -8,6 +9,7 @@ import ( _ "k8s.io/code-generator/cmd/deepcopy-gen" _ "k8s.io/code-generator/cmd/defaulter-gen" _ "k8s.io/code-generator/cmd/go-to-protobuf" + _ "k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo" _ "k8s.io/code-generator/cmd/informer-gen" _ "k8s.io/code-generator/cmd/lister-gen" _ "k8s.io/code-generator/pkg/util" diff --git a/hack/update-k8s-dependencies.sh b/hack/update-k8s-dependencies.sh index c8f0ba2bf1..fab66785f6 100755 --- a/hack/update-k8s-dependencies.sh +++ b/hack/update-k8s-dependencies.sh @@ -19,3 +19,6 @@ for MOD in "${MODS[@]}"; do go mod edit "-replace=${MOD}=${MOD}@${V}" done go get "k8s.io/kubernetes@v${VERSION}" + +go mod vendor +go mod tidy diff --git a/ingress/alb.go b/ingress/alb.go index 01288c86e7..3803f53810 100644 --- a/ingress/alb.go +++ b/ingress/alb.go @@ -18,7 +18,7 @@ import ( func (c *Controller) syncALBIngress(ingress *ingressutil.Ingress, rollouts []*v1alpha1.Rollout) error { ctx := context.TODO() annotations := ingress.GetAnnotations() - managedActions, err := ingressutil.NewManagedALBActions(annotations[ingressutil.ManagedActionsAnnotation]) + managedActions, err := ingressutil.NewManagedALBAnnotations(annotations[ingressutil.ManagedAnnotations]) if err != nil { return nil } @@ -35,31 +35,38 @@ func (c *Controller) syncALBIngress(ingress *ingressutil.Ingress, rollouts []*v1 for roName := range managedActions { if _, ok := actionHasExistingRollout[roName]; !ok { modified = true - actionKey := managedActions[roName] + actionKeys := managedActions[roName] delete(managedActions, roName) - 
resetALBAction, err := getResetALBActionStr(ingress, actionKey) - if err != nil { - log.WithField(logutil.RolloutKey, roName). - WithField(logutil.IngressKey, ingress.GetName()). - WithField(logutil.NamespaceKey, ingress.GetNamespace()). - Error(err) - return nil + for _, actionKey := range actionKeys { + if !strings.Contains(actionKey, ingressutil.ALBActionPrefix) { + continue + } + resetALBAction, err := getResetALBActionStr(ingress, actionKey) + if err != nil { + log.WithField(logutil.RolloutKey, roName). + WithField(logutil.IngressKey, ingress.GetName()). + WithField(logutil.NamespaceKey, ingress.GetNamespace()). + Error(err) + return nil + } + annotations := newIngress.GetAnnotations() + annotations[actionKey] = resetALBAction + newIngress.SetAnnotations(annotations) } - annotations := newIngress.GetAnnotations() - annotations[actionKey] = resetALBAction - newIngress.SetAnnotations(annotations) } } if !modified { return nil } - newManagedStr := managedActions.String() newAnnotations := newIngress.GetAnnotations() - newAnnotations[ingressutil.ManagedActionsAnnotation] = newManagedStr - newIngress.SetAnnotations(newAnnotations) - if newManagedStr == "" { - delete(newIngress.GetAnnotations(), ingressutil.ManagedActionsAnnotation) + if len(managedActions) == 0 { + delete(newAnnotations, ingressutil.ManagedAnnotations) + } else { + newAnnotations[ingressutil.ManagedAnnotations] = managedActions.String() } + // delete leftovers from old implementation ManagedActionsAnnotation + delete(newAnnotations, ingressutil.ManagedActionsAnnotation) + newIngress.SetAnnotations(newAnnotations) _, err = c.ingressWrapper.Update(ctx, ingress.GetNamespace(), newIngress) return err } @@ -101,6 +108,14 @@ func getResetALBActionStr(ingress *ingressutil.Ingress, action string) (string, }, }, } + + if previousAction.ForwardConfig.TargetGroupStickinessConfig != nil { + albAction.ForwardConfig.TargetGroupStickinessConfig = &ingressutil.ALBTargetGroupStickinessConfig{ + Enabled: previousAction.ForwardConfig.TargetGroupStickinessConfig.Enabled, + DurationSeconds: previousAction.ForwardConfig.TargetGroupStickinessConfig.DurationSeconds, + } + } + bytes := jsonutil.MustMarshal(albAction) return string(bytes), nil } diff --git a/ingress/alb_test.go b/ingress/alb_test.go index a8ca3db64d..79a1c915a1 100644 --- a/ingress/alb_test.go +++ b/ingress/alb_test.go @@ -32,23 +32,50 @@ const actionTemplate = `{ } }` +const actionTemplateWithStickyConfig = `{ + "Type":"forward", + "ForwardConfig":{ + "TargetGroups":[ + { + "ServiceName":"%s", + "ServicePort":"%d", + "Weight": 85 + },{ + "ServiceName":"%s", + "ServicePort":"%d", + "Weight": 15 + } + ], + "TargetGroupStickinessConfig":{ + "DurationSeconds" : 300, + "Enabled" : true + } + } +}` + func albActionAnnotation(stable string) string { return fmt.Sprintf("%s%s%s", ingressutil.ALBIngressAnnotation, ingressutil.ALBActionPrefix, stable) } -func newALBIngress(name string, port int, serviceName string, rollout string) *extensionsv1beta1.Ingress { +func newALBIngress(name string, port int, serviceName string, rollout string, includeStickyConfig bool) *extensionsv1beta1.Ingress { canaryService := fmt.Sprintf("%s-canary", serviceName) albActionKey := albActionAnnotation(serviceName) - managedBy := fmt.Sprintf("%s:%s", rollout, albActionKey) + albConditionKey := fmt.Sprintf("%s%s%s", ingressutil.ALBIngressAnnotation, ingressutil.ALBConditionPrefix, serviceName) + managedBy := ingressutil.ManagedALBAnnotations{ + rollout: ingressutil.ManagedALBAnnotation{albActionKey, 
albConditionKey}, + } action := fmt.Sprintf(actionTemplate, serviceName, port, canaryService, port) + if includeStickyConfig { + action = fmt.Sprintf(actionTemplateWithStickyConfig, serviceName, port, canaryService, port) + } return &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ - "kubernetes.io/ingress.class": "alb", - albActionKey: action, - ingressutil.ManagedActionsAnnotation: managedBy, + "kubernetes.io/ingress.class": "alb", + albActionKey: action, + ingressutil.ManagedAnnotations: managedBy.String(), }, }, Spec: extensionsv1beta1.IngressSpec{ @@ -98,8 +125,8 @@ func rollout(name, service, ingress string) *v1alpha1.Rollout { func TestInvalidManagedALBActions(t *testing.T) { rollout := rollout("rollout", "stable-service", "test-ingress") - ing := newALBIngress("test-ingress", 80, "stable-service", rollout.Name) - ing.Annotations[ingressutil.ManagedActionsAnnotation] = "invalid-managed-by" + ing := newALBIngress("test-ingress", 80, "stable-service", rollout.Name, false) + ing.Annotations[ingressutil.ManagedAnnotations] = "invalid-managed-by" ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, rollout) @@ -110,7 +137,7 @@ func TestInvalidManagedALBActions(t *testing.T) { } func TestInvalidPreviousALBActionAnnotationValue(t *testing.T) { - ing := newALBIngress("test-ingress", 80, "stable-service", "not-existing-rollout") + ing := newALBIngress("test-ingress", 80, "stable-service", "not-existing-rollout", false) ing.Annotations[albActionAnnotation("stable-service")] = "{" ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, nil) @@ -122,8 +149,8 @@ func TestInvalidPreviousALBActionAnnotationValue(t *testing.T) { } func TestInvalidPreviousALBActionAnnotationKey(t *testing.T) { - ing := newALBIngress("test-ingress", 80, "stable-service", "not-existing-rollout") - ing.Annotations[ingressutil.ManagedActionsAnnotation] = "invalid-action-key" + ing := newALBIngress("test-ingress", 80, "stable-service", "also-not-existing-rollout", false) + ing.Annotations[ingressutil.ManagedAnnotations] = "invalid-action-key" ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, nil) err := ctrl.syncIngress("default/test-ingress") @@ -133,7 +160,7 @@ func TestInvalidPreviousALBActionAnnotationKey(t *testing.T) { } func TestResetActionFailureFindNoPort(t *testing.T) { - ing := newALBIngress("test-ingress", 80, "stable-service", "not-existing-rollout") + ing := newALBIngress("test-ingress", 80, "stable-service", "still-not-existing-rollout", false) ing.Annotations[albActionAnnotation("stable-service")] = "{}" ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, nil) @@ -146,7 +173,7 @@ func TestResetActionFailureFindNoPort(t *testing.T) { func TestALBIngressNoModifications(t *testing.T) { rollout := rollout("rollout", "stable-service", "test-ingress") - ing := newALBIngress("test-ingress", 80, "stable-service", rollout.Name) + ing := newALBIngress("test-ingress", 80, "stable-service", rollout.Name, false) ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, rollout) @@ -157,7 +184,7 @@ func TestALBIngressNoModifications(t *testing.T) { } func TestALBIngressResetAction(t *testing.T) { - ing := newALBIngress("test-ingress", 80, "stable-service", "non-existing-rollout") + ing := newALBIngress("test-ingress", 80, "stable-service", "non-existing-rollout", false) ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, nil) err := 
ctrl.syncIngress("default/test-ingress") @@ -175,7 +202,31 @@ func TestALBIngressResetAction(t *testing.T) { panic(err) } annotations := acc.GetAnnotations() - assert.NotContains(t, annotations, ingressutil.ManagedActionsAnnotation) + assert.NotContains(t, annotations, ingressutil.ManagedAnnotations) expectedAction := `{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"stable-service","ServicePort":"80","Weight":100}]}}` assert.Equal(t, expectedAction, annotations[albActionAnnotation("stable-service")]) } + +func TestALBIngressResetActionWithStickyConfig(t *testing.T) { + ing := newALBIngress("test-ingress", 80, "stable-service", "non-existing-rollout", true) + + ctrl, kubeclient, enqueuedObjects := newFakeIngressController(t, ing, nil) + err := ctrl.syncIngress("default/test-ingress") + assert.Nil(t, err) + assert.Len(t, enqueuedObjects, 0) + actions := kubeclient.Actions() + assert.Len(t, actions, 1) + updateAction, ok := actions[0].(k8stesting.UpdateAction) + if !ok { + assert.Fail(t, "Client call was not an update") + updateAction.GetObject() + } + acc, err := meta.Accessor(updateAction.GetObject()) + if err != nil { + panic(err) + } + annotations := acc.GetAnnotations() + assert.NotContains(t, annotations, ingressutil.ManagedAnnotations) + expectedAction := `{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"stable-service","ServicePort":"80","Weight":100}],"TargetGroupStickinessConfig":{"Enabled":true,"DurationSeconds":300}}}` + assert.Equal(t, expectedAction, annotations[albActionAnnotation("stable-service")]) +} diff --git a/ingress/ingress.go b/ingress/ingress.go index bbe5f2f0d6..7bbf856b89 100644 --- a/ingress/ingress.go +++ b/ingress/ingress.go @@ -140,12 +140,7 @@ func (c *Controller) syncIngress(key string) error { if err != nil { return nil } - // An ingress without annotations cannot be a alb or nginx ingress - if ingress.GetAnnotations() == nil { - return nil - } - annotations := ingress.GetAnnotations() - class := annotations["kubernetes.io/ingress.class"] + class := ingress.GetClass() switch { case hasClass(c.albClasses, class): return c.syncALBIngress(ingress, rollouts) diff --git a/ingress/ingress_test.go b/ingress/ingress_test.go index b26b213e25..8d109fa8af 100644 --- a/ingress/ingress_test.go +++ b/ingress/ingress_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" kubeinformers "k8s.io/client-go/informers" k8sfake "k8s.io/client-go/kubernetes/fake" @@ -23,6 +24,37 @@ import ( ) func newNginxIngress(name string, port int, serviceName string) *extensionsv1beta1.Ingress { + class := "nginx" + return &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: extensionsv1beta1.IngressSpec{ + IngressClassName: &class, + Rules: []extensionsv1beta1.IngressRule{ + { + Host: "fakehost.example.com", + IngressRuleValue: extensionsv1beta1.IngressRuleValue{ + HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ + Paths: []extensionsv1beta1.HTTPIngressPath{ + { + Path: "/foo", + Backend: extensionsv1beta1.IngressBackend{ + ServiceName: serviceName, + ServicePort: intstr.FromInt(port), + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func newNginxIngressWithAnnotation(name string, port int, serviceName string) *extensionsv1beta1.Ingress { return &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ 
Name: name, @@ -54,7 +86,15 @@ func newNginxIngress(name string, port int, serviceName string) *extensionsv1bet } } +func newFakeIngressControllerMultiIngress(t *testing.T, ing []*extensionsv1beta1.Ingress, rollout *v1alpha1.Rollout) (*Controller, *k8sfake.Clientset, map[string]int) { + return underlyingControllerBuilder(t, ing, rollout) +} + func newFakeIngressController(t *testing.T, ing *extensionsv1beta1.Ingress, rollout *v1alpha1.Rollout) (*Controller, *k8sfake.Clientset, map[string]int) { + return underlyingControllerBuilder(t, []*extensionsv1beta1.Ingress{ing}, rollout) +} + +func underlyingControllerBuilder(t *testing.T, ing []*extensionsv1beta1.Ingress, rollout *v1alpha1.Rollout) (*Controller, *k8sfake.Clientset, map[string]int) { t.Helper() client := fake.NewSimpleClientset() if rollout != nil { @@ -62,7 +102,13 @@ func newFakeIngressController(t *testing.T, ing *extensionsv1beta1.Ingress, roll } kubeclient := k8sfake.NewSimpleClientset() if ing != nil { - kubeclient = k8sfake.NewSimpleClientset(ing) + var x []runtime.Object + for _, i := range ing { + if i != nil { + x = append(x, i) + } + } + kubeclient = k8sfake.NewSimpleClientset(x...) } i := informers.NewSharedInformerFactory(client, 0) k8sI := kubeinformers.NewSharedInformerFactory(kubeclient, 0) @@ -107,7 +153,11 @@ func newFakeIngressController(t *testing.T, ing *extensionsv1beta1.Ingress, roll } if ing != nil { - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(ing) + for _, i := range ing { + if i != nil { + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) + } + } } if rollout != nil { i.Argoproj().V1alpha1().Rollouts().Informer().GetIndexer().Add(rollout) @@ -133,6 +183,20 @@ func TestSyncIngressNotReferencedByRollout(t *testing.T) { assert.Len(t, actions, 0) } +func TestSyncIngressNotReferencedByRolloutMultiIngress(t *testing.T) { + ings := []*extensionsv1beta1.Ingress{ + newNginxIngress("test-stable-ingress", 80, "stable-service"), + newNginxIngress("test-stable-ingress-additional", 80, "stable-service"), + } + + ctrl, kubeclient, _ := newFakeIngressControllerMultiIngress(t, ings, nil) + + err := ctrl.syncIngress("default/test-stable-ingress") + assert.NoError(t, err) + actions := kubeclient.Actions() + assert.Len(t, actions, 0) +} + func TestSyncIngressReferencedByRollout(t *testing.T) { ing := newNginxIngress("test-stable-ingress", 80, "stable-service") @@ -165,8 +229,44 @@ func TestSyncIngressReferencedByRollout(t *testing.T) { assert.Equal(t, 1, enqueuedObjects["default/rollout"]) } -func TestSkipIngressWithNoAnnotations(t *testing.T) { - ing := newNginxIngress("test-stable-ingress", 80, "stable-service") +func TestSyncIngressReferencedByRolloutMultiIngress(t *testing.T) { + ings := []*extensionsv1beta1.Ingress{ + newNginxIngress("test-stable-ingress", 80, "stable-service"), + newNginxIngress("test-stable-ingress-additional", 80, "stable-service"), + } + + rollout := &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + StableService: "stable-service", + CanaryService: "canary-service", + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{ + StableIngress: "test-stable-ingress", + AdditionalStableIngresses: []string{"test-stable-ingress-additional"}, + }, + }, + }, + }, + }, + } + + ctrl, kubeclient, enqueuedObjects := newFakeIngressControllerMultiIngress(t, ings, rollout) 
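This test drives the new multi-ingress path through the Go API, with one primary stable ingress plus an additional one. On the manifest side the equivalent configuration would look roughly like the sketch below; the `additionalStableIngresses` key is an assumption based on the Go field `AdditionalStableIngresses`, while the other fields are the existing canary/Nginx routing fields:

```yaml
# Hypothetical Rollout snippet matching the values used in the test above.
spec:
  strategy:
    canary:
      stableService: stable-service
      canaryService: canary-service
      trafficRouting:
        nginx:
          stableIngress: test-stable-ingress
          additionalStableIngresses:
            - test-stable-ingress-additional
```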
+ + err := ctrl.syncIngress("default/test-stable-ingress") + assert.NoError(t, err) + actions := kubeclient.Actions() + assert.Len(t, actions, 0) + assert.Equal(t, 1, enqueuedObjects["default/rollout"]) +} + +func TestSkipIngressWithNoClass(t *testing.T) { + ing := newNginxIngressWithAnnotation("test-stable-ingress", 80, "stable-service") ing.Annotations = nil rollout := &v1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{ @@ -196,3 +296,42 @@ func TestSkipIngressWithNoAnnotations(t *testing.T) { assert.Len(t, actions, 0) assert.Len(t, enqueuedObjects, 0) } + +func TestSkipIngressWithNoAnnotationsMultiIngress(t *testing.T) { + ings := []*extensionsv1beta1.Ingress{ + newNginxIngress("test-stable-ingress", 80, "stable-service"), + newNginxIngress("test-stable-ingress-additional", 80, "stable-service"), + } + for _, i := range ings { + i.Annotations = nil + } + + rollout := &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + StableService: "stable-service", + CanaryService: "canary-service", + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{ + StableIngress: "test-stable-ingress", + AdditionalStableIngresses: []string{"test-stable-ingress-additional"}, + }, + }, + }, + }, + }, + } + + ctrl, kubeclient, enqueuedObjects := newFakeIngressControllerMultiIngress(t, ings, rollout) + + err := ctrl.syncIngress("default/test-stable-ingress") + assert.NoError(t, err) + actions := kubeclient.Actions() + assert.Len(t, actions, 0) + assert.Len(t, enqueuedObjects, 0) +} diff --git a/manifests/crds/analysis-run-crd.yaml b/manifests/crds/analysis-run-crd.yaml index 9d612a9206..962db250b3 100644 --- a/manifests/crds/analysis-run-crd.yaml +++ b/manifests/crds/analysis-run-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: analysisruns.argoproj.io spec: group: argoproj.io @@ -68,6 +68,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -170,6 +192,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -811,6 +840,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -905,6 +944,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1023,6 +1072,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1039,6 +1090,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1378,6 
+1439,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1456,6 +1527,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1468,6 +1543,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1586,6 +1671,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1602,6 +1689,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1945,6 +2042,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2039,6 +2146,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2157,6 +2274,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2173,6 +2292,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2283,6 +2412,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2371,6 +2508,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2433,6 +2572,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -2617,6 +2759,24 @@ spec: type: object status: properties: + dryRunSummary: + properties: + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + successful: + format: int32 + type: integer + type: object message: type: string metricResults: @@ -2628,6 +2788,8 @@ spec: count: format: int32 type: integer + dryRun: + type: boolean error: format: int32 type: integer @@ -2665,6 +2827,10 @@ spec: type: array message: type: string + metadata: + additionalProperties: + type: string + type: object name: type: string phase: @@ -2679,6 +2845,24 @@ spec: type: array phase: type: string + runSummary: + properties: + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + successful: + format: int32 + type: integer + type: object startedAt: format: date-time type: string diff --git a/manifests/crds/analysis-template-crd.yaml b/manifests/crds/analysis-template-crd.yaml index 516a5bf5a8..54aad4b66f 100644 --- 
a/manifests/crds/analysis-template-crd.yaml +++ b/manifests/crds/analysis-template-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: analysistemplates.argoproj.io spec: group: argoproj.io @@ -64,6 +64,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -166,6 +188,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -807,6 +836,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -901,6 +940,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1019,6 +1068,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1035,6 +1086,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1374,6 +1435,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1452,6 +1523,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1464,6 +1539,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1582,6 +1667,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1598,6 +1685,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1941,6 +2038,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2035,6 +2142,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2153,6 +2270,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2169,6 +2288,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + 
type: object httpGet: properties: host: @@ -2279,6 +2408,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2367,6 +2504,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2429,6 +2568,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: diff --git a/manifests/crds/cluster-analysis-template-crd.yaml b/manifests/crds/cluster-analysis-template-crd.yaml index b10ac4a166..1551b9d114 100644 --- a/manifests/crds/cluster-analysis-template-crd.yaml +++ b/manifests/crds/cluster-analysis-template-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: clusteranalysistemplates.argoproj.io spec: group: argoproj.io @@ -64,6 +64,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -166,6 +188,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -807,6 +836,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -901,6 +940,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1019,6 +1068,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1035,6 +1086,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1374,6 +1435,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1452,6 +1523,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1464,6 +1539,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1582,6 +1667,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1598,6 +1685,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1941,6 +2038,16 @@ spec: 
failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2035,6 +2142,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2153,6 +2270,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2169,6 +2288,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2279,6 +2408,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2367,6 +2504,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2429,6 +2568,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: diff --git a/manifests/crds/experiment-crd.yaml b/manifests/crds/experiment-crd.yaml index 2e65d4fe77..7af9a81d52 100644 --- a/manifests/crds/experiment-crd.yaml +++ b/manifests/crds/experiment-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: experiments.argoproj.io spec: group: argoproj.io @@ -84,8 +84,30 @@ spec: - templateName type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array duration: type: string + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array progressDeadlineSeconds: format: int32 type: integer @@ -713,6 +735,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -807,6 +839,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -925,6 +967,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -941,6 +985,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1280,6 +1334,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1358,6 +1422,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1370,6 +1438,16 @@ spec: failureThreshold: format: int32 type: integer + 
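Throughout these CRDs, the embedded pod-template schema also gains a `grpc` probe (required `port`, optional `service`), mirroring the gRPC liveness/readiness probes available in newer Kubernetes releases. A minimal sketch of a Pod using one follows; the image and port are placeholders, and the container is assumed to serve the standard gRPC health-checking protocol.

```yaml
# Illustrative Pod (not part of this diff); the image is a placeholder and must
# serve the standard gRPC health-checking protocol on the probed port.
apiVersion: v1
kind: Pod
metadata:
  name: grpc-probe-demo
spec:
  containers:
  - name: app
    image: example/app:latest
    ports:
    - containerPort: 9000
    readinessProbe:
      grpc:
        port: 9000
        service: readiness           # optional named health-check service
      initialDelaySeconds: 5
      periodSeconds: 10
```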
grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1488,6 +1566,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1504,6 +1584,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1847,6 +1937,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1941,6 +2041,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2059,6 +2169,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2075,6 +2187,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2185,6 +2307,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2273,6 +2403,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2335,6 +2467,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: diff --git a/manifests/crds/rollout-crd.yaml b/manifests/crds/rollout-crd.yaml index 7881a6b8d5..05b8a7e980 100644 --- a/manifests/crds/rollout-crd.yaml +++ b/manifests/crds/rollout-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: rollouts.argoproj.io spec: group: argoproj.io @@ -170,6 +170,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -205,6 +227,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -270,6 +314,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array startingStep: format: int32 type: integer @@ -321,6 +387,19 @@ spec: - type: 
integer - type: string x-kubernetes-int-or-string: true + minPodsPerReplicaSet: + format: int32 + type: integer + pingPong: + properties: + pingService: + type: string + pongService: + type: string + required: + - pingService + - pongService + type: object scaleDownDelayRevisionLimit: format: int32 type: integer @@ -368,6 +447,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -495,6 +596,74 @@ spec: format: int32 type: integer type: object + setHeaderRoute: + properties: + match: + items: + properties: + headerName: + type: string + headerValue: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + required: + - headerName + - headerValue + type: object + type: array + name: + type: string + type: object + setMirrorRoute: + properties: + match: + items: + properties: + headers: + additionalProperties: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: object + method: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + path: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: object + type: array + name: + type: string + percentage: + format: int32 + type: integer + required: + - name + type: object setWeight: format: int32 type: integer @@ -513,6 +682,17 @@ spec: servicePort: format: int32 type: integer + stickinessConfig: + properties: + durationSeconds: + format: int64 + type: integer + enabled: + type: boolean + required: + - durationSeconds + - enabled + type: object required: - ingress - servicePort @@ -526,6 +706,40 @@ spec: required: - mappings type: object + appMesh: + properties: + virtualNodeGroup: + properties: + canaryVirtualNodeRef: + properties: + name: + type: string + required: + - name + type: object + stableVirtualNodeRef: + properties: + name: + type: string + required: + - name + type: object + required: + - canaryVirtualNodeRef + - stableVirtualNodeRef + type: object + virtualService: + properties: + name: + type: string + routes: + items: + type: string + type: array + required: + - name + type: object + type: object istio: properties: destinationRule: @@ -590,12 +804,25 @@ spec: type: object type: array type: object + managedRoutes: + items: + properties: + name: + type: string + required: + - name + type: object + type: array nginx: properties: additionalIngressAnnotations: additionalProperties: type: string type: object + additionalStableIngresses: + items: + type: string + type: array annotationPrefix: type: string stableIngress: @@ -610,6 +837,13 @@ spec: trafficSplitName: type: string type: object + traefik: + properties: + weightedTraefikServiceName: + type: string + required: + - weightedTraefikServiceName + type: object type: object type: object type: object @@ -1198,6 +1432,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1292,6 +1536,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + 
service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1410,6 +1664,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1426,6 +1682,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1765,6 +2031,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1843,6 +2119,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1855,6 +2135,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1973,6 +2263,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1989,6 +2281,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2332,6 +2634,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2426,6 +2738,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2544,6 +2866,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2560,6 +2884,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2670,6 +3004,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2758,6 +3100,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2820,6 +3164,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -2835,7 +3182,9 @@ spec: - whenUnsatisfiable x-kubernetes-list-type: map volumes: - x-kubernetes-preserve-unknown-fields: true + items: + x-kubernetes-preserve-unknown-fields: true + type: array required: - containers type: object @@ -2957,6 +3306,8 @@ spec: - name - status type: object + stablePingPong: + type: string weights: properties: additional: diff --git a/manifests/dashboard-install.yaml b/manifests/dashboard-install.yaml index 4ecf48208e..009f5dff73 100644 --- a/manifests/dashboard-install.yaml +++ b/manifests/dashboard-install.yaml @@ -50,6 +50,15 @@ rules: - get - list - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - get + - update + - list + - watch - 
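The rollout CRD hunks above introduce header- and mirror-based canary steps (`setHeaderRoute`, `setMirrorRoute`) together with `trafficRouting.managedRoutes`, which declares the route names the traffic router may manage. A hedged sketch of a canary strategy fragment using them with the Istio provider; virtual service, route, and service names are placeholders.

```yaml
# Illustrative canary strategy fragment (not a complete Rollout); route and
# service names are placeholders.
strategy:
  canary:
    canaryService: canary-svc
    stableService: stable-svc
    trafficRouting:
      managedRoutes:                 # routes the controller may create and remove
      - name: header-route
      - name: mirror-route
      istio:
        virtualService:
          name: demo-vsvc
    steps:
    - setHeaderRoute:
        name: header-route
        match:
        - headerName: X-Canary
          headerValue:
            exact: "true"
    - pause: {}
    - setMirrorRoute:
        name: mirror-route
        percentage: 50               # share of matching traffic mirrored to the canary
        match:
        - method:
            exact: GET
    - setWeight: 20
    - pause: {}
```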
apiGroups: - apps resources: diff --git a/manifests/dashboard-install/dashboard-clusterrole.yaml b/manifests/dashboard-install/dashboard-clusterrole.yaml index 7523a16ed0..2499752457 100644 --- a/manifests/dashboard-install/dashboard-clusterrole.yaml +++ b/manifests/dashboard-install/dashboard-clusterrole.yaml @@ -40,6 +40,15 @@ rules: - get - list - watch + - apiGroups: + - apps + resources: + - deployments + verbs: + - get + - update + - list + - watch - apiGroups: - apps resources: diff --git a/manifests/install.yaml b/manifests/install.yaml index b957a4f992..22ea1f53a3 100644 --- a/manifests/install.yaml +++ b/manifests/install.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: analysisruns.argoproj.io spec: group: argoproj.io @@ -69,6 +69,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -171,6 +193,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -812,6 +841,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -906,6 +945,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1024,6 +1073,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1040,6 +1091,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1379,6 +1440,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1457,6 +1528,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1469,6 +1544,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1587,6 +1672,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1603,6 +1690,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1946,6 +2043,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2040,6 
+2147,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2158,6 +2275,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2174,6 +2293,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2284,6 +2413,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2372,6 +2509,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2434,6 +2573,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -2618,6 +2760,24 @@ spec: type: object status: properties: + dryRunSummary: + properties: + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + successful: + format: int32 + type: integer + type: object message: type: string metricResults: @@ -2629,6 +2789,8 @@ spec: count: format: int32 type: integer + dryRun: + type: boolean error: format: int32 type: integer @@ -2666,6 +2828,10 @@ spec: type: array message: type: string + metadata: + additionalProperties: + type: string + type: object name: type: string phase: @@ -2680,6 +2846,24 @@ spec: type: array phase: type: string + runSummary: + properties: + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + successful: + format: int32 + type: integer + type: object startedAt: format: date-time type: string @@ -2697,7 +2881,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: analysistemplates.argoproj.io spec: group: argoproj.io @@ -2759,6 +2943,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -2861,6 +3067,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -3502,6 +3715,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -3596,6 +3819,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -3714,6 +3947,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: 
string type: object @@ -3730,6 +3965,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4069,6 +4314,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4147,6 +4402,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -4159,6 +4418,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4277,6 +4546,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -4293,6 +4564,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4636,6 +4917,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4730,6 +5021,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4848,6 +5149,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -4864,6 +5167,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4974,6 +5287,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -5062,6 +5383,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -5124,6 +5447,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -5315,7 +5641,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: clusteranalysistemplates.argoproj.io spec: group: argoproj.io @@ -5377,6 +5703,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -5479,12 +5827,19 @@ spec: query: type: string type: object - job: + influxdb: properties: - metadata: - properties: - annotations: - additionalProperties: + profile: + type: string + query: + type: string + type: object + job: + properties: + metadata: + 
properties: + annotations: + additionalProperties: type: string type: object labels: @@ -6120,6 +6475,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6214,6 +6579,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6332,6 +6707,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -6348,6 +6725,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6687,6 +7074,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6765,6 +7162,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -6777,6 +7178,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6895,6 +7306,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -6911,6 +7324,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7254,6 +7677,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7348,6 +7781,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7466,6 +7909,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -7482,6 +7927,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7592,6 +8047,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -7680,6 +8143,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -7742,6 +8207,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -7933,7 +8401,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: experiments.argoproj.io spec: group: 
argoproj.io @@ -8015,8 +8483,30 @@ spec: - templateName type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array duration: type: string + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array progressDeadlineSeconds: format: int32 type: integer @@ -8644,6 +9134,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -8738,6 +9238,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -8856,6 +9366,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -8872,6 +9384,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9211,6 +9733,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9289,6 +9821,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -9301,6 +9837,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9419,6 +9965,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -9435,6 +9983,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9778,6 +10336,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9872,6 +10440,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9990,6 +10568,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -10006,6 +10586,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -10116,6 +10706,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -10204,6 +10802,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -10266,6 +10866,9 @@ spec: 
maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -10400,7 +11003,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: rollouts.argoproj.io spec: group: argoproj.io @@ -10568,6 +11171,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -10603,6 +11228,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -10668,6 +11315,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array startingStep: format: int32 type: integer @@ -10719,6 +11388,16 @@ spec: - type: integer - type: string x-kubernetes-int-or-string: true + pingPong: + properties: + pingService: + type: string + pongService: + type: string + required: + - pingService + - pongService + type: object scaleDownDelayRevisionLimit: format: int32 type: integer @@ -10766,6 +11445,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -10893,6 +11594,74 @@ spec: format: int32 type: integer type: object + setHeaderRoute: + properties: + match: + items: + properties: + headerName: + type: string + headerValue: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + required: + - headerName + - headerValue + type: object + type: array + name: + type: string + type: object + setMirrorRoute: + properties: + match: + items: + properties: + headers: + additionalProperties: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: object + method: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + path: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: object + type: array + name: + type: string + percentage: + format: int32 + type: integer + required: + - name + type: object setWeight: format: int32 type: integer @@ -10911,6 +11680,17 @@ spec: servicePort: format: int32 type: integer + stickinessConfig: + properties: + durationSeconds: + format: int64 + type: integer + enabled: + type: boolean + required: + - durationSeconds + - enabled + type: object required: - ingress - servicePort 
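The canary strategy also picks up a `pingPong` block (a ping Service and a pong Service) and a matching `stablePingPong` status field, used for ping-pong style traffic switching; the ALB router is the provider this is typically paired with. A rough fragment under that assumption; service and ingress names are placeholders.

```yaml
# Illustrative canary fragment (not a complete Rollout); service and ingress
# names are placeholders.
strategy:
  canary:
    pingPong:
      pingService: ping-svc
      pongService: pong-svc
    trafficRouting:
      alb:
        ingress: demo-ingress
        servicePort: 443
    steps:
    - setWeight: 20
    - pause: {}
```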
@@ -10924,6 +11704,40 @@ spec: required: - mappings type: object + appMesh: + properties: + virtualNodeGroup: + properties: + canaryVirtualNodeRef: + properties: + name: + type: string + required: + - name + type: object + stableVirtualNodeRef: + properties: + name: + type: string + required: + - name + type: object + required: + - canaryVirtualNodeRef + - stableVirtualNodeRef + type: object + virtualService: + properties: + name: + type: string + routes: + items: + type: string + type: array + required: + - name + type: object + type: object istio: properties: destinationRule: @@ -10988,12 +11802,25 @@ spec: type: object type: array type: object + managedRoutes: + items: + properties: + name: + type: string + required: + - name + type: object + type: array nginx: properties: additionalIngressAnnotations: additionalProperties: type: string type: object + additionalStableIngresses: + items: + type: string + type: array annotationPrefix: type: string stableIngress: @@ -11008,6 +11835,13 @@ spec: trafficSplitName: type: string type: object + traefik: + properties: + weightedTraefikServiceName: + type: string + required: + - weightedTraefikServiceName + type: object type: object type: object type: object @@ -11596,6 +12430,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -11690,6 +12534,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -11808,6 +12662,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -11824,6 +12680,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12163,6 +13029,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12241,6 +13117,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -12253,6 +13133,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12371,6 +13261,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -12387,6 +13279,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12730,6 +13632,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12824,6 +13736,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ 
-12942,6 +13864,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -12958,6 +13882,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -13068,6 +14002,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -13156,6 +14098,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -13218,6 +14162,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -13233,7 +14180,9 @@ spec: - whenUnsatisfiable x-kubernetes-list-type: map volumes: - x-kubernetes-preserve-unknown-fields: true + items: + x-kubernetes-preserve-unknown-fields: true + type: array required: - containers type: object @@ -13355,6 +14304,8 @@ spec: - name - status type: object + stablePingPong: + type: string weights: properties: additional: @@ -13578,6 +14529,7 @@ rules: - watch - patch - create + - delete - apiGroups: - coordination.k8s.io resources: @@ -13602,6 +14554,7 @@ rules: verbs: - list - update + - watch - apiGroups: - "" resources: @@ -13686,6 +14639,33 @@ rules: verbs: - list - get +- apiGroups: + - appmesh.k8s.aws + resources: + - virtualservices + verbs: + - watch + - get + - list +- apiGroups: + - appmesh.k8s.aws + resources: + - virtualnodes + - virtualrouters + verbs: + - watch + - get + - list + - update + - patch +- apiGroups: + - traefik.containo.us + resources: + - traefikservices + verbs: + - watch + - get + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml index a5c88a7efc..c53eeb0c67 100644 --- a/manifests/namespace-install.yaml +++ b/manifests/namespace-install.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: analysisruns.argoproj.io spec: group: argoproj.io @@ -69,6 +69,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -171,6 +193,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -812,6 +841,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -906,6 +945,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1024,6 +1073,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string 
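The new ClusterRole rules above grant the controller access to `appmesh.k8s.aws` VirtualServices, VirtualNodes, and VirtualRouters as well as `traefik.containo.us` TraefikServices, backing the `appMesh` and `traefik` traffic routers added to the rollout CRD. A hedged sketch of the corresponding `trafficRouting` blocks follows; all resource names are placeholders, and in practice only one provider would be configured.

```yaml
# Illustrative trafficRouting blocks for the two new providers (not part of this
# diff); all resource names are placeholders.
trafficRouting:
  # AWS App Mesh: weights are shifted between canary and stable virtual nodes.
  appMesh:
    virtualService:
      name: demo-vsvc                # appmesh.k8s.aws VirtualService
      routes:
      - primary                      # route(s) on the backing VirtualRouter to adjust
    virtualNodeGroup:
      canaryVirtualNodeRef:
        name: demo-canary-vn
      stableVirtualNodeRef:
        name: demo-stable-vn
  # Traefik: weights are written onto an existing TraefikService.
  traefik:
    weightedTraefikServiceName: demo-traefikservice
```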
type: object @@ -1040,6 +1091,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1379,6 +1440,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1457,6 +1528,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -1469,6 +1544,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1587,6 +1672,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -1603,6 +1690,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -1946,6 +2043,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2040,6 +2147,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2158,6 +2275,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2174,6 +2293,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -2284,6 +2413,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -2372,6 +2509,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -2434,6 +2573,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -2618,6 +2760,24 @@ spec: type: object status: properties: + dryRunSummary: + properties: + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + successful: + format: int32 + type: integer + type: object message: type: string metricResults: @@ -2629,6 +2789,8 @@ spec: count: format: int32 type: integer + dryRun: + type: boolean error: format: int32 type: integer @@ -2666,6 +2828,10 @@ spec: type: array message: type: string + metadata: + additionalProperties: + type: string + type: object name: type: string phase: @@ -2680,6 +2846,24 @@ spec: type: array phase: type: string + runSummary: + properties: + count: + format: int32 + type: integer + error: + format: int32 + type: integer + failed: + format: int32 + type: integer + inconclusive: + format: int32 + type: integer + successful: + format: 
int32 + type: integer + type: object startedAt: format: date-time type: string @@ -2697,7 +2881,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: analysistemplates.argoproj.io spec: group: argoproj.io @@ -2759,6 +2943,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -2861,6 +3067,13 @@ spec: query: type: string type: object + influxdb: + properties: + profile: + type: string + query: + type: string + type: object job: properties: metadata: @@ -3502,6 +3715,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -3596,6 +3819,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -3714,6 +3947,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -3730,6 +3965,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4069,6 +4314,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4147,6 +4402,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -4159,6 +4418,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4277,6 +4546,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -4293,6 +4564,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4636,6 +4917,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4730,6 +5021,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -4848,6 +5149,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -4864,6 +5167,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port 
+ type: object httpGet: properties: host: @@ -4974,6 +5287,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -5062,6 +5383,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -5124,6 +5447,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -5315,7 +5641,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: clusteranalysistemplates.argoproj.io spec: group: argoproj.io @@ -5377,6 +5703,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array metrics: items: properties: @@ -5479,12 +5827,19 @@ spec: query: type: string type: object - job: + influxdb: properties: - metadata: - properties: - annotations: - additionalProperties: + profile: + type: string + query: + type: string + type: object + job: + properties: + metadata: + properties: + annotations: + additionalProperties: type: string type: object labels: @@ -6120,6 +6475,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6214,6 +6579,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6332,6 +6707,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -6348,6 +6725,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6687,6 +7074,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6765,6 +7162,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -6777,6 +7178,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -6895,6 +7306,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -6911,6 +7324,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7254,6 +7677,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: 
int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7348,6 +7781,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7466,6 +7909,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -7482,6 +7927,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -7592,6 +8047,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -7680,6 +8143,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -7742,6 +8207,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -7933,7 +8401,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: experiments.argoproj.io spec: group: argoproj.io @@ -8015,8 +8483,30 @@ spec: - templateName type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array duration: type: string + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array progressDeadlineSeconds: format: int32 type: integer @@ -8644,6 +9134,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -8738,6 +9238,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -8856,6 +9366,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -8872,6 +9384,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9211,6 +9733,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9289,6 +9821,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -9301,6 +9837,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9419,6 +9965,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: 
type: string type: object @@ -9435,6 +9983,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9778,6 +10336,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9872,6 +10440,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -9990,6 +10568,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -10006,6 +10586,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -10116,6 +10706,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -10204,6 +10802,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -10266,6 +10866,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -10400,7 +11003,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.7.0 name: rollouts.argoproj.io spec: group: argoproj.io @@ -10568,6 +11171,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -10603,6 +11228,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -10668,6 +11315,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array startingStep: format: int32 type: integer @@ -10719,6 +11388,16 @@ spec: - type: integer - type: string x-kubernetes-int-or-string: true + pingPong: + properties: + pingService: + type: string + pongService: + type: string + required: + - pingService + - pongService + type: object scaleDownDelayRevisionLimit: format: int32 type: integer @@ -10766,6 +11445,28 @@ spec: - name type: object type: array + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: 
array + measurementRetention: + items: + properties: + limit: + format: int32 + type: integer + metricName: + type: string + required: + - limit + - metricName + type: object + type: array templates: items: properties: @@ -10893,6 +11594,74 @@ spec: format: int32 type: integer type: object + setHeaderRoute: + properties: + match: + items: + properties: + headerName: + type: string + headerValue: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + required: + - headerName + - headerValue + type: object + type: array + name: + type: string + type: object + setMirrorRoute: + properties: + match: + items: + properties: + headers: + additionalProperties: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: object + method: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + path: + properties: + exact: + type: string + prefix: + type: string + regex: + type: string + type: object + type: object + type: array + name: + type: string + percentage: + format: int32 + type: integer + required: + - name + type: object setWeight: format: int32 type: integer @@ -10911,6 +11680,17 @@ spec: servicePort: format: int32 type: integer + stickinessConfig: + properties: + durationSeconds: + format: int64 + type: integer + enabled: + type: boolean + required: + - durationSeconds + - enabled + type: object required: - ingress - servicePort @@ -10924,6 +11704,40 @@ spec: required: - mappings type: object + appMesh: + properties: + virtualNodeGroup: + properties: + canaryVirtualNodeRef: + properties: + name: + type: string + required: + - name + type: object + stableVirtualNodeRef: + properties: + name: + type: string + required: + - name + type: object + required: + - canaryVirtualNodeRef + - stableVirtualNodeRef + type: object + virtualService: + properties: + name: + type: string + routes: + items: + type: string + type: array + required: + - name + type: object + type: object istio: properties: destinationRule: @@ -10988,12 +11802,25 @@ spec: type: object type: array type: object + managedRoutes: + items: + properties: + name: + type: string + required: + - name + type: object + type: array nginx: properties: additionalIngressAnnotations: additionalProperties: type: string type: object + additionalStableIngresses: + items: + type: string + type: array annotationPrefix: type: string stableIngress: @@ -11008,6 +11835,13 @@ spec: trafficSplitName: type: string type: object + traefik: + properties: + weightedTraefikServiceName: + type: string + required: + - weightedTraefikServiceName + type: object type: object type: object type: object @@ -11596,6 +12430,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -11690,6 +12534,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -11808,6 +12662,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -11824,6 +12680,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: 
@@ -12163,6 +13029,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12241,6 +13117,10 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: exec: @@ -12253,6 +13133,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12371,6 +13261,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -12387,6 +13279,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12730,6 +13632,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12824,6 +13736,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -12942,6 +13864,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -12958,6 +13882,16 @@ spec: failureThreshold: format: int32 type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object httpGet: properties: host: @@ -13068,6 +14002,14 @@ spec: additionalProperties: type: string type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -13156,6 +14098,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string type: object @@ -13218,6 +14162,9 @@ spec: maxSkew: format: int32 type: integer + minDomains: + format: int32 + type: integer topologyKey: type: string whenUnsatisfiable: @@ -13233,7 +14180,9 @@ spec: - whenUnsatisfiable x-kubernetes-list-type: map volumes: - x-kubernetes-preserve-unknown-fields: true + items: + x-kubernetes-preserve-unknown-fields: true + type: array required: - containers type: object @@ -13355,6 +14304,8 @@ spec: - name - status type: object + stablePingPong: + type: string weights: properties: additional: @@ -13578,6 +14529,7 @@ rules: - watch - patch - create + - delete - apiGroups: - coordination.k8s.io resources: @@ -13602,6 +14554,7 @@ rules: verbs: - list - update + - watch - apiGroups: - "" resources: @@ -13686,6 +14639,33 @@ rules: verbs: - list - get +- apiGroups: + - appmesh.k8s.aws + resources: + - virtualservices + verbs: + - watch + - get + - list +- apiGroups: + - appmesh.k8s.aws + resources: + - virtualnodes + - virtualrouters + verbs: + - watch + - get + - list + - update + - patch +- apiGroups: + - traefik.containo.us + resources: + - traefikservices + verbs: + - watch + - get + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/manifests/notifications-install.yaml b/manifests/notifications-install.yaml index 
f7d8a4436e..c0f6b2aa59 100644 --- a/manifests/notifications-install.yaml +++ b/manifests/notifications-install.yaml @@ -1,6 +1,110 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: v1 data: + template.analysis-run-error: | + message: Rollout {{.rollout.metadata.name}}'s analysis run is in error state. + email: + subject: Rollout {{.rollout.metadata.name}}'s analysis run is in error state. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#ECB22E", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] + template.analysis-run-failed: | + message: Rollout {{.rollout.metadata.name}}'s analysis run failed. + email: + subject: Rollout {{.rollout.metadata.name}}'s analysis run failed. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#E01E5A", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] + template.analysis-run-running: | + message: Rollout {{.rollout.metadata.name}}'s analysis run is running. + email: + subject: Rollout {{.rollout.metadata.name}}'s analysis run is running. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#18be52", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] + template.rollout-aborted: | + message: Rollout {{.rollout.metadata.name}} has been aborted. + email: + subject: Rollout {{.rollout.metadata.name}} has been aborted. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#E01E5A", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] template.rollout-completed: | message: Rollout {{.rollout.metadata.name}} has been completed. email: @@ -27,6 +131,32 @@ data: {{end}} ] }] + template.rollout-paused: | + message: Rollout {{.rollout.metadata.name}} has been paused. + email: + subject: Rollout {{.rollout.metadata.name}} has been paused. 
+ slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#18be52", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] template.rollout-step-completed: | message: Rollout {{.rollout.metadata.name}} step number {{ add .rollout.status.currentStepIndex 1}}/{{len .rollout.spec.strategy.canary.steps}} has been completed. email: @@ -120,8 +250,18 @@ data: {{end}} ] }] + trigger.on-analysis-run-error: | + - send: [analysis-run-error] + trigger.on-analysis-run-failed: | + - send: [analysis-run-failed] + trigger.on-analysis-run-running: | + - send: [analysis-run-running] + trigger.on-rollout-aborted: | + - send: [rollout-aborted] trigger.on-rollout-completed: | - send: [rollout-completed] + trigger.on-rollout-paused: | + - send: [rollout-paused] trigger.on-rollout-step-completed: | - send: [rollout-step-completed] trigger.on-rollout-updated: | diff --git a/manifests/notifications/kustomization.yaml b/manifests/notifications/kustomization.yaml index d63cbca662..e8b7beeed9 100644 --- a/manifests/notifications/kustomization.yaml +++ b/manifests/notifications/kustomization.yaml @@ -8,4 +8,9 @@ patchesStrategicMerge: - on-rollout-completed.yaml - on-scaling-replica-set.yaml - on-rollout-step-completed.yaml - - on-rollout-updated.yaml \ No newline at end of file + - on-rollout-updated.yaml + - on-rollout-aborted.yaml + - on-rollout-paused.yaml + - on-analysis-run-running.yaml + - on-analysis-run-error.yaml + - on-analysis-run-failed.yaml diff --git a/manifests/notifications/on-analysis-run-error.yaml b/manifests/notifications/on-analysis-run-error.yaml new file mode 100644 index 0000000000..e92aff4487 --- /dev/null +++ b/manifests/notifications/on-analysis-run-error.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-rollouts-notification-configmap +data: + trigger.on-analysis-run-error: | + - send: [analysis-run-error] + template.analysis-run-error: | + message: Rollout {{.rollout.metadata.name}}'s analysis run is in error state. + email: + subject: Rollout {{.rollout.metadata.name}}'s analysis run is in error state. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#ECB22E", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] diff --git a/manifests/notifications/on-analysis-run-failed.yaml b/manifests/notifications/on-analysis-run-failed.yaml new file mode 100644 index 0000000000..822fde16c9 --- /dev/null +++ b/manifests/notifications/on-analysis-run-failed.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-rollouts-notification-configmap +data: + trigger.on-analysis-run-failed: | + - send: [analysis-run-failed] + template.analysis-run-failed: | + message: Rollout {{.rollout.metadata.name}}'s analysis run failed. + email: + subject: Rollout {{.rollout.metadata.name}}'s analysis run failed. 
+ slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#E01E5A", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] diff --git a/manifests/notifications/on-analysis-run-running.yaml b/manifests/notifications/on-analysis-run-running.yaml new file mode 100644 index 0000000000..c7cbd6ac07 --- /dev/null +++ b/manifests/notifications/on-analysis-run-running.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-rollouts-notification-configmap +data: + trigger.on-analysis-run-running: | + - send: [analysis-run-running] + template.analysis-run-running: | + message: Rollout {{.rollout.metadata.name}}'s analysis run is running. + email: + subject: Rollout {{.rollout.metadata.name}}'s analysis run is running. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#18be52", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] diff --git a/manifests/notifications/on-rollout-aborted.yaml b/manifests/notifications/on-rollout-aborted.yaml new file mode 100644 index 0000000000..6ad3448ce5 --- /dev/null +++ b/manifests/notifications/on-rollout-aborted.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-rollouts-notification-configmap +data: + trigger.on-rollout-aborted: | + - send: [rollout-aborted] + template.rollout-aborted: | + message: Rollout {{.rollout.metadata.name}} has been aborted. + email: + subject: Rollout {{.rollout.metadata.name}} has been aborted. + slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#E01E5A", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] diff --git a/manifests/notifications/on-rollout-paused.yaml b/manifests/notifications/on-rollout-paused.yaml new file mode 100644 index 0000000000..48085f67b8 --- /dev/null +++ b/manifests/notifications/on-rollout-paused.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-rollouts-notification-configmap +data: + trigger.on-rollout-paused: | + - send: [rollout-paused] + template.rollout-paused: | + message: Rollout {{.rollout.metadata.name}} has been paused. + email: + subject: Rollout {{.rollout.metadata.name}} has been paused. 
+ slack: + attachments: | + [{ + "title": "{{ .rollout.metadata.name}}", + "color": "#18be52", + "fields": [ + { + "title": "Strategy", + "value": "{{if .rollout.spec.strategy.blueGreen}}BlueGreen{{end}}{{if .rollout.spec.strategy.canary}}Canary{{end}}", + "short": true + } + {{range $index, $c := .rollout.spec.template.spec.containers}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.name}}", + "value": "{{$c.image}}", + "short": true + } + {{end}} + ] + }] diff --git a/manifests/role/argo-rollouts-clusterrole.yaml b/manifests/role/argo-rollouts-clusterrole.yaml index 207e50ad17..db1a97f875 100644 --- a/manifests/role/argo-rollouts-clusterrole.yaml +++ b/manifests/role/argo-rollouts-clusterrole.yaml @@ -68,7 +68,7 @@ rules: - list - watch # services patch needed to update selector of canary/stable/active/preview services -# services create needed to create services for experiments +# services create needed to create and delete services for experiments - apiGroups: - "" resources: @@ -79,6 +79,7 @@ rules: - watch - patch - create + - delete # leases create/get/update needed for leader election - apiGroups: - coordination.k8s.io @@ -106,6 +107,7 @@ rules: verbs: - list - update + - watch # pods eviction needed for restart - apiGroups: - "" @@ -198,3 +200,32 @@ rules: verbs: - list - get +# AppMesh virtualservices/virtualrouter CRD read-only access needed for using the App Mesh provider +- apiGroups: + - appmesh.k8s.aws + resources: + - virtualservices + verbs: + - watch + - get + - list +# AppMesh virtualnode CRD r/w access needed for using the App Mesh provider +- apiGroups: + - appmesh.k8s.aws + resources: + - virtualnodes + - virtualrouters + verbs: + - watch + - get + - list + - update + - patch +- apiGroups: + - traefik.containo.us + resources: + - traefikservices + verbs: + - watch + - get + - update diff --git a/metricproviders/cloudwatch/cloudwatch.go b/metricproviders/cloudwatch/cloudwatch.go index 09312cb25e..49e318e666 100644 --- a/metricproviders/cloudwatch/cloudwatch.go +++ b/metricproviders/cloudwatch/cloudwatch.go @@ -9,11 +9,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/evaluate" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -30,7 +30,7 @@ type CloudWatchClient struct { } func (c *CloudWatchClient) Query(interval time.Duration, query []types.MetricDataQuery) (*cloudwatch.GetMetricDataOutput, error) { - endTime := time.Now() + endTime := timeutil.Now() startTime := endTime.Add(-interval) return c.client.GetMetricData(context.TODO(), &cloudwatch.GetMetricDataInput{ StartTime: &startTime, @@ -50,9 +50,14 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. 
+func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + // Run queries with CloudWatch provider for the metric func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() measurement := v1alpha1.Measurement{ StartedAt: &startTime, Metadata: map[string]string{}, @@ -92,7 +97,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph } measurement.Phase = status - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() measurement.FinishedAt = &finishedTime return measurement diff --git a/metricproviders/cloudwatch/cloudwatch_test.go b/metricproviders/cloudwatch/cloudwatch_test.go index 0b52c0b112..cd55dcc41c 100644 --- a/metricproviders/cloudwatch/cloudwatch_test.go +++ b/metricproviders/cloudwatch/cloudwatch_test.go @@ -78,6 +78,9 @@ func TestRunWithQueryError(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.Equal(t, expectedErr.Error(), measurement.Message) assert.NotNil(t, measurement.StartedAt) @@ -103,6 +106,9 @@ func TestRunWithResolveArgsError(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.Equal(t, expectedErr.Error(), measurement.Message) assert.NotNil(t, measurement.StartedAt) @@ -131,6 +137,9 @@ constraint: Member must not be null'`) }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.Equal(t, expectedErr.Error(), measurement.Message) assert.NotNil(t, measurement.StartedAt) diff --git a/metricproviders/datadog/datadog.go b/metricproviders/datadog/datadog.go index a436523a99..9461a61ed6 100644 --- a/metricproviders/datadog/datadog.go +++ b/metricproviders/datadog/datadog.go @@ -17,12 +17,14 @@ import ( "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/evaluate" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" + log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) -var unixNow = func() int64 { return time.Now().Unix() } +var unixNow = func() int64 { return timeutil.Now().Unix() } const ( //ProviderType indicates the provider is datadog @@ -57,8 +59,13 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. 
+func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() // Measurement to pass back measurement := v1alpha1.Measurement{ @@ -116,7 +123,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph measurement.Value = value measurement.Phase = status - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() measurement.FinishedAt = &finishedTime return measurement diff --git a/metricproviders/datadog/datadog_test.go b/metricproviders/datadog/datadog_test.go index 03142b4ae6..b4432f3260 100644 --- a/metricproviders/datadog/datadog_test.go +++ b/metricproviders/datadog/datadog_test.go @@ -314,6 +314,9 @@ func TestRunSuite(t *testing.T) { provider, _ := NewDatadogProvider(*logCtx, fakeClient) + metricsMetadata := provider.GetMetadata(test.metric) + assert.Nil(t, metricsMetadata) + // Get our result measurement := provider.Run(newAnalysisRun(), test.metric) diff --git a/metricproviders/graphite/graphite.go b/metricproviders/graphite/graphite.go index c12fb3142b..d34a5ca7b2 100644 --- a/metricproviders/graphite/graphite.go +++ b/metricproviders/graphite/graphite.go @@ -8,11 +8,11 @@ import ( "time" log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/evaluate" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -49,9 +49,14 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. +func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + // Run queries Graphite for the metric. 
func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() newMeasurement := v1alpha1.Measurement{ StartedAt: &startTime, } @@ -72,7 +77,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph } newMeasurement.Phase = newStatus - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() newMeasurement.FinishedAt = &finishedTime return newMeasurement diff --git a/metricproviders/graphite/graphite_test.go b/metricproviders/graphite/graphite_test.go index 5d041aa328..d38555bca6 100644 --- a/metricproviders/graphite/graphite_test.go +++ b/metricproviders/graphite/graphite_test.go @@ -50,6 +50,9 @@ func TestType(t *testing.T) { func TestRunSuccessfulEvaluation(t *testing.T) { response := 10.000000 g := NewGraphiteProvider(newMockAPI(&response, nil), log.Entry{}) + metricsMetadata := g.GetMetadata(newTestingMetric()) + assert.Nil(t, metricsMetadata) + measurement := g.Run(&v1alpha1.AnalysisRun{}, newTestingMetric()) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, "[10.000000]", measurement.Value) @@ -60,6 +63,9 @@ func TestRunSuccessfulEvaluation(t *testing.T) { func TestRunFailedEvaluation(t *testing.T) { response := 5.000000 g := NewGraphiteProvider(newMockAPI(&response, nil), log.Entry{}) + metricsMetadata := g.GetMetadata(newTestingMetric()) + assert.Nil(t, metricsMetadata) + measurement := g.Run(&v1alpha1.AnalysisRun{}, newTestingMetric()) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, "[5.000000]", measurement.Value) diff --git a/metricproviders/influxdb/influxdb.go b/metricproviders/influxdb/influxdb.go new file mode 100644 index 0000000000..f67c8d5f94 --- /dev/null +++ b/metricproviders/influxdb/influxdb.go @@ -0,0 +1,136 @@ +package influxdb + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/evaluate" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + influxapi "github.com/influxdata/influxdb-client-go/v2/api" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + metricutil "github.com/argoproj/argo-rollouts/utils/metric" +) + +const ( + //ProviderType indicates the provider is InfluxDB + ProviderType = "Influxdb" + //DefaultInfluxdbTokensSecretName is the k8s secret that has InfluxDB api token, org and address + DefaultInfluxdbTokensSecretName = "influxdb" + influxdbToken = "authToken" + influxdbOrg = "org" + influxdbAddress = "address" + defaultQueryTimeout = 30 * time.Second +) + +// Provider contains all the required components to run a influxdb flux query +type Provider struct { + api influxapi.QueryAPI + logCtx log.Entry +} + +// Type indicates provider is a influxdb provider +func (p *Provider) Type() string { + return ProviderType +} + +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. 
+func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + +// Run queries influxdb for the metric +func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { + startTime := metav1.Now() + newMeasurement := v1alpha1.Measurement{ + StartedAt: &startTime, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultQueryTimeout) + defer cancel() + result, err := p.api.Query(ctx, metric.Provider.Influxdb.Query) + if err != nil { + return metricutil.MarkMeasurementError(newMeasurement, err) + } + newValue, newStatus, err := p.processResponse(metric, result) + if err != nil { + return metricutil.MarkMeasurementError(newMeasurement, err) + } + newMeasurement.Value = newValue + + newMeasurement.Phase = newStatus + finishedTime := metav1.Now() + newMeasurement.FinishedAt = &finishedTime + return newMeasurement +} + +// Resume should not be used by the influxdb provider since all the work should occur in the Run method. +func (p *Provider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { + p.logCtx.Warn("Influxdb provider should not execute the Resume method") + return measurement +} + +// Terminate should not be used by the influxdb provider since all the work should occur in the Run method. +func (p *Provider) Terminate(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { + p.logCtx.Warn("Influxdb provider should not execute the Terminate method") + return measurement +} + +// GarbageCollect is a no-op for the influxdb provider +func (p *Provider) GarbageCollect(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, limit int) error { + return nil +} + +func (p *Provider) processResponse(metric v1alpha1.Metric, result *influxapi.QueryTableResult) (string, v1alpha1.AnalysisPhase, error) { + var res []interface{} + if result == nil { + return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("no QueryTableResult returned from flux query") + } + for result.Next() { + res = append(res, result.Record().Value()) + } + if len(res) == 0 { + return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("no results returned from flux query") + } + status, err := evaluate.EvaluateResult(res, metric, p.logCtx) + if err != nil { + return "", v1alpha1.AnalysisPhaseError, err + } + return fmt.Sprint(res), status, err +} + +// NewInfluxdbProvider Creates a new Influxdb client +func NewInfluxdbProvider(api influxapi.QueryAPI, logCtx log.Entry) *Provider { + return &Provider{ + logCtx: logCtx, + api: api, + } +} + +// NewInfluxdbAPI generates a Influx API from the metric configuration +func NewInfluxdbAPI(metric v1alpha1.Metric, kubeclientset kubernetes.Interface) (influxapi.QueryAPI, error) { + profileSecret := DefaultInfluxdbTokensSecretName + if metric.Provider.Influxdb.Profile != "" { + profileSecret = metric.Provider.Influxdb.Profile + } + ns := defaults.Namespace() + secret, err := kubeclientset.CoreV1().Secrets(ns).Get(context.TODO(), profileSecret, metav1.GetOptions{}) + if err != nil { + return nil, err + } + authToken := string(secret.Data[influxdbToken]) + address := string(secret.Data[influxdbAddress]) + org := string(secret.Data[influxdbOrg]) + + if authToken != "" && address != "" && org != "" { + influxClient := influxdb2.NewClient(address, authToken) + return influxClient.QueryAPI(org), nil + } + + return nil, errors.New("authToken, org, or address not found") +} diff --git a/metricproviders/influxdb/influxdb_test.go 
b/metricproviders/influxdb/influxdb_test.go new file mode 100644 index 0000000000..d4f1512e54 --- /dev/null +++ b/metricproviders/influxdb/influxdb_test.go @@ -0,0 +1,249 @@ +package influxdb + +import ( + "errors" + "fmt" + "io/ioutil" + "strings" + "testing" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + influxdb2 "github.com/influxdata/influxdb-client-go/v2/api" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sfake "k8s.io/client-go/kubernetes/fake" + kubetesting "k8s.io/client-go/testing" +) + +func newAnalysisRun() *v1alpha1.AnalysisRun { + return &v1alpha1.AnalysisRun{} +} + +func TestType(t *testing.T) { + e := log.Entry{} + mock := &mockAPI{} + p := NewInfluxdbProvider(mock, e) + assert.Equal(t, ProviderType, p.Type()) +} + +func TestRunSuccessfully(t *testing.T) { + e := log.Entry{} + csvTable := `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string +#group,false,false,true,true,false,false,true,true,true,true +#default,_result,,,,,,,,, +,result,table,_start,_stop,_time,_value,_field,_measurement,a,b +,,0,2020-02-17T22:19:49.747562847Z,2020-02-18T22:19:49.747562847Z,2020-02-18T10:34:08.135814545Z,1.0,f,test,1,adsfasdf +,,0,2020-02-17T22:19:49.747562847Z,2020-02-18T22:19:49.747562847Z,2020-02-18T22:08:44.850214724Z,6.6,f,test,1,adsfasdf +` + reader := strings.NewReader(csvTable) + result := influxdb2.NewQueryTableResult(ioutil.NopCloser(reader)) + mock := &mockAPI{response: result} + p := NewInfluxdbProvider(mock, e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 1", + FailureCondition: "result[0] != 1", + Provider: v1alpha1.MetricProvider{ + Influxdb: &v1alpha1.InfluxdbMetric{ + Query: "test", + }, + }, + } + measurement := p.Run(newAnalysisRun(), metric) + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + assert.NotNil(t, measurement.StartedAt) + assert.Equal(t, `[1 6.6]`, measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) +} + +func TestRunWithTimeseries(t *testing.T) { + e := log.Entry{} + csvTable := `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string +#group,false,false,true,true,false,false,true,true,true,true +#default,_result,,,,,,,,, +,result,table,_start,_stop,_time,_value,_field,_measurement,a,b +,,0,2020-02-17T22:19:49.747562847Z,2020-02-18T22:19:49.747562847Z,2020-02-18T10:34:08.135814545Z,10,f,test,1,adsfasdf +,,0,2020-02-17T22:19:49.747562847Z,2020-02-18T22:19:49.747562847Z,2020-02-18T22:08:44.850214724Z,20,f,test,1,adsfasdf +` + reader := strings.NewReader(csvTable) + result := influxdb2.NewQueryTableResult(ioutil.NopCloser(reader)) + mock := &mockAPI{response: result} + p := NewInfluxdbProvider(mock, e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[1] < 20", + Provider: v1alpha1.MetricProvider{ + Influxdb: &v1alpha1.InfluxdbMetric{ + Query: "test", + }, + }, + } + measurement := p.Run(newAnalysisRun(), metric) + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + assert.NotNil(t, measurement.StartedAt) + assert.Equal(t, `[10 20]`, measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) +} + 
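A failed evaluation is not exercised by the tests above. A minimal sketch of such a case, assuming the helpers defined in this test file (mockAPI, newAnalysisRun) and assuming that a satisfied FailureCondition is reported by evaluate.EvaluateResult as AnalysisPhaseFailed:

```
// Sketch only: not part of the change set above; reuses the mock plumbing shown in the other tests.
func TestRunWithFailedEvaluation(t *testing.T) {
	e := log.Entry{}
	// Single-row flux CSV whose _value (1.0) violates the success condition below.
	csvTable := `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string
#group,false,false,true,true,false,false,true,true,true,true
#default,_result,,,,,,,,,
,result,table,_start,_stop,_time,_value,_field,_measurement,a,b
,,0,2020-02-17T22:19:49.747562847Z,2020-02-18T22:19:49.747562847Z,2020-02-18T10:34:08.135814545Z,1.0,f,test,1,adsfasdf
`
	result := influxdb2.NewQueryTableResult(ioutil.NopCloser(strings.NewReader(csvTable)))
	p := NewInfluxdbProvider(&mockAPI{response: result}, e)
	metric := v1alpha1.Metric{
		Name:             "foo",
		SuccessCondition: "result[0] == 10",
		FailureCondition: "result[0] != 10",
		Provider: v1alpha1.MetricProvider{
			Influxdb: &v1alpha1.InfluxdbMetric{Query: "test"},
		},
	}
	measurement := p.Run(newAnalysisRun(), metric)
	assert.NotNil(t, measurement.StartedAt)
	assert.NotNil(t, measurement.FinishedAt)
	// The failure condition holds, so the measurement is expected to be marked Failed.
	assert.Equal(t, v1alpha1.AnalysisPhaseFailed, measurement.Phase)
}
```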
+func TestRunWithEmptyResult(t *testing.T) { + e := log.NewEntry(log.New()) + expectedErr := fmt.Errorf("no results returned from flux query") + csvTable := `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string +#group,false,false,true,true,false,false,true,true,true,true +#default,_result,,,,,,,,, +,result,table,_start,_stop,_time,_value,_field,_measurement,a,b +` + reader := strings.NewReader(csvTable) + result := influxdb2.NewQueryTableResult(ioutil.NopCloser(reader)) + mock := &mockAPI{response: result} + p := NewInfluxdbProvider(mock, *e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[0] != 10", + Provider: v1alpha1.MetricProvider{ + Influxdb: &v1alpha1.InfluxdbMetric{ + Query: "test", + }, + }, + } + measurement := p.Run(newAnalysisRun(), metric) + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + assert.Equal(t, expectedErr.Error(), measurement.Message) + assert.NotNil(t, measurement.StartedAt) + assert.Equal(t, "", measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, v1alpha1.AnalysisPhaseError, measurement.Phase) +} + +func TestRunWithEvaluationError(t *testing.T) { + e := log.WithField("", "") + mock := &mockAPI{} + p := NewInfluxdbProvider(mock, *e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result == 10", + FailureCondition: "result != 10", + Provider: v1alpha1.MetricProvider{ + Influxdb: &v1alpha1.InfluxdbMetric{ + Query: "test", + }, + }, + } + measurement := p.Run(newAnalysisRun(), metric) + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + assert.Equal(t, "no QueryTableResult returned from flux query", measurement.Message) + assert.NotNil(t, measurement.StartedAt) + assert.Equal(t, "", measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, v1alpha1.AnalysisPhaseError, measurement.Phase) +} + +func TestResume(t *testing.T) { + e := log.WithField("", "") + mock := &mockAPI{} + p := NewInfluxdbProvider(mock, *e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result == 10", + FailureCondition: "result != 10", + Provider: v1alpha1.MetricProvider{ + Influxdb: &v1alpha1.InfluxdbMetric{ + Query: "test", + }, + }, + } + now := metav1.Now() + previousMeasurement := v1alpha1.Measurement{ + StartedAt: &now, + Phase: v1alpha1.AnalysisPhaseInconclusive, + } + measurement := p.Resume(newAnalysisRun(), metric, previousMeasurement) + assert.Equal(t, previousMeasurement, measurement) +} + +func TestTerminate(t *testing.T) { + e := log.NewEntry(log.New()) + mock := &mockAPI{} + p := NewInfluxdbProvider(mock, *e) + metric := v1alpha1.Metric{} + now := metav1.Now() + previousMeasurement := v1alpha1.Measurement{ + StartedAt: &now, + Phase: v1alpha1.AnalysisPhaseRunning, + } + measurement := p.Terminate(newAnalysisRun(), metric, previousMeasurement) + assert.Equal(t, previousMeasurement, measurement) +} + +func TestGarbageCollect(t *testing.T) { + e := log.NewEntry(log.New()) + mock := &mockAPI{} + p := NewInfluxdbProvider(mock, *e) + err := p.GarbageCollect(nil, v1alpha1.Metric{}, 0) + assert.NoError(t, err) +} + +func TestNewInfluxdbAPI(t *testing.T) { + metric := v1alpha1.Metric{ + Provider: v1alpha1.MetricProvider{ + Influxdb: &v1alpha1.InfluxdbMetric{}, + }, + } + tokenSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultInfluxdbTokensSecretName, + }, + } + fakeClient := k8sfake.NewSimpleClientset() + 
fakeClient.PrependReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + return true, tokenSecret, nil + }) + + t.Run("with default settings", func(t *testing.T) { + tokenSecret.Data = map[string][]byte{ + influxdbToken: []byte("ABCDEFG01234"), + influxdbOrg: []byte("test-org"), + influxdbAddress: []byte("http://localhost:8086"), + } + _, err := NewInfluxdbAPI(metric, fakeClient) + assert.Nil(t, err) + }) + + t.Run("with authToken, org, or address missing", func(t *testing.T) { + tokenSecret.Data = map[string][]byte{ + influxdbToken: []byte("ABCDEFG01234"), + } + _, err := NewInfluxdbAPI(metric, fakeClient) + assert.EqualError(t, err, "authToken, org, or address not found") + }) + + t.Run("when secretName is specified by the metric", func(t *testing.T) { + metric.Provider.Influxdb.Profile = "my-influx-token-secret" + tokenSecret.Name = "my-influx-token-secret" + tokenSecret.Data = map[string][]byte{ + influxdbToken: []byte("ABCDEFG01234"), + influxdbOrg: []byte("test-org"), + influxdbAddress: []byte("http://localhost:8086"), + } + _, err := NewInfluxdbAPI(metric, fakeClient) + assert.Nil(t, err) + }) + t.Run("when the secret is not found", func(t *testing.T) { + fakeClient.PrependReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("secret not found") + }) + _, err := NewInfluxdbAPI(metric, fakeClient) + assert.NotNil(t, err) + }) +} diff --git a/metricproviders/influxdb/mock_test.go b/metricproviders/influxdb/mock_test.go new file mode 100644 index 0000000000..3aff084802 --- /dev/null +++ b/metricproviders/influxdb/mock_test.go @@ -0,0 +1,32 @@ +package influxdb + +import ( + "context" + + influxapi "github.com/influxdata/influxdb-client-go/v2/api" + "github.com/influxdata/influxdb-client-go/v2/domain" +) + +type mockAPI struct { + response *influxapi.QueryTableResult + err error +} + +func (m mockAPI) Query(ctx context.Context, query string) (*influxapi.QueryTableResult, error) { + if m.err != nil { + return nil, m.err + } + return m.response, nil +} + +func (m mockAPI) QueryRaw(context.Context, string, *domain.Dialect) (string, error) { + panic("Not used") +} + +func (m mockAPI) QueryRawWithParams(ctx context.Context, query string, dialect *domain.Dialect, params interface{}) (string, error) { + panic("Not used") +} + +func (m mockAPI) QueryWithParams(ctx context.Context, query string, params interface{}) (*influxapi.QueryTableResult, error) { + panic("Not used") +} diff --git a/metricproviders/job/job.go b/metricproviders/job/job.go index 1f3c42d096..046a9b098b 100644 --- a/metricproviders/job/job.go +++ b/metricproviders/job/job.go @@ -17,6 +17,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -54,6 +55,11 @@ func (p *JobProvider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. +func (p *JobProvider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + // newJobName returns a new job name for the run and metric. Names must be shortened so that it can // fit into a 63 character label, since the k8s job controller incorporates the job name into the // pod spec labels. 
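To make the 63-character constraint described in the comment above concrete, here is a hedged sketch (not the repository's actual newJobName logic) of shortening a generated name so it still fits a Kubernetes label value, using a short hash suffix to keep the result distinct:

```
package main

import (
	"fmt"
	"hash/fnv"
)

// shortenName is illustrative only: it truncates the base name and appends a
// short FNV-1a hash so the combined string stays within the 63-character limit
// imposed on Kubernetes label values.
func shortenName(base, suffix string) string {
	const maxLabelLen = 63
	name := base + "-" + suffix
	if len(name) <= maxLabelLen {
		return name
	}
	h := fnv.New32a()
	h.Write([]byte(name))
	hash := fmt.Sprintf("%08x", h.Sum32())
	keep := maxLabelLen - len(hash) - 1
	if keep > len(base) {
		keep = len(base)
	}
	return base[:keep] + "-" + hash
}

func main() {
	long := "analysis-run-with-a-very-long-generated-name-and-metric-suffix-0123456789"
	fmt.Println(shortenName(long, "success-rate")) // always <= 63 characters
}
```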
@@ -93,7 +99,7 @@ func newMetricJob(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) (*batchv1.J func (p *JobProvider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { ctx := context.TODO() - now := metav1.Now() + now := timeutil.MetaNow() measurement := v1alpha1.Measurement{ StartedAt: &now, Phase: v1alpha1.AnalysisPhaseRunning, @@ -133,7 +139,7 @@ func (p *JobProvider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1a func (p *JobProvider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { jobName, err := getJobName(measurement) - now := metav1.Now() + now := timeutil.MetaNow() if err != nil { return metricutil.MarkMeasurementError(measurement, err) } @@ -166,7 +172,7 @@ func (p *JobProvider) Terminate(run *v1alpha1.AnalysisRun, metric v1alpha1.Metri if err != nil { return metricutil.MarkMeasurementError(measurement, err) } - now := metav1.Now() + now := timeutil.MetaNow() measurement.FinishedAt = &now measurement.Phase = v1alpha1.AnalysisPhaseSuccessful p.logCtx.Infof("job %s/%s terminated", run.Namespace, jobName) diff --git a/metricproviders/job/job_test.go b/metricproviders/job/job_test.go index f2caa8ae8c..98d898f84d 100644 --- a/metricproviders/job/job_test.go +++ b/metricproviders/job/job_test.go @@ -120,6 +120,9 @@ func TestRun(t *testing.T) { p := newTestJobProvider() run := newRunWithJobMetric() metric := run.Spec.Metrics[0] + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(run, metric) assert.Equal(t, v1alpha1.AnalysisPhaseRunning, measurement.Phase) @@ -160,6 +163,8 @@ func TestRunCreateFail(t *testing.T) { fakeClient.PrependReactor("create", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, fmt.Errorf(errMsg) }) + metricsMetadata := p.GetMetadata(run.Spec.Metrics[0]) + assert.Nil(t, metricsMetadata) measurement := p.Run(run, run.Spec.Metrics[0]) assert.Equal(t, v1alpha1.AnalysisPhaseError, measurement.Phase) diff --git a/metricproviders/kayenta/kayenta.go b/metricproviders/kayenta/kayenta.go index e15da1ed3d..fbfbc42123 100644 --- a/metricproviders/kayenta/kayenta.go +++ b/metricproviders/kayenta/kayenta.go @@ -9,14 +9,13 @@ import ( "net/http" "time" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -62,6 +61,11 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. 
+func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + func getCanaryConfigId(metric v1alpha1.Metric, p *Provider) (string, error) { configIdLookupURL := fmt.Sprintf(configIdLookupURLFormat, metric.Provider.Kayenta.Address, metric.Provider.Kayenta.Application, metric.Provider.Kayenta.StorageAccountName) @@ -97,7 +101,7 @@ func getCanaryConfigId(metric v1alpha1.Metric, p *Provider) (string, error) { // Run queries kayentd for the metric func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() newMeasurement := v1alpha1.Measurement{ StartedAt: &startTime, } @@ -157,7 +161,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph newMeasurement.Phase = v1alpha1.AnalysisPhaseRunning - resumeTime := metav1.NewTime(time.Now().Add(resumeDelay)) + resumeTime := metav1.NewTime(timeutil.Now().Add(resumeDelay)) newMeasurement.ResumeAt = &resumeTime return newMeasurement @@ -191,7 +195,7 @@ func (p *Provider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, mea status, ok, err := unstructured.NestedBool(patch, "complete") if ok { if !status { //resume later since it is incomplete - resumeTime := metav1.NewTime(time.Now().Add(resumeDelay)) + resumeTime := metav1.NewTime(timeutil.Now().Add(resumeDelay)) measurement.ResumeAt = &resumeTime measurement.Phase = v1alpha1.AnalysisPhaseRunning @@ -217,7 +221,7 @@ func (p *Provider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, mea return metricutil.MarkMeasurementError(measurement, err) } - finishTime := metav1.Now() + finishTime := timeutil.MetaNow() measurement.FinishedAt = &finishTime return measurement diff --git a/metricproviders/kayenta/kayenta_test.go b/metricproviders/kayenta/kayenta_test.go index 6e2f8769b5..5830d8294f 100644 --- a/metricproviders/kayenta/kayenta_test.go +++ b/metricproviders/kayenta/kayenta_test.go @@ -206,6 +206,8 @@ func TestRunSuccessfully(t *testing.T) { {Name: "stable-hash", Value: &stableHash}, {Name: "canary-hash", Value: &canaryHash}, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) measurement := p.Run(run, metric) @@ -268,6 +270,8 @@ func TestRunBadJobResponse(t *testing.T) { {Name: "stable-hash", Value: &stableHash}, {Name: "canary-hash", Value: &canaryHash}, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) measurement := p.Run(run, metric) diff --git a/metricproviders/metricproviders.go b/metricproviders/metricproviders.go index 06ecc04e96..d8a1637acf 100644 --- a/metricproviders/metricproviders.go +++ b/metricproviders/metricproviders.go @@ -3,6 +3,8 @@ package metricproviders import ( "fmt" + "github.com/argoproj/argo-rollouts/metricproviders/influxdb" + "github.com/argoproj/argo-rollouts/metricproviders/cloudwatch" "github.com/argoproj/argo-rollouts/metricproviders/datadog" "github.com/argoproj/argo-rollouts/metricproviders/graphite" @@ -34,6 +36,9 @@ type Provider interface { GarbageCollect(*v1alpha1.AnalysisRun, v1alpha1.Metric, int) error // Type gets the provider type Type() string + // GetMetadata returns any additional metadata which providers need to store/display as part + // of the metric result. For example, Prometheus uses it to store the final resolved queries.
+ GetMetadata(metric v1alpha1.Metric) map[string]string } type ProviderFactory struct { @@ -85,11 +90,17 @@ func (f *ProviderFactory) NewProvider(logCtx log.Entry, metric v1alpha1.Metric) } return graphite.NewGraphiteProvider(client, logCtx), nil case cloudwatch.ProviderType: - clinet, err := cloudwatch.NewCloudWatchAPIClient(metric) + client, err := cloudwatch.NewCloudWatchAPIClient(metric) if err != nil { return nil, err } - return cloudwatch.NewCloudWatchProvider(clinet, logCtx), nil + return cloudwatch.NewCloudWatchProvider(client, logCtx), nil + case influxdb.ProviderType: + client, err := influxdb.NewInfluxdbAPI(metric, f.KubeClient) + if err != nil { + return nil, err + } + return influxdb.NewInfluxdbProvider(client, logCtx), nil default: return nil, fmt.Errorf("no valid provider in metric '%s'", metric.Name) } @@ -112,6 +123,11 @@ func Type(metric v1alpha1.Metric) string { return newrelic.ProviderType } else if metric.Provider.CloudWatch != nil { return cloudwatch.ProviderType + } else if metric.Provider.Graphite != nil { + return graphite.ProviderType + } else if metric.Provider.Influxdb != nil { + return influxdb.ProviderType } + return "Unknown Provider" } diff --git a/metricproviders/mocks/Provider.go b/metricproviders/mocks/Provider.go index 56708feb0e..1b54e4281c 100644 --- a/metricproviders/mocks/Provider.go +++ b/metricproviders/mocks/Provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -26,6 +26,22 @@ func (_m *Provider) GarbageCollect(_a0 *v1alpha1.AnalysisRun, _a1 v1alpha1.Metri return r0 } +// GetMetadata provides a mock function with given fields: metric +func (_m *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + ret := _m.Called(metric) + + var r0 map[string]string + if rf, ok := ret.Get(0).(func(v1alpha1.Metric) map[string]string); ok { + r0 = rf(metric) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + // Resume provides a mock function with given fields: _a0, _a1, _a2 func (_m *Provider) Resume(_a0 *v1alpha1.AnalysisRun, _a1 v1alpha1.Metric, _a2 v1alpha1.Measurement) v1alpha1.Measurement { ret := _m.Called(_a0, _a1, _a2) @@ -81,3 +97,18 @@ func (_m *Provider) Type() string { return r0 } + +type mockConstructorTestingTNewProvider interface { + mock.TestingT + Cleanup(func()) +} + +// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewProvider(t mockConstructorTestingTNewProvider) *Provider { + mock := &Provider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/metricproviders/newrelic/mock_test.go b/metricproviders/newrelic/mock_test.go index 4f08173232..a96b4febbc 100644 --- a/metricproviders/newrelic/mock_test.go +++ b/metricproviders/newrelic/mock_test.go @@ -3,11 +3,11 @@ package newrelic import "github.com/newrelic/newrelic-client-go/pkg/nrdb" type mockAPI struct { - response []nrdb.NrdbResult + response []nrdb.NRDBResult err error } -func (m *mockAPI) Query(query string) ([]nrdb.NrdbResult, error) { +func (m *mockAPI) Query(query string) ([]nrdb.NRDBResult, error) { if m.err != nil { return nil, m.err } diff --git a/metricproviders/newrelic/newrelic.go b/metricproviders/newrelic/newrelic.go index 8b52ed66ec..6ec122f5df 100644 --- a/metricproviders/newrelic/newrelic.go +++ b/metricproviders/newrelic/newrelic.go @@ -17,6 +17,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/evaluate" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" "github.com/argoproj/argo-rollouts/utils/version" ) @@ -30,7 +31,7 @@ const ( var userAgent = fmt.Sprintf("argo-rollouts/%s (%s)", version.GetVersion(), repoURL) type NewRelicClientAPI interface { - Query(query string) ([]nrdb.NrdbResult, error) + Query(query string) ([]nrdb.NRDBResult, error) } type NewRelicClient struct { @@ -39,8 +40,8 @@ type NewRelicClient struct { } //Query executes a NRQL query against the given New Relic account -func (n *NewRelicClient) Query(query string) ([]nrdb.NrdbResult, error) { - results, err := n.Nrdb.Query(n.AccountID, nrdb.Nrql(query)) +func (n *NewRelicClient) Query(query string) ([]nrdb.NRDBResult, error) { + results, err := n.Nrdb.Query(n.AccountID, nrdb.NRQL(query)) if err != nil { return nil, err } @@ -55,7 +56,7 @@ type Provider struct { // Run queries NewRelic for the metric func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() newMeasurement := v1alpha1.Measurement{ StartedAt: &startTime, } @@ -72,7 +73,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph newMeasurement.Value = valueStr newMeasurement.Phase = newStatus - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() newMeasurement.FinishedAt = &finishedTime return newMeasurement } @@ -85,7 +86,7 @@ func toJSONString(v interface{}) (string, error) { return string(b), nil } -func (p *Provider) processResponse(metric v1alpha1.Metric, results []nrdb.NrdbResult) (string, v1alpha1.AnalysisPhase, error) { +func (p *Provider) processResponse(metric v1alpha1.Metric, results []nrdb.NRDBResult) (string, v1alpha1.AnalysisPhase, error) { if len(results) == 1 { result := results[0] if len(result) == 0 { @@ -130,6 +131,11 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. 
+func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + //NewNewRelicProvider creates a new NewRelic provider func NewNewRelicProvider(api NewRelicClientAPI, logCtx log.Entry) *Provider { return &Provider{ diff --git a/metricproviders/newrelic/newrelic_test.go b/metricproviders/newrelic/newrelic_test.go index 54353ab288..b483d96872 100644 --- a/metricproviders/newrelic/newrelic_test.go +++ b/metricproviders/newrelic/newrelic_test.go @@ -31,7 +31,7 @@ func TestType(t *testing.T) { func TestRunSuccessfully(t *testing.T) { e := log.Entry{} mock := &mockAPI{ - response: []nrdb.NrdbResult{map[string]interface{}{"count": 10}}, + response: []nrdb.NRDBResult{map[string]interface{}{"count": 10}}, } p := NewNewRelicProvider(mock, e) metric := v1alpha1.Metric{ @@ -44,6 +44,9 @@ func TestRunSuccessfully(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, `{"count":10}`, measurement.Value) @@ -54,7 +57,7 @@ func TestRunSuccessfully(t *testing.T) { func TestRunWithTimeseries(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ - response: []nrdb.NrdbResult{ + response: []nrdb.NRDBResult{ map[string]interface{}{"count": 10}, map[string]interface{}{"count": 20}, map[string]interface{}{"count": 30}}, @@ -70,6 +73,9 @@ func TestRunWithTimeseries(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, `[{"count":10},{"count":20},{"count":30}]`, measurement.Value) @@ -80,7 +86,7 @@ func TestRunWithTimeseries(t *testing.T) { func TestRunWithFacet(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ - response: []nrdb.NrdbResult{map[string]interface{}{"count": 10, "average.duration": 12.34}}, + response: []nrdb.NRDBResult{map[string]interface{}{"count": 10, "average.duration": 12.34}}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -93,6 +99,9 @@ func TestRunWithFacet(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, `{"average.duration":12.34,"count":10}`, measurement.Value) @@ -103,7 +112,7 @@ func TestRunWithFacet(t *testing.T) { func TestRunWithMultipleSelectTerms(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ - response: []nrdb.NrdbResult{map[string]interface{}{"count": 10}}, + response: []nrdb.NRDBResult{map[string]interface{}{"count": 10}}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -116,6 +125,9 @@ func TestRunWithMultipleSelectTerms(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, `{"count":10}`, measurement.Value) @@ -127,7 +139,7 @@ func TestRunWithEmptyResult(t *testing.T) { e := log.NewEntry(log.New()) expectedErr := fmt.Errorf("no results returned from NRQL query") mock := &mockAPI{ - response: []nrdb.NrdbResult{make(map[string]interface{})}, + response: []nrdb.NRDBResult{make(map[string]interface{})}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -233,7 +245,7 @@ func TestRunWithInvalidJSON(t *testing.T) { } t.Run("with a single result map", func(t 
*testing.T) { mock := &mockAPI{ - response: []nrdb.NrdbResult{map[string]interface{}{"func": func() {}}}, + response: []nrdb.NRDBResult{map[string]interface{}{"func": func() {}}}, } p := NewNewRelicProvider(mock, *e) measurement := p.Run(newAnalysisRun(), metric) @@ -246,7 +258,7 @@ func TestRunWithInvalidJSON(t *testing.T) { t.Run("with multiple results", func(t *testing.T) { // cover branch where results slice is longer than 1 mock := &mockAPI{ - response: []nrdb.NrdbResult{map[string]interface{}{"key": "value"}, map[string]interface{}{"func": func() {}}}, + response: []nrdb.NRDBResult{map[string]interface{}{"key": "value"}, map[string]interface{}{"func": func() {}}}, } p := NewNewRelicProvider(mock, *e) measurement := p.Run(newAnalysisRun(), metric) diff --git a/metricproviders/prometheus/mock_test.go b/metricproviders/prometheus/mock_test.go index 7aa6ed6f95..e16a42902d 100644 --- a/metricproviders/prometheus/mock_test.go +++ b/metricproviders/prometheus/mock_test.go @@ -14,8 +14,12 @@ type mockAPI struct { warnings v1.Warnings } +func (m mockAPI) WalReplay(ctx context.Context) (v1.WalReplayStatus, error) { + panic("Not used") +} + // Query performs a query for the given time. -func (m mockAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, v1.Warnings, error) { +func (m mockAPI) Query(ctx context.Context, query string, ts time.Time, opt ...v1.Option) (model.Value, v1.Warnings, error) { if m.err != nil { return nil, m.warnings, m.err } @@ -44,7 +48,7 @@ func (m mockAPI) LabelValues(ctx context.Context, label string, matches []string panic("Not used") } -func (m mockAPI) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) { +func (m mockAPI) QueryRange(ctx context.Context, query string, r v1.Range, opt ...v1.Option) (model.Value, v1.Warnings, error) { panic("Not used") } @@ -95,3 +99,7 @@ func (m mockAPI) TSDB(ctx context.Context) (v1.TSDBResult, error) { func (m mockAPI) Buildinfo(ctx context.Context) (v1.BuildinfoResult, error) { panic("Not used") } + +func (m mockAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]v1.ExemplarQueryResult, error) { + panic("Not used") +} diff --git a/metricproviders/prometheus/prometheus.go b/metricproviders/prometheus/prometheus.go index 5243a4a342..2dadaf8aa2 100644 --- a/metricproviders/prometheus/prometheus.go +++ b/metricproviders/prometheus/prometheus.go @@ -2,23 +2,30 @@ package prometheus import ( "context" + "errors" "fmt" + "net/url" + "os" "time" "github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/evaluate" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( - //ProviderType indicates the provider is prometheus + // ProviderType indicates the provider is prometheus ProviderType = "Prometheus" + // ResolvedPrometheusQuery is used as the key for storing the resolved prometheus query in the metrics result + // metadata object. 
+ ResolvedPrometheusQuery = "ResolvedPrometheusQuery" + EnvVarArgoRolloutsPrometheusAddress = "ARGO_ROLLOUTS_PROMETHEUS_ADDRESS" ) // Provider contains all the required components to run a prometheus query @@ -32,9 +39,18 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. +func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + metricsMetadata := make(map[string]string) + if metric.Provider.Prometheus.Query != "" { + metricsMetadata[ResolvedPrometheusQuery] = metric.Provider.Prometheus.Query + } + return metricsMetadata +} + // Run queries prometheus for the metric func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() newMeasurement := v1alpha1.Measurement{ StartedAt: &startTime, } @@ -67,7 +83,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph } newMeasurement.Phase = newStatus - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() newMeasurement.FinishedAt = &finishedTime return newMeasurement } @@ -128,12 +144,39 @@ func NewPrometheusProvider(api v1.API, logCtx log.Entry) *Provider { // NewPrometheusAPI generates a prometheus API from the metric configuration func NewPrometheusAPI(metric v1alpha1.Metric) (v1.API, error) { + envValuesByKey := make(map[string]string) + if value, ok := os.LookupEnv(fmt.Sprintf("%s", EnvVarArgoRolloutsPrometheusAddress)); ok { + envValuesByKey[EnvVarArgoRolloutsPrometheusAddress] = value + log.Debugf("ARGO_ROLLOUTS_PROMETHEUS_ADDRESS: %v", envValuesByKey[EnvVarArgoRolloutsPrometheusAddress]) + } + if len(metric.Provider.Prometheus.Address) != 0 { + if !IsUrl(metric.Provider.Prometheus.Address) { + return nil, errors.New("prometheus address is not in url format") + } + } else if envValuesByKey[EnvVarArgoRolloutsPrometheusAddress] != "" { + if IsUrl(envValuesByKey[EnvVarArgoRolloutsPrometheusAddress]) { + metric.Provider.Prometheus.Address = envValuesByKey[EnvVarArgoRolloutsPrometheusAddress] + } else { + return nil, errors.New("prometheus address is not in url format") + } + } else { + return nil, errors.New("prometheus address is not configured") + } client, err := api.NewClient(api.Config{ Address: metric.Provider.Prometheus.Address, }) if err != nil { + log.Errorf("Error in getting prometheus client: %v", err) return nil, err } - return v1.NewAPI(client), nil } + +func IsUrl(str string) bool { + u, err := url.Parse(str) + if err != nil { + log.Errorf("Error in parsing url: %v", err) + } + log.Debugf("Parsed url: %v", u) + return err == nil && u.Scheme != "" && u.Host != "" +} diff --git a/metricproviders/prometheus/prometheus_test.go b/metricproviders/prometheus/prometheus_test.go index b83b4a3934..d62f080bcd 100644 --- a/metricproviders/prometheus/prometheus_test.go +++ b/metricproviders/prometheus/prometheus_test.go @@ -3,16 +3,15 @@ package prometheus import ( "fmt" "math" + "os" "testing" + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" v1 "github.com/prometheus/client_golang/api/prometheus/v1" - "github.com/prometheus/common/model" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" ) func newScalar(f float64) model.Value { @@ -58,6 +57,31 @@ func TestRunSuccessfully(t *testing.T) { assert.Equal(t,
v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) } +func TestRunSuccessfullyWithEnv(t *testing.T) { + e := log.Entry{} + mock := mockAPI{ + value: newScalar(10), + } + address := "http://127.0.0.1:9090" + os.Setenv(EnvVarArgoRolloutsPrometheusAddress, address) + p := NewPrometheusProvider(mock, e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result == 10", + FailureCondition: "result != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Query: "test", + }, + }, + } + measurement := p.Run(newAnalysisRun(), metric) + assert.NotNil(t, measurement.StartedAt) + assert.Equal(t, "10", measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) +} + func TestRunSuccessfullyWithWarning(t *testing.T) { e := log.NewEntry(log.New()) mock := mockAPI{ @@ -83,6 +107,31 @@ func TestRunSuccessfullyWithWarning(t *testing.T) { assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) } +func TestRunSuccessfullyWithWarningWithEnv(t *testing.T) { + e := log.NewEntry(log.New()) + mock := mockAPI{ + value: newScalar(10), + warnings: v1.Warnings([]string{"warning", "warning2"}), + } + p := NewPrometheusProvider(mock, *e) + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result == 10", + FailureCondition: "result != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Query: "test", + }, + }, + } + measurement := p.Run(newAnalysisRun(), metric) + assert.NotNil(t, measurement.StartedAt) + assert.Equal(t, "10", measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, `"warning", "warning2"`, measurement.Metadata["warnings"]) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) +} + func TestRunWithQueryError(t *testing.T) { e := log.NewEntry(log.New()) expectedErr := fmt.Errorf("bad big bug :(") @@ -131,6 +180,23 @@ func TestRunWithResolveArgsError(t *testing.T) { assert.Equal(t, v1alpha1.AnalysisPhaseError, measurement.Phase) } +func TestGetStatusReturnsResolvedQuery(t *testing.T) { + e := log.Entry{} + mock := mockAPI{} + p := NewPrometheusProvider(mock, e) + metric := v1alpha1.Metric{ + Name: "foo", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Query: "resolved-query", + }, + }, + } + metricsMetadata := p.GetMetadata(metric) + assert.NotNil(t, metricsMetadata) + assert.Equal(t, "resolved-query", metricsMetadata["ResolvedPrometheusQuery"]) +} + func TestRunWithEvaluationError(t *testing.T) { e := log.WithField("", "") mock := mockAPI{} @@ -345,17 +411,57 @@ func TestProcessInvalidResponse(t *testing.T) { } func TestNewPrometheusAPI(t *testing.T) { + os.Unsetenv(EnvVarArgoRolloutsPrometheusAddress) + address := ":invalid::url" metric := v1alpha1.Metric{ Provider: v1alpha1.MetricProvider{ Prometheus: &v1alpha1.PrometheusMetric{ - Address: ":invalid::url", + Address: address, }, }, } - _, err := NewPrometheusAPI(metric) + api, err := NewPrometheusAPI(metric) assert.NotNil(t, err) + log.Infof("api:%v", api) metric.Provider.Prometheus.Address = "https://www.example.com" _, err = NewPrometheusAPI(metric) assert.Nil(t, err) } + +func TestNewPrometheusAPIWithEnv(t *testing.T) { + os.Unsetenv(EnvVarArgoRolloutsPrometheusAddress) + os.Setenv(EnvVarArgoRolloutsPrometheusAddress, ":invalid::url") + address := "" + metric := v1alpha1.Metric{ + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: address, + }, + }, + } + api, err 
:= NewPrometheusAPI(metric) + assert.NotNil(t, err) + log.Infof("api:%v", api) + + os.Unsetenv(EnvVarArgoRolloutsPrometheusAddress) + os.Setenv(EnvVarArgoRolloutsPrometheusAddress, "https://www.example.com") + _, err = NewPrometheusAPI(metric) + assert.Nil(t, err) +} + +func TestNewPrometheusAddressNotConfigured(t *testing.T) { + os.Unsetenv(EnvVarArgoRolloutsPrometheusAddress) + os.Setenv(EnvVarArgoRolloutsPrometheusAddress, "") + address := "" + metric := v1alpha1.Metric{ + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: address, + }, + }, + } + api, err := NewPrometheusAPI(metric) + assert.NotNil(t, err) + log.Infof("api:%v", api) +} diff --git a/metricproviders/wavefront/wavefront.go b/metricproviders/wavefront/wavefront.go index d6e3f28f77..94935a71a1 100644 --- a/metricproviders/wavefront/wavefront.go +++ b/metricproviders/wavefront/wavefront.go @@ -17,6 +17,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/evaluate" metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -35,6 +36,11 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. +func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + type WavefrontClientAPI interface { NewQuery(params *wavefrontapi.QueryParams) WavefrontQueryAPI } @@ -68,7 +74,7 @@ type wavefrontResponse struct { // Run queries with wavefront provider for the metric func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() newMeasurement := v1alpha1.Measurement{ StartedAt: &startTime, Metadata: map[string]string{}, @@ -103,7 +109,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph newMeasurement.Phase = result.newStatus newMeasurement.Metadata["timestamps"] = result.epochsUsed newMeasurement.Metadata["drift"] = result.drift - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() newMeasurement.FinishedAt = &finishedTime return newMeasurement } diff --git a/metricproviders/wavefront/wavefront_test.go b/metricproviders/wavefront/wavefront_test.go index a42a115db8..af8bd53e7f 100644 --- a/metricproviders/wavefront/wavefront_test.go +++ b/metricproviders/wavefront/wavefront_test.go @@ -57,6 +57,9 @@ func TestRunSuccessfully(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.NotNil(t, measurement.StartedAt) assert.Equal(t, "10.00", measurement.Value) @@ -81,6 +84,9 @@ func TestRunWithQueryError(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.Equal(t, expectedErr.Error(), measurement.Message) assert.NotNil(t, measurement.StartedAt) @@ -107,6 +113,9 @@ func TestRunWithEvaluationError(t *testing.T) { }, }, } + metricsMetadata := p.GetMetadata(metric) + assert.Nil(t, metricsMetadata) + measurement := p.Run(newAnalysisRun(), metric) assert.Equal(t, "No TimeSeries found in response from Wavefront", measurement.Message) assert.Equal(t, "No query provided", measurement.Metadata["warnings"]) diff --git a/metricproviders/webmetric/webmetric.go b/metricproviders/webmetric/webmetric.go index 
1def6a94b1..f1b215de81 100644 --- a/metricproviders/webmetric/webmetric.go +++ b/metricproviders/webmetric/webmetric.go @@ -12,13 +12,13 @@ import ( "strings" "time" - metricutil "github.com/argoproj/argo-rollouts/utils/metric" log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/jsonpath" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/evaluate" + metricutil "github.com/argoproj/argo-rollouts/utils/metric" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -39,8 +39,13 @@ func (p *Provider) Type() string { return ProviderType } +// GetMetadata returns any additional metadata which needs to be stored & displayed as part of the metrics result. +func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { + return nil +} + func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := metav1.Now() + startTime := timeutil.MetaNow() // Measurement to pass back measurement := v1alpha1.Measurement{ @@ -91,7 +96,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph measurement.Value = value measurement.Phase = status - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() measurement.FinishedAt = &finishedTime return measurement diff --git a/metricproviders/webmetric/webmetric_test.go b/metricproviders/webmetric/webmetric_test.go index a6107e9c00..66861478f5 100644 --- a/metricproviders/webmetric/webmetric_test.go +++ b/metricproviders/webmetric/webmetric_test.go @@ -613,6 +613,9 @@ func TestRunSuite(t *testing.T) { assert.NoError(t, err) provider := NewWebMetricProvider(*logCtx, server.Client(), jsonparser) + metricsMetadata := provider.GetMetadata(test.metric) + assert.Nil(t, metricsMetadata) + // Get our result measurement := provider.Run(newAnalysisRun(), test.metric) diff --git a/mkdocs.yml b/mkdocs.yml index a03f96edf6..a84ef3a807 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,3 +1,7 @@ +extra_css: +- assets/versions.css +extra_javascript: +- assets/versions.js markdown_extensions: - codehilite - admonition @@ -25,6 +29,7 @@ nav: - Canary: features/canary.md - Rollout Spec: features/specification.md - HPA: features/hpa-support.md + - VPA: features/vpa-support.md - Ephemeral Metadata: features/ephemeral-metadata.md - Restarting Rollouts: features/restart.md - Scaledown Aborted Rollouts: features/scaledown-aborted-rs.md @@ -39,6 +44,7 @@ nav: - Istio: features/traffic-management/istio.md - NGINX: features/traffic-management/nginx.md - SMI: features/traffic-management/smi.md + - Traefik: features/traffic-management/traefik.md - Analysis: - Overview: features/analysis.md - Prometheus: analysis/prometheus.md @@ -49,16 +55,21 @@ nav: - Web: analysis/web.md - Kayenta: analysis/kayenta.md - CloudWatch: analysis/cloudwatch.md + - Graphite: analysis/graphite.md + - InfluxDB: analysis/influxdb.md - Experiments: features/experiment.md - Notifications: - Overview: features/notifications.md - Services: + - generated/notification-services/alertmanager.md - generated/notification-services/email.md - generated/notification-services/github.md + - generated/notification-services/googlechat.md - generated/notification-services/grafana.md - generated/notification-services/mattermost.md - generated/notification-services/opsgenie.md - generated/notification-services/overview.md + - generated/notification-services/pushover.md - generated/notification-services/rocketchat.md - 
generated/notification-services/slack.md - generated/notification-services/teams.md @@ -122,4 +133,12 @@ theme: logo: assets/logo.png name: material palette: - primary: teal + - primary: teal + scheme: default + toggle: + icon: material/toggle-switch-off-outline + name: Switch to dark mode + - scheme: slate + toggle: + icon: material/toggle-switch + name: Switch to light mode diff --git a/pkg/apiclient/rollout/rollout.pb.go b/pkg/apiclient/rollout/rollout.pb.go index 3c118fb424..4f36576290 100644 --- a/pkg/apiclient/rollout/rollout.pb.go +++ b/pkg/apiclient/rollout/rollout.pb.go @@ -908,7 +908,7 @@ func (m *RolloutInfo) GetSteps() []*v1alpha1.CanaryStep { type ExperimentInfo struct { ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Icon string `protobuf:"bytes,2,opt,name=icon,proto3" json:"icon,omitempty"` - Revision int32 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` + Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` ReplicaSets []*ReplicaSetInfo `protobuf:"bytes,6,rep,name=replicaSets,proto3" json:"replicaSets,omitempty"` @@ -965,7 +965,7 @@ func (m *ExperimentInfo) GetIcon() string { return "" } -func (m *ExperimentInfo) GetRevision() int32 { +func (m *ExperimentInfo) GetRevision() int64 { if m != nil { return m.Revision } @@ -1004,7 +1004,7 @@ type ReplicaSetInfo struct { ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` Icon string `protobuf:"bytes,3,opt,name=icon,proto3" json:"icon,omitempty"` - Revision int32 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` + Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` Stable bool `protobuf:"varint,5,opt,name=stable,proto3" json:"stable,omitempty"` Canary bool `protobuf:"varint,6,opt,name=canary,proto3" json:"canary,omitempty"` Active bool `protobuf:"varint,7,opt,name=active,proto3" json:"active,omitempty"` @@ -1015,6 +1015,8 @@ type ReplicaSetInfo struct { ScaleDownDeadline string `protobuf:"bytes,12,opt,name=scaleDownDeadline,proto3" json:"scaleDownDeadline,omitempty"` Images []string `protobuf:"bytes,13,rep,name=images,proto3" json:"images,omitempty"` Pods []*PodInfo `protobuf:"bytes,14,rep,name=pods,proto3" json:"pods,omitempty"` + Ping bool `protobuf:"varint,15,opt,name=ping,proto3" json:"ping,omitempty"` + Pong bool `protobuf:"varint,16,opt,name=pong,proto3" json:"pong,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1074,7 +1076,7 @@ func (m *ReplicaSetInfo) GetIcon() string { return "" } -func (m *ReplicaSetInfo) GetRevision() int32 { +func (m *ReplicaSetInfo) GetRevision() int64 { if m != nil { return m.Revision } @@ -1151,6 +1153,20 @@ func (m *ReplicaSetInfo) GetPods() []*PodInfo { return nil } +func (m *ReplicaSetInfo) GetPing() bool { + if m != nil { + return m.Ping + } + return false +} + +func (m *ReplicaSetInfo) GetPong() bool { + if m != nil { + return m.Pong + } + return false +} + type PodInfo struct { ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Status string `protobuf:"bytes,2,opt,name=status,proto3" 
json:"status,omitempty"` @@ -1289,6 +1305,8 @@ type JobInfo struct { ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` Icon string `protobuf:"bytes,3,opt,name=icon,proto3" json:"icon,omitempty"` + MetricName string `protobuf:"bytes,4,opt,name=metricName,proto3" json:"metricName,omitempty"` + StartedAt *v1.Time `protobuf:"bytes,5,opt,name=startedAt,proto3" json:"startedAt,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1348,16 +1366,32 @@ func (m *JobInfo) GetIcon() string { return "" } +func (m *JobInfo) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +func (m *JobInfo) GetStartedAt() *v1.Time { + if m != nil { + return m.StartedAt + } + return nil +} + type AnalysisRunInfo struct { ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Icon string `protobuf:"bytes,2,opt,name=icon,proto3" json:"icon,omitempty"` - Revision int32 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` + Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` Successful int32 `protobuf:"varint,5,opt,name=successful,proto3" json:"successful,omitempty"` Failed int32 `protobuf:"varint,6,opt,name=failed,proto3" json:"failed,omitempty"` Inconclusive int32 `protobuf:"varint,7,opt,name=inconclusive,proto3" json:"inconclusive,omitempty"` Error int32 `protobuf:"varint,8,opt,name=error,proto3" json:"error,omitempty"` Jobs []*JobInfo `protobuf:"bytes,9,rep,name=jobs,proto3" json:"jobs,omitempty"` + NonJobInfo []*NonJobInfo `protobuf:"bytes,10,rep,name=nonJobInfo,proto3" json:"nonJobInfo,omitempty"` + Metrics []*Metrics `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1410,7 +1444,7 @@ func (m *AnalysisRunInfo) GetIcon() string { return "" } -func (m *AnalysisRunInfo) GetRevision() int32 { +func (m *AnalysisRunInfo) GetRevision() int64 { if m != nil { return m.Revision } @@ -1459,6 +1493,170 @@ func (m *AnalysisRunInfo) GetJobs() []*JobInfo { return nil } +func (m *AnalysisRunInfo) GetNonJobInfo() []*NonJobInfo { + if m != nil { + return m.NonJobInfo + } + return nil +} + +func (m *AnalysisRunInfo) GetMetrics() []*Metrics { + if m != nil { + return m.Metrics + } + return nil +} + +type NonJobInfo struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + MetricName string `protobuf:"bytes,3,opt,name=metricName,proto3" json:"metricName,omitempty"` + StartedAt *v1.Time `protobuf:"bytes,4,opt,name=startedAt,proto3" json:"startedAt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NonJobInfo) Reset() { *m = NonJobInfo{} } +func (m *NonJobInfo) String() string { return proto.CompactTextString(m) } +func (*NonJobInfo) ProtoMessage() {} +func (*NonJobInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_99101d942e8912a7, []int{19} +} +func (m *NonJobInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonJobInfo) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NonJobInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NonJobInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonJobInfo.Merge(m, src) +} +func (m *NonJobInfo) XXX_Size() int { + return m.Size() +} +func (m *NonJobInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NonJobInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_NonJobInfo proto.InternalMessageInfo + +func (m *NonJobInfo) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *NonJobInfo) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *NonJobInfo) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +func (m *NonJobInfo) GetStartedAt() *v1.Time { + if m != nil { + return m.StartedAt + } + return nil +} + +type Metrics struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + SuccessCondition string `protobuf:"bytes,2,opt,name=successCondition,proto3" json:"successCondition,omitempty"` + Count int32 `protobuf:"varint,3,opt,name=count,proto3" json:"count,omitempty"` + InconclusiveLimit int32 `protobuf:"varint,4,opt,name=inconclusiveLimit,proto3" json:"inconclusiveLimit,omitempty"` + FailureLimit int32 `protobuf:"varint,5,opt,name=failureLimit,proto3" json:"failureLimit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metrics) Reset() { *m = Metrics{} } +func (m *Metrics) String() string { return proto.CompactTextString(m) } +func (*Metrics) ProtoMessage() {} +func (*Metrics) Descriptor() ([]byte, []int) { + return fileDescriptor_99101d942e8912a7, []int{20} +} +func (m *Metrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metrics.Merge(m, src) +} +func (m *Metrics) XXX_Size() int { + return m.Size() +} +func (m *Metrics) XXX_DiscardUnknown() { + xxx_messageInfo_Metrics.DiscardUnknown(m) +} + +var xxx_messageInfo_Metrics proto.InternalMessageInfo + +func (m *Metrics) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Metrics) GetSuccessCondition() string { + if m != nil { + return m.SuccessCondition + } + return "" +} + +func (m *Metrics) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Metrics) GetInconclusiveLimit() int32 { + if m != nil { + return m.InconclusiveLimit + } + return 0 +} + +func (m *Metrics) GetFailureLimit() int32 { + if m != nil { + return m.FailureLimit + } + return 0 +} + func init() { proto.RegisterType((*RolloutInfoQuery)(nil), "rollout.RolloutInfoQuery") proto.RegisterType((*RolloutInfoListQuery)(nil), "rollout.RolloutInfoListQuery") @@ -1479,6 +1677,8 @@ func init() { proto.RegisterType((*ContainerInfo)(nil), "rollout.ContainerInfo") proto.RegisterType((*JobInfo)(nil), "rollout.JobInfo") proto.RegisterType((*AnalysisRunInfo)(nil), "rollout.AnalysisRunInfo") + proto.RegisterType((*NonJobInfo)(nil), "rollout.NonJobInfo") + proto.RegisterType((*Metrics)(nil), 
"rollout.Metrics") } func init() { @@ -1486,106 +1686,117 @@ func init() { } var fileDescriptor_99101d942e8912a7 = []byte{ - // 1576 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x18, 0xcd, 0x6f, 0xdc, 0xc4, - 0x57, 0xde, 0xcd, 0x26, 0x9b, 0xd9, 0x7c, 0x4e, 0xd2, 0xd6, 0xdd, 0xf6, 0x17, 0xe5, 0xe7, 0xfe, - 0xa4, 0x5f, 0x1a, 0xc0, 0x4e, 0x4a, 0x95, 0x52, 0x3e, 0x0e, 0xa1, 0x8d, 0x42, 0x51, 0x81, 0xe0, - 0x08, 0x2a, 0x90, 0xa0, 0x9a, 0xf5, 0x4e, 0x36, 0x6e, 0xbd, 0x1e, 0xe3, 0x19, 0x6f, 0x59, 0x45, - 0x7b, 0x80, 0x0b, 0x47, 0x0e, 0xfc, 0x11, 0x88, 0x13, 0x17, 0x2e, 0x1c, 0x38, 0x21, 0x21, 0x4e, - 0x08, 0x89, 0x7f, 0x00, 0x55, 0x5c, 0xf8, 0x2f, 0xd0, 0x3c, 0x8f, 0xc7, 0xf6, 0x66, 0xd3, 0xa6, - 0x4a, 0x20, 0x9c, 0x3c, 0xef, 0xbd, 0x79, 0x1f, 0xe3, 0xf7, 0x31, 0xef, 0x0d, 0xba, 0x12, 0x3d, - 0xec, 0x38, 0x24, 0xf2, 0xbd, 0xc0, 0xa7, 0xa1, 0x70, 0x62, 0x16, 0x04, 0x2c, 0xd1, 0x5f, 0x3b, - 0x8a, 0x99, 0x60, 0x78, 0x42, 0x81, 0xcd, 0xcb, 0x1d, 0xc6, 0x3a, 0x01, 0x95, 0x0c, 0x0e, 0x09, - 0x43, 0x26, 0x88, 0xf0, 0x59, 0xc8, 0xd3, 0x6d, 0xcd, 0xbb, 0x1d, 0x5f, 0xec, 0x27, 0x2d, 0xdb, - 0x63, 0x5d, 0x87, 0xc4, 0x1d, 0x16, 0xc5, 0xec, 0x01, 0x2c, 0x5e, 0x50, 0xfc, 0xdc, 0x51, 0xda, - 0xb8, 0xa3, 0x31, 0xbd, 0x75, 0x12, 0x44, 0xfb, 0x64, 0xdd, 0xe9, 0xd0, 0x90, 0xc6, 0x44, 0xd0, - 0xb6, 0x92, 0x76, 0xfd, 0xe1, 0x4b, 0xdc, 0xf6, 0x99, 0xdc, 0xde, 0x25, 0xde, 0xbe, 0x1f, 0xd2, - 0xb8, 0x9f, 0xf3, 0x77, 0xa9, 0x20, 0x4e, 0xef, 0x30, 0xd7, 0x25, 0x65, 0x21, 0x40, 0xad, 0x64, - 0xcf, 0xa1, 0xdd, 0x48, 0xf4, 0x53, 0xa2, 0x75, 0x1b, 0xcd, 0xb9, 0xa9, 0xde, 0x3b, 0xe1, 0x1e, - 0x7b, 0x37, 0xa1, 0x71, 0x1f, 0x63, 0x34, 0x16, 0x92, 0x2e, 0x35, 0x8d, 0x65, 0x63, 0x65, 0xd2, - 0x85, 0x35, 0xbe, 0x8c, 0x26, 0xe5, 0x97, 0x47, 0xc4, 0xa3, 0x66, 0x05, 0x08, 0x39, 0xc2, 0xba, - 0x8e, 0x16, 0x0b, 0x52, 0xee, 0xfa, 0x5c, 0xa4, 0x92, 0x4a, 0x5c, 0xc6, 0x30, 0xd7, 0x97, 0x06, - 0x9a, 0xdd, 0xa5, 0xe2, 0x4e, 0x97, 0x74, 0xa8, 0x4b, 0x3f, 0x49, 0x28, 0x17, 0xd8, 0x44, 0xd9, - 0x9f, 0x55, 0xfb, 0x33, 0x50, 0xca, 0xf2, 0x58, 0x28, 0x88, 0x3c, 0x75, 0x66, 0x81, 0x46, 0xe0, - 0x45, 0x54, 0xf3, 0xa5, 0x1c, 0xb3, 0x0a, 0x94, 0x14, 0xc0, 0x73, 0xa8, 0x2a, 0x48, 0xc7, 0x1c, - 0x03, 0x9c, 0x5c, 0x96, 0x2d, 0xaa, 0x0d, 0x5b, 0xb4, 0x8f, 0xf0, 0x7b, 0x61, 0x9b, 0xa9, 0xb3, - 0x3c, 0xdd, 0xa6, 0x26, 0xaa, 0xc7, 0xb4, 0xe7, 0x73, 0x9f, 0x85, 0x60, 0x52, 0xd5, 0xd5, 0x70, - 0x59, 0x53, 0x75, 0x58, 0xd3, 0x1d, 0x74, 0xce, 0xa5, 0x5c, 0x90, 0x58, 0x0c, 0x29, 0x7b, 0xf6, - 0x9f, 0xff, 0x11, 0x3a, 0xb7, 0x13, 0xb3, 0x2e, 0x13, 0xf4, 0xa4, 0xa2, 0x24, 0xc7, 0x5e, 0x12, - 0x04, 0x60, 0x6e, 0xdd, 0x85, 0xb5, 0xb5, 0x8d, 0x16, 0x36, 0x5b, 0xec, 0x14, 0xec, 0xdc, 0x46, - 0x0b, 0x2e, 0x15, 0x71, 0xff, 0xc4, 0x82, 0xee, 0xa3, 0x79, 0x25, 0xe3, 0x1e, 0x11, 0xde, 0xfe, - 0x56, 0x8f, 0x86, 0x20, 0x46, 0xf4, 0x23, 0x2d, 0x46, 0xae, 0xf1, 0x06, 0x6a, 0xc4, 0x79, 0x58, - 0x82, 0xa0, 0xc6, 0xb5, 0x45, 0x3b, 0xcb, 0xe4, 0x42, 0xc8, 0xba, 0xc5, 0x8d, 0xd6, 0x7d, 0x34, - 0xfd, 0x76, 0xa6, 0x4d, 0x22, 0x9e, 0x1c, 0xc7, 0x78, 0x0d, 0x2d, 0x90, 0x1e, 0xf1, 0x03, 0xd2, - 0x0a, 0xa8, 0xe6, 0xe3, 0x66, 0x65, 0xb9, 0xba, 0x32, 0xe9, 0x8e, 0x22, 0x59, 0xb7, 0xd0, 0xec, - 0x50, 0xbe, 0xe0, 0x35, 0x54, 0xcf, 0x0a, 0x80, 0x69, 0x2c, 0x57, 0x8f, 0x34, 0x54, 0xef, 0xb2, - 0x6e, 0xa0, 0xc6, 0xfb, 0x34, 0x96, 0xb1, 0x06, 0x36, 0xae, 0xa0, 0xd9, 0x8c, 0xa4, 0xd0, 0xca, - 0xd2, 0x61, 0xb4, 0xf5, 0xf5, 0x38, 0x6a, 0x14, 0x44, 0xe2, 0x1d, 0x84, 0x58, 0xeb, 0x01, 0xf5, - 0xc4, 0x5b, 0x54, 0x10, 0x60, 0x6a, 0x5c, 0x5b, 
0xb3, 0xd3, 0x5a, 0x63, 0x17, 0x6b, 0x8d, 0x1d, - 0x3d, 0xec, 0x48, 0x04, 0xb7, 0x65, 0xad, 0xb1, 0x7b, 0xeb, 0xf6, 0x3b, 0x9a, 0xcf, 0x2d, 0xc8, - 0xc0, 0xe7, 0xd1, 0x38, 0x17, 0x44, 0x24, 0x5c, 0x39, 0x4f, 0x41, 0x32, 0x93, 0xba, 0x94, 0xf3, - 0x3c, 0x4f, 0x33, 0x50, 0xba, 0xcf, 0xf7, 0x58, 0xa8, 0x52, 0x15, 0xd6, 0x32, 0xbb, 0xb8, 0x90, - 0x95, 0xac, 0xd3, 0x57, 0xa9, 0xaa, 0x61, 0xb9, 0x9f, 0x0b, 0x1a, 0x99, 0xe3, 0xe9, 0x7e, 0xb9, - 0x96, 0x5e, 0xe2, 0x54, 0xdc, 0xa3, 0x7e, 0x67, 0x5f, 0x98, 0x13, 0xa9, 0x97, 0x34, 0x02, 0x5b, - 0x68, 0x8a, 0x78, 0x22, 0x21, 0x81, 0xda, 0x50, 0x87, 0x0d, 0x25, 0x9c, 0xac, 0x22, 0x31, 0x25, - 0xed, 0xbe, 0x39, 0xb9, 0x6c, 0xac, 0xd4, 0xdc, 0x14, 0x90, 0x56, 0x7b, 0x49, 0x1c, 0xd3, 0x50, - 0x98, 0x08, 0xf0, 0x19, 0x28, 0x29, 0x6d, 0xca, 0xfd, 0x98, 0xb6, 0xcd, 0x46, 0x4a, 0x51, 0xa0, - 0xa4, 0x24, 0x51, 0x5b, 0x56, 0x61, 0x73, 0x2a, 0xa5, 0x28, 0x50, 0x5a, 0xa9, 0x43, 0xc2, 0x9c, - 0x06, 0x5a, 0x8e, 0xc0, 0xcb, 0xa8, 0x11, 0xa7, 0x75, 0x81, 0xb6, 0x37, 0x85, 0x39, 0x03, 0x46, - 0x16, 0x51, 0x78, 0x09, 0x21, 0x55, 0xe1, 0xa5, 0x8b, 0x67, 0x61, 0x43, 0x01, 0x83, 0x6f, 0x4a, - 0x09, 0x51, 0xe0, 0x7b, 0x64, 0x97, 0x0a, 0x6e, 0xce, 0x41, 0x2c, 0x5d, 0xc8, 0x63, 0x49, 0xd3, - 0x54, 0xdc, 0xe7, 0x7b, 0x25, 0x2b, 0xfd, 0x34, 0xa2, 0xb1, 0xdf, 0xa5, 0xa1, 0xe0, 0xe6, 0xfc, - 0x10, 0xeb, 0x96, 0xa6, 0xa5, 0xac, 0x85, 0xbd, 0xf8, 0x55, 0x34, 0x45, 0x42, 0x12, 0xf4, 0xb9, - 0xcf, 0xdd, 0x24, 0xe4, 0x26, 0x06, 0x5e, 0x53, 0xf3, 0x6e, 0xe6, 0x44, 0x60, 0x2e, 0xed, 0xc6, - 0x1b, 0x08, 0xe9, 0x52, 0xce, 0xcd, 0x05, 0xe0, 0x3d, 0xaf, 0x79, 0x6f, 0x65, 0x24, 0xe0, 0x2c, - 0xec, 0xc4, 0x1f, 0xa3, 0x9a, 0xf4, 0x3c, 0x37, 0x17, 0x81, 0xe5, 0x0d, 0x3b, 0xbf, 0x6e, 0xed, - 0xec, 0xba, 0x85, 0xc5, 0xfd, 0x2c, 0x07, 0xf2, 0x10, 0xd6, 0x98, 0xec, 0xba, 0xb5, 0x6f, 0x91, - 0x90, 0xc4, 0xfd, 0x5d, 0x41, 0x23, 0x37, 0x15, 0x6b, 0xfd, 0x50, 0x41, 0x33, 0xe5, 0x53, 0xff, - 0x0d, 0xc9, 0x92, 0x85, 0x7e, 0xa5, 0x1c, 0xfa, 0xfa, 0x62, 0xa9, 0x42, 0x8c, 0xe4, 0x17, 0x4b, - 0x9e, 0x5c, 0x63, 0x47, 0x25, 0x57, 0xad, 0x9c, 0x5c, 0x43, 0x21, 0x31, 0xfe, 0x0c, 0x21, 0x31, - 0xec, 0xd7, 0x89, 0x67, 0xf1, 0xab, 0xf5, 0x4b, 0x15, 0xcd, 0x94, 0xa5, 0xff, 0x83, 0xc5, 0x26, - 0xfb, 0xaf, 0xd5, 0x23, 0xfe, 0xeb, 0xd8, 0xc8, 0xff, 0x2a, 0xb3, 0xb2, 0x06, 0xd7, 0x9f, 0x82, - 0x24, 0xde, 0x83, 0xc8, 0x80, 0x62, 0x53, 0x77, 0x15, 0x24, 0xf1, 0xc4, 0x13, 0x7e, 0x8f, 0x42, - 0xad, 0xa9, 0xbb, 0x0a, 0x92, 0x7e, 0x88, 0xa4, 0x50, 0xfa, 0x08, 0x6a, 0x4c, 0xdd, 0xcd, 0xc0, - 0x54, 0x3b, 0xfc, 0x0d, 0xae, 0x2a, 0x8c, 0x86, 0xcb, 0x65, 0x01, 0x0d, 0x97, 0x85, 0x26, 0xaa, - 0x0b, 0xda, 0x8d, 0x02, 0x22, 0x28, 0x54, 0x9a, 0x49, 0x57, 0xc3, 0xf8, 0x79, 0x34, 0xcf, 0x3d, - 0x12, 0xd0, 0xdb, 0xec, 0x51, 0x78, 0x9b, 0x92, 0x76, 0xe0, 0x87, 0x14, 0x8a, 0xce, 0xa4, 0x7b, - 0x98, 0x20, 0xad, 0x86, 0xde, 0x88, 0x9b, 0xd3, 0x70, 0x3f, 0x29, 0x08, 0xff, 0x0f, 0x8d, 0x45, - 0xac, 0xcd, 0xcd, 0x19, 0x70, 0xf0, 0x9c, 0x76, 0xf0, 0x0e, 0x6b, 0x83, 0x63, 0x81, 0x6a, 0x7d, - 0x6f, 0xa0, 0x09, 0x85, 0x39, 0x63, 0x4f, 0xea, 0x52, 0x9d, 0x26, 0x81, 0x2a, 0xd5, 0xf0, 0x87, - 0xa1, 0x56, 0x72, 0xf0, 0x22, 0xfc, 0xe1, 0x14, 0xb6, 0x6e, 0xa2, 0xe9, 0x52, 0x25, 0x19, 0xd9, - 0x79, 0xe8, 0x3e, 0xb2, 0x52, 0xe8, 0x23, 0xad, 0x2f, 0x0c, 0x34, 0xf1, 0x26, 0x6b, 0x9d, 0xfd, - 0xb1, 0xad, 0x1f, 0x2b, 0x68, 0x76, 0x28, 0xe7, 0xfe, 0xc5, 0x25, 0x69, 0x09, 0x21, 0x9e, 0x78, - 0x1e, 0xe5, 0x7c, 0x2f, 0x09, 0x94, 0x43, 0x0a, 0x18, 0xc9, 0xb7, 0x47, 0xfc, 0x80, 0xb6, 0x21, - 0xb5, 0x6a, 0xae, 0x82, 0xe4, 0x5d, 0xed, 0x87, 0x1e, 0x0b, 0xbd, 0x20, 
0xe1, 0x59, 0x82, 0xd5, - 0xdc, 0x12, 0x4e, 0x7a, 0x8a, 0xc6, 0x31, 0x8b, 0x21, 0xc9, 0x6a, 0x6e, 0x0a, 0xc8, 0x30, 0x7e, - 0xc0, 0x5a, 0x32, 0xbd, 0xca, 0x61, 0xac, 0xbc, 0xe7, 0x02, 0xf5, 0xda, 0x9f, 0xd3, 0x68, 0x46, - 0x75, 0x40, 0xbb, 0x34, 0xee, 0xf9, 0x1e, 0xc5, 0x1c, 0xcd, 0x6c, 0x53, 0x51, 0x6c, 0x8b, 0x2e, - 0x8e, 0xea, 0xbf, 0x60, 0xae, 0x69, 0x8e, 0x6c, 0xcd, 0xac, 0xb5, 0xcf, 0x7f, 0xfb, 0xe3, 0xab, - 0xca, 0x2a, 0x5e, 0x81, 0x61, 0xb0, 0xb7, 0x9e, 0x4f, 0x74, 0x07, 0xba, 0x59, 0x1c, 0xa4, 0xeb, - 0x81, 0xe3, 0x4b, 0x15, 0x03, 0x34, 0x07, 0x2d, 0xec, 0x89, 0xd4, 0x6e, 0x80, 0xda, 0x35, 0x6c, - 0x1f, 0x57, 0xad, 0xf3, 0x48, 0xea, 0x5c, 0x33, 0x70, 0x0f, 0xcd, 0xc9, 0xde, 0xb3, 0x20, 0x8c, - 0xe3, 0xff, 0x8c, 0xd2, 0xa1, 0x27, 0xba, 0xa6, 0x79, 0x14, 0xd9, 0xba, 0x0a, 0x66, 0x5c, 0xc1, - 0xff, 0x7d, 0xa2, 0x19, 0x70, 0xec, 0xcf, 0x0c, 0x34, 0x3f, 0x7c, 0xee, 0xa7, 0x6a, 0x6e, 0x0e, - 0x93, 0xf3, 0xe6, 0xdf, 0x72, 0x40, 0xf7, 0x55, 0xfc, 0xff, 0xa7, 0xea, 0xd6, 0x67, 0xff, 0x00, - 0x4d, 0x6d, 0x53, 0xa1, 0x7b, 0x72, 0x7c, 0xde, 0x4e, 0xc7, 0x64, 0x3b, 0x1b, 0x93, 0xed, 0x2d, - 0x39, 0x26, 0x37, 0xf3, 0x36, 0xa4, 0x34, 0x12, 0x58, 0x17, 0x41, 0xe5, 0x02, 0x9e, 0xcf, 0x54, - 0xe6, 0xf3, 0xc0, 0xb7, 0x86, 0xbc, 0xf5, 0x8a, 0xc3, 0x1d, 0x5e, 0x2a, 0x5c, 0xb6, 0x23, 0xa6, - 0xbe, 0xe6, 0xd6, 0xc9, 0x3a, 0x17, 0x25, 0x2d, 0x0b, 0x85, 0xe6, 0x73, 0xc7, 0x09, 0x05, 0x55, - 0x18, 0x5f, 0x36, 0x56, 0xc1, 0xe2, 0xf2, 0x0c, 0x59, 0xb0, 0x78, 0xe4, 0x70, 0x79, 0x26, 0x16, - 0x47, 0xa9, 0x25, 0xd2, 0xe2, 0x6f, 0x0c, 0x34, 0x55, 0x1c, 0x4b, 0xf1, 0xe5, 0xbc, 0x25, 0x39, - 0x3c, 0xad, 0x9e, 0x96, 0xb5, 0xd7, 0xc1, 0x5a, 0xbb, 0x79, 0xf5, 0x38, 0xd6, 0x12, 0x69, 0x87, - 0xb4, 0xf5, 0xa7, 0xf4, 0x9d, 0x23, 0x8b, 0x6a, 0x78, 0x99, 0xc8, 0xf3, 0x68, 0xe8, 0x05, 0xe4, - 0xb4, 0x4c, 0x75, 0xc1, 0xd4, 0xbb, 0xcd, 0xed, 0x27, 0x9b, 0xaa, 0xb0, 0x03, 0x87, 0x53, 0xe1, - 0x1c, 0xe8, 0xd6, 0x7a, 0xe0, 0x1c, 0xc0, 0xcd, 0xf7, 0xda, 0xea, 0xea, 0xc0, 0x39, 0x10, 0xa4, - 0x33, 0x90, 0x07, 0xf9, 0xce, 0x40, 0x8d, 0xc2, 0xfb, 0x08, 0xbe, 0xa4, 0x0f, 0x71, 0xf8, 0xd5, - 0xe4, 0xb4, 0xce, 0xb1, 0x09, 0xe7, 0x78, 0xa5, 0xb9, 0x71, 0xcc, 0x73, 0x24, 0x61, 0x9b, 0x39, - 0x07, 0xd9, 0xcd, 0x34, 0xc8, 0x62, 0xa5, 0xf8, 0xf2, 0x50, 0x88, 0x95, 0x11, 0x0f, 0x12, 0x67, - 0x12, 0x2b, 0xb1, 0xb4, 0x43, 0xda, 0xba, 0x83, 0x26, 0xd4, 0x98, 0x7e, 0x64, 0x45, 0xca, 0x6f, - 0x81, 0xc2, 0xf8, 0x6f, 0x5d, 0x00, 0x75, 0xf3, 0x78, 0x36, 0x53, 0xd7, 0x4b, 0x89, 0xaf, 0x6f, - 0xfd, 0xfc, 0x78, 0xc9, 0xf8, 0xf5, 0xf1, 0x92, 0xf1, 0xfb, 0xe3, 0x25, 0xe3, 0xc3, 0x1b, 0xc7, - 0x7e, 0x90, 0x2c, 0x3f, 0x7f, 0xb6, 0xc6, 0xc1, 0x8a, 0x17, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, - 0xc0, 0xbd, 0xfe, 0xe3, 0x1e, 0x15, 0x00, 0x00, + // 1751 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x57, 0x7b, 0x3c, 0xf6, 0xf8, 0x8d, 0x3f, 0xc6, 0xe5, 0x6c, 0xb6, 0x77, 0x36, 0x58, 0xa6, + 0x17, 0x09, 0xc7, 0x40, 0xb7, 0x93, 0x8d, 0xb2, 0x2c, 0x1f, 0x87, 0x90, 0x58, 0xde, 0xa0, 0xec, + 0x12, 0x3a, 0xc0, 0x0a, 0x24, 0x88, 0x6a, 0x7a, 0xca, 0xe3, 0x4e, 0x7a, 0xba, 0x9a, 0xae, 0xea, + 0x09, 0x23, 0x6b, 0x0e, 0xf0, 0x0f, 0x70, 0xe0, 0x5f, 0x58, 0x09, 0x71, 0x42, 0x48, 0x5c, 0x38, + 0x70, 0x45, 0x1c, 0x91, 0xf8, 0x07, 0x50, 0xc4, 0x85, 0x23, 0x17, 0xce, 0xa8, 0x5e, 0x57, 0x57, + 0x7f, 0x78, 0xec, 0x38, 0xb2, 0x21, 0x7b, 0x9a, 0x7a, 0xef, 0xd5, 0x7b, 0xef, 0x57, 0xf3, 0x3e, + 0xaa, 0xfa, 0xc1, 0x7b, 0xc9, 0xf3, 0x91, 0x47, 0x93, 0x30, 0x88, 0x42, 0x16, 0x4b, 0x2f, 0xe5, + 
0x51, 0xc4, 0x33, 0xf3, 0xeb, 0x26, 0x29, 0x97, 0x9c, 0x2c, 0x6b, 0xb2, 0x7f, 0x63, 0xc4, 0xf9, + 0x28, 0x62, 0x4a, 0xc1, 0xa3, 0x71, 0xcc, 0x25, 0x95, 0x21, 0x8f, 0x45, 0xbe, 0xad, 0xff, 0x68, + 0x14, 0xca, 0xe3, 0x6c, 0xe0, 0x06, 0x7c, 0xec, 0xd1, 0x74, 0xc4, 0x93, 0x94, 0x3f, 0xc3, 0xc5, + 0xd7, 0xb4, 0xbe, 0xf0, 0xb4, 0x37, 0xe1, 0x19, 0xce, 0xe4, 0x16, 0x8d, 0x92, 0x63, 0x7a, 0xcb, + 0x1b, 0xb1, 0x98, 0xa5, 0x54, 0xb2, 0xa1, 0xb6, 0x76, 0xe7, 0xf9, 0xd7, 0x85, 0x1b, 0x72, 0xb5, + 0x7d, 0x4c, 0x83, 0xe3, 0x30, 0x66, 0xe9, 0xb4, 0xd4, 0x1f, 0x33, 0x49, 0xbd, 0xc9, 0x69, 0xad, + 0x77, 0x35, 0x42, 0xa4, 0x06, 0xd9, 0x91, 0xc7, 0xc6, 0x89, 0x9c, 0xe6, 0x42, 0xe7, 0x01, 0xf4, + 0xfc, 0xdc, 0xef, 0xc3, 0xf8, 0x88, 0x7f, 0x3f, 0x63, 0xe9, 0x94, 0x10, 0x58, 0x8c, 0xe9, 0x98, + 0xd9, 0xd6, 0x8e, 0xb5, 0xbb, 0xe2, 0xe3, 0x9a, 0xdc, 0x80, 0x15, 0xf5, 0x2b, 0x12, 0x1a, 0x30, + 0x7b, 0x01, 0x05, 0x25, 0xc3, 0xb9, 0x03, 0xd7, 0x2a, 0x56, 0x1e, 0x85, 0x42, 0xe6, 0x96, 0x6a, + 0x5a, 0x56, 0x53, 0xeb, 0xd7, 0x16, 0x6c, 0x3c, 0x61, 0xf2, 0xe1, 0x98, 0x8e, 0x98, 0xcf, 0x7e, + 0x9e, 0x31, 0x21, 0x89, 0x0d, 0xc5, 0x3f, 0xab, 0xf7, 0x17, 0xa4, 0xb2, 0x15, 0xf0, 0x58, 0x52, + 0x75, 0xea, 0x02, 0x81, 0x61, 0x90, 0x6b, 0xd0, 0x0e, 0x95, 0x1d, 0xbb, 0x85, 0x92, 0x9c, 0x20, + 0x3d, 0x68, 0x49, 0x3a, 0xb2, 0x17, 0x91, 0xa7, 0x96, 0x75, 0x44, 0xed, 0x26, 0xa2, 0x63, 0x20, + 0x3f, 0x8c, 0x87, 0x5c, 0x9f, 0xe5, 0xd5, 0x98, 0xfa, 0xd0, 0x49, 0xd9, 0x24, 0x14, 0x21, 0x8f, + 0x11, 0x52, 0xcb, 0x37, 0x74, 0xdd, 0x53, 0xab, 0xe9, 0xe9, 0x21, 0xbc, 0xe5, 0x33, 0x21, 0x69, + 0x2a, 0x1b, 0xce, 0x5e, 0xff, 0xcf, 0xff, 0x29, 0xbc, 0xf5, 0x38, 0xe5, 0x63, 0x2e, 0xd9, 0x65, + 0x4d, 0x29, 0x8d, 0xa3, 0x2c, 0x8a, 0x10, 0x6e, 0xc7, 0xc7, 0xb5, 0x73, 0x08, 0x5b, 0xf7, 0x06, + 0xfc, 0x0a, 0x70, 0x1e, 0xc2, 0x96, 0xcf, 0x64, 0x3a, 0xbd, 0xb4, 0xa1, 0xa7, 0xb0, 0xa9, 0x6d, + 0x7c, 0x4a, 0x65, 0x70, 0x7c, 0x30, 0x61, 0x31, 0x9a, 0x91, 0xd3, 0xc4, 0x98, 0x51, 0x6b, 0x72, + 0x17, 0xba, 0x69, 0x99, 0x96, 0x68, 0xa8, 0x7b, 0xfb, 0x9a, 0x5b, 0x54, 0x72, 0x25, 0x65, 0xfd, + 0xea, 0x46, 0xe7, 0x29, 0xac, 0x7d, 0x52, 0x78, 0x53, 0x8c, 0xf3, 0xf3, 0x98, 0xec, 0xc3, 0x16, + 0x9d, 0xd0, 0x30, 0xa2, 0x83, 0x88, 0x19, 0x3d, 0x61, 0x2f, 0xec, 0xb4, 0x76, 0x57, 0xfc, 0x79, + 0x22, 0xe7, 0x3e, 0x6c, 0x34, 0xea, 0x85, 0xec, 0x43, 0xa7, 0x68, 0x00, 0xb6, 0xb5, 0xd3, 0x3a, + 0x13, 0xa8, 0xd9, 0xe5, 0x7c, 0x00, 0xdd, 0x1f, 0xb1, 0x54, 0xe5, 0x1a, 0x62, 0xdc, 0x85, 0x8d, + 0x42, 0xa4, 0xd9, 0x1a, 0x69, 0x93, 0xed, 0xfc, 0x76, 0x09, 0xba, 0x15, 0x93, 0xe4, 0x31, 0x00, + 0x1f, 0x3c, 0x63, 0x81, 0xfc, 0x98, 0x49, 0x8a, 0x4a, 0xdd, 0xdb, 0xfb, 0x6e, 0xde, 0x6b, 0xdc, + 0x6a, 0xaf, 0x71, 0x93, 0xe7, 0x23, 0xc5, 0x10, 0xae, 0xea, 0x35, 0xee, 0xe4, 0x96, 0xfb, 0x3d, + 0xa3, 0xe7, 0x57, 0x6c, 0x90, 0xeb, 0xb0, 0x24, 0x24, 0x95, 0x99, 0xd0, 0xc1, 0xd3, 0x94, 0xaa, + 0xa4, 0x31, 0x13, 0xa2, 0xac, 0xd3, 0x82, 0x54, 0xe1, 0x0b, 0x03, 0x1e, 0xeb, 0x52, 0xc5, 0xb5, + 0xaa, 0x2e, 0x21, 0x55, 0x27, 0x1b, 0x4d, 0x75, 0xa9, 0x1a, 0x5a, 0xed, 0x17, 0x92, 0x25, 0xf6, + 0x52, 0xbe, 0x5f, 0xad, 0x55, 0x94, 0x04, 0x93, 0x9f, 0xb2, 0x70, 0x74, 0x2c, 0xed, 0xe5, 0x3c, + 0x4a, 0x86, 0x41, 0x1c, 0x58, 0xa5, 0x81, 0xcc, 0x68, 0xa4, 0x37, 0x74, 0x70, 0x43, 0x8d, 0xa7, + 0xba, 0x48, 0xca, 0xe8, 0x70, 0x6a, 0xaf, 0xec, 0x58, 0xbb, 0x6d, 0x3f, 0x27, 0x14, 0xea, 0x20, + 0x4b, 0x53, 0x16, 0x4b, 0x1b, 0x90, 0x5f, 0x90, 0x4a, 0x32, 0x64, 0x22, 0x4c, 0xd9, 0xd0, 0xee, + 0xe6, 0x12, 0x4d, 0x2a, 0x49, 0x96, 0x0c, 0x55, 0x17, 0xb6, 0x57, 0x73, 0x89, 0x26, 0x15, 0x4a, + 0x93, 0x12, 0xf6, 0x1a, 
0xca, 0x4a, 0x06, 0xd9, 0x81, 0x6e, 0x9a, 0xf7, 0x05, 0x36, 0xbc, 0x27, + 0xed, 0x75, 0x04, 0x59, 0x65, 0x91, 0x6d, 0x00, 0xdd, 0xe1, 0x55, 0x88, 0x37, 0x70, 0x43, 0x85, + 0x43, 0x3e, 0x54, 0x16, 0x92, 0x28, 0x0c, 0xe8, 0x13, 0x26, 0x85, 0xdd, 0xc3, 0x5c, 0x7a, 0xbb, + 0xcc, 0x25, 0x23, 0xd3, 0x79, 0x5f, 0xee, 0x55, 0xaa, 0xec, 0x17, 0x09, 0x4b, 0xc3, 0x31, 0x8b, + 0xa5, 0xb0, 0x37, 0x1b, 0xaa, 0x07, 0x46, 0x96, 0xab, 0x56, 0xf6, 0x92, 0x6f, 0xc1, 0x2a, 0x8d, + 0x69, 0x34, 0x15, 0xa1, 0xf0, 0xb3, 0x58, 0xd8, 0x04, 0x75, 0x6d, 0xa3, 0x7b, 0xaf, 0x14, 0xa2, + 0x72, 0x6d, 0x37, 0xb9, 0x0b, 0x60, 0x5a, 0xb9, 0xb0, 0xb7, 0x50, 0xf7, 0xba, 0xd1, 0xbd, 0x5f, + 0x88, 0x50, 0xb3, 0xb2, 0x93, 0xfc, 0x0c, 0xda, 0x2a, 0xf2, 0xc2, 0xbe, 0x86, 0x2a, 0x1f, 0xb9, + 0xe5, 0x75, 0xeb, 0x16, 0xd7, 0x2d, 0x2e, 0x9e, 0x16, 0x35, 0x50, 0xa6, 0xb0, 0xe1, 0x14, 0xd7, + 0xad, 0x7b, 0x9f, 0xc6, 0x34, 0x9d, 0x3e, 0x91, 0x2c, 0xf1, 0x73, 0xb3, 0xce, 0x9f, 0x17, 0x60, + 0xbd, 0x7e, 0xea, 0xff, 0x41, 0xb1, 0x14, 0xa9, 0xbf, 0x50, 0x4f, 0x7d, 0x73, 0xb1, 0xb4, 0x1a, + 0x17, 0x4b, 0x59, 0x5c, 0x8b, 0x67, 0x15, 0x57, 0xbb, 0x5e, 0x5c, 0x8d, 0x94, 0x58, 0x7a, 0x8d, + 0x94, 0x68, 0xc6, 0x75, 0xf9, 0x75, 0xe2, 0xea, 0xfc, 0xa7, 0x05, 0xeb, 0x75, 0xeb, 0xff, 0xc7, + 0x66, 0x53, 0xfc, 0xaf, 0xad, 0x33, 0xfe, 0xd7, 0xc5, 0xb9, 0xff, 0xab, 0xaa, 0xca, 0x36, 0x5e, + 0x7f, 0x9a, 0x52, 0xfc, 0x00, 0x33, 0x03, 0x9b, 0x4d, 0xc7, 0xd7, 0x94, 0xe2, 0xd3, 0x40, 0x86, + 0x13, 0x86, 0xbd, 0xa6, 0xe3, 0x6b, 0x4a, 0xc5, 0x21, 0x51, 0x46, 0xd9, 0x0b, 0xec, 0x31, 0x1d, + 0xbf, 0x20, 0x73, 0xef, 0xf8, 0x6f, 0x08, 0xdd, 0x61, 0x0c, 0x5d, 0x6f, 0x0b, 0xd0, 0x6c, 0x0b, + 0x7d, 0xe8, 0x48, 0x36, 0x4e, 0x22, 0x2a, 0x19, 0x76, 0x9a, 0x15, 0xdf, 0xd0, 0xe4, 0xab, 0xb0, + 0x29, 0x02, 0x1a, 0xb1, 0x07, 0xfc, 0x45, 0xfc, 0x80, 0xd1, 0x61, 0x14, 0xc6, 0x0c, 0x9b, 0xce, + 0x8a, 0x7f, 0x5a, 0xa0, 0x50, 0xe3, 0xdb, 0x48, 0xd8, 0x6b, 0x78, 0x3f, 0x69, 0x8a, 0x7c, 0x09, + 0x16, 0x13, 0x3e, 0x14, 0xf6, 0x3a, 0x06, 0xb8, 0x67, 0x02, 0xfc, 0x98, 0x0f, 0x31, 0xb0, 0x28, + 0x55, 0xff, 0x69, 0x12, 0xc6, 0x23, 0x6c, 0x3b, 0x1d, 0x1f, 0xd7, 0xc8, 0xe3, 0xf1, 0xc8, 0xee, + 0x69, 0x1e, 0x8f, 0x47, 0xce, 0x9f, 0x2c, 0x58, 0xd6, 0x9a, 0x6f, 0x38, 0xe2, 0xa6, 0xa5, 0xe7, + 0xc5, 0xa2, 0x5b, 0x3a, 0x46, 0x02, 0x7b, 0xaa, 0xc0, 0x68, 0x63, 0x24, 0x72, 0xda, 0xf9, 0x10, + 0xd6, 0x6a, 0x1d, 0x67, 0xee, 0x0b, 0xc5, 0xbc, 0x37, 0x17, 0x2a, 0xef, 0x4d, 0xe7, 0xdf, 0x16, + 0x2c, 0x7f, 0x97, 0x0f, 0x3e, 0x07, 0xc7, 0xde, 0x06, 0x18, 0x33, 0x99, 0x86, 0x81, 0x7a, 0x75, + 0xe8, 0xb3, 0x57, 0x38, 0xe4, 0x23, 0x58, 0x29, 0x6f, 0x99, 0x36, 0x82, 0xdb, 0xbb, 0x18, 0xb8, + 0x1f, 0x84, 0x63, 0xe6, 0x97, 0xca, 0xce, 0x67, 0x2d, 0xd8, 0x68, 0x74, 0x81, 0xcf, 0x71, 0x93, + 0xdc, 0x06, 0x10, 0x59, 0x10, 0x30, 0x21, 0x8e, 0xb2, 0x48, 0x87, 0xbe, 0xc2, 0x51, 0x7a, 0x47, + 0x34, 0x8c, 0xd8, 0x10, 0x8b, 0xbd, 0xed, 0x6b, 0x4a, 0xbd, 0x1e, 0xc2, 0x38, 0xe0, 0x71, 0x10, + 0x65, 0xa2, 0x28, 0xf9, 0xb6, 0x5f, 0xe3, 0xa9, 0x9c, 0x60, 0x69, 0xca, 0x53, 0x2c, 0xfb, 0xb6, + 0x9f, 0x13, 0xaa, 0xb0, 0x9e, 0xf1, 0x81, 0x2a, 0xf8, 0x7a, 0x61, 0xe9, 0x3c, 0xf1, 0x51, 0x4a, + 0xde, 0x07, 0x88, 0x79, 0xac, 0x79, 0x36, 0xe0, 0xde, 0x2d, 0xb3, 0xf7, 0x13, 0x23, 0xf2, 0x2b, + 0xdb, 0xc8, 0x9e, 0xea, 0xf8, 0x2a, 0xa4, 0xc2, 0xee, 0x36, 0xac, 0x7f, 0x9c, 0xf3, 0xfd, 0x62, + 0x83, 0xf3, 0x99, 0x05, 0x50, 0x9a, 0x51, 0x58, 0x27, 0x34, 0xca, 0x8a, 0xa4, 0xce, 0x89, 0x33, + 0x33, 0xac, 0x9e, 0x4d, 0xad, 0xf3, 0xb3, 0x69, 0xf1, 0x32, 0xd9, 0xf4, 0x07, 0x0b, 0x96, 0x35, + 0xf6, 0xb9, 0x75, 0xb7, 0x07, 0x3d, 0x1d, 0xad, 
0xfb, 0x3c, 0x1e, 0x86, 0x32, 0x34, 0x39, 0x71, + 0x8a, 0xaf, 0xce, 0x18, 0xf0, 0x2c, 0x96, 0x08, 0xb8, 0xed, 0xe7, 0x84, 0x6a, 0x97, 0xd5, 0xa8, + 0x3d, 0x0a, 0xc7, 0x61, 0x8e, 0xb9, 0xed, 0x9f, 0x16, 0xa8, 0xb8, 0xab, 0x0c, 0xc8, 0x52, 0xbd, + 0x31, 0xcf, 0x98, 0x1a, 0xef, 0xf6, 0xbf, 0xd6, 0x60, 0x5d, 0xbf, 0xa7, 0x9f, 0xb0, 0x74, 0x12, + 0x06, 0x8c, 0x08, 0x58, 0x3f, 0x64, 0xb2, 0xfa, 0xc8, 0x7e, 0x67, 0xde, 0x6b, 0x1e, 0xbf, 0x92, + 0xfb, 0x73, 0x1f, 0xfa, 0xce, 0xfe, 0xaf, 0xfe, 0xfe, 0xcf, 0xdf, 0x2c, 0xec, 0x91, 0x5d, 0x1c, + 0x2d, 0x4c, 0x6e, 0x95, 0xf3, 0x81, 0x13, 0xf3, 0xe9, 0x31, 0xcb, 0xd7, 0x33, 0x2f, 0x54, 0x2e, + 0x66, 0xd0, 0xc3, 0x0f, 0xa2, 0x4b, 0xb9, 0xbd, 0x8b, 0x6e, 0xf7, 0x89, 0x7b, 0x51, 0xb7, 0xde, + 0x0b, 0xe5, 0x73, 0xdf, 0x22, 0x13, 0xe8, 0xa9, 0x2f, 0x99, 0x8a, 0x31, 0x41, 0xbe, 0x30, 0xcf, + 0x87, 0x99, 0x0f, 0xf4, 0xed, 0xb3, 0xc4, 0xce, 0x4d, 0x84, 0xf1, 0x1e, 0xf9, 0xe2, 0xb9, 0x30, + 0xf0, 0xd8, 0xbf, 0xb4, 0x60, 0xb3, 0x79, 0xee, 0x57, 0x7a, 0xee, 0x37, 0xc5, 0xe5, 0xa7, 0xa4, + 0xe3, 0xa1, 0xef, 0x9b, 0xe4, 0xcb, 0xaf, 0xf4, 0x6d, 0xce, 0xfe, 0x63, 0x58, 0x3d, 0x64, 0xd2, + 0x7c, 0xe1, 0x91, 0xeb, 0x6e, 0x3e, 0x74, 0x71, 0x8b, 0xa1, 0x8b, 0x7b, 0x30, 0x4e, 0xe4, 0xb4, + 0x5f, 0x3e, 0x6a, 0x6b, 0x1f, 0x98, 0xce, 0x3b, 0xe8, 0x72, 0x8b, 0x6c, 0x16, 0x2e, 0xcb, 0xaf, + 0xcb, 0xdf, 0x5b, 0xea, 0x0d, 0x55, 0x1d, 0x15, 0x90, 0xed, 0xca, 0xd3, 0x6d, 0xce, 0x0c, 0xa1, + 0x7f, 0x70, 0xb9, 0x77, 0xb0, 0xb6, 0x56, 0xa4, 0x42, 0xff, 0x2b, 0x17, 0x49, 0x05, 0x7d, 0x7d, + 0x7e, 0xc3, 0xda, 0x43, 0xc4, 0xf5, 0x89, 0x44, 0x05, 0xf1, 0xdc, 0x51, 0xc5, 0x1b, 0x41, 0x9c, + 0xe4, 0x48, 0x14, 0xe2, 0xdf, 0x59, 0xb0, 0x5a, 0x1d, 0x72, 0x90, 0x1b, 0xe5, 0x03, 0xf7, 0xf4, + 0xec, 0xe3, 0xaa, 0xd0, 0xde, 0x41, 0xb4, 0x6e, 0xff, 0xe6, 0x45, 0xd0, 0x52, 0x85, 0x43, 0x61, + 0xfd, 0x4b, 0x3e, 0x35, 0x2b, 0xb2, 0x1a, 0xe7, 0x5c, 0x65, 0x1d, 0x35, 0xe6, 0x69, 0x57, 0x05, + 0xd5, 0x47, 0xa8, 0x8f, 0xfa, 0x87, 0xe7, 0x43, 0xd5, 0xdc, 0x99, 0x27, 0x98, 0xf4, 0x4e, 0xcc, + 0x87, 0xda, 0xcc, 0x3b, 0xc1, 0xf7, 0xd1, 0xb7, 0xf7, 0xf6, 0x66, 0xde, 0x89, 0xa4, 0xa3, 0x99, + 0x3a, 0xc8, 0x1f, 0x2d, 0xe8, 0x56, 0xa6, 0x6d, 0xe4, 0x5d, 0x73, 0x88, 0xd3, 0x33, 0xb8, 0xab, + 0x3a, 0xc7, 0x3d, 0x3c, 0xc7, 0x37, 0xfb, 0x77, 0x2f, 0x78, 0x8e, 0x2c, 0x1e, 0x72, 0xef, 0xa4, + 0x78, 0x55, 0xcc, 0x8a, 0x5c, 0xa9, 0xce, 0xb1, 0x2a, 0xb9, 0x32, 0x67, 0xbc, 0xf5, 0x46, 0x72, + 0x25, 0x55, 0x38, 0x14, 0xd6, 0xc7, 0xb0, 0xac, 0x87, 0x3e, 0x67, 0x76, 0xa4, 0xf2, 0x16, 0xa8, + 0x0c, 0x93, 0x9c, 0xb7, 0xd1, 0xdd, 0x26, 0xd9, 0x28, 0xdc, 0x4d, 0x72, 0xe1, 0x77, 0x0e, 0xfe, + 0xfa, 0x72, 0xdb, 0xfa, 0xdb, 0xcb, 0x6d, 0xeb, 0x1f, 0x2f, 0xb7, 0xad, 0x9f, 0x7c, 0x70, 0xe1, + 0xf1, 0x76, 0x7d, 0x98, 0x3e, 0x58, 0x42, 0x14, 0xef, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0xa1, + 0x08, 0xa7, 0x61, 0x6c, 0x17, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
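The central API change threaded through the provider hunks above is the new GetMetadata(metric) method on the Provider interface: most providers return nil, while Prometheus surfaces its resolved query under the ResolvedPrometheusQuery key. Below is a minimal, illustrative sketch of a conforming implementation; the exampleprovider package and the exampleProvider type are assumptions made only for this sketch and are not part of the diff.

package exampleprovider

import (
	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
)

// exampleProvider is a hypothetical provider, used only to illustrate the
// GetMetadata contract added to the Provider interface in metricproviders.go.
type exampleProvider struct{}

// GetMetadata returns extra key/value pairs to persist alongside the metric
// result; returning nil means the provider has nothing additional to record.
func (p *exampleProvider) GetMetadata(metric v1alpha1.Metric) map[string]string {
	if metric.Provider.Prometheus == nil || metric.Provider.Prometheus.Query == "" {
		return nil
	}
	// Mirrors the Prometheus provider in this change, which stores the resolved query string.
	return map[string]string{"ResolvedPrometheusQuery": metric.Provider.Prometheus.Query}
}

Kayenta, New Relic, Wavefront, and WebMetric take the nil path, which is what the updated tests assert via assert.Nil(t, metricsMetadata).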
@@ -2964,6 +3175,28 @@ func (m *ReplicaSetInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Pong { + i-- + if m.Pong { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.Ping { + i-- + if m.Ping { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } if len(m.Pods) > 0 { for iNdEx := len(m.Pods) - 1; iNdEx >= 0; iNdEx-- { { @@ -3215,6 +3448,25 @@ func (m *JobInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.StartedAt != nil { + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRollout(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.MetricName) > 0 { + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintRollout(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x22 + } if len(m.Icon) > 0 { i -= len(m.Icon) copy(dAtA[i:], m.Icon) @@ -3268,6 +3520,34 @@ func (m *AnalysisRunInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRollout(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NonJobInfo) > 0 { + for iNdEx := len(m.NonJobInfo) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonJobInfo[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRollout(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } if len(m.Jobs) > 0 { for iNdEx := len(m.Jobs) - 1; iNdEx >= 0; iNdEx-- { { @@ -3336,49 +3616,165 @@ func (m *AnalysisRunInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintRollout(dAtA []byte, offset int, v uint64) int { - offset -= sovRollout(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *NonJobInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *RolloutInfoQuery) Size() (n int) { - if m == nil { - return 0 - } + +func (m *NonJobInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonJobInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRollout(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovRollout(uint64(l)) + if m.StartedAt != nil { + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRollout(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.MetricName) > 0 { + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintRollout(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x1a } - return n + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], 
m.Status) + i = encodeVarintRollout(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintRollout(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RolloutInfoListQuery) Size() (n int) { - if m == nil { - return 0 +func (m *Metrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovRollout(uint64(l)) - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.FailureLimit != 0 { + i = encodeVarintRollout(dAtA, i, uint64(m.FailureLimit)) + i-- + dAtA[i] = 0x28 + } + if m.InconclusiveLimit != 0 { + i = encodeVarintRollout(dAtA, i, uint64(m.InconclusiveLimit)) + i-- + dAtA[i] = 0x20 + } + if m.Count != 0 { + i = encodeVarintRollout(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x18 + } + if len(m.SuccessCondition) > 0 { + i -= len(m.SuccessCondition) + copy(dAtA[i:], m.SuccessCondition) + i = encodeVarintRollout(dAtA, i, uint64(len(m.SuccessCondition))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRollout(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRollout(dAtA []byte, offset int, v uint64) int { + offset -= sovRollout(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RolloutInfoQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RolloutInfoListQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) } return n } @@ -3796,6 +4192,12 @@ func (m *ReplicaSetInfo) Size() (n int) { n += 1 + l + sovRollout(uint64(l)) } } + if m.Ping { + n += 2 + } + if m.Pong { + n += 3 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3871,6 +4273,14 @@ func (m *JobInfo) Size() (n int) { if l > 0 { n += 1 + l + sovRollout(uint64(l)) } + l = len(m.MetricName) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + if m.StartedAt != nil { + l = m.StartedAt.Size() + n += 1 + l + sovRollout(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3916,6 +4326,75 @@ func (m *AnalysisRunInfo) Size() (n int) { n += 1 + l + sovRollout(uint64(l)) } } + if len(m.NonJobInfo) > 0 { + for _, e := range m.NonJobInfo { + l = e.Size() + n += 1 + l + sovRollout(uint64(l)) + } + } + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovRollout(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*NonJobInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + l = len(m.MetricName) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + if m.StartedAt != nil { + l = m.StartedAt.Size() + n += 1 + l + sovRollout(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + l = len(m.SuccessCondition) + if l > 0 { + n += 1 + l + sovRollout(uint64(l)) + } + if m.Count != 0 { + n += 1 + sovRollout(uint64(m.Count)) + } + if m.InconclusiveLimit != 0 { + n += 1 + sovRollout(uint64(m.InconclusiveLimit)) + } + if m.FailureLimit != 0 { + n += 1 + sovRollout(uint64(m.FailureLimit)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6104,7 +6583,7 @@ func (m *ExperimentInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int32(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6406,7 +6885,7 @@ func (m *ReplicaSetInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int32(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6659,6 +7138,46 @@ func (m *ReplicaSetInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ping", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ping = bool(v != 0) + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pong", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Pong = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRollout(dAtA[iNdEx:]) @@ -7127,6 +7646,74 @@ func (m *JobInfo) Unmarshal(dAtA []byte) error { } m.Icon = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ if msglen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &v1.Time{} + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRollout(dAtA[iNdEx:]) @@ -7260,7 +7847,7 @@ func (m *AnalysisRunInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int32(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7407,6 +7994,429 @@ func (m *AnalysisRunInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonJobInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonJobInfo = append(m.NonJobInfo, &NonJobInfo{}) + if err := m.NonJobInfo[len(m.NonJobInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &Metrics{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRollout(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRollout + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonJobInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonJobInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonJobInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &v1.Time{} + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipRollout(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRollout + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessCondition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SuccessCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InconclusiveLimit", wireType) + } + m.InconclusiveLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InconclusiveLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureLimit", wireType) + } + m.FailureLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.FailureLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRollout(dAtA[iNdEx:]) diff --git a/pkg/apiclient/rollout/rollout.proto b/pkg/apiclient/rollout/rollout.proto index 0150a5196c..0d21c5d9d6 100644 --- a/pkg/apiclient/rollout/rollout.proto +++ b/pkg/apiclient/rollout/rollout.proto @@ -101,7 +101,7 @@ message RolloutInfo { message ExperimentInfo { k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta objectMeta = 1; string icon = 2; - int32 revision = 3; + int64 revision = 3; string status = 4; string message = 5; repeated ReplicaSetInfo replicaSets = 6; @@ -112,7 +112,7 @@ message ReplicaSetInfo { k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta objectMeta = 1; string status = 2; string icon = 3; - int32 revision = 4; + int64 revision = 4; bool stable = 5; bool canary = 6; bool active = 7; @@ -123,6 +123,8 @@ message ReplicaSetInfo { string scaleDownDeadline = 12; repeated string images = 13; repeated PodInfo pods = 14; + bool ping = 15; + bool pong = 16; } message PodInfo { @@ -142,20 +144,39 @@ message JobInfo { k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta objectMeta = 1; string status = 2; string icon = 3; + string metricName = 4; + k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; } message AnalysisRunInfo { k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta objectMeta = 1; string icon = 2; - int32 revision = 3; + int64 revision = 3; string status = 4; int32 successful = 5; int32 failed = 6; int32 inconclusive = 7; int32 error = 8; repeated JobInfo jobs = 9; + repeated NonJobInfo nonJobInfo = 10; + repeated Metrics metrics = 11; } +message NonJobInfo { + string value = 1; + string status = 2; + string metricName = 3; + k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt =4; +} + +message Metrics { + string name=1; + string successCondition = 2; + int32 count = 3; + int32 inconclusiveLimit = 4; + int32 failureLimit = 5; + } + service RolloutService { rpc GetRolloutInfo(RolloutInfoQuery) returns (RolloutInfo) { option (google.api.http).get = "/api/v1/rollouts/{namespace}/{name}/info"; diff --git a/pkg/apiclient/rollout/rollout.swagger.json b/pkg/apiclient/rollout/rollout.swagger.json index 000a008fbb..ac5b5a404b 100644 --- a/pkg/apiclient/rollout/rollout.swagger.json +++ b/pkg/apiclient/rollout/rollout.swagger.json @@ -526,6 +526,10 @@ "type": "string", "title": "RootService references the service in the ingress to the controller should add the action to" }, + "stickinessConfig": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StickinessConfig", + "title": "AdditionalForwardConfig allows to specify further settings on the ForwaredConfig\n+optional" + }, "annotationPrefix": { "type": "string", "title": "AnnotationPrefix has to match the configured annotation prefix on the alb ingress controller\n+optional" @@ -594,6 +598,61 @@ }, "title": "AntiAffinity defines which inter-pod scheduling rule to use for anti-affinity injection" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshTrafficRouting": { + "type": "object", + "properties": { + "virtualService": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualService", + "title": "VirtualService references an AppMesh VirtualService and VirtualRouter to modify to shape traffic" + }, + "virtualNodeGroup": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeGroup", + "title": "VirtualNodeGroup references an 
AppMesh Route targets that are formed by a set of VirtualNodes that are used to shape traffic" + } + }, + "title": "AppMeshTrafficRouting configuration for AWS AppMesh service mesh to enable fine grain configuration" + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeGroup": { + "type": "object", + "properties": { + "canaryVirtualNodeRef": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeReference", + "title": "CanaryVirtualNodeRef is the virtual node ref to modify labels with canary ReplicaSet pod template hash value" + }, + "stableVirtualNodeRef": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeReference", + "title": "StableVirtualNodeRef is the virtual node name to modify labels with stable ReplicaSet pod template hash value" + } + }, + "title": "AppMeshVirtualNodeGroup holds information about targets used for routing traffic to a virtual service" + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeReference": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of VirtualNode CR" + } + }, + "title": "AppMeshVirtualNodeReference holds a reference to VirtualNode.appmesh.k8s.aws" + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualService": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of virtual service" + }, + "routes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Routes is list of HTTP routes within virtual router associated with virtual service to edit. If omitted, virtual service must have a single route of this type." + } + }, + "title": "AppMeshVirtualService holds information on the virtual service the rollout needs to modify" + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ArgumentValueFrom": { "type": "object", "properties": { @@ -730,6 +789,10 @@ "weights": { "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TrafficWeights", "title": "Weights records the weights which have been set on traffic provider. Only valid when using traffic routing" + }, + "stablePingPong": { + "type": "string", + "title": "StablePingPong For the ping-pong feature holds the current stable service, ping or pong" } }, "title": "CanaryStatus status fields that only pertain to the canary rollout" @@ -757,6 +820,14 @@ "setCanaryScale": { "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetCanaryScale", "title": "SetCanaryScale defines how to scale the newRS without changing traffic weight\n+optional" + }, + "setHeaderRoute": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetHeaderRoute", + "title": "SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service\n+optional" + }, + "setMirrorRoute": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetMirrorRoute", + "title": "SetMirrorRoutes Mirrors traffic that matches rules to a particular destination\n+optional" } }, "description": "CanaryStep defines a step of a canary deployment." 
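As a rough sketch of how the `setHeaderRoute`, `setMirrorRoute`, and `managedRoutes` fields defined above could fit together in a canary strategy, assuming a traffic router that supports managed routes; the service, route, header, and path names below are placeholders, not values prescribed by the schema:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: example-rollout              # placeholder name
spec:
  strategy:
    canary:
      canaryService: example-canary  # assumed pre-existing Services
      stableService: example-stable
      trafficRouting:
        managedRoutes:               # order of this list sets route precedence in the traffic router
          - name: header-to-canary
          - name: mirror-to-canary
      steps:
        - setHeaderRoute:            # send traffic with a matching header to the canary service
            name: header-to-canary
            match:
              - headerName: X-Canary # placeholder header
                headerValue:
                  exact: "true"      # StringMatch also allows prefix or regex
        - pause: {}
        - setMirrorRoute:            # mirror a share of matching requests to the canary
            name: mirror-to-canary
            percentage: 20
            match:
              - method:
                  exact: GET
                path:
                  prefix: /api       # placeholder path prefix
        - pause: {duration: 10m}
```

Per the `managedRoutes` description above, the order of that list also becomes the precedence of the routes in the upstream traffic router.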
@@ -825,10 +896,24 @@ "dynamicStableScale": { "type": "boolean", "description": "DynamicStableScale is a traffic routing feature which dynamically scales the stable\nReplicaSet to minimize total pods which are running during an update. This is calculated by\nscaling down the stable as traffic is increased to canary. When disabled (the default behavior)\nthe stable ReplicaSet remains fully scaled to support instantaneous aborts." + }, + "pingPong": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PingPongSpec", + "title": "PingPongSpec holds the ping and pong services" } }, "title": "CanaryStrategy defines parameters for a Replica Based Canary" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun": { + "type": "object", + "properties": { + "metricName": { + "type": "string", + "description": "Name of the metric which needs to be evaluated in the Dry-Run mode. Wildcard '*' is supported and denotes all\nthe available metrics." + } + }, + "description": "DryRun defines the settings for running the analysis in Dry-Run mode." + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.FieldRef": { "type": "object", "properties": { @@ -838,6 +923,19 @@ } } }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.HeaderRoutingMatch": { + "type": "object", + "properties": { + "headerName": { + "type": "string", + "title": "HeaderName the name of the request header" + }, + "headerValue": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch", + "title": "HeaderValue the value of the header" + } + } + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.IstioDestinationRule": { "type": "object", "properties": { @@ -901,6 +999,29 @@ }, "title": "IstioVirtualService holds information on the virtual service the rollout needs to modify" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MangedRoutes": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MeasurementRetention": { + "type": "object", + "properties": { + "metricName": { + "type": "string", + "description": "MetricName is the name of the metric on which this retention policy should be applied." + }, + "limit": { + "type": "integer", + "format": "int32", + "description": "Limit is the maximum number of measurements to be retained for this given metric." + } + }, + "description": "MeasurementRetention defines the settings for retaining the number of measurements during the analysis." 
+ }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NginxTrafficRouting": { "type": "object", "properties": { @@ -918,6 +1039,13 @@ "type": "string" }, "title": "+optional" + }, + "additionalStableIngresses": { + "type": "array", + "items": { + "type": "string" + }, + "title": "AdditionalStableIngresses refers to the names of `Ingress` resources in the same namespace as the `Rollout` in a multi ingress scenario\n+optional" } }, "title": "NginxTrafficRouting configuration for Nginx ingress controller to control traffic routing" @@ -952,6 +1080,20 @@ }, "title": "PauseCondition the reason for a pause and when it started" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PingPongSpec": { + "type": "object", + "properties": { + "pingService": { + "type": "string", + "title": "name of the ping service" + }, + "pongService": { + "type": "string", + "title": "name of the pong service" + } + }, + "description": "PingPongSpec holds the ping and pong service name." + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PodTemplateMetadata": { "type": "object", "properties": { @@ -1018,6 +1160,20 @@ "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunArgument" }, "title": "Args the arguments that will be added to the AnalysisRuns\n+patchMergeKey=name\n+patchStrategy=merge" + }, + "dryRun": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun" + }, + "title": "DryRun object contains the settings for running the analysis in Dry-Run mode\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional" + }, + "measurementRetention": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MeasurementRetention" + }, + "title": "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional" } }, "title": "RolloutAnalysis defines a template that is used to create a analysisRun" @@ -1232,7 +1388,7 @@ }, "progressDeadlineAbort": { "type": "boolean", - "title": "ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds\nis exceeded if analysis is not used. Default is false.\n+optional" + "title": "ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds\nis exceeded.\n+optional" }, "restartAt": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", @@ -1257,7 +1413,7 @@ "items": { "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PauseCondition" }, - "title": "PauseConditions indicates why the rollout is currently paused" + "title": "PauseConditions is a list of reasons why rollout became automatically paused (e.g.\nCanaryPauseStep, BlueGreenPause, InconclusiveAnalysis). The items in this list are populated\nby the controller but are cleared by the user (e.g. plugin, argo-cd resume action) when they\nwish to unpause. 
If pause conditions is empty, but controllerPause is true, it indicates\nthe user manually unpaused the Rollout" }, "controllerPause": { "type": "boolean", @@ -1400,10 +1556,45 @@ "ambassador": { "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AmbassadorTrafficRouting", "title": "Ambassador holds specific configuration to use Ambassador to route traffic" + }, + "appMesh": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshTrafficRouting", + "title": "AppMesh holds specific configuration to use AppMesh to route traffic" + }, + "traefik": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TraefikTrafficRouting", + "title": "Traefik holds specific configuration to use Traefik to route traffic" + }, + "managedRoutes": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MangedRoutes" + }, + "description": "A list of HTTP routes that Argo Rollouts manages, the order of this array also becomes the precedence in the upstream\ntraffic router." } }, "title": "RolloutTrafficRouting hosts all the different configuration for supported service meshes to enable more fine-grained traffic routing" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RouteMatch": { + "type": "object", + "properties": { + "method": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch", + "title": "Method What http methods should be mirrored\n+optional" + }, + "path": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch", + "title": "Path What url paths should be mirrored\n+optional" + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch" + }, + "title": "Headers What request with matching headers should be mirrored\n+optional" + } + } + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SMITrafficRouting": { "type": "object", "properties": { @@ -1438,6 +1629,73 @@ }, "title": "SetCanaryScale defines how to scale the newRS without changing traffic weight" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetHeaderRoute": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name this is the name of the route to use for the mirroring of traffic this also needs\nto be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field" + }, + "match": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.HeaderRoutingMatch" + } + } + }, + "title": "SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service" + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetMirrorRoute": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name this is the name of the route to use for the mirroring of traffic this also needs\nto be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field" + }, + "match": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RouteMatch" + }, + "title": "Match Contains a list of rules that if mated will mirror the traffic to the services\n+optional" + }, + "percentage": { + "type": "integer", + "format": 
"int32", + "title": "Services The list of services to mirror the traffic to if the method, path, headers match\nService string `json:\"service\" protobuf:\"bytes,3,opt,name=service\"`\nPercentage What percent of the traffic that matched the rules should be mirrored" + } + } + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StickinessConfig": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "durationSeconds": { + "type": "string", + "format": "int64" + } + } + }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch": { + "type": "object", + "properties": { + "exact": { + "type": "string", + "title": "Exact The string must match exactly" + }, + "prefix": { + "type": "string", + "title": "Prefix The string will be prefixed matched" + }, + "regex": { + "type": "string", + "title": "Regex The string will be regular expression matched" + } + }, + "title": "StringMatch Used to define what type of matching we will use exact, prefix, or regular expression" + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TLSRoute": { "type": "object", "properties": { @@ -1456,6 +1714,16 @@ }, "description": "TLSRoute holds the information on the virtual service's TLS/HTTPS routes that are desired to be matched for changing weights." }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TraefikTrafficRouting": { + "type": "object", + "properties": { + "weightedTraefikServiceName": { + "type": "string", + "title": "TraefikServiceName refer to the name of the Traefik service used to route traffic to the service" + } + }, + "title": "TraefikTrafficRouting defines the configuration required to use Traefik as traffic router" + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TrafficWeights": { "type": "object", "properties": { @@ -1562,20 +1830,20 @@ "properties": { "volumeID": { "type": "string", - "title": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + "title": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" }, "fsType": { "type": "string", - "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + "title": "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" }, "partition": { "type": "integer", "format": "int32", - "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional" + "title": "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional" }, "readOnly": { "type": "boolean", - "title": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\".\nIf omitted, the default is \"false\".\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + "title": "readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" } }, "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk\nmust also be in the same AWS zone as the kubelet. An AWS EBS disk\ncan only be mounted as read/write once. AWS EBS volumes support\nownership management and SELinux relabeling." @@ -1603,27 +1871,27 @@ "properties": { "diskName": { "type": "string", - "title": "The Name of the data disk in the blob storage" + "title": "diskName is the Name of the data disk in the blob storage" }, "diskURI": { "type": "string", - "title": "The URI the data disk in the blob storage" + "title": "diskURI is the URI of data disk in the blob storage" }, "cachingMode": { "type": "string", - "title": "Host Caching mode: None, Read Only, Read Write.\n+optional" + "title": "cachingMode is the Host Caching mode: None, Read Only, Read Write.\n+optional" }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + "title": "fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" }, "readOnly": { "type": "boolean", - "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" }, "kind": { "type": "string", - "title": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" + "title": "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" } }, "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." 
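Earlier hunks in this file also add the ping-pong and ALB stickiness schema (`PingPongSpec`, the `pingPong` field of `CanaryStrategy`, and `StickinessConfig` on the ALB settings). A minimal sketch of how those fields might be combined, assuming an ALB-based rollout; the Ingress and Service names, port, and stickiness duration are placeholders rather than values taken from the schema:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: example-pingpong            # placeholder name
spec:
  strategy:
    canary:
      pingPong:
        pingService: example-ping   # the two Services the controller alternates between
        pongService: example-pong
      trafficRouting:
        alb:
          ingress: example-ingress  # placeholder Ingress referencing the root service
          rootService: example-root
          servicePort: 80
          stickinessConfig:         # optional target-group stickiness added by this schema
            enabled: true
            durationSeconds: 3600
      steps:
        - setWeight: 20
        - pause: {duration: 5m}
```

The `stablePingPong` status field added above records which of the two services currently acts as the stable one.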
@@ -1633,15 +1901,15 @@ "properties": { "secretName": { "type": "string", - "title": "the name of secret that contains Azure Storage Account Name and Key" + "title": "secretName is the name of secret that contains Azure Storage Account Name and Key" }, "shareName": { "type": "string", - "title": "Share Name" + "title": "shareName is the azure share Name" }, "readOnly": { "type": "boolean", - "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" } }, "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod." @@ -1651,26 +1919,26 @@ "properties": { "driver": { "type": "string", - "description": "Driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." + "description": "driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." }, "readOnly": { "type": "boolean", - "title": "Specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional" + "title": "readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional" }, "fsType": { "type": "string", - "title": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional" + "title": "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional" }, "volumeAttributes": { "type": "object", "additionalProperties": { "type": "string" }, - "title": "VolumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional" + "title": "volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional" }, "nodePublishSecretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "NodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed.\n+optional" + "title": "nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. 
If the\nsecret object contains more than one secret, all secret references are passed.\n+optional" } }, "title": "Represents a source location of a volume to mount, managed by an external CSI driver" @@ -1703,27 +1971,27 @@ "items": { "type": "string" }, - "title": "Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + "title": "monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" }, "path": { "type": "string", - "title": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional" + "title": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional" }, "user": { "type": "string", - "title": "Optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" }, "secretFile": { "type": "string", - "title": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" }, "readOnly": { "type": "boolean", - "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" } }, "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod\nCephfs volumes do not support ownership management or SELinux relabeling." @@ -1733,19 +2001,19 @@ "properties": { "volumeID": { "type": "string", - "title": "volume id used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + "title": "volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + "title": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" }, "readOnly": { "type": "boolean", - "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + "title": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "Optional: points to a secret object containing parameters used to connect\nto OpenStack.\n+optional" + "title": "secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack.\n+optional" } }, "description": "Represents a cinder volume resource in Openstack.\nA Cinder volume must exist before mounting to a container.\nThe volume must also be in the same region as the kubelet.\nCinder volumes support ownership management and SELinux relabeling." @@ -1780,7 +2048,7 @@ "title": "Specify whether the ConfigMap or its key must be defined\n+optional" } }, - "description": "Selects a key from a ConfigMap." + "title": "Selects a key from a ConfigMap.\n+structType=atomic" }, "k8s.io.api.core.v1.ConfigMapProjection": { "type": "object", @@ -1793,11 +2061,11 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" }, - "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + "title": "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" }, "optional": { "type": "boolean", - "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + "title": "optional specify whether the ConfigMap or its keys must be defined\n+optional" } }, "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names,\nunless the items element is populated with specific mappings of keys to paths.\nNote that this is identical to a configmap volume source without the default\nmode." @@ -1813,16 +2081,16 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" }, - "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. 
If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + "title": "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" }, "defaultMode": { "type": "integer", "format": "int32", - "title": "Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + "title": "defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" }, "optional": { "type": "boolean", - "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + "title": "optional specify whether the ConfigMap or its keys must be defined\n+optional" } }, "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nvolume as files using the keys in the Data field as the file names, unless\nthe items element is populated with specific mappings of keys to paths.\nConfigMap volumes support ownership management and SELinux relabeling." @@ -1836,21 +2104,21 @@ }, "image": { "type": "string", - "title": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional" + "title": "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional" }, "command": { "type": "array", "items": { "type": "string" }, - "title": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + "title": "Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" }, "args": { "type": "array", "items": { "type": "string" }, - "title": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + "title": "Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. 
Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" }, "workingDir": { "type": "string", @@ -1925,7 +2193,7 @@ }, "securityContext": { "$ref": "#/definitions/k8s.io.api.core.v1.SecurityContext", - "title": "Security options the pod should run with.\nMore info: https://kubernetes.io/docs/concepts/policy/security-context/\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/\n+optional" + "title": "SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/\n+optional" }, "stdin": { "type": "boolean", @@ -2029,11 +2297,11 @@ "properties": { "medium": { "type": "string", - "title": "What type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + "title": "medium represents what type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" }, "sizeLimit": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity", - "title": "Total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: http://kubernetes.io/docs/user-guide/volumes#emptydir\n+optional" + "title": "sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: http://kubernetes.io/docs/user-guide/volumes#emptydir\n+optional" } }, "description": "Represents an empty directory for a pod.\nEmpty directory volumes support ownership management and SELinux relabeling." @@ -2065,7 +2333,7 @@ }, "value": { "type": "string", - "title": "Variable references $(VAR_NAME) are expanded\nusing the previous defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. The $(VAR_NAME)\nsyntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped\nreferences will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\".\n+optional" + "title": "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. 
Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\".\n+optional" }, "valueFrom": { "$ref": "#/definitions/k8s.io.api.core.v1.EnvVarSource", @@ -2105,10 +2373,10 @@ }, "targetContainerName": { "type": "string", - "title": "If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container is run in whatever namespaces are shared\nfor the pod. Note that the container runtime must support this feature.\n+optional" + "description": "If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not\nsupport namespace targeting then the result of setting this field is undefined.\n+optional" } }, - "description": "An EphemeralContainer is a container that may be added temporarily to an existing pod for\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\nscheduling guarantees, and they will not be restarted when they exit or when a pod is\nremoved or restarted. If an ephemeral container causes a pod to exceed its resource\nallocation, the pod may be evicted.\nEphemeral containers may not be added by directly updating the pod spec. They must be added\nvia the pod's ephemeralcontainers subresource, and they will appear in the pod spec\nonce added.\nThis is an alpha feature enabled by the EphemeralContainers feature flag." + "description": "An EphemeralContainer is a temporary container that you may add to an existing Pod for\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\nscheduling guarantees, and they will not be restarted when they exit or when a Pod is\nremoved or restarted. The kubelet may evict a Pod if an ephemeral container causes the\nPod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing\nPod. Ephemeral containers may not be removed or restarted.\n\nThis is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate." }, "k8s.io.api.core.v1.EphemeralContainerCommon": { "type": "object", @@ -2119,21 +2387,21 @@ }, "image": { "type": "string", - "title": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images" + "title": "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images" }, "command": { "type": "array", "items": { "type": "string" }, - "title": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + "title": "Entrypoint array. Not executed within a shell.\nThe image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" }, "args": { "type": "array", "items": { "type": "string" }, - "title": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + "title": "Arguments to the entrypoint.\nThe image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" }, "workingDir": { "type": "string", @@ -2144,7 +2412,7 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.ContainerPort" }, - "description": "Ports are not allowed for ephemeral containers." + "title": "Ports are not allowed for ephemeral containers.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol" }, "envFrom": { "type": "array", @@ -2169,7 +2437,7 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.VolumeMount" }, - "title": "Pod volumes to mount into the container's filesystem.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge" + "title": "Pod volumes to mount into the container's filesystem. 
Subpath mounts are not allowed for ephemeral containers.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge" }, "volumeDevices": { "type": "array", @@ -2208,7 +2476,7 @@ }, "securityContext": { "$ref": "#/definitions/k8s.io.api.core.v1.SecurityContext", - "title": "SecurityContext is not allowed for ephemeral containers.\n+optional" + "title": "Optional: SecurityContext defines the security options the ephemeral container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\n+optional" }, "stdin": { "type": "boolean", @@ -2256,27 +2524,27 @@ "items": { "type": "string" }, - "title": "Optional: FC target worldwide names (WWNs)\n+optional" + "title": "targetWWNs is Optional: FC target worldwide names (WWNs)\n+optional" }, "lun": { "type": "integer", "format": "int32", - "title": "Optional: FC target lun number\n+optional" + "title": "lun is Optional: FC target lun number\n+optional" }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + "title": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" }, "readOnly": { "type": "boolean", - "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" }, "wwids": { "type": "array", "items": { "type": "string" }, - "title": "Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional" + "title": "wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional" } }, "description": "Represents a Fibre Channel volume.\nFibre Channel volumes can only be mounted as read/write once.\nFibre Channel volumes support ownership management and SELinux relabeling." @@ -2286,26 +2554,26 @@ "properties": { "driver": { "type": "string", - "description": "Driver is the name of the driver to use for this volume." + "description": "driver is the name of the driver to use for this volume." }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.\n+optional" + "title": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.\n+optional" }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "Optional: SecretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. 
If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.\n+optional" + "title": "secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.\n+optional" }, "readOnly": { "type": "boolean", - "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" }, "options": { "type": "object", "additionalProperties": { "type": "string" }, - "title": "Optional: Extra command options if any.\n+optional" + "title": "options is Optional: this field holds extra command options if any.\n+optional" } }, "description": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." @@ -2315,11 +2583,11 @@ "properties": { "datasetName": { "type": "string", - "title": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional" + "title": "datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional" }, "datasetUUID": { "type": "string", - "title": "UUID of the dataset. This is unique identifier of a Flocker dataset\n+optional" + "title": "datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset\n+optional" } }, "description": "Represents a Flocker volume mounted by the Flocker agent.\nOne and only one of datasetName and datasetUUID should be set.\nFlocker volumes do not support ownership management or SELinux relabeling." @@ -2329,38 +2597,52 @@ "properties": { "pdName": { "type": "string", - "title": "Unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + "title": "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" }, "fsType": { "type": "string", - "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + "title": "fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" }, "partition": { "type": "integer", "format": "int32", - "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + "title": "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" }, "readOnly": { "type": "boolean", - "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + "title": "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" } }, "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must\nalso be in the same GCE project and zone as the kubelet. A GCE PD\ncan only be mounted as read/write once or read-only many times. GCE\nPDs support ownership management and SELinux relabeling." }, + "k8s.io.api.core.v1.GRPCAction": { + "type": "object", + "properties": { + "port": { + "type": "integer", + "format": "int32", + "description": "Port number of the gRPC service. Number must be in the range 1 to 65535." + }, + "service": { + "type": "string", + "description": "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.\n+optional\n+default=\"\"" + } + } + }, "k8s.io.api.core.v1.GitRepoVolumeSource": { "type": "object", "properties": { "repository": { "type": "string", - "title": "Repository URL" + "title": "repository is the URL" }, "revision": { "type": "string", - "title": "Commit hash for the specified revision.\n+optional" + "title": "revision is the commit hash for the specified revision.\n+optional" }, "directory": { "type": "string", - "title": "Target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional" + "title": "directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional" } }, "description": "Represents a volume that is populated with the contents of a git repository.\nGit repo volumes do not support ownership management.\nGit repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." @@ -2370,15 +2652,15 @@ "properties": { "endpoints": { "type": "string", - "title": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + "title": "endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" }, "path": { "type": "string", - "title": "Path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + "title": "path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" }, "readOnly": { "type": "boolean", - "title": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional" + "title": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional" } }, "description": "Represents a Glusterfs mount that lasts the lifetime of a pod.\nGlusterfs volumes do not support ownership management or SELinux relabeling." @@ -2426,24 +2708,6 @@ }, "title": "HTTPHeader describes a custom header to be used in HTTP probes" }, - "k8s.io.api.core.v1.Handler": { - "type": "object", - "properties": { - "exec": { - "$ref": "#/definitions/k8s.io.api.core.v1.ExecAction", - "title": "One and only one of the following should be specified.\nExec specifies the action to take.\n+optional" - }, - "httpGet": { - "$ref": "#/definitions/k8s.io.api.core.v1.HTTPGetAction", - "title": "HTTPGet specifies the http request to perform.\n+optional" - }, - "tcpSocket": { - "$ref": "#/definitions/k8s.io.api.core.v1.TCPSocketAction", - "title": "TCPSocket specifies an action involving a TCP port.\nTCP hooks not yet supported\nTODO: implement a realistic TCP lifecycle hook\n+optional" - } - }, - "description": "Handler defines a specific action that should be taken\nTODO: pass structured data to these actions, and document that data here." - }, "k8s.io.api.core.v1.HostAlias": { "type": "object", "properties": { @@ -2466,11 +2730,11 @@ "properties": { "path": { "type": "string", - "title": "Path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + "title": "path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" }, "type": { "type": "string", - "title": "Type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n+optional" + "title": "type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n+optional" } }, "description": "Represents a host path mapped into a pod.\nHost path volumes do not support ownership management or SELinux relabeling." @@ -2480,51 +2744,51 @@ "properties": { "targetPortal": { "type": "string", - "description": "iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." }, "iqn": { "type": "string", - "description": "Target iSCSI Qualified Name." + "description": "iqn is the target iSCSI Qualified Name." }, "lun": { "type": "integer", "format": "int32", - "description": "iSCSI Target Lun number." + "description": "lun represents iSCSI Target Lun number." }, "iscsiInterface": { "type": "string", - "title": "iSCSI Interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional" + "title": "iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional" }, "fsType": { "type": "string", - "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + "title": "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" }, "readOnly": { "type": "boolean", - "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional" + "title": "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional" }, "portals": { "type": "array", "items": { "type": "string" }, - "title": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional" + "title": "portals is the iSCSI Target Portal List. 
The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional" }, "chapAuthDiscovery": { "type": "boolean", - "title": "whether support iSCSI Discovery CHAP authentication\n+optional" + "title": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication\n+optional" }, "chapAuthSession": { "type": "boolean", - "title": "whether support iSCSI Session CHAP authentication\n+optional" + "title": "chapAuthSession defines whether support iSCSI Session CHAP authentication\n+optional" }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "CHAP Secret for iSCSI target and initiator authentication\n+optional" + "title": "secretRef is the CHAP Secret for iSCSI target and initiator authentication\n+optional" }, "initiatorName": { "type": "string", - "title": "Custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional" + "title": "initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional" } }, "description": "Represents an ISCSI disk.\nISCSI volumes can only be mounted as read/write once.\nISCSI volumes support ownership management and SELinux relabeling." @@ -2534,16 +2798,16 @@ "properties": { "key": { "type": "string", - "description": "The key to project." + "description": "key is the key to project." }, "path": { "type": "string", - "description": "The relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + "description": "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." }, "mode": { "type": "integer", "format": "int32", - "title": "Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + "title": "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" } }, "description": "Maps a string key to a path within a volume." @@ -2552,16 +2816,34 @@ "type": "object", "properties": { "postStart": { - "$ref": "#/definitions/k8s.io.api.core.v1.Handler", + "$ref": "#/definitions/k8s.io.api.core.v1.LifecycleHandler", "title": "PostStart is called immediately after a container is created. 
If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" }, "preStop": { - "$ref": "#/definitions/k8s.io.api.core.v1.Handler", - "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The reason for termination is passed to the\nhandler. The Pod's termination grace period countdown begins before the\nPreStop hooked is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod. Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + "$ref": "#/definitions/k8s.io.api.core.v1.LifecycleHandler", + "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" } }, "description": "Lifecycle describes actions that the management system should take in response to container lifecycle\nevents. For the PostStart and PreStop lifecycle handlers, management of the container blocks\nuntil the action is complete, unless the container process fails, in which case the handler is aborted." }, + "k8s.io.api.core.v1.LifecycleHandler": { + "type": "object", + "properties": { + "exec": { + "$ref": "#/definitions/k8s.io.api.core.v1.ExecAction", + "title": "Exec specifies the action to take.\n+optional" + }, + "httpGet": { + "$ref": "#/definitions/k8s.io.api.core.v1.HTTPGetAction", + "title": "HTTPGet specifies the http request to perform.\n+optional" + }, + "tcpSocket": { + "$ref": "#/definitions/k8s.io.api.core.v1.TCPSocketAction", + "title": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.\n+optional" + } + }, + "description": "LifecycleHandler defines a specific action that should be taken in a lifecycle\nhook. One and only one of the fields, except TCPSocket must be specified." + }, "k8s.io.api.core.v1.LocalObjectReference": { "type": "object", "properties": { @@ -2570,22 +2852,22 @@ "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?\n+optional" } }, - "description": "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace." 
+ "title": "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace.\n+structType=atomic" }, "k8s.io.api.core.v1.NFSVolumeSource": { "type": "object", "properties": { "server": { "type": "string", - "title": "Server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + "title": "server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" }, "path": { "type": "string", - "title": "Path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + "title": "path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" }, "readOnly": { "type": "boolean", - "title": "ReadOnly here will force\nthe NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + "title": "readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" } }, "description": "Represents an NFS mount that lasts the lifetime of a pod.\nNFS volumes do not support ownership management or SELinux relabeling." @@ -2618,7 +2900,7 @@ "description": "Required. A list of node selector terms. The terms are ORed." } }, - "description": "A node selector represents the union of the results of one or more label queries\nover a set of nodes; that is, it represents the OR of the selectors represented\nby the node selector terms." + "title": "A node selector represents the union of the results of one or more label queries\nover a set of nodes; that is, it represents the OR of the selectors represented\nby the node selector terms.\n+structType=atomic" }, "k8s.io.api.core.v1.NodeSelectorRequirement": { "type": "object", @@ -2659,7 +2941,7 @@ "title": "A list of node selector requirements by node's fields.\n+optional" } }, - "description": "A null or empty node selector term matches no objects. The requirements of\nthem are ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm." + "title": "A null or empty node selector term matches no objects. The requirements of\nthem are ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm.\n+structType=atomic" }, "k8s.io.api.core.v1.ObjectFieldSelector": { "type": "object", @@ -2673,7 +2955,7 @@ "description": "Path of the field to select in the specified API version." } }, - "description": "ObjectFieldSelector selects an APIVersioned field of an object." 
+ "title": "ObjectFieldSelector selects an APIVersioned field of an object.\n+structType=atomic" }, "k8s.io.api.core.v1.PersistentVolumeClaimSpec": { "type": "object", @@ -2683,23 +2965,23 @@ "items": { "type": "string" }, - "title": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional" + "title": "accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional" }, "selector": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", - "title": "A label query over volumes to consider for binding.\n+optional" + "title": "selector is a label query over volumes to consider for binding.\n+optional" }, "resources": { "$ref": "#/definitions/k8s.io.api.core.v1.ResourceRequirements", - "title": "Resources represents the minimum resources the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional" + "title": "resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional" }, "volumeName": { "type": "string", - "title": "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional" + "title": "volumeName is the binding reference to the PersistentVolume backing this claim.\n+optional" }, "storageClassName": { "type": "string", - "title": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional" + "title": "storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional" }, "volumeMode": { "type": "string", @@ -2707,7 +2989,11 @@ }, "dataSource": { "$ref": "#/definitions/k8s.io.api.core.v1.TypedLocalObjectReference", - "title": "This field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\n* An existing custom resource that implements data population (Alpha)\nIn order to use custom resource types that implement data population,\nthe AnyVolumeDataSource feature gate must be enabled.\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\n+optional" + "title": "dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nIf the AnyVolumeDataSource feature gate is enabled, this field will always have\nthe same contents as the DataSourceRef field.\n+optional" + }, + "dataSourceRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.TypedLocalObjectReference", + "title": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. 
This may be any local object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the DataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, both fields (DataSource and DataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nThere are two important differences between DataSource and DataSourceRef:\n* While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n+optional" } }, "title": "PersistentVolumeClaimSpec describes the common attributes of storage devices\nand allows a Source for provider-specific attributes" @@ -2731,11 +3017,11 @@ "properties": { "claimName": { "type": "string", - "title": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + "title": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" }, "readOnly": { "type": "boolean", - "title": "Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional" + "title": "readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional" } }, "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.\nThis volume finds the bound PV and mounts that volume for the pod. A\nPersistentVolumeClaimVolumeSource is, essentially, a wrapper around another\ntype of volume that is owned by someone else (the system)." @@ -2745,11 +3031,11 @@ "properties": { "pdID": { "type": "string", - "title": "ID that identifies Photon Controller persistent disk" + "title": "pdID is the ID that identifies Photon Controller persistent disk" }, "fsType": { "type": "string", - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." } }, "description": "Represents a Photon Controller persistent disk resource." 
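The hunk above adds the `dataSourceRef` field to `PersistentVolumeClaimSpec`. As an illustrative aside (not part of the generated spec), here is a minimal Go sketch of populating that field through the `k8s.io/api/core/v1` types this schema is generated from; the snapshot name and storage size are hypothetical.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// pvcWithDataSourceRef is a minimal sketch of the dataSourceRef field
// documented above. The snapshot name and size are hypothetical.
func pvcWithDataSourceRef() corev1.PersistentVolumeClaimSpec {
	apiGroup := "snapshot.storage.k8s.io"
	return corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("1Gi"),
			},
		},
		// dataSourceRef may point at a VolumeSnapshot or any non-core
		// populator object; the provisioner pre-populates the new volume from it.
		DataSourceRef: &corev1.TypedLocalObjectReference{
			APIGroup: &apiGroup,
			Kind:     "VolumeSnapshot",
			Name:     "my-snapshot", // hypothetical snapshot name
		},
	}
}
```

Per the field description, when both `dataSource` and `dataSourceRef` are set they must agree; setting only `DataSourceRef` lets the API server mirror the value into `DataSource` for backwards compatibility.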
@@ -2786,7 +3072,7 @@ "items": { "type": "string" }, - "title": "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"\n+optional" + "title": "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\".\n+optional" }, "topologyKey": { "type": "string", @@ -2794,7 +3080,7 @@ }, "namespaceSelector": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", - "title": "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces.\nThis field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled.\n+optional" + "title": "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces.\n+optional" } }, "title": "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key \u003ctopologyKey\u003e matches that of any node on which\na pod of the set of pods is running" @@ -2860,6 +3146,16 @@ }, "description": "PodDNSConfigOption defines DNS resolver options of a pod." }, + "k8s.io.api.core.v1.PodOS": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the operating system. The currently supported values are linux and windows.\nAdditional value may be defined in future and can be one of:\nhttps://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration\nClients should expect to handle additional values and treat unrecognized values in this field as os: null" + } + }, + "description": "PodOS defines the OS parameters of a pod." + }, "k8s.io.api.core.v1.PodReadinessGate": { "type": "object", "properties": { @@ -2875,21 +3171,21 @@ "properties": { "seLinuxOptions": { "$ref": "#/definitions/k8s.io.api.core.v1.SELinuxOptions", - "title": "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\n+optional" + "title": "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. 
If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "windowsOptions": { "$ref": "#/definitions/k8s.io.api.core.v1.WindowsSecurityContextOptions", - "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux.\n+optional" }, "runAsUser": { "type": "string", "format": "int64", - "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "runAsGroup": { "type": "string", "format": "int64", - "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "runAsNonRoot": { "type": "boolean", @@ -2901,12 +3197,12 @@ "type": "string", "format": "int64" }, - "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. If unspecified, no groups will be added to\nany container.\n+optional" + "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. If unspecified, no groups will be added to\nany container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "fsGroup": { "type": "string", "format": "int64", - "description": "1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\n+optional", + "description": "1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. 
The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "title": "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:" }, "sysctls": { @@ -2914,15 +3210,15 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.Sysctl" }, - "title": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\n+optional" + "title": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "fsGroupChangePolicy": { "type": "string", - "title": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\nbefore being exposed inside Pod. This field will only apply to\nvolume types which support fsGroup based ownership(and permissions).\nIt will have no effect on ephemeral volume types such as: secret, configmaps\nand emptydir.\nValid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.\n+optional" + "title": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\nbefore being exposed inside Pod. This field will only apply to\nvolume types which support fsGroup based ownership(and permissions).\nIt will have no effect on ephemeral volume types such as: secret, configmaps\nand emptydir.\nValid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "seccompProfile": { "$ref": "#/definitions/k8s.io.api.core.v1.SeccompProfile", - "title": "The seccomp options to use by the containers in this pod.\n+optional" + "title": "The seccomp options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" } }, "description": "PodSecurityContext holds pod-level security attributes and common container settings.\nSome fields are also present in container.securityContext. Field values of\ncontainer.securityContext take precedence over field values of PodSecurityContext." @@ -2956,7 +3252,7 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.EphemeralContainer" }, - "title": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\nThis field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" + "title": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. 
In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\nThis field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" }, "restartPolicy": { "type": "string", @@ -2981,7 +3277,7 @@ "additionalProperties": { "type": "string" }, - "title": "NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n+optional" + "title": "NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n+optional\n+mapType=atomic" }, "serviceAccountName": { "type": "string", @@ -3024,7 +3320,7 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference" }, - "title": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\nin the case of docker, only DockerConfig type secrets are honored.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" + "title": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" }, "hostname": { "type": "string", @@ -3074,11 +3370,11 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.PodReadinessGate" }, - "title": "If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md\n+optional" + "title": "If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates\n+optional" }, "runtimeClassName": { "type": "string", - "title": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md\nThis is a beta feature as of Kubernetes v1.14.\n+optional" + "title": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class\n+optional" }, "enableServiceLinks": { "type": "boolean", @@ -3086,14 +3382,14 @@ }, "preemptionPolicy": { "type": "string", - "title": "PreemptionPolicy is the Policy for preempting pods with lower priority.\nOne of Never, PreemptLowerPriority.\nDefaults to PreemptLowerPriority if unset.\nThis field is beta-level, gated by the NonPreemptingPriority feature-gate.\n+optional" + "title": "PreemptionPolicy is the Policy for preempting pods with lower priority.\nOne of Never, PreemptLowerPriority.\nDefaults to PreemptLowerPriority if unset.\n+optional" }, "overhead": { "type": "object", "additionalProperties": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" }, - "title": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\nset. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md\nThis field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.\n+optional" + "title": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\nset. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md\n+optional" }, "topologySpreadConstraints": { "type": "array", @@ -3105,6 +3401,10 @@ "setHostnameAsFQDN": { "type": "boolean", "title": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).\nIn Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).\nIn Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN.\nIf a pod does not have FQDN, this has no effect.\nDefault to false.\n+optional" + }, + "os": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodOS", + "description": "Specifies the OS of the containers in the pod.\nSome pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset:\n-securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset:\n- spec.hostPID\n- spec.hostIPC\n- spec.securityContext.seLinuxOptions\n- spec.securityContext.seccompProfile\n- spec.securityContext.fsGroup\n- spec.securityContext.fsGroupChangePolicy\n- spec.securityContext.sysctls\n- spec.shareProcessNamespace\n- spec.securityContext.runAsUser\n- spec.securityContext.runAsGroup\n- spec.securityContext.supplementalGroups\n- spec.containers[*].securityContext.seLinuxOptions\n- spec.containers[*].securityContext.seccompProfile\n- spec.containers[*].securityContext.capabilities\n- spec.containers[*].securityContext.readOnlyRootFilesystem\n- spec.containers[*].securityContext.privileged\n- spec.containers[*].securityContext.allowPrivilegeEscalation\n- spec.containers[*].securityContext.procMount\n- spec.containers[*].securityContext.runAsUser\n- spec.containers[*].securityContext.runAsGroup\n+optional\nThis is a beta field and requires the IdentifyPodOS feature" } }, "description": "PodSpec is a description of a pod." @@ -3128,15 +3428,15 @@ "properties": { "volumeID": { "type": "string", - "title": "VolumeID uniquely identifies a Portworx volume" + "title": "volumeID uniquely identifies a Portworx volume" }, "fsType": { "type": "string", - "description": "FSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + "description": "fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." }, "readOnly": { "type": "boolean", - "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" } }, "description": "PortworxVolumeSource represents a Portworx volume resource." 
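The hunk above introduces `PodOS` and the corresponding `spec.os` field on `PodSpec`. As an illustrative aside, a minimal Go sketch of setting it via the `k8s.io/api/core/v1` types; the container name and image are hypothetical.

```go
package main

import corev1 "k8s.io/api/core/v1"

// linuxPodSpec is a minimal sketch of the new spec.os field documented above.
// When os.name is "linux", the Windows-only security fields must stay unset
// (and vice versa), as listed in the field description.
func linuxPodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		OS: &corev1.PodOS{Name: corev1.Linux},
		Containers: []corev1.Container{{
			Name:  "app",        // hypothetical container name
			Image: "nginx:1.21", // hypothetical image
		}},
	}
}
```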
@@ -3160,7 +3460,7 @@ "type": "object", "properties": { "handler": { - "$ref": "#/definitions/k8s.io.api.core.v1.Handler", + "$ref": "#/definitions/k8s.io.api.core.v1.ProbeHandler", "title": "The action taken to determine the health of a container" }, "initialDelaySeconds": { @@ -3191,11 +3491,33 @@ "terminationGracePeriodSeconds": { "type": "string", "format": "int64", - "title": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\n+optional" + "title": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\n+optional" } }, "description": "Probe describes a health check to be performed against a container to determine whether it is\nalive or ready to receive traffic." }, + "k8s.io.api.core.v1.ProbeHandler": { + "type": "object", + "properties": { + "exec": { + "$ref": "#/definitions/k8s.io.api.core.v1.ExecAction", + "title": "Exec specifies the action to take.\n+optional" + }, + "httpGet": { + "$ref": "#/definitions/k8s.io.api.core.v1.HTTPGetAction", + "title": "HTTPGet specifies the http request to perform.\n+optional" + }, + "tcpSocket": { + "$ref": "#/definitions/k8s.io.api.core.v1.TCPSocketAction", + "title": "TCPSocket specifies an action involving a TCP port.\n+optional" + }, + "grpc": { + "$ref": "#/definitions/k8s.io.api.core.v1.GRPCAction", + "title": "GRPC specifies an action involving a GRPC port.\nThis is a beta field and requires enabling GRPCContainerProbe feature gate.\n+featureGate=GRPCContainerProbe\n+optional" + } + }, + "description": "ProbeHandler defines a specific action that should be taken in a probe.\nOne and only one of the fields must be specified." 
+ }, "k8s.io.api.core.v1.ProjectedVolumeSource": { "type": "object", "properties": { @@ -3204,12 +3526,12 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.VolumeProjection" }, - "title": "list of volume projections\n+optional" + "title": "sources is the list of volume projections\n+optional" }, "defaultMode": { "type": "integer", "format": "int32", - "title": "Mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + "title": "defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" } }, "title": "Represents a projected volume source" @@ -3219,27 +3541,27 @@ "properties": { "registry": { "type": "string", - "title": "Registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" + "title": "registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" }, "volume": { "type": "string", - "description": "Volume is a string that references an already created Quobyte volume by name." + "description": "volume is a string that references an already created Quobyte volume by name." }, "readOnly": { "type": "boolean", - "title": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional" + "title": "readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional" }, "user": { "type": "string", - "title": "User to map volume access to\nDefaults to serivceaccount user\n+optional" + "title": "user to map volume access to\nDefaults to serivceaccount user\n+optional" }, "group": { "type": "string", - "title": "Group to map volume access to\nDefault is no group\n+optional" + "title": "group to map volume access to\nDefault is no group\n+optional" }, "tenant": { "type": "string", - "title": "Tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional" + "title": "tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional" } }, "description": "Represents a Quobyte mount that lasts the lifetime of a pod.\nQuobyte volumes do not support ownership management or SELinux relabeling." 
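The ProbeHandler definition introduced above adds a `grpc` action gated by the GRPCContainerProbe feature. A minimal sketch of a readiness probe that uses it; the container name, image, and port are assumptions for illustration, not values taken from this diff:

```yaml
# Illustrative sketch only: a readiness probe using the grpc handler described above.
# The image and port are hypothetical; the GRPCContainerProbe feature gate must be
# enabled for the grpc field to be honored.
apiVersion: v1
kind: Pod
metadata:
  name: grpc-probe-demo
spec:
  containers:
    - name: server
      image: registry.example.com/grpc-server:latest
      ports:
        - containerPort: 9090
      readinessProbe:
        grpc:
          port: 9090          # gRPC health checking port
        initialDelaySeconds: 5
        periodSeconds: 10
```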
@@ -3252,35 +3574,35 @@ "items": { "type": "string" }, - "title": "A collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + "title": "monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" }, "image": { "type": "string", - "title": "The rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + "title": "image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" }, "fsType": { "type": "string", - "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + "title": "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" }, "pool": { "type": "string", - "title": "The rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "user": { "type": "string", - "title": "The rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "keyring": { "type": "string", - "title": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "SecretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "secretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "readOnly": { "type": "boolean", - "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" } }, "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod.\nRBD volumes support ownership management and SELinux relabeling." 
@@ -3301,7 +3623,7 @@ "title": "Specifies the output format of the exposed resources, defaults to \"1\"\n+optional" } }, - "title": "ResourceFieldSelector represents container resources (cpu, memory) and their output format" + "title": "ResourceFieldSelector represents container resources (cpu, memory) and their output format\n+structType=atomic" }, "k8s.io.api.core.v1.ResourceRequirements": { "type": "object", @@ -3350,43 +3672,43 @@ "properties": { "gateway": { "type": "string", - "description": "The host address of the ScaleIO API Gateway." + "description": "gateway is the host address of the ScaleIO API Gateway." }, "system": { "type": "string", - "description": "The name of the storage system as configured in ScaleIO." + "description": "system is the name of the storage system as configured in ScaleIO." }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "description": "SecretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." + "description": "secretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." }, "sslEnabled": { "type": "boolean", - "title": "Flag to enable/disable SSL communication with Gateway, default false\n+optional" + "title": "sslEnabled Flag enable/disable SSL communication with Gateway, default false\n+optional" }, "protectionDomain": { "type": "string", - "title": "The name of the ScaleIO Protection Domain for the configured storage.\n+optional" + "title": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.\n+optional" }, "storagePool": { "type": "string", - "title": "The ScaleIO Storage Pool associated with the protection domain.\n+optional" + "title": "storagePool is the ScaleIO Storage Pool associated with the protection domain.\n+optional" }, "storageMode": { "type": "string", - "title": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional" + "title": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional" }, "volumeName": { "type": "string", - "description": "The name of a volume already created in the ScaleIO system\nthat is associated with this volume source." + "description": "volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source." }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional" + "title": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional" }, "readOnly": { "type": "boolean", - "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" } }, "title": "ScaleIOVolumeSource represents a persistent ScaleIO volume" @@ -3436,7 +3758,7 @@ "title": "Specify whether the Secret or its key must be defined\n+optional" } }, - "description": "SecretKeySelector selects a key of a Secret." 
+ "title": "SecretKeySelector selects a key of a Secret.\n+structType=atomic" }, "k8s.io.api.core.v1.SecretProjection": { "type": "object", @@ -3449,11 +3771,11 @@ "items": { "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" }, - "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + "title": "items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" }, "optional": { "type": "boolean", - "title": "Specify whether the Secret or its key must be defined\n+optional" + "title": "optional field specify whether the Secret or its key must be defined\n+optional" } }, "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names.\nNote that this is identical to a secret volume source without the default\nmode." @@ -3463,23 +3785,23 @@ "properties": { "secretName": { "type": "string", - "title": "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + "title": "secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" }, "items": { "type": "array", "items": { "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" }, - "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + "title": "items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' 
path or start with '..'.\n+optional" }, "defaultMode": { "type": "integer", "format": "int32", - "title": "Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + "title": "defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" }, "optional": { "type": "boolean", - "title": "Specify whether the Secret or its keys must be defined\n+optional" + "title": "optional field specify whether the Secret or its keys must be defined\n+optional" } }, "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume\nas files using the keys in the Data field as the file names.\nSecret volumes support ownership management and SELinux relabeling." @@ -3489,29 +3811,29 @@ "properties": { "capabilities": { "$ref": "#/definitions/k8s.io.api.core.v1.Capabilities", - "title": "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\n+optional" + "title": "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "privileged": { "type": "boolean", - "title": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\n+optional" + "title": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "seLinuxOptions": { "$ref": "#/definitions/k8s.io.api.core.v1.SELinuxOptions", - "title": "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + "title": "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "windowsOptions": { "$ref": "#/definitions/k8s.io.api.core.v1.WindowsSecurityContextOptions", - "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux.\n+optional" }, "runAsUser": { "type": "string", "format": "int64", - "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "runAsGroup": { "type": "string", "format": "int64", - "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "runAsNonRoot": { "type": "boolean", @@ -3519,19 +3841,19 @@ }, "readOnlyRootFilesystem": { "type": "boolean", - "title": "Whether this container has a read-only root filesystem.\nDefault is false.\n+optional" + "title": "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "allowPrivilegeEscalation": { "type": "boolean", - "title": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\n+optional" + "title": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. 
This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "procMount": { "type": "string", - "title": "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\n+optional" + "title": "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "seccompProfile": { "$ref": "#/definitions/k8s.io.api.core.v1.SeccompProfile", - "title": "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod \u0026 container level, the container options\noverride the pod options.\n+optional" + "title": "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod \u0026 container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" } }, "description": "SecurityContext holds security configuration that will be applied to a container.\nSome fields are present in both SecurityContext and PodSecurityContext. When both\nare set, the values in SecurityContext take precedence." @@ -3541,16 +3863,16 @@ "properties": { "audience": { "type": "string", - "title": "Audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional" + "title": "audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional" }, "expirationSeconds": { "type": "string", "format": "int64", - "title": "ExpirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.\n+optional" + "title": "expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.\n+optional" }, "path": { "type": "string", - "description": "Path is the path relative to the mount point of the file to project the\ntoken into." + "description": "path is the path relative to the mount point of the file to project the\ntoken into." 
} }, "description": "ServiceAccountTokenProjection represents a projected service account token\nvolume. This projection can be used to insert a service account token into\nthe pods runtime filesystem for use against APIs (Kubernetes API Server or\notherwise)." @@ -3560,23 +3882,23 @@ "properties": { "volumeName": { "type": "string", - "description": "VolumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." + "description": "volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." }, "volumeNamespace": { "type": "string", - "title": "VolumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional" + "title": "volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional" }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + "title": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" }, "readOnly": { "type": "boolean", - "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + "title": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" }, "secretRef": { "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", - "title": "SecretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.\n+optional" + "title": "secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.\n+optional" } }, "description": "Represents a StorageOS persistent volume resource." 
@@ -3642,19 +3964,24 @@ "maxSkew": { "type": "integer", "format": "int32", - "description": "MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 1/1/0:\n+-------+-------+-------+\n| zone1 | zone2 | zone3 |\n+-------+-------+-------+\n| P | P | |\n+-------+-------+-------+\n- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;\nscheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)\nviolate MaxSkew(1).\n- if MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. Default value is 1 and 0 is not allowed." + "description": "MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nThe global minimum is the minimum number of matching pods in an eligible domain\nor zero if the number of eligible domains is less than MinDomains.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 2/2/1:\nIn this case, the global minimum is 1.\n+-------+-------+-------+\n| zone1 | zone2 | zone3 |\n+-------+-------+-------+\n| P P | P P | P |\n+-------+-------+-------+\n- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\nviolate MaxSkew(1).\n- if MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. Default value is 1 and 0 is not allowed." }, "topologyKey": { "type": "string", - "description": "TopologyKey is the key of node labels. Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nIt's a required field." + "description": "TopologyKey is the key of node labels. Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nWe define a domain as a particular instance of a topology.\nAlso, we define an eligible domain as a domain whose nodes match the node selector.\ne.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology.\nAnd, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology.\nIt's a required field." 
}, "whenUnsatisfiable": { "type": "string", - "description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy\nthe spread constraint.\n- DoNotSchedule (default) tells the scheduler not to schedule it.\n- ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod\nif and only if every possible node assigment for that pod would violate\n\"MaxSkew\" on some topology.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 3/1/1:\n+-------+-------+-------+\n| zone1 | zone2 | zone3 |\n+-------+-------+-------+\n| P P P | P | P |\n+-------+-------+-------+\nIf WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled\nto zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\nMaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler\nwon't make it *more* imbalanced.\nIt's a required field." + "description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy\nthe spread constraint.\n- DoNotSchedule (default) tells the scheduler not to schedule it.\n- ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod\nif and only if every possible node assignment for that pod would violate\n\"MaxSkew\" on some topology.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 3/1/1:\n+-------+-------+-------+\n| zone1 | zone2 | zone3 |\n+-------+-------+-------+\n| P P P | P | P |\n+-------+-------+-------+\nIf WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled\nto zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\nMaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler\nwon't make it *more* imbalanced.\nIt's a required field." 
}, "labelSelector": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", "title": "LabelSelector is used to find matching pods.\nPods that match this label selector are counted to determine the number of pods\nin their corresponding topology domain.\n+optional" + }, + "minDomains": { + "type": "integer", + "format": "int32", + "description": "MinDomains indicates a minimum number of eligible domains.\nWhen the number of eligible domains with matching topology keys is less than minDomains,\nPod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed.\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\nthis value has no effect on scheduling.\nAs a result, when the number of eligible domains is less than minDomains,\nscheduler won't schedule more than maxSkew Pods to those domains.\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\nValid values are integers greater than 0.\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\nlabelSelector spread as 2/2/2:\n+-------+-------+-------+\n| zone1 | zone2 | zone3 |\n+-------+-------+-------+\n| P P | P P | P P |\n+-------+-------+-------+\nThe number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0.\nIn this situation, new pod with the same labelSelector cannot be scheduled,\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\nit will violate MaxSkew.\n\nThis is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.\n+optional" } }, "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology." @@ -3675,18 +4002,18 @@ "title": "Name is the name of resource being referenced" } }, - "description": "TypedLocalObjectReference contains enough information to let you locate the\ntyped referenced object inside the same namespace." + "title": "TypedLocalObjectReference contains enough information to let you locate the\ntyped referenced object inside the same namespace.\n+structType=atomic" }, "k8s.io.api.core.v1.Volume": { "type": "object", "properties": { "name": { "type": "string", - "title": "Volume's name.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + "title": "name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" }, "volumeSource": { "$ref": "#/definitions/k8s.io.api.core.v1.VolumeSource", - "description": "VolumeSource represents the location and type of the mounted volume.\nIf not specified, the Volume is implied to be an EmptyDir.\nThis implied behavior is deprecated and will be removed in a future version." + "description": "volumeSource represents the location and type of the mounted volume.\nIf not specified, the Volume is implied to be an EmptyDir.\nThis implied behavior is deprecated and will be removed in a future version." } }, "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod." 
@@ -3740,19 +4067,19 @@ "properties": { "secret": { "$ref": "#/definitions/k8s.io.api.core.v1.SecretProjection", - "title": "information about the secret data to project\n+optional" + "title": "secret information about the secret data to project\n+optional" }, "downwardAPI": { "$ref": "#/definitions/k8s.io.api.core.v1.DownwardAPIProjection", - "title": "information about the downwardAPI data to project\n+optional" + "title": "downwardAPI information about the downwardAPI data to project\n+optional" }, "configMap": { "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapProjection", - "title": "information about the configMap data to project\n+optional" + "title": "configMap information about the configMap data to project\n+optional" }, "serviceAccountToken": { "$ref": "#/definitions/k8s.io.api.core.v1.ServiceAccountTokenProjection", - "title": "information about the serviceAccountToken data to project\n+optional" + "title": "serviceAccountToken is information about the serviceAccountToken data to project\n+optional" } }, "title": "Projection that may be projected along with other supported volume types" @@ -3762,119 +4089,119 @@ "properties": { "hostPath": { "$ref": "#/definitions/k8s.io.api.core.v1.HostPathVolumeSource", - "title": "HostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional" + "title": "hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. 
Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional" }, "emptyDir": { "$ref": "#/definitions/k8s.io.api.core.v1.EmptyDirVolumeSource", - "title": "EmptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + "title": "emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" }, "gcePersistentDisk": { "$ref": "#/definitions/k8s.io.api.core.v1.GCEPersistentDiskVolumeSource", - "title": "GCEPersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + "title": "gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" }, "awsElasticBlockStore": { "$ref": "#/definitions/k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource", - "title": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + "title": "awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" }, "gitRepo": { "$ref": "#/definitions/k8s.io.api.core.v1.GitRepoVolumeSource", - "title": "GitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional" + "title": "gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional" }, "secret": { "$ref": "#/definitions/k8s.io.api.core.v1.SecretVolumeSource", - "title": "Secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + "title": "secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" }, "nfs": { "$ref": "#/definitions/k8s.io.api.core.v1.NFSVolumeSource", - "title": "NFS represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + "title": "nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" }, "iscsi": { "$ref": "#/definitions/k8s.io.api.core.v1.ISCSIVolumeSource", - "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\n+optional" + "title": "iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\n+optional" }, "glusterfs": { "$ref": "#/definitions/k8s.io.api.core.v1.GlusterfsVolumeSource", - "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\n+optional" + "title": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\n+optional" }, "persistentVolumeClaim": { "$ref": "#/definitions/k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource", - "title": "PersistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + "title": "persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" }, "rbd": { "$ref": "#/definitions/k8s.io.api.core.v1.RBDVolumeSource", - "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md\n+optional" + "title": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md\n+optional" }, "flexVolume": { "$ref": "#/definitions/k8s.io.api.core.v1.FlexVolumeSource", - "title": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.\n+optional" + "title": "flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.\n+optional" }, "cinder": { "$ref": "#/definitions/k8s.io.api.core.v1.CinderVolumeSource", - "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + "title": "cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: 
https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" }, "cephfs": { "$ref": "#/definitions/k8s.io.api.core.v1.CephFSVolumeSource", - "title": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime\n+optional" + "title": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime\n+optional" }, "flocker": { "$ref": "#/definitions/k8s.io.api.core.v1.FlockerVolumeSource", - "title": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\n+optional" + "title": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\n+optional" }, "downwardAPI": { "$ref": "#/definitions/k8s.io.api.core.v1.DownwardAPIVolumeSource", - "title": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional" + "title": "downwardAPI represents downward API about the pod that should populate this volume\n+optional" }, "fc": { "$ref": "#/definitions/k8s.io.api.core.v1.FCVolumeSource", - "title": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional" + "title": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional" }, "azureFile": { "$ref": "#/definitions/k8s.io.api.core.v1.AzureFileVolumeSource", - "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional" + "title": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional" }, "configMap": { "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapVolumeSource", - "title": "ConfigMap represents a configMap that should populate this volume\n+optional" + "title": "configMap represents a configMap that should populate this volume\n+optional" }, "vsphereVolume": { "$ref": "#/definitions/k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource", - "title": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional" + "title": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional" }, "quobyte": { "$ref": "#/definitions/k8s.io.api.core.v1.QuobyteVolumeSource", - "title": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional" + "title": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional" }, "azureDisk": { "$ref": "#/definitions/k8s.io.api.core.v1.AzureDiskVolumeSource", - "title": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional" + "title": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional" }, "photonPersistentDisk": { "$ref": "#/definitions/k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource", - "title": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + "title": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" }, "projected": { "$ref": "#/definitions/k8s.io.api.core.v1.ProjectedVolumeSource", - "title": "Items for all in one resources secrets, configmaps, and downward API" + "title": "projected items for all in one resources secrets, configmaps, and downward API" }, "portworxVolume": { "$ref": "#/definitions/k8s.io.api.core.v1.PortworxVolumeSource", 
- "title": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional" + "title": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional" }, "scaleIO": { "$ref": "#/definitions/k8s.io.api.core.v1.ScaleIOVolumeSource", - "title": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional" + "title": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional" }, "storageos": { "$ref": "#/definitions/k8s.io.api.core.v1.StorageOSVolumeSource", - "title": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional" + "title": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional" }, "csi": { "$ref": "#/definitions/k8s.io.api.core.v1.CSIVolumeSource", - "title": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).\n+optional" + "title": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).\n+optional" }, "ephemeral": { "$ref": "#/definitions/k8s.io.api.core.v1.EphemeralVolumeSource", - "description": "Ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time.\n\nThis is a beta feature and only available when the GenericEphemeralVolume\nfeature gate is enabled.\n\n+optional" + "description": "ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same 
time.\n\n+optional" } }, "description": "Represents the source of a volume to mount.\nOnly one of its members may be specified." @@ -3884,19 +4211,19 @@ "properties": { "volumePath": { "type": "string", - "title": "Path that identifies vSphere volume vmdk" + "title": "volumePath is the path that identifies vSphere volume vmdk" }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + "title": "fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" }, "storagePolicyName": { "type": "string", - "title": "Storage Policy Based Management (SPBM) profile name.\n+optional" + "title": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.\n+optional" }, "storagePolicyID": { "type": "string", - "title": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional" + "title": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional" } }, "description": "Represents a vSphere volume resource." @@ -3930,6 +4257,10 @@ "runAsUserName": { "type": "string", "title": "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "hostProcess": { + "type": "boolean", + "title": "HostProcess determines if a container should be run as a 'Host Process' container.\nThis field is alpha-level and will only be honored by components that enable the\nWindowsHostProcessContainers feature flag. Setting this field without the feature\nflag will result in errors when validating the Pod. All of a Pod's containers must\nhave the same effective HostProcess value (it is not allowed to have a mix of HostProcess\ncontainers and non-HostProcess containers). In addition, if HostProcess is true\nthen HostNetwork must also be set to true.\n+optional" } }, "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials." @@ -4012,7 +4343,7 @@ }, "time": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", - "title": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'\n+optional" + "title": "Time is the timestamp of when the ManagedFields entry was added. The\ntimestamp will also be updated if a field is added, the manager\nchanges any of the owned fields value or removes a field. The\ntimestamp does not update when a field is removed from the entry\nbecause another manager took it over.\n+optional" }, "fieldsType": { "type": "string", @@ -4021,6 +4352,10 @@ "fieldsV1": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1", "title": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.\n+optional" + }, + "subresource": { + "type": "string", + "description": "Subresource is the name of the subresource used to update that object, or\nempty string if the object was updated through the main resource. The\nvalue of this field is used to distinguish between managers, even if they\nshare the same name. 
For example, a status update will be distinct from a\nregular update using the same manager name.\nNote that the APIVersion field is not related to the Subresource field and\nit always corresponds to the version of the main resource." } }, "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to." @@ -4034,7 +4369,7 @@ }, "generateName": { "type": "string", - "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional" + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional" }, "namespace": { "type": "string", @@ -4042,7 +4377,7 @@ }, "selfLink": { "type": "string", - "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" + "title": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional" }, "uid": { "type": "string", @@ -4100,7 +4435,7 @@ }, "clusterName": { "type": "string", - "title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional" + "description": "Deprecated: ClusterName is a legacy field that was always cleared by\nthe system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect\naccidental use.\n\n+optional" }, "managedFields": { "type": "array", @@ -4137,10 +4472,10 @@ }, "blockOwnerDeletion": { "type": "boolean", - "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of 
the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" } }, - "description": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field." + "title": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field.\n+structType=atomic" }, "k8s.io.apimachinery.pkg.apis.meta.v1.Time": { "type": "object", @@ -4197,8 +4532,8 @@ "type": "string" }, "revision": { - "type": "integer", - "format": "int32" + "type": "string", + "format": "int64" }, "status": { "type": "string" @@ -4224,6 +4559,18 @@ "items": { "$ref": "#/definitions/rollout.JobInfo" } + }, + "nonJobInfo": { + "type": "array", + "items": { + "$ref": "#/definitions/rollout.NonJobInfo" + } + }, + "metrics": { + "type": "array", + "items": { + "$ref": "#/definitions/rollout.Metrics" + } } } }, @@ -4248,8 +4595,8 @@ "type": "string" }, "revision": { - "type": "integer", - "format": "int32" + "type": "string", + "format": "int64" }, "status": { "type": "string" @@ -4282,6 +4629,35 @@ }, "icon": { "type": "string" + }, + "metricName": { + "type": "string" + }, + "startedAt": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "rollout.Metrics": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "successCondition": { + "type": "string" + }, + "count": { + "type": "integer", + "format": "int32" + }, + "inconclusiveLimit": { + "type": "integer", + "format": "int32" + }, + "failureLimit": { + "type": "integer", + "format": "int32" } } }, @@ -4299,6 +4675,23 @@ } } }, + "rollout.NonJobInfo": { + "type": "object", + "properties": { + "value": { + "type": "string" + }, + "status": { + "type": "string" + }, + "metricName": { + "type": "string" + }, + "startedAt": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, "rollout.PodInfo": { "type": "object", "properties": { @@ -4347,8 +4740,8 @@ "type": "string" }, "revision": { - "type": "integer", - "format": "int32" + "type": "string", + "format": "int64" }, "stable": { "type": "boolean" @@ -4387,6 +4780,12 @@ "items": { "$ref": "#/definitions/rollout.PodInfo" } + }, + "ping": { + "type": "boolean" + }, + "pong": { + "type": "boolean" } } }, diff --git a/pkg/apis/api-rules/violation_exceptions.list b/pkg/apis/api-rules/violation_exceptions.list index 2040f4570d..4393612dcd 100644 --- a/pkg/apis/api-rules/violation_exceptions.list +++ b/pkg/apis/api-rules/violation_exceptions.list @@ -1,14 +1,21 @@ API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AmbassadorTrafficRouting,Mappings API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisRunSpec,Args +API rule violation: 
list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisRunSpec,DryRun +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisRunSpec,MeasurementRetention API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisRunSpec,Metrics API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisRunStatus,MetricResults API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,Args +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,DryRun +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,MeasurementRetention API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,Metrics +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AppMeshVirtualService,Routes API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,CanaryStrategy,Steps API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,CloudWatchMetric,MetricDataQueries API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,CloudWatchMetricStatMetric,Dimensions API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentAnalysisTemplateRef,Args API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentSpec,Analyses +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentSpec,DryRun +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentSpec,MeasurementRetention API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentSpec,Templates API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentStatus,AnalysisRuns API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ExperimentStatus,Conditions @@ -18,13 +25,19 @@ API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,IstioVirtualService,TLSRoutes API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,KayentaMetric,Scopes API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,MetricResult,Measurements +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,NginxTrafficRouting,AdditionalStableIngresses API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,Args +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,DryRun +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,MeasurementRetention API rule violation: 
list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,Templates API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,Analyses API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,Templates API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStepAnalysisTemplateRef,Args API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutStatus,Conditions API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutStatus,PauseConditions +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutTrafficRouting,ManagedRoutes +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,SetHeaderRoute,Match +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,SetMirrorRoute,Match API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,TLSRoute,SNIHosts API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,TrafficWeights,Additional API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,WebMetric,Headers diff --git a/pkg/apis/rollouts/v1alpha1/analysis_types.go b/pkg/apis/rollouts/v1alpha1/analysis_types.go index 65d54789c6..7d5931e458 100644 --- a/pkg/apis/rollouts/v1alpha1/analysis_types.go +++ b/pkg/apis/rollouts/v1alpha1/analysis_types.go @@ -13,7 +13,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:path=clusteranalysistemplates,shortName=cat +// +kubebuilder:resource:path=clusteranalysistemplates,shortName=cat,scope=Cluster // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time since resource was created" type ClusterAnalysisTemplate struct { metav1.TypeMeta `json:",inline"` @@ -61,6 +61,16 @@ type AnalysisTemplateSpec struct { // +patchStrategy=merge // +optional Args []Argument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=args"` + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + DryRun []DryRun `json:"dryRun,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,3,rep,name=dryRun"` + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,4,rep,name=measurementRetention"` } // DurationString is a string representing a duration (e.g. 30s, 5m, 1h) @@ -108,6 +118,21 @@ type Metric struct { Provider MetricProvider `json:"provider" protobuf:"bytes,10,opt,name=provider"` } +// DryRun defines the settings for running the analysis in Dry-Run mode. +type DryRun struct { + // Name of the metric which needs to be evaluated in the Dry-Run mode. Wildcard '*' is supported and denotes all + // the available metrics. 
+ MetricName string `json:"metricName" protobuf:"bytes,1,opt,name=metricName"` +} + +// MeasurementRetention defines the settings for retaining the number of measurements during the analysis. +type MeasurementRetention struct { + // MetricName is the name of the metric on which this retention policy should be applied. + MetricName string `json:"metricName" protobuf:"bytes,1,opt,name=metricName"` + // Limit is the maximum number of measurements to be retained for this given metric. + Limit int32 `json:"limit" protobuf:"varint,2,opt,name=limit"` +} + // EffectiveCount is the effective count based on whether or not count/interval is specified // If neither count or interval is specified, the effective count is 1 // If only interval is specified, metric runs indefinitely and there is no effective count (nil) @@ -145,6 +170,8 @@ type MetricProvider struct { CloudWatch *CloudWatchMetric `json:"cloudWatch,omitempty" protobuf:"bytes,8,opt,name=cloudWatch"` // Graphite specifies the Graphite metric to query Graphite *GraphiteMetric `json:"graphite,omitempty" protobuf:"bytes,9,opt,name=graphite"` + // Influxdb specifies the influxdb metric to query + Influxdb *InfluxdbMetric `json:"influxdb,omitempty" protobuf:"bytes,10,opt,name=influxdb"` } // AnalysisPhase is the overall phase of an AnalysisRun, MetricResult, or Measurement @@ -207,6 +234,14 @@ type GraphiteMetric struct { Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` } +// InfluxdbMetric defines the InfluxDB Flux query to perform canary analysis +type InfluxdbMetric struct { + // Profile is the name of the secret holding InfluxDB account configuration + Profile string `json:"profile,omitempty" protobuf:"bytes,1,opt,name=profile"` + // Query is a raw InfluxDB flux query to perform + Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` +} + // CloudWatchMetric defines the cloudwatch query to perform canary analysis type CloudWatchMetric struct { Interval DurationString `json:"interval,omitempty" protobuf:"bytes,1,opt,name=interval,casttype=DurationString"` @@ -275,6 +310,16 @@ type AnalysisRunSpec struct { Args []Argument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=args"` // Terminate is used to prematurely stop the run (e.g. 
rollout completed and analysis is no longer desired) Terminate bool `json:"terminate,omitempty" protobuf:"varint,3,opt,name=terminate"` + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + DryRun []DryRun `json:"dryRun,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,4,rep,name=dryRun"` + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,5,rep,name=measurementRetention"` } // Argument is an argument to an AnalysisRun @@ -316,6 +361,24 @@ type AnalysisRunStatus struct { MetricResults []MetricResult `json:"metricResults,omitempty" protobuf:"bytes,3,rep,name=metricResults"` // StartedAt indicates when the analysisRun first started StartedAt *metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,4,opt,name=startedAt"` + // RunSummary contains the final results from the metric executions + RunSummary RunSummary `json:"runSummary,omitempty" protobuf:"bytes,5,opt,name=runSummary"` + // DryRunSummary contains the final results from the metric executions in the dry-run mode + DryRunSummary *RunSummary `json:"dryRunSummary,omitempty" protobuf:"bytes,6,opt,name=dryRunSummary"` +} + +// RunSummary contains the final results from the metric executions +type RunSummary struct { + // This is equal to the sum of Successful, Failed, Inconclusive + Count int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` + // Successful is the number of times the metric was measured Successful + Successful int32 `json:"successful,omitempty" protobuf:"varint,2,opt,name=successful"` + // Failed is the number of times the metric was measured Failed + Failed int32 `json:"failed,omitempty" protobuf:"varint,3,opt,name=failed"` + // Inconclusive is the number of times the metric was measured Inconclusive + Inconclusive int32 `json:"inconclusive,omitempty" protobuf:"varint,4,opt,name=inconclusive"` + // Error is the number of times an error was encountered during measurement + Error int32 `json:"error,omitempty" protobuf:"varint,5,opt,name=error"` } // MetricResult contain a list of the most recent measurements for a single metric along with @@ -343,6 +406,12 @@ type MetricResult struct { // ConsecutiveError is the number of times an error was encountered during measurement in succession // Resets to zero when non-errors are encountered ConsecutiveError int32 `json:"consecutiveError,omitempty" protobuf:"varint,10,opt,name=consecutiveError"` + // DryRun indicates whether this metric is running in a dry-run mode or not + DryRun bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Metadata stores additional metadata about this metric. It is used by different providers to store + // the final state which gets used while taking measurements. For example, Prometheus uses this field + // to store the final resolved query after substituting the template arguments. 
+ Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,12,rep,name=metadata"` } // Measurement is a point in time result value of a single metric, and the time it was measured diff --git a/pkg/apis/rollouts/v1alpha1/experiment_types.go b/pkg/apis/rollouts/v1alpha1/experiment_types.go index 8be63abbb0..5fcc27cb7c 100644 --- a/pkg/apis/rollouts/v1alpha1/experiment_types.go +++ b/pkg/apis/rollouts/v1alpha1/experiment_types.go @@ -55,6 +55,16 @@ type ExperimentSpec struct { // more information // +optional ScaleDownDelaySeconds *int32 `json:"scaleDownDelaySeconds,omitempty" protobuf:"varint,6,opt,name=scaleDownDelaySeconds"` + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + DryRun []DryRun `json:"dryRun,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,7,rep,name=dryRun"` + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,8,rep,name=measurementRetention"` } type TemplateSpec struct { diff --git a/pkg/apis/rollouts/v1alpha1/generated.pb.go b/pkg/apis/rollouts/v1alpha1/generated.pb.go index 4af5129854..fc7ecb4fbb 100644 --- a/pkg/apis/rollouts/v1alpha1/generated.pb.go +++ b/pkg/apis/rollouts/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Kubernetes sample-controller Authors. +Copyright 2022 The Kubernetes sample-controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -48,10 +48,38 @@ var _ = math.Inf // proto package needs to be updated. 
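The new analysis fields introduced above (`DryRun`, `MeasurementRetention`, and the `Influxdb` metric provider) compose as ordinary `v1alpha1` struct fields; the same `DryRun`/`MeasurementRetention` lists also appear on `AnalysisRunSpec` and `ExperimentSpec`. A minimal, illustrative sketch, not taken from this PR (the metric name, secret profile, and Flux query are invented), of an `AnalysisTemplateSpec` that dry-runs every metric, retains the last 20 measurements of one metric, and queries InfluxDB:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
)

func main() {
	// Illustrative only: field names come from analysis_types.go in this diff;
	// the concrete metric name, secret profile, and Flux query are made up.
	spec := v1alpha1.AnalysisTemplateSpec{
		Metrics: []v1alpha1.Metric{{
			Name: "error-rate",
			Provider: v1alpha1.MetricProvider{
				// Influxdb is the provider added in this change.
				Influxdb: &v1alpha1.InfluxdbMetric{
					Profile: "influxdb-account", // secret holding the InfluxDB account configuration
					Query:   `from(bucket:"app") |> range(start:-5m)`,
				},
			},
		}},
		// Evaluate every metric in dry-run mode; '*' is the documented wildcard.
		DryRun: []v1alpha1.DryRun{{MetricName: "*"}},
		// Retain at most 20 measurements for the error-rate metric.
		MeasurementRetention: []v1alpha1.MeasurementRetention{{
			MetricName: "error-rate",
			Limit:      20,
		}},
	}
	fmt.Println(len(spec.Metrics), len(spec.DryRun), len(spec.MeasurementRetention))
}
```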
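`AnalysisRunStatus` now also carries aggregate tallies via `RunSummary` and, for dry-run metrics, `DryRunSummary`. A hypothetical helper (not part of the controller, shown only to illustrate how the counters relate) that treats a summary as clean when every counted measurement succeeded:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
)

// isClean is a hypothetical helper: it reports whether a RunSummary recorded
// only successful measurements (no failed, inconclusive, or errored ones).
func isClean(s v1alpha1.RunSummary) bool {
	return s.Count > 0 &&
		s.Failed == 0 &&
		s.Inconclusive == 0 &&
		s.Error == 0 &&
		s.Successful == s.Count
}

func main() {
	summary := v1alpha1.RunSummary{Count: 5, Successful: 5}
	fmt.Println(isClean(summary)) // true
}
```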
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ALBStatus) Reset() { *m = ALBStatus{} } +func (*ALBStatus) ProtoMessage() {} +func (*ALBStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{0} +} +func (m *ALBStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ALBStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ALBStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ALBStatus.Merge(m, src) +} +func (m *ALBStatus) XXX_Size() int { + return m.Size() +} +func (m *ALBStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ALBStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ALBStatus proto.InternalMessageInfo + func (m *ALBTrafficRouting) Reset() { *m = ALBTrafficRouting{} } func (*ALBTrafficRouting) ProtoMessage() {} func (*ALBTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{0} + return fileDescriptor_e0e705f843545fab, []int{1} } func (m *ALBTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +107,7 @@ var xxx_messageInfo_ALBTrafficRouting proto.InternalMessageInfo func (m *AmbassadorTrafficRouting) Reset() { *m = AmbassadorTrafficRouting{} } func (*AmbassadorTrafficRouting) ProtoMessage() {} func (*AmbassadorTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{1} + return fileDescriptor_e0e705f843545fab, []int{2} } func (m *AmbassadorTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +135,7 @@ var xxx_messageInfo_AmbassadorTrafficRouting proto.InternalMessageInfo func (m *AnalysisRun) Reset() { *m = AnalysisRun{} } func (*AnalysisRun) ProtoMessage() {} func (*AnalysisRun) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{2} + return fileDescriptor_e0e705f843545fab, []int{3} } func (m *AnalysisRun) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +163,7 @@ var xxx_messageInfo_AnalysisRun proto.InternalMessageInfo func (m *AnalysisRunArgument) Reset() { *m = AnalysisRunArgument{} } func (*AnalysisRunArgument) ProtoMessage() {} func (*AnalysisRunArgument) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{3} + return fileDescriptor_e0e705f843545fab, []int{4} } func (m *AnalysisRunArgument) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +191,7 @@ var xxx_messageInfo_AnalysisRunArgument proto.InternalMessageInfo func (m *AnalysisRunList) Reset() { *m = AnalysisRunList{} } func (*AnalysisRunList) ProtoMessage() {} func (*AnalysisRunList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{4} + return fileDescriptor_e0e705f843545fab, []int{5} } func (m *AnalysisRunList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +219,7 @@ var xxx_messageInfo_AnalysisRunList proto.InternalMessageInfo func (m *AnalysisRunSpec) Reset() { *m = AnalysisRunSpec{} } func (*AnalysisRunSpec) ProtoMessage() {} func (*AnalysisRunSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{5} + return fileDescriptor_e0e705f843545fab, []int{6} } func (m *AnalysisRunSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +247,7 @@ var xxx_messageInfo_AnalysisRunSpec proto.InternalMessageInfo func (m *AnalysisRunStatus) Reset() { *m = AnalysisRunStatus{} } func 
(*AnalysisRunStatus) ProtoMessage() {} func (*AnalysisRunStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{6} + return fileDescriptor_e0e705f843545fab, []int{7} } func (m *AnalysisRunStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +275,7 @@ var xxx_messageInfo_AnalysisRunStatus proto.InternalMessageInfo func (m *AnalysisRunStrategy) Reset() { *m = AnalysisRunStrategy{} } func (*AnalysisRunStrategy) ProtoMessage() {} func (*AnalysisRunStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{7} + return fileDescriptor_e0e705f843545fab, []int{8} } func (m *AnalysisRunStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +303,7 @@ var xxx_messageInfo_AnalysisRunStrategy proto.InternalMessageInfo func (m *AnalysisTemplate) Reset() { *m = AnalysisTemplate{} } func (*AnalysisTemplate) ProtoMessage() {} func (*AnalysisTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{8} + return fileDescriptor_e0e705f843545fab, []int{9} } func (m *AnalysisTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +331,7 @@ var xxx_messageInfo_AnalysisTemplate proto.InternalMessageInfo func (m *AnalysisTemplateList) Reset() { *m = AnalysisTemplateList{} } func (*AnalysisTemplateList) ProtoMessage() {} func (*AnalysisTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{9} + return fileDescriptor_e0e705f843545fab, []int{10} } func (m *AnalysisTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +359,7 @@ var xxx_messageInfo_AnalysisTemplateList proto.InternalMessageInfo func (m *AnalysisTemplateSpec) Reset() { *m = AnalysisTemplateSpec{} } func (*AnalysisTemplateSpec) ProtoMessage() {} func (*AnalysisTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{10} + return fileDescriptor_e0e705f843545fab, []int{11} } func (m *AnalysisTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +387,7 @@ var xxx_messageInfo_AnalysisTemplateSpec proto.InternalMessageInfo func (m *AntiAffinity) Reset() { *m = AntiAffinity{} } func (*AntiAffinity) ProtoMessage() {} func (*AntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{11} + return fileDescriptor_e0e705f843545fab, []int{12} } func (m *AntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,10 +412,122 @@ func (m *AntiAffinity) XXX_DiscardUnknown() { var xxx_messageInfo_AntiAffinity proto.InternalMessageInfo +func (m *AppMeshTrafficRouting) Reset() { *m = AppMeshTrafficRouting{} } +func (*AppMeshTrafficRouting) ProtoMessage() {} +func (*AppMeshTrafficRouting) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{13} +} +func (m *AppMeshTrafficRouting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppMeshTrafficRouting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppMeshTrafficRouting) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppMeshTrafficRouting.Merge(m, src) +} +func (m *AppMeshTrafficRouting) XXX_Size() int { + return m.Size() +} +func (m *AppMeshTrafficRouting) XXX_DiscardUnknown() { + xxx_messageInfo_AppMeshTrafficRouting.DiscardUnknown(m) +} + +var xxx_messageInfo_AppMeshTrafficRouting proto.InternalMessageInfo + +func (m 
*AppMeshVirtualNodeGroup) Reset() { *m = AppMeshVirtualNodeGroup{} } +func (*AppMeshVirtualNodeGroup) ProtoMessage() {} +func (*AppMeshVirtualNodeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{14} +} +func (m *AppMeshVirtualNodeGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppMeshVirtualNodeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppMeshVirtualNodeGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppMeshVirtualNodeGroup.Merge(m, src) +} +func (m *AppMeshVirtualNodeGroup) XXX_Size() int { + return m.Size() +} +func (m *AppMeshVirtualNodeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_AppMeshVirtualNodeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_AppMeshVirtualNodeGroup proto.InternalMessageInfo + +func (m *AppMeshVirtualNodeReference) Reset() { *m = AppMeshVirtualNodeReference{} } +func (*AppMeshVirtualNodeReference) ProtoMessage() {} +func (*AppMeshVirtualNodeReference) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{15} +} +func (m *AppMeshVirtualNodeReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppMeshVirtualNodeReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppMeshVirtualNodeReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppMeshVirtualNodeReference.Merge(m, src) +} +func (m *AppMeshVirtualNodeReference) XXX_Size() int { + return m.Size() +} +func (m *AppMeshVirtualNodeReference) XXX_DiscardUnknown() { + xxx_messageInfo_AppMeshVirtualNodeReference.DiscardUnknown(m) +} + +var xxx_messageInfo_AppMeshVirtualNodeReference proto.InternalMessageInfo + +func (m *AppMeshVirtualService) Reset() { *m = AppMeshVirtualService{} } +func (*AppMeshVirtualService) ProtoMessage() {} +func (*AppMeshVirtualService) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{16} +} +func (m *AppMeshVirtualService) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppMeshVirtualService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppMeshVirtualService) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppMeshVirtualService.Merge(m, src) +} +func (m *AppMeshVirtualService) XXX_Size() int { + return m.Size() +} +func (m *AppMeshVirtualService) XXX_DiscardUnknown() { + xxx_messageInfo_AppMeshVirtualService.DiscardUnknown(m) +} + +var xxx_messageInfo_AppMeshVirtualService proto.InternalMessageInfo + func (m *Argument) Reset() { *m = Argument{} } func (*Argument) ProtoMessage() {} func (*Argument) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{12} + return fileDescriptor_e0e705f843545fab, []int{17} } func (m *Argument) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +555,7 @@ var xxx_messageInfo_Argument proto.InternalMessageInfo func (m *ArgumentValueFrom) Reset() { *m = ArgumentValueFrom{} } func (*ArgumentValueFrom) ProtoMessage() {} func (*ArgumentValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{13} + return fileDescriptor_e0e705f843545fab, []int{18} } func (m *ArgumentValueFrom) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,10 +580,38 @@ func (m *ArgumentValueFrom) XXX_DiscardUnknown() { var xxx_messageInfo_ArgumentValueFrom proto.InternalMessageInfo +func (m *AwsResourceRef) Reset() { *m = AwsResourceRef{} } +func (*AwsResourceRef) ProtoMessage() {} +func (*AwsResourceRef) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{19} +} +func (m *AwsResourceRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AwsResourceRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AwsResourceRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_AwsResourceRef.Merge(m, src) +} +func (m *AwsResourceRef) XXX_Size() int { + return m.Size() +} +func (m *AwsResourceRef) XXX_DiscardUnknown() { + xxx_messageInfo_AwsResourceRef.DiscardUnknown(m) +} + +var xxx_messageInfo_AwsResourceRef proto.InternalMessageInfo + func (m *BlueGreenStatus) Reset() { *m = BlueGreenStatus{} } func (*BlueGreenStatus) ProtoMessage() {} func (*BlueGreenStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{14} + return fileDescriptor_e0e705f843545fab, []int{20} } func (m *BlueGreenStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +639,7 @@ var xxx_messageInfo_BlueGreenStatus proto.InternalMessageInfo func (m *BlueGreenStrategy) Reset() { *m = BlueGreenStrategy{} } func (*BlueGreenStrategy) ProtoMessage() {} func (*BlueGreenStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{15} + return fileDescriptor_e0e705f843545fab, []int{21} } func (m *BlueGreenStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +667,7 @@ var xxx_messageInfo_BlueGreenStrategy proto.InternalMessageInfo func (m *CanaryStatus) Reset() { *m = CanaryStatus{} } func (*CanaryStatus) ProtoMessage() {} func (*CanaryStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{16} + return fileDescriptor_e0e705f843545fab, []int{22} } func (m *CanaryStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +695,7 @@ var xxx_messageInfo_CanaryStatus proto.InternalMessageInfo func (m *CanaryStep) Reset() { *m = CanaryStep{} } func (*CanaryStep) ProtoMessage() {} func (*CanaryStep) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{17} + return fileDescriptor_e0e705f843545fab, []int{23} } func (m *CanaryStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +723,7 @@ var xxx_messageInfo_CanaryStep proto.InternalMessageInfo func (m *CanaryStrategy) Reset() { *m = CanaryStrategy{} } func (*CanaryStrategy) ProtoMessage() {} func (*CanaryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{18} + return fileDescriptor_e0e705f843545fab, []int{24} } func (m *CanaryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +751,7 @@ var xxx_messageInfo_CanaryStrategy proto.InternalMessageInfo func (m *CloudWatchMetric) Reset() { *m = CloudWatchMetric{} } func (*CloudWatchMetric) ProtoMessage() {} func (*CloudWatchMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{19} + return fileDescriptor_e0e705f843545fab, []int{25} } func (m *CloudWatchMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +779,7 @@ var xxx_messageInfo_CloudWatchMetric proto.InternalMessageInfo 
func (m *CloudWatchMetricDataQuery) Reset() { *m = CloudWatchMetricDataQuery{} } func (*CloudWatchMetricDataQuery) ProtoMessage() {} func (*CloudWatchMetricDataQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{20} + return fileDescriptor_e0e705f843545fab, []int{26} } func (m *CloudWatchMetricDataQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +807,7 @@ var xxx_messageInfo_CloudWatchMetricDataQuery proto.InternalMessageInfo func (m *CloudWatchMetricStat) Reset() { *m = CloudWatchMetricStat{} } func (*CloudWatchMetricStat) ProtoMessage() {} func (*CloudWatchMetricStat) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{21} + return fileDescriptor_e0e705f843545fab, []int{27} } func (m *CloudWatchMetricStat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +835,7 @@ var xxx_messageInfo_CloudWatchMetricStat proto.InternalMessageInfo func (m *CloudWatchMetricStatMetric) Reset() { *m = CloudWatchMetricStatMetric{} } func (*CloudWatchMetricStatMetric) ProtoMessage() {} func (*CloudWatchMetricStatMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{22} + return fileDescriptor_e0e705f843545fab, []int{28} } func (m *CloudWatchMetricStatMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +863,7 @@ var xxx_messageInfo_CloudWatchMetricStatMetric proto.InternalMessageInfo func (m *CloudWatchMetricStatMetricDimension) Reset() { *m = CloudWatchMetricStatMetricDimension{} } func (*CloudWatchMetricStatMetricDimension) ProtoMessage() {} func (*CloudWatchMetricStatMetricDimension) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{23} + return fileDescriptor_e0e705f843545fab, []int{29} } func (m *CloudWatchMetricStatMetricDimension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +891,7 @@ var xxx_messageInfo_CloudWatchMetricStatMetricDimension proto.InternalMessageInf func (m *ClusterAnalysisTemplate) Reset() { *m = ClusterAnalysisTemplate{} } func (*ClusterAnalysisTemplate) ProtoMessage() {} func (*ClusterAnalysisTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{24} + return fileDescriptor_e0e705f843545fab, []int{30} } func (m *ClusterAnalysisTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +919,7 @@ var xxx_messageInfo_ClusterAnalysisTemplate proto.InternalMessageInfo func (m *ClusterAnalysisTemplateList) Reset() { *m = ClusterAnalysisTemplateList{} } func (*ClusterAnalysisTemplateList) ProtoMessage() {} func (*ClusterAnalysisTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{25} + return fileDescriptor_e0e705f843545fab, []int{31} } func (m *ClusterAnalysisTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +947,7 @@ var xxx_messageInfo_ClusterAnalysisTemplateList proto.InternalMessageInfo func (m *DatadogMetric) Reset() { *m = DatadogMetric{} } func (*DatadogMetric) ProtoMessage() {} func (*DatadogMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{26} + return fileDescriptor_e0e705f843545fab, []int{32} } func (m *DatadogMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -804,10 +972,38 @@ func (m *DatadogMetric) XXX_DiscardUnknown() { var xxx_messageInfo_DatadogMetric proto.InternalMessageInfo +func (m *DryRun) Reset() { *m = DryRun{} } +func (*DryRun) ProtoMessage() {} +func (*DryRun) Descriptor() ([]byte, []int) { + return 
fileDescriptor_e0e705f843545fab, []int{33} +} +func (m *DryRun) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DryRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DryRun) XXX_Merge(src proto.Message) { + xxx_messageInfo_DryRun.Merge(m, src) +} +func (m *DryRun) XXX_Size() int { + return m.Size() +} +func (m *DryRun) XXX_DiscardUnknown() { + xxx_messageInfo_DryRun.DiscardUnknown(m) +} + +var xxx_messageInfo_DryRun proto.InternalMessageInfo + func (m *Experiment) Reset() { *m = Experiment{} } func (*Experiment) ProtoMessage() {} func (*Experiment) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{27} + return fileDescriptor_e0e705f843545fab, []int{34} } func (m *Experiment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,7 +1031,7 @@ var xxx_messageInfo_Experiment proto.InternalMessageInfo func (m *ExperimentAnalysisRunStatus) Reset() { *m = ExperimentAnalysisRunStatus{} } func (*ExperimentAnalysisRunStatus) ProtoMessage() {} func (*ExperimentAnalysisRunStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{28} + return fileDescriptor_e0e705f843545fab, []int{35} } func (m *ExperimentAnalysisRunStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,7 +1059,7 @@ var xxx_messageInfo_ExperimentAnalysisRunStatus proto.InternalMessageInfo func (m *ExperimentAnalysisTemplateRef) Reset() { *m = ExperimentAnalysisTemplateRef{} } func (*ExperimentAnalysisTemplateRef) ProtoMessage() {} func (*ExperimentAnalysisTemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{29} + return fileDescriptor_e0e705f843545fab, []int{36} } func (m *ExperimentAnalysisTemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -891,7 +1087,7 @@ var xxx_messageInfo_ExperimentAnalysisTemplateRef proto.InternalMessageInfo func (m *ExperimentCondition) Reset() { *m = ExperimentCondition{} } func (*ExperimentCondition) ProtoMessage() {} func (*ExperimentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{30} + return fileDescriptor_e0e705f843545fab, []int{37} } func (m *ExperimentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -919,7 +1115,7 @@ var xxx_messageInfo_ExperimentCondition proto.InternalMessageInfo func (m *ExperimentList) Reset() { *m = ExperimentList{} } func (*ExperimentList) ProtoMessage() {} func (*ExperimentList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{31} + return fileDescriptor_e0e705f843545fab, []int{38} } func (m *ExperimentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,7 +1143,7 @@ var xxx_messageInfo_ExperimentList proto.InternalMessageInfo func (m *ExperimentSpec) Reset() { *m = ExperimentSpec{} } func (*ExperimentSpec) ProtoMessage() {} func (*ExperimentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{32} + return fileDescriptor_e0e705f843545fab, []int{39} } func (m *ExperimentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +1171,7 @@ var xxx_messageInfo_ExperimentSpec proto.InternalMessageInfo func (m *ExperimentStatus) Reset() { *m = ExperimentStatus{} } func (*ExperimentStatus) ProtoMessage() {} func (*ExperimentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{33} + return 
fileDescriptor_e0e705f843545fab, []int{40} } func (m *ExperimentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1199,7 @@ var xxx_messageInfo_ExperimentStatus proto.InternalMessageInfo func (m *FieldRef) Reset() { *m = FieldRef{} } func (*FieldRef) ProtoMessage() {} func (*FieldRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{34} + return fileDescriptor_e0e705f843545fab, []int{41} } func (m *FieldRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1031,7 +1227,7 @@ var xxx_messageInfo_FieldRef proto.InternalMessageInfo func (m *GraphiteMetric) Reset() { *m = GraphiteMetric{} } func (*GraphiteMetric) ProtoMessage() {} func (*GraphiteMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{35} + return fileDescriptor_e0e705f843545fab, []int{42} } func (m *GraphiteMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1056,10 +1252,66 @@ func (m *GraphiteMetric) XXX_DiscardUnknown() { var xxx_messageInfo_GraphiteMetric proto.InternalMessageInfo +func (m *HeaderRoutingMatch) Reset() { *m = HeaderRoutingMatch{} } +func (*HeaderRoutingMatch) ProtoMessage() {} +func (*HeaderRoutingMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{43} +} +func (m *HeaderRoutingMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeaderRoutingMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HeaderRoutingMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderRoutingMatch.Merge(m, src) +} +func (m *HeaderRoutingMatch) XXX_Size() int { + return m.Size() +} +func (m *HeaderRoutingMatch) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderRoutingMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderRoutingMatch proto.InternalMessageInfo + +func (m *InfluxdbMetric) Reset() { *m = InfluxdbMetric{} } +func (*InfluxdbMetric) ProtoMessage() {} +func (*InfluxdbMetric) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{44} +} +func (m *InfluxdbMetric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfluxdbMetric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *InfluxdbMetric) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfluxdbMetric.Merge(m, src) +} +func (m *InfluxdbMetric) XXX_Size() int { + return m.Size() +} +func (m *InfluxdbMetric) XXX_DiscardUnknown() { + xxx_messageInfo_InfluxdbMetric.DiscardUnknown(m) +} + +var xxx_messageInfo_InfluxdbMetric proto.InternalMessageInfo + func (m *IstioDestinationRule) Reset() { *m = IstioDestinationRule{} } func (*IstioDestinationRule) ProtoMessage() {} func (*IstioDestinationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{36} + return fileDescriptor_e0e705f843545fab, []int{45} } func (m *IstioDestinationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1087,7 +1339,7 @@ var xxx_messageInfo_IstioDestinationRule proto.InternalMessageInfo func (m *IstioTrafficRouting) Reset() { *m = IstioTrafficRouting{} } func (*IstioTrafficRouting) ProtoMessage() {} func (*IstioTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{37} + return fileDescriptor_e0e705f843545fab, []int{46} } func (m 
*IstioTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1115,7 +1367,7 @@ var xxx_messageInfo_IstioTrafficRouting proto.InternalMessageInfo func (m *IstioVirtualService) Reset() { *m = IstioVirtualService{} } func (*IstioVirtualService) ProtoMessage() {} func (*IstioVirtualService) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{38} + return fileDescriptor_e0e705f843545fab, []int{47} } func (m *IstioVirtualService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1143,7 +1395,7 @@ var xxx_messageInfo_IstioVirtualService proto.InternalMessageInfo func (m *JobMetric) Reset() { *m = JobMetric{} } func (*JobMetric) ProtoMessage() {} func (*JobMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{39} + return fileDescriptor_e0e705f843545fab, []int{48} } func (m *JobMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1171,7 +1423,7 @@ var xxx_messageInfo_JobMetric proto.InternalMessageInfo func (m *KayentaMetric) Reset() { *m = KayentaMetric{} } func (*KayentaMetric) ProtoMessage() {} func (*KayentaMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{40} + return fileDescriptor_e0e705f843545fab, []int{49} } func (m *KayentaMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1199,7 +1451,7 @@ var xxx_messageInfo_KayentaMetric proto.InternalMessageInfo func (m *KayentaScope) Reset() { *m = KayentaScope{} } func (*KayentaScope) ProtoMessage() {} func (*KayentaScope) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{41} + return fileDescriptor_e0e705f843545fab, []int{50} } func (m *KayentaScope) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1227,7 +1479,7 @@ var xxx_messageInfo_KayentaScope proto.InternalMessageInfo func (m *KayentaThreshold) Reset() { *m = KayentaThreshold{} } func (*KayentaThreshold) ProtoMessage() {} func (*KayentaThreshold) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{42} + return fileDescriptor_e0e705f843545fab, []int{51} } func (m *KayentaThreshold) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1252,15 +1504,15 @@ func (m *KayentaThreshold) XXX_DiscardUnknown() { var xxx_messageInfo_KayentaThreshold proto.InternalMessageInfo -func (m *Measurement) Reset() { *m = Measurement{} } -func (*Measurement) ProtoMessage() {} -func (*Measurement) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{43} +func (m *MangedRoutes) Reset() { *m = MangedRoutes{} } +func (*MangedRoutes) ProtoMessage() {} +func (*MangedRoutes) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{52} } -func (m *Measurement) XXX_Unmarshal(b []byte) error { +func (m *MangedRoutes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Measurement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *MangedRoutes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1268,22 +1520,78 @@ func (m *Measurement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *Measurement) XXX_Merge(src proto.Message) { - xxx_messageInfo_Measurement.Merge(m, src) +func (m *MangedRoutes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MangedRoutes.Merge(m, src) } -func (m *Measurement) XXX_Size() int { +func (m *MangedRoutes) XXX_Size() int { return m.Size() } -func (m *Measurement) 
XXX_DiscardUnknown() { - xxx_messageInfo_Measurement.DiscardUnknown(m) +func (m *MangedRoutes) XXX_DiscardUnknown() { + xxx_messageInfo_MangedRoutes.DiscardUnknown(m) } -var xxx_messageInfo_Measurement proto.InternalMessageInfo +var xxx_messageInfo_MangedRoutes proto.InternalMessageInfo -func (m *Metric) Reset() { *m = Metric{} } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{44} +func (m *Measurement) Reset() { *m = Measurement{} } +func (*Measurement) ProtoMessage() {} +func (*Measurement) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{53} +} +func (m *Measurement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Measurement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Measurement) XXX_Merge(src proto.Message) { + xxx_messageInfo_Measurement.Merge(m, src) +} +func (m *Measurement) XXX_Size() int { + return m.Size() +} +func (m *Measurement) XXX_DiscardUnknown() { + xxx_messageInfo_Measurement.DiscardUnknown(m) +} + +var xxx_messageInfo_Measurement proto.InternalMessageInfo + +func (m *MeasurementRetention) Reset() { *m = MeasurementRetention{} } +func (*MeasurementRetention) ProtoMessage() {} +func (*MeasurementRetention) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{54} +} +func (m *MeasurementRetention) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MeasurementRetention) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MeasurementRetention) XXX_Merge(src proto.Message) { + xxx_messageInfo_MeasurementRetention.Merge(m, src) +} +func (m *MeasurementRetention) XXX_Size() int { + return m.Size() +} +func (m *MeasurementRetention) XXX_DiscardUnknown() { + xxx_messageInfo_MeasurementRetention.DiscardUnknown(m) +} + +var xxx_messageInfo_MeasurementRetention proto.InternalMessageInfo + +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{55} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1311,7 +1619,7 @@ var xxx_messageInfo_Metric proto.InternalMessageInfo func (m *MetricProvider) Reset() { *m = MetricProvider{} } func (*MetricProvider) ProtoMessage() {} func (*MetricProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{45} + return fileDescriptor_e0e705f843545fab, []int{56} } func (m *MetricProvider) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1339,7 +1647,7 @@ var xxx_messageInfo_MetricProvider proto.InternalMessageInfo func (m *MetricResult) Reset() { *m = MetricResult{} } func (*MetricResult) ProtoMessage() {} func (*MetricResult) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{46} + return fileDescriptor_e0e705f843545fab, []int{57} } func (m *MetricResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1367,7 +1675,7 @@ var xxx_messageInfo_MetricResult proto.InternalMessageInfo func (m *NewRelicMetric) Reset() { *m = NewRelicMetric{} } func (*NewRelicMetric) ProtoMessage() {} func (*NewRelicMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{47} + return 
fileDescriptor_e0e705f843545fab, []int{58} } func (m *NewRelicMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1395,7 +1703,7 @@ var xxx_messageInfo_NewRelicMetric proto.InternalMessageInfo func (m *NginxTrafficRouting) Reset() { *m = NginxTrafficRouting{} } func (*NginxTrafficRouting) ProtoMessage() {} func (*NginxTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{48} + return fileDescriptor_e0e705f843545fab, []int{59} } func (m *NginxTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1423,7 +1731,7 @@ var xxx_messageInfo_NginxTrafficRouting proto.InternalMessageInfo func (m *ObjectRef) Reset() { *m = ObjectRef{} } func (*ObjectRef) ProtoMessage() {} func (*ObjectRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{49} + return fileDescriptor_e0e705f843545fab, []int{60} } func (m *ObjectRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1451,7 +1759,7 @@ var xxx_messageInfo_ObjectRef proto.InternalMessageInfo func (m *PauseCondition) Reset() { *m = PauseCondition{} } func (*PauseCondition) ProtoMessage() {} func (*PauseCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{50} + return fileDescriptor_e0e705f843545fab, []int{61} } func (m *PauseCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1476,10 +1784,38 @@ func (m *PauseCondition) XXX_DiscardUnknown() { var xxx_messageInfo_PauseCondition proto.InternalMessageInfo +func (m *PingPongSpec) Reset() { *m = PingPongSpec{} } +func (*PingPongSpec) ProtoMessage() {} +func (*PingPongSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{62} +} +func (m *PingPongSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingPongSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PingPongSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingPongSpec.Merge(m, src) +} +func (m *PingPongSpec) XXX_Size() int { + return m.Size() +} +func (m *PingPongSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PingPongSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PingPongSpec proto.InternalMessageInfo + func (m *PodTemplateMetadata) Reset() { *m = PodTemplateMetadata{} } func (*PodTemplateMetadata) ProtoMessage() {} func (*PodTemplateMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{51} + return fileDescriptor_e0e705f843545fab, []int{63} } func (m *PodTemplateMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1509,7 +1845,7 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Reset() { } func (*PreferredDuringSchedulingIgnoredDuringExecution) ProtoMessage() {} func (*PreferredDuringSchedulingIgnoredDuringExecution) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{52} + return fileDescriptor_e0e705f843545fab, []int{64} } func (m *PreferredDuringSchedulingIgnoredDuringExecution) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1537,7 +1873,7 @@ var xxx_messageInfo_PreferredDuringSchedulingIgnoredDuringExecution proto.Intern func (m *PrometheusMetric) Reset() { *m = PrometheusMetric{} } func (*PrometheusMetric) ProtoMessage() {} func (*PrometheusMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{53} + return fileDescriptor_e0e705f843545fab, []int{65} } func (m 
*PrometheusMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1567,7 +1903,7 @@ func (m *RequiredDuringSchedulingIgnoredDuringExecution) Reset() { } func (*RequiredDuringSchedulingIgnoredDuringExecution) ProtoMessage() {} func (*RequiredDuringSchedulingIgnoredDuringExecution) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{54} + return fileDescriptor_e0e705f843545fab, []int{66} } func (m *RequiredDuringSchedulingIgnoredDuringExecution) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1595,7 +1931,7 @@ var xxx_messageInfo_RequiredDuringSchedulingIgnoredDuringExecution proto.Interna func (m *Rollout) Reset() { *m = Rollout{} } func (*Rollout) ProtoMessage() {} func (*Rollout) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{55} + return fileDescriptor_e0e705f843545fab, []int{67} } func (m *Rollout) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1623,7 +1959,7 @@ var xxx_messageInfo_Rollout proto.InternalMessageInfo func (m *RolloutAnalysis) Reset() { *m = RolloutAnalysis{} } func (*RolloutAnalysis) ProtoMessage() {} func (*RolloutAnalysis) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{56} + return fileDescriptor_e0e705f843545fab, []int{68} } func (m *RolloutAnalysis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1651,7 +1987,7 @@ var xxx_messageInfo_RolloutAnalysis proto.InternalMessageInfo func (m *RolloutAnalysisBackground) Reset() { *m = RolloutAnalysisBackground{} } func (*RolloutAnalysisBackground) ProtoMessage() {} func (*RolloutAnalysisBackground) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{57} + return fileDescriptor_e0e705f843545fab, []int{69} } func (m *RolloutAnalysisBackground) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1679,7 +2015,7 @@ var xxx_messageInfo_RolloutAnalysisBackground proto.InternalMessageInfo func (m *RolloutAnalysisRunStatus) Reset() { *m = RolloutAnalysisRunStatus{} } func (*RolloutAnalysisRunStatus) ProtoMessage() {} func (*RolloutAnalysisRunStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{58} + return fileDescriptor_e0e705f843545fab, []int{70} } func (m *RolloutAnalysisRunStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1707,7 +2043,7 @@ var xxx_messageInfo_RolloutAnalysisRunStatus proto.InternalMessageInfo func (m *RolloutAnalysisTemplate) Reset() { *m = RolloutAnalysisTemplate{} } func (*RolloutAnalysisTemplate) ProtoMessage() {} func (*RolloutAnalysisTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{59} + return fileDescriptor_e0e705f843545fab, []int{71} } func (m *RolloutAnalysisTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1735,7 +2071,7 @@ var xxx_messageInfo_RolloutAnalysisTemplate proto.InternalMessageInfo func (m *RolloutCondition) Reset() { *m = RolloutCondition{} } func (*RolloutCondition) ProtoMessage() {} func (*RolloutCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{60} + return fileDescriptor_e0e705f843545fab, []int{72} } func (m *RolloutCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1763,7 +2099,7 @@ var xxx_messageInfo_RolloutCondition proto.InternalMessageInfo func (m *RolloutExperimentStep) Reset() { *m = RolloutExperimentStep{} } func (*RolloutExperimentStep) ProtoMessage() {} func (*RolloutExperimentStep) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, 
[]int{61} + return fileDescriptor_e0e705f843545fab, []int{73} } func (m *RolloutExperimentStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1793,7 +2129,7 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Reset() { } func (*RolloutExperimentStepAnalysisTemplateRef) ProtoMessage() {} func (*RolloutExperimentStepAnalysisTemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{62} + return fileDescriptor_e0e705f843545fab, []int{74} } func (m *RolloutExperimentStepAnalysisTemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1821,7 +2157,7 @@ var xxx_messageInfo_RolloutExperimentStepAnalysisTemplateRef proto.InternalMessa func (m *RolloutExperimentTemplate) Reset() { *m = RolloutExperimentTemplate{} } func (*RolloutExperimentTemplate) ProtoMessage() {} func (*RolloutExperimentTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{63} + return fileDescriptor_e0e705f843545fab, []int{75} } func (m *RolloutExperimentTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1849,7 +2185,7 @@ var xxx_messageInfo_RolloutExperimentTemplate proto.InternalMessageInfo func (m *RolloutList) Reset() { *m = RolloutList{} } func (*RolloutList) ProtoMessage() {} func (*RolloutList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{64} + return fileDescriptor_e0e705f843545fab, []int{76} } func (m *RolloutList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1877,7 +2213,7 @@ var xxx_messageInfo_RolloutList proto.InternalMessageInfo func (m *RolloutPause) Reset() { *m = RolloutPause{} } func (*RolloutPause) ProtoMessage() {} func (*RolloutPause) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{65} + return fileDescriptor_e0e705f843545fab, []int{77} } func (m *RolloutPause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1905,7 +2241,7 @@ var xxx_messageInfo_RolloutPause proto.InternalMessageInfo func (m *RolloutSpec) Reset() { *m = RolloutSpec{} } func (*RolloutSpec) ProtoMessage() {} func (*RolloutSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{66} + return fileDescriptor_e0e705f843545fab, []int{78} } func (m *RolloutSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1933,7 +2269,7 @@ var xxx_messageInfo_RolloutSpec proto.InternalMessageInfo func (m *RolloutStatus) Reset() { *m = RolloutStatus{} } func (*RolloutStatus) ProtoMessage() {} func (*RolloutStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{67} + return fileDescriptor_e0e705f843545fab, []int{79} } func (m *RolloutStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1961,7 +2297,7 @@ var xxx_messageInfo_RolloutStatus proto.InternalMessageInfo func (m *RolloutStrategy) Reset() { *m = RolloutStrategy{} } func (*RolloutStrategy) ProtoMessage() {} func (*RolloutStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{68} + return fileDescriptor_e0e705f843545fab, []int{80} } func (m *RolloutStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1989,7 +2325,7 @@ var xxx_messageInfo_RolloutStrategy proto.InternalMessageInfo func (m *RolloutTrafficRouting) Reset() { *m = RolloutTrafficRouting{} } func (*RolloutTrafficRouting) ProtoMessage() {} func (*RolloutTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{69} + return fileDescriptor_e0e705f843545fab, []int{81} } func (m 
*RolloutTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2014,10 +2350,66 @@ func (m *RolloutTrafficRouting) XXX_DiscardUnknown() { var xxx_messageInfo_RolloutTrafficRouting proto.InternalMessageInfo +func (m *RouteMatch) Reset() { *m = RouteMatch{} } +func (*RouteMatch) ProtoMessage() {} +func (*RouteMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{82} +} +func (m *RouteMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteMatch.Merge(m, src) +} +func (m *RouteMatch) XXX_Size() int { + return m.Size() +} +func (m *RouteMatch) XXX_DiscardUnknown() { + xxx_messageInfo_RouteMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteMatch proto.InternalMessageInfo + +func (m *RunSummary) Reset() { *m = RunSummary{} } +func (*RunSummary) ProtoMessage() {} +func (*RunSummary) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{83} +} +func (m *RunSummary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunSummary) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunSummary.Merge(m, src) +} +func (m *RunSummary) XXX_Size() int { + return m.Size() +} +func (m *RunSummary) XXX_DiscardUnknown() { + xxx_messageInfo_RunSummary.DiscardUnknown(m) +} + +var xxx_messageInfo_RunSummary proto.InternalMessageInfo + func (m *SMITrafficRouting) Reset() { *m = SMITrafficRouting{} } func (*SMITrafficRouting) ProtoMessage() {} func (*SMITrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{70} + return fileDescriptor_e0e705f843545fab, []int{84} } func (m *SMITrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2045,7 +2437,7 @@ var xxx_messageInfo_SMITrafficRouting proto.InternalMessageInfo func (m *ScopeDetail) Reset() { *m = ScopeDetail{} } func (*ScopeDetail) ProtoMessage() {} func (*ScopeDetail) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{71} + return fileDescriptor_e0e705f843545fab, []int{85} } func (m *ScopeDetail) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2073,7 +2465,7 @@ var xxx_messageInfo_ScopeDetail proto.InternalMessageInfo func (m *SecretKeyRef) Reset() { *m = SecretKeyRef{} } func (*SecretKeyRef) ProtoMessage() {} func (*SecretKeyRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{72} + return fileDescriptor_e0e705f843545fab, []int{86} } func (m *SecretKeyRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2101,7 +2493,7 @@ var xxx_messageInfo_SecretKeyRef proto.InternalMessageInfo func (m *SetCanaryScale) Reset() { *m = SetCanaryScale{} } func (*SetCanaryScale) ProtoMessage() {} func (*SetCanaryScale) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{73} + return fileDescriptor_e0e705f843545fab, []int{87} } func (m *SetCanaryScale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2126,10 +2518,122 @@ func (m *SetCanaryScale) XXX_DiscardUnknown() { var xxx_messageInfo_SetCanaryScale proto.InternalMessageInfo +func (m 
*SetHeaderRoute) Reset() { *m = SetHeaderRoute{} } +func (*SetHeaderRoute) ProtoMessage() {} +func (*SetHeaderRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{88} +} +func (m *SetHeaderRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SetHeaderRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SetHeaderRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetHeaderRoute.Merge(m, src) +} +func (m *SetHeaderRoute) XXX_Size() int { + return m.Size() +} +func (m *SetHeaderRoute) XXX_DiscardUnknown() { + xxx_messageInfo_SetHeaderRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_SetHeaderRoute proto.InternalMessageInfo + +func (m *SetMirrorRoute) Reset() { *m = SetMirrorRoute{} } +func (*SetMirrorRoute) ProtoMessage() {} +func (*SetMirrorRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{89} +} +func (m *SetMirrorRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SetMirrorRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SetMirrorRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMirrorRoute.Merge(m, src) +} +func (m *SetMirrorRoute) XXX_Size() int { + return m.Size() +} +func (m *SetMirrorRoute) XXX_DiscardUnknown() { + xxx_messageInfo_SetMirrorRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMirrorRoute proto.InternalMessageInfo + +func (m *StickinessConfig) Reset() { *m = StickinessConfig{} } +func (*StickinessConfig) ProtoMessage() {} +func (*StickinessConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{90} +} +func (m *StickinessConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StickinessConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StickinessConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_StickinessConfig.Merge(m, src) +} +func (m *StickinessConfig) XXX_Size() int { + return m.Size() +} +func (m *StickinessConfig) XXX_DiscardUnknown() { + xxx_messageInfo_StickinessConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_StickinessConfig proto.InternalMessageInfo + +func (m *StringMatch) Reset() { *m = StringMatch{} } +func (*StringMatch) ProtoMessage() {} +func (*StringMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{91} +} +func (m *StringMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StringMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringMatch.Merge(m, src) +} +func (m *StringMatch) XXX_Size() int { + return m.Size() +} +func (m *StringMatch) XXX_DiscardUnknown() { + xxx_messageInfo_StringMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_StringMatch proto.InternalMessageInfo + func (m *TLSRoute) Reset() { *m = TLSRoute{} } func (*TLSRoute) ProtoMessage() {} func (*TLSRoute) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{74} + return 
fileDescriptor_e0e705f843545fab, []int{92} } func (m *TLSRoute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2157,7 +2661,7 @@ var xxx_messageInfo_TLSRoute proto.InternalMessageInfo func (m *TemplateService) Reset() { *m = TemplateService{} } func (*TemplateService) ProtoMessage() {} func (*TemplateService) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{75} + return fileDescriptor_e0e705f843545fab, []int{93} } func (m *TemplateService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2185,7 +2689,7 @@ var xxx_messageInfo_TemplateService proto.InternalMessageInfo func (m *TemplateSpec) Reset() { *m = TemplateSpec{} } func (*TemplateSpec) ProtoMessage() {} func (*TemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{76} + return fileDescriptor_e0e705f843545fab, []int{94} } func (m *TemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2213,7 +2717,7 @@ var xxx_messageInfo_TemplateSpec proto.InternalMessageInfo func (m *TemplateStatus) Reset() { *m = TemplateStatus{} } func (*TemplateStatus) ProtoMessage() {} func (*TemplateStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{77} + return fileDescriptor_e0e705f843545fab, []int{95} } func (m *TemplateStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2238,10 +2742,38 @@ func (m *TemplateStatus) XXX_DiscardUnknown() { var xxx_messageInfo_TemplateStatus proto.InternalMessageInfo +func (m *TraefikTrafficRouting) Reset() { *m = TraefikTrafficRouting{} } +func (*TraefikTrafficRouting) ProtoMessage() {} +func (*TraefikTrafficRouting) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{96} +} +func (m *TraefikTrafficRouting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TraefikTrafficRouting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TraefikTrafficRouting) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraefikTrafficRouting.Merge(m, src) +} +func (m *TraefikTrafficRouting) XXX_Size() int { + return m.Size() +} +func (m *TraefikTrafficRouting) XXX_DiscardUnknown() { + xxx_messageInfo_TraefikTrafficRouting.DiscardUnknown(m) +} + +var xxx_messageInfo_TraefikTrafficRouting proto.InternalMessageInfo + func (m *TrafficWeights) Reset() { *m = TrafficWeights{} } func (*TrafficWeights) ProtoMessage() {} func (*TrafficWeights) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{78} + return fileDescriptor_e0e705f843545fab, []int{97} } func (m *TrafficWeights) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2269,7 +2801,7 @@ var xxx_messageInfo_TrafficWeights proto.InternalMessageInfo func (m *ValueFrom) Reset() { *m = ValueFrom{} } func (*ValueFrom) ProtoMessage() {} func (*ValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{79} + return fileDescriptor_e0e705f843545fab, []int{98} } func (m *ValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2297,7 +2829,7 @@ var xxx_messageInfo_ValueFrom proto.InternalMessageInfo func (m *WavefrontMetric) Reset() { *m = WavefrontMetric{} } func (*WavefrontMetric) ProtoMessage() {} func (*WavefrontMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{80} + return fileDescriptor_e0e705f843545fab, []int{99} } func (m *WavefrontMetric) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -2325,7 +2857,7 @@ var xxx_messageInfo_WavefrontMetric proto.InternalMessageInfo func (m *WebMetric) Reset() { *m = WebMetric{} } func (*WebMetric) ProtoMessage() {} func (*WebMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{81} + return fileDescriptor_e0e705f843545fab, []int{100} } func (m *WebMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2353,7 +2885,7 @@ var xxx_messageInfo_WebMetric proto.InternalMessageInfo func (m *WebMetricHeader) Reset() { *m = WebMetricHeader{} } func (*WebMetricHeader) ProtoMessage() {} func (*WebMetricHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{82} + return fileDescriptor_e0e705f843545fab, []int{101} } func (m *WebMetricHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2381,7 +2913,7 @@ var xxx_messageInfo_WebMetricHeader proto.InternalMessageInfo func (m *WeightDestination) Reset() { *m = WeightDestination{} } func (*WeightDestination) ProtoMessage() {} func (*WeightDestination) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{83} + return fileDescriptor_e0e705f843545fab, []int{102} } func (m *WeightDestination) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2407,6 +2939,7 @@ func (m *WeightDestination) XXX_DiscardUnknown() { var xxx_messageInfo_WeightDestination proto.InternalMessageInfo func init() { + proto.RegisterType((*ALBStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ALBStatus") proto.RegisterType((*ALBTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ALBTrafficRouting") proto.RegisterType((*AmbassadorTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AmbassadorTrafficRouting") proto.RegisterType((*AnalysisRun)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRun") @@ -2419,8 +2952,13 @@ func init() { proto.RegisterType((*AnalysisTemplateList)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateList") proto.RegisterType((*AnalysisTemplateSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateSpec") proto.RegisterType((*AntiAffinity)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AntiAffinity") + proto.RegisterType((*AppMeshTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshTrafficRouting") + proto.RegisterType((*AppMeshVirtualNodeGroup)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeGroup") + proto.RegisterType((*AppMeshVirtualNodeReference)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualNodeReference") + proto.RegisterType((*AppMeshVirtualService)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualService") proto.RegisterType((*Argument)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Argument") proto.RegisterType((*ArgumentValueFrom)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ArgumentValueFrom") + proto.RegisterType((*AwsResourceRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AwsResourceRef") proto.RegisterType((*BlueGreenStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.BlueGreenStatus") proto.RegisterType((*BlueGreenStrategy)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.BlueGreenStrategy") 
proto.RegisterType((*CanaryStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CanaryStatus") @@ -2434,6 +2972,7 @@ func init() { proto.RegisterType((*ClusterAnalysisTemplate)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ClusterAnalysisTemplate") proto.RegisterType((*ClusterAnalysisTemplateList)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ClusterAnalysisTemplateList") proto.RegisterType((*DatadogMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DatadogMetric") + proto.RegisterType((*DryRun)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun") proto.RegisterType((*Experiment)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Experiment") proto.RegisterType((*ExperimentAnalysisRunStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ExperimentAnalysisRunStatus") proto.RegisterType((*ExperimentAnalysisTemplateRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ExperimentAnalysisTemplateRef") @@ -2443,6 +2982,8 @@ func init() { proto.RegisterType((*ExperimentStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ExperimentStatus") proto.RegisterType((*FieldRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.FieldRef") proto.RegisterType((*GraphiteMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.GraphiteMetric") + proto.RegisterType((*HeaderRoutingMatch)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.HeaderRoutingMatch") + proto.RegisterType((*InfluxdbMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.InfluxdbMetric") proto.RegisterType((*IstioDestinationRule)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.IstioDestinationRule") proto.RegisterType((*IstioTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.IstioTrafficRouting") proto.RegisterType((*IstioVirtualService)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.IstioVirtualService") @@ -2450,16 +2991,20 @@ func init() { proto.RegisterType((*KayentaMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaMetric") proto.RegisterType((*KayentaScope)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaScope") proto.RegisterType((*KayentaThreshold)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaThreshold") + proto.RegisterType((*MangedRoutes)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MangedRoutes") proto.RegisterType((*Measurement)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Measurement") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Measurement.MetadataEntry") + proto.RegisterType((*MeasurementRetention)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MeasurementRetention") proto.RegisterType((*Metric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Metric") proto.RegisterType((*MetricProvider)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricProvider") proto.RegisterType((*MetricResult)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricResult") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricResult.MetadataEntry") 
proto.RegisterType((*NewRelicMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NewRelicMetric") proto.RegisterType((*NginxTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NginxTrafficRouting") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NginxTrafficRouting.AdditionalIngressAnnotationsEntry") proto.RegisterType((*ObjectRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ObjectRef") proto.RegisterType((*PauseCondition)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PauseCondition") + proto.RegisterType((*PingPongSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PingPongSpec") proto.RegisterType((*PodTemplateMetadata)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PodTemplateMetadata") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PodTemplateMetadata.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PodTemplateMetadata.LabelsEntry") @@ -2481,14 +3026,22 @@ func init() { proto.RegisterType((*RolloutStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutStatus") proto.RegisterType((*RolloutStrategy)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutStrategy") proto.RegisterType((*RolloutTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutTrafficRouting") + proto.RegisterType((*RouteMatch)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RouteMatch") + proto.RegisterMapType((map[string]StringMatch)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RouteMatch.HeadersEntry") + proto.RegisterType((*RunSummary)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RunSummary") proto.RegisterType((*SMITrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SMITrafficRouting") proto.RegisterType((*ScopeDetail)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ScopeDetail") proto.RegisterType((*SecretKeyRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SecretKeyRef") proto.RegisterType((*SetCanaryScale)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetCanaryScale") + proto.RegisterType((*SetHeaderRoute)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetHeaderRoute") + proto.RegisterType((*SetMirrorRoute)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetMirrorRoute") + proto.RegisterType((*StickinessConfig)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StickinessConfig") + proto.RegisterType((*StringMatch)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch") proto.RegisterType((*TLSRoute)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TLSRoute") proto.RegisterType((*TemplateService)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateService") proto.RegisterType((*TemplateSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateSpec") proto.RegisterType((*TemplateStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateStatus") + proto.RegisterType((*TraefikTrafficRouting)(nil), 
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TraefikTrafficRouting") proto.RegisterType((*TrafficWeights)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TrafficWeights") proto.RegisterType((*ValueFrom)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ValueFrom") proto.RegisterType((*WavefrontMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WavefrontMetric") @@ -2502,414 +3055,482 @@ func init() { } var fileDescriptor_e0e705f843545fab = []byte{ - // 6463 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3d, 0x6d, 0x8c, 0x1c, 0xc9, - 0x55, 0xd7, 0x33, 0xfb, 0x31, 0xfb, 0x66, 0x3f, 0xcb, 0xeb, 0x78, 0x6f, 0xef, 0xec, 0x71, 0xfa, - 0xa2, 0xc3, 0x81, 0x64, 0x9d, 0xf8, 0xee, 0xe0, 0x92, 0x8b, 0x4e, 0xcc, 0xec, 0xda, 0xe7, 0xf5, - 0xed, 0xda, 0xeb, 0x9a, 0xb5, 0x9d, 0x5c, 0x72, 0x21, 0xbd, 0x33, 0xb5, 0xb3, 0x6d, 0xf7, 0x74, - 0x4f, 0xba, 0x7b, 0xd6, 0xde, 0x4b, 0x94, 0x0f, 0xa2, 0x84, 0x80, 0x12, 0x25, 0x90, 0xe4, 0x07, - 0x42, 0x41, 0x11, 0xe2, 0x07, 0x22, 0xfc, 0x40, 0x11, 0x88, 0x3f, 0x41, 0x20, 0x20, 0x52, 0xf8, - 0x01, 0x0a, 0x12, 0x70, 0x01, 0x25, 0x43, 0x32, 0xe1, 0x0f, 0xfc, 0x41, 0x41, 0x41, 0x28, 0x96, - 0x90, 0x50, 0x7d, 0x76, 0x75, 0x4f, 0xcf, 0xee, 0xcc, 0x4e, 0xaf, 0x89, 0x80, 0x7f, 0x33, 0xf5, - 0x5e, 0xbd, 0x57, 0x1f, 0xaf, 0x5e, 0xbd, 0x7a, 0xf5, 0xea, 0x35, 0x6c, 0x34, 0xec, 0x70, 0xaf, - 0xbd, 0xb3, 0x52, 0xf3, 0x9a, 0x17, 0x2d, 0xbf, 0xe1, 0xb5, 0x7c, 0xef, 0x2e, 0xfb, 0xf1, 0x56, - 0xdf, 0x73, 0x1c, 0xaf, 0x1d, 0x06, 0x17, 0x5b, 0xf7, 0x1a, 0x17, 0xad, 0x96, 0x1d, 0x5c, 0x54, - 0x25, 0xfb, 0x6f, 0xb7, 0x9c, 0xd6, 0x9e, 0xf5, 0xf6, 0x8b, 0x0d, 0xe2, 0x12, 0xdf, 0x0a, 0x49, - 0x7d, 0xa5, 0xe5, 0x7b, 0xa1, 0x87, 0xde, 0x15, 0x51, 0x5b, 0x91, 0xd4, 0xd8, 0x8f, 0x5f, 0x90, - 0x75, 0x57, 0x5a, 0xf7, 0x1a, 0x2b, 0x94, 0xda, 0x8a, 0x2a, 0x91, 0xd4, 0x96, 0xdf, 0xaa, 0xb5, - 0xa5, 0xe1, 0x35, 0xbc, 0x8b, 0x8c, 0xe8, 0x4e, 0x7b, 0x97, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, - 0x6c, 0xf9, 0xa9, 0x7b, 0xcf, 0x07, 0x2b, 0xb6, 0x47, 0xdb, 0x76, 0x71, 0xc7, 0x0a, 0x6b, 0x7b, - 0x17, 0xf7, 0x7b, 0x5a, 0xb4, 0x6c, 0x6a, 0x48, 0x35, 0xcf, 0x27, 0x69, 0x38, 0xcf, 0x46, 0x38, - 0x4d, 0xab, 0xb6, 0x67, 0xbb, 0xc4, 0x3f, 0x88, 0x7a, 0xdd, 0x24, 0xa1, 0x95, 0x56, 0xeb, 0x62, - 0xbf, 0x5a, 0x7e, 0xdb, 0x0d, 0xed, 0x26, 0xe9, 0xa9, 0xf0, 0xb3, 0x47, 0x55, 0x08, 0x6a, 0x7b, - 0xa4, 0x69, 0xf5, 0xd4, 0x7b, 0xa6, 0x5f, 0xbd, 0x76, 0x68, 0x3b, 0x17, 0x6d, 0x37, 0x0c, 0x42, - 0x3f, 0x59, 0xc9, 0xfc, 0x77, 0x03, 0x16, 0xca, 0x1b, 0x95, 0x6d, 0xdf, 0xda, 0xdd, 0xb5, 0x6b, - 0xd8, 0x6b, 0x87, 0xb6, 0xdb, 0x40, 0x6f, 0x86, 0x49, 0xdb, 0x6d, 0xf8, 0x24, 0x08, 0x96, 0x8c, - 0xf3, 0xc6, 0x85, 0xa9, 0xca, 0xdc, 0x37, 0x3b, 0xa5, 0xc7, 0xba, 0x9d, 0xd2, 0xe4, 0x3a, 0x2f, - 0xc6, 0x12, 0x8e, 0x9e, 0x83, 0x62, 0x40, 0xfc, 0x7d, 0xbb, 0x46, 0xb6, 0x3c, 0x3f, 0x5c, 0xca, - 0x9d, 0x37, 0x2e, 0x8c, 0x57, 0x4e, 0x09, 0xf4, 0x62, 0x35, 0x02, 0x61, 0x1d, 0x8f, 0x56, 0xf3, - 0x3d, 0x2f, 0x14, 0xf0, 0xa5, 0x3c, 0xe3, 0xa2, 0xaa, 0xe1, 0x08, 0x84, 0x75, 0x3c, 0xb4, 0x06, - 0xf3, 0x96, 0xeb, 0x7a, 0xa1, 0x15, 0xda, 0x9e, 0xbb, 0xe5, 0x93, 0x5d, 0xfb, 0xc1, 0xd2, 0x18, - 0xab, 0xbb, 0x24, 0xea, 0xce, 0x97, 0x13, 0x70, 0xdc, 0x53, 0xc3, 0x5c, 0x83, 0xa5, 0x72, 0x73, - 0xc7, 0x0a, 0x02, 0xab, 0xee, 0xf9, 0x89, 0xae, 0x5f, 0x80, 0x42, 0xd3, 0x6a, 0xb5, 0x6c, 0xb7, - 0x41, 0xfb, 0x9e, 0xbf, 0x30, 0x55, 0x99, 0xee, 0x76, 0x4a, 0x85, 0x4d, 0x51, 0x86, 0x15, 0xd4, - 0xfc, 0x87, 0x1c, 0x14, 0xcb, 
0xae, 0xe5, 0x1c, 0x04, 0x76, 0x80, 0xdb, 0x2e, 0xfa, 0x00, 0x14, - 0xa8, 0x0c, 0xd4, 0xad, 0xd0, 0x62, 0xa3, 0x56, 0xbc, 0xf4, 0xb6, 0x15, 0x3e, 0x25, 0x2b, 0xfa, - 0x94, 0x44, 0x92, 0x4d, 0xb1, 0x57, 0xf6, 0xdf, 0xbe, 0x72, 0x63, 0xe7, 0x2e, 0xa9, 0x85, 0x9b, - 0x24, 0xb4, 0x2a, 0x48, 0xf4, 0x02, 0xa2, 0x32, 0xac, 0xa8, 0x22, 0x0f, 0xc6, 0x82, 0x16, 0xa9, - 0xb1, 0x41, 0x2e, 0x5e, 0xda, 0x5c, 0x19, 0x65, 0x15, 0xad, 0x68, 0x4d, 0xaf, 0xb6, 0x48, 0xad, - 0x32, 0x2d, 0x58, 0x8f, 0xd1, 0x7f, 0x98, 0x31, 0x42, 0xf7, 0x61, 0x22, 0x08, 0xad, 0xb0, 0x1d, - 0xb0, 0x09, 0x2a, 0x5e, 0xba, 0x91, 0x1d, 0x4b, 0x46, 0xb6, 0x32, 0x2b, 0x98, 0x4e, 0xf0, 0xff, - 0x58, 0xb0, 0x33, 0xff, 0xd1, 0x80, 0x53, 0x1a, 0x76, 0xd9, 0x6f, 0xb4, 0x9b, 0xc4, 0x0d, 0xd1, - 0x79, 0x18, 0x73, 0xad, 0x26, 0x11, 0x52, 0xa9, 0x9a, 0x7c, 0xdd, 0x6a, 0x12, 0xcc, 0x20, 0xe8, - 0x29, 0x18, 0xdf, 0xb7, 0x9c, 0x36, 0x61, 0x83, 0x34, 0x55, 0x99, 0x11, 0x28, 0xe3, 0xb7, 0x69, - 0x21, 0xe6, 0x30, 0xf4, 0x61, 0x98, 0x62, 0x3f, 0xae, 0xf8, 0x5e, 0x33, 0xa3, 0xae, 0x89, 0x16, - 0xde, 0x96, 0x64, 0x2b, 0x33, 0xdd, 0x4e, 0x69, 0x4a, 0xfd, 0xc5, 0x11, 0x43, 0xf3, 0x9f, 0x0c, - 0x98, 0xd3, 0x3a, 0xb7, 0x61, 0x07, 0x21, 0x7a, 0x5f, 0x8f, 0xf0, 0xac, 0x0c, 0x26, 0x3c, 0xb4, - 0x36, 0x13, 0x9d, 0x79, 0xd1, 0xd3, 0x82, 0x2c, 0xd1, 0x04, 0xc7, 0x85, 0x71, 0x3b, 0x24, 0xcd, - 0x60, 0x29, 0x77, 0x3e, 0x7f, 0xa1, 0x78, 0x69, 0x3d, 0xb3, 0x69, 0x8c, 0xc6, 0x77, 0x9d, 0xd2, - 0xc7, 0x9c, 0x8d, 0xf9, 0xe5, 0x5c, 0xac, 0x87, 0x54, 0xa2, 0x90, 0x07, 0x93, 0x4d, 0x12, 0xfa, - 0x76, 0x8d, 0xaf, 0xab, 0xe2, 0xa5, 0xb5, 0xd1, 0x5a, 0xb1, 0xc9, 0x88, 0x45, 0x9a, 0x89, 0xff, - 0x0f, 0xb0, 0xe4, 0x82, 0xf6, 0x60, 0xcc, 0xf2, 0x1b, 0xb2, 0xcf, 0x57, 0xb2, 0x99, 0xdf, 0x48, - 0xe6, 0xca, 0x7e, 0x23, 0xc0, 0x8c, 0x03, 0xba, 0x08, 0x53, 0x21, 0xf1, 0x9b, 0xb6, 0x6b, 0x85, - 0x5c, 0x95, 0x15, 0x2a, 0x0b, 0x02, 0x6d, 0x6a, 0x5b, 0x02, 0x70, 0x84, 0x63, 0xbe, 0x9e, 0x83, - 0x85, 0x9e, 0xc5, 0x80, 0x9e, 0x85, 0xf1, 0xd6, 0x9e, 0x15, 0x48, 0xe9, 0x3e, 0x27, 0x87, 0x76, - 0x8b, 0x16, 0x3e, 0xec, 0x94, 0x66, 0x64, 0x15, 0x56, 0x80, 0x39, 0x32, 0xd5, 0xd5, 0x4d, 0x12, - 0x04, 0x56, 0x43, 0x8a, 0xbc, 0x36, 0x22, 0xac, 0x18, 0x4b, 0x38, 0xfa, 0x25, 0x03, 0x66, 0xf8, - 0xe8, 0x60, 0x12, 0xb4, 0x9d, 0x90, 0x2e, 0x6b, 0x3a, 0x36, 0xd7, 0xb2, 0x98, 0x09, 0x4e, 0xb2, - 0x72, 0x5a, 0x70, 0x9f, 0xd1, 0x4b, 0x03, 0x1c, 0xe7, 0x8b, 0xee, 0xc0, 0x54, 0x10, 0x5a, 0x7e, - 0x48, 0xea, 0xe5, 0x90, 0x29, 0xf0, 0xe2, 0xa5, 0x9f, 0x1e, 0x4c, 0xde, 0xb7, 0xed, 0x26, 0xe1, - 0x6b, 0xab, 0x2a, 0x09, 0xe0, 0x88, 0x96, 0xf9, 0x77, 0x71, 0xc5, 0x51, 0x0d, 0xe9, 0x66, 0xd7, - 0x38, 0x40, 0xef, 0x85, 0xc7, 0x83, 0x76, 0xad, 0x46, 0x82, 0x60, 0xb7, 0xed, 0xe0, 0xb6, 0x7b, - 0xd5, 0x0e, 0x42, 0xcf, 0x3f, 0xd8, 0xb0, 0x9b, 0x76, 0xc8, 0xc6, 0x7b, 0xbc, 0x72, 0xb6, 0xdb, - 0x29, 0x3d, 0x5e, 0xed, 0x87, 0x84, 0xfb, 0xd7, 0x47, 0x16, 0x3c, 0xd1, 0x76, 0xfb, 0x93, 0xe7, - 0x7b, 0x62, 0xa9, 0xdb, 0x29, 0x3d, 0x71, 0xab, 0x3f, 0x1a, 0x3e, 0x8c, 0x86, 0xf9, 0xaf, 0x06, - 0xcc, 0xcb, 0x7e, 0x6d, 0x93, 0x66, 0xcb, 0xb1, 0x42, 0xf2, 0x08, 0x76, 0x9c, 0x30, 0xb6, 0xe3, - 0xe0, 0x6c, 0xf4, 0x86, 0x6c, 0x7f, 0xbf, 0x6d, 0xc7, 0xfc, 0x17, 0x03, 0x16, 0x93, 0xc8, 0x8f, - 0x40, 0x4b, 0x06, 0x71, 0x2d, 0x79, 0x3d, 0xdb, 0xde, 0xf6, 0x51, 0x95, 0x3f, 0x4c, 0xe9, 0xeb, - 0xff, 0x72, 0x7d, 0x69, 0xfe, 0xce, 0x18, 0x4c, 0x97, 0xdd, 0xd0, 0x2e, 0xef, 0xee, 0xda, 0xae, - 0x1d, 0x1e, 0xa0, 0xcf, 0xe4, 0xe0, 0x62, 0xcb, 0x27, 0xbb, 0xc4, 0xf7, 0x49, 0x7d, 0xad, 0xed, - 0xdb, 0x6e, 0xa3, 0x5a, 0xdb, 0x23, 0xf5, 0xb6, 0x63, 
0xbb, 0x8d, 0xf5, 0x86, 0xeb, 0xa9, 0xe2, - 0xcb, 0x0f, 0x48, 0xad, 0x4d, 0x4d, 0x39, 0x31, 0xff, 0xcd, 0xd1, 0x9a, 0xb9, 0x35, 0x1c, 0xd3, - 0xca, 0x33, 0xdd, 0x4e, 0xe9, 0xe2, 0x90, 0x95, 0xf0, 0xb0, 0x5d, 0x43, 0x9f, 0xce, 0xc1, 0x8a, - 0x4f, 0x3e, 0xd8, 0xb6, 0x07, 0x1f, 0x0d, 0xbe, 0x40, 0x9d, 0xd1, 0x46, 0x03, 0x0f, 0xc5, 0xb3, - 0x72, 0xa9, 0xdb, 0x29, 0x0d, 0x59, 0x07, 0x0f, 0xd9, 0x2f, 0xf3, 0xcf, 0x0d, 0x28, 0x0c, 0x61, - 0xfd, 0x95, 0xe2, 0xd6, 0xdf, 0x54, 0x8f, 0xe5, 0x17, 0xf6, 0x5a, 0x7e, 0x2f, 0x8d, 0x36, 0x68, - 0x83, 0x58, 0x7c, 0xff, 0x46, 0x4f, 0x59, 0x49, 0x0b, 0x11, 0xed, 0xc1, 0x62, 0xcb, 0xab, 0xcb, - 0x45, 0x7f, 0xd5, 0x0a, 0xf6, 0x18, 0x4c, 0x74, 0xef, 0xd9, 0x6e, 0xa7, 0xb4, 0xb8, 0x95, 0x02, - 0x7f, 0xd8, 0x29, 0x2d, 0x29, 0x22, 0x09, 0x04, 0x9c, 0x4a, 0x11, 0xb5, 0xa0, 0xb0, 0x6b, 0x13, - 0xa7, 0x8e, 0xc9, 0xae, 0x90, 0x94, 0x11, 0x97, 0xf7, 0x15, 0x41, 0x8d, 0x1f, 0x8e, 0xe4, 0x3f, - 0xac, 0xb8, 0x98, 0x3f, 0x1e, 0x83, 0xb9, 0x8a, 0xd3, 0x26, 0x2f, 0xf9, 0x84, 0x48, 0xfb, 0xa6, - 0x0c, 0x73, 0x2d, 0x9f, 0xec, 0xdb, 0xe4, 0x7e, 0x95, 0x38, 0xa4, 0x16, 0x7a, 0xbe, 0xe8, 0xea, - 0x19, 0x31, 0x93, 0x73, 0x5b, 0x71, 0x30, 0x4e, 0xe2, 0xa3, 0x17, 0x61, 0xd6, 0xaa, 0x85, 0xf6, - 0x3e, 0x51, 0x14, 0xf8, 0x44, 0xbf, 0x41, 0x50, 0x98, 0x2d, 0xc7, 0xa0, 0x38, 0x81, 0x8d, 0xde, - 0x07, 0x4b, 0x41, 0xcd, 0x72, 0xc8, 0xad, 0x96, 0x60, 0xb5, 0xba, 0x47, 0x6a, 0xf7, 0xb6, 0x3c, - 0xdb, 0x0d, 0x85, 0xe1, 0x76, 0x5e, 0x50, 0x5a, 0xaa, 0xf6, 0xc1, 0xc3, 0x7d, 0x29, 0xa0, 0x3f, - 0x31, 0xe0, 0x6c, 0xcb, 0x27, 0x5b, 0xbe, 0xd7, 0xf4, 0xa8, 0xf4, 0xf6, 0x98, 0x78, 0xc2, 0xd4, - 0xb9, 0x3d, 0xe2, 0x32, 0xe5, 0x25, 0xbd, 0xa7, 0xa9, 0x37, 0x76, 0x3b, 0xa5, 0xb3, 0x5b, 0x87, - 0x35, 0x00, 0x1f, 0xde, 0x3e, 0xf4, 0x67, 0x06, 0x9c, 0x6b, 0x79, 0x41, 0x78, 0x48, 0x17, 0xc6, - 0x4f, 0xb4, 0x0b, 0x66, 0xb7, 0x53, 0x3a, 0xb7, 0x75, 0x68, 0x0b, 0xf0, 0x11, 0x2d, 0x34, 0xbb, - 0x45, 0x58, 0xd0, 0x64, 0x4f, 0x58, 0x80, 0x2f, 0xc0, 0x8c, 0x14, 0x06, 0xee, 0x73, 0xe0, 0xb2, - 0xa7, 0xec, 0xd5, 0xb2, 0x0e, 0xc4, 0x71, 0x5c, 0x2a, 0x77, 0x4a, 0x14, 0x79, 0xed, 0x84, 0xdc, - 0x6d, 0xc5, 0xa0, 0x38, 0x81, 0x8d, 0xd6, 0xe1, 0x94, 0x28, 0xc1, 0xa4, 0xe5, 0xd8, 0x35, 0x6b, - 0xd5, 0x6b, 0x0b, 0x91, 0x1b, 0xaf, 0x9c, 0xe9, 0x76, 0x4a, 0xa7, 0xb6, 0x7a, 0xc1, 0x38, 0xad, - 0x0e, 0xda, 0x80, 0x45, 0xab, 0x1d, 0x7a, 0xaa, 0xff, 0x97, 0x5d, 0x6b, 0xc7, 0x21, 0x75, 0x26, - 0x5a, 0x85, 0xca, 0x12, 0xd5, 0x1a, 0xe5, 0x14, 0x38, 0x4e, 0xad, 0x85, 0xb6, 0x12, 0xd4, 0xaa, - 0xa4, 0xe6, 0xb9, 0x75, 0x3e, 0xcb, 0xe3, 0x95, 0x27, 0x45, 0xf7, 0xe2, 0x14, 0x05, 0x0e, 0x4e, - 0xad, 0x89, 0x1c, 0x98, 0x6d, 0x5a, 0x0f, 0x6e, 0xb9, 0xd6, 0xbe, 0x65, 0x3b, 0x94, 0xc9, 0xd2, - 0xc4, 0x11, 0xa6, 0x69, 0x3b, 0xb4, 0x9d, 0x15, 0xee, 0x9f, 0x5a, 0x59, 0x77, 0xc3, 0x1b, 0x7e, - 0x35, 0xa4, 0x9b, 0x40, 0x05, 0xd1, 0x81, 0xdd, 0x8c, 0xd1, 0xc2, 0x09, 0xda, 0xe8, 0x06, 0x9c, - 0x66, 0xcb, 0x71, 0xcd, 0xbb, 0xef, 0xae, 0x11, 0xc7, 0x3a, 0x90, 0x1d, 0x98, 0x64, 0x1d, 0x78, - 0xbc, 0xdb, 0x29, 0x9d, 0xae, 0xa6, 0x21, 0xe0, 0xf4, 0x7a, 0xd4, 0x96, 0x8f, 0x03, 0x30, 0xd9, - 0xb7, 0x03, 0xdb, 0x73, 0xb9, 0x2d, 0x5f, 0x88, 0x6c, 0xf9, 0x6a, 0x7f, 0x34, 0x7c, 0x18, 0x0d, - 0xf4, 0x1b, 0x06, 0x2c, 0xa6, 0x2d, 0xc3, 0xa5, 0xa9, 0x2c, 0xfc, 0x3a, 0x89, 0xa5, 0xc5, 0x25, - 0x22, 0x55, 0x29, 0xa4, 0x36, 0x02, 0x7d, 0xcc, 0x80, 0x69, 0x4b, 0x33, 0xce, 0x96, 0x80, 0xb5, - 0xea, 0xda, 0xa8, 0xd6, 0x70, 0x44, 0xb1, 0x32, 0xdf, 0xed, 0x94, 0x62, 0x06, 0x20, 0x8e, 0x71, - 0x44, 0xbf, 0x69, 0xc0, 0xe9, 0xd4, 0x35, 0xbe, 0x54, 0x3c, 0x89, 0x11, 0x62, 
0x42, 0x92, 0xae, - 0x73, 0xd2, 0x9b, 0x81, 0x3e, 0x6f, 0xa8, 0xad, 0x6c, 0x53, 0x9e, 0x47, 0xa6, 0x59, 0xd3, 0x6e, - 0x8e, 0x68, 0x8f, 0x46, 0xbb, 0xb7, 0x24, 0x5c, 0x39, 0xa5, 0xed, 0x8c, 0xb2, 0x10, 0x27, 0xd9, - 0xa3, 0xcf, 0x1a, 0x72, 0x6b, 0x54, 0x2d, 0x9a, 0x39, 0xa9, 0x16, 0xa1, 0x68, 0xa7, 0x55, 0x0d, - 0x4a, 0x30, 0x47, 0xef, 0x87, 0x65, 0x6b, 0xc7, 0xf3, 0xc3, 0xd4, 0xc5, 0xb7, 0x34, 0xcb, 0x96, - 0xd1, 0xb9, 0x6e, 0xa7, 0xb4, 0x5c, 0xee, 0x8b, 0x85, 0x0f, 0xa1, 0x60, 0x7e, 0x67, 0x0c, 0xa6, - 0x57, 0x2d, 0xd7, 0xf2, 0x0f, 0xc4, 0xd6, 0xf5, 0x75, 0x03, 0x9e, 0xac, 0xb5, 0x7d, 0x9f, 0xb8, - 0x61, 0x35, 0x24, 0xad, 0xde, 0x8d, 0xcb, 0x38, 0xd1, 0x8d, 0xeb, 0x7c, 0xb7, 0x53, 0x7a, 0x72, - 0xf5, 0x10, 0xfe, 0xf8, 0xd0, 0xd6, 0xa1, 0xbf, 0x36, 0xc0, 0x14, 0x08, 0x15, 0xab, 0x76, 0xaf, - 0xe1, 0x7b, 0x6d, 0xb7, 0xde, 0xdb, 0x89, 0xdc, 0x89, 0x76, 0xe2, 0xe9, 0x6e, 0xa7, 0x64, 0xae, - 0x1e, 0xd9, 0x0a, 0x3c, 0x40, 0x4b, 0xd1, 0x4b, 0xb0, 0x20, 0xb0, 0x2e, 0x3f, 0x68, 0x11, 0xdf, - 0xa6, 0xb6, 0xaf, 0xf0, 0xf3, 0x3f, 0x2e, 0xb6, 0x95, 0x85, 0xd5, 0x24, 0x02, 0xee, 0xad, 0x83, - 0x02, 0x98, 0xbc, 0x4f, 0xec, 0xc6, 0x5e, 0x28, 0xcd, 0xa7, 0x8d, 0xd1, 0x7a, 0x2f, 0x1c, 0xfe, - 0x77, 0x38, 0xcd, 0x4a, 0x91, 0x1e, 0x86, 0xc5, 0x1f, 0x2c, 0x39, 0x99, 0xbf, 0x3f, 0x06, 0x20, - 0xc5, 0x8b, 0xb4, 0xd0, 0xcf, 0xc0, 0x54, 0x40, 0x42, 0x8e, 0x25, 0xdc, 0x45, 0xdc, 0x07, 0x25, - 0x0b, 0x71, 0x04, 0x47, 0xf7, 0x60, 0xbc, 0x65, 0xb5, 0x03, 0x22, 0x26, 0xeb, 0x5a, 0x26, 0x93, - 0xb5, 0x45, 0x29, 0xf2, 0x03, 0x0d, 0xfb, 0x89, 0x39, 0x0f, 0xf4, 0x09, 0x03, 0x80, 0xc4, 0x07, - 0xb8, 0x78, 0xa9, 0x9a, 0x09, 0xcb, 0x68, 0x0e, 0xe8, 0x18, 0x54, 0x66, 0xbb, 0x9d, 0x12, 0x68, - 0x53, 0xa5, 0xb1, 0x45, 0xf7, 0xa1, 0x60, 0x49, 0x1d, 0x3d, 0x76, 0x12, 0x3a, 0x9a, 0x9d, 0x33, - 0x94, 0x90, 0x29, 0x66, 0xe8, 0xd3, 0x06, 0xcc, 0x06, 0x24, 0x14, 0x53, 0x45, 0x35, 0x85, 0x30, - 0x50, 0x47, 0x14, 0x92, 0x6a, 0x8c, 0x26, 0xd7, 0x78, 0xf1, 0x32, 0x9c, 0xe0, 0x6b, 0x7e, 0xa7, - 0x08, 0xb3, 0x52, 0x64, 0x22, 0x9b, 0xb3, 0xc6, 0x4b, 0xd2, 0x6d, 0xce, 0x55, 0x1d, 0x88, 0xe3, - 0xb8, 0xb4, 0x72, 0x10, 0x52, 0x23, 0x27, 0x6e, 0x72, 0xaa, 0xca, 0x55, 0x1d, 0x88, 0xe3, 0xb8, - 0xa8, 0x09, 0xe3, 0x41, 0x48, 0x5a, 0xd2, 0xc3, 0x7b, 0x75, 0xb4, 0xd1, 0x88, 0x56, 0x42, 0xe4, - 0xc5, 0xa2, 0xff, 0x02, 0xcc, 0xb9, 0xa0, 0xcf, 0x19, 0x30, 0x1b, 0xc6, 0x2e, 0xd2, 0x84, 0x18, - 0x64, 0x23, 0x89, 0xf1, 0x3b, 0x3a, 0x3e, 0x1b, 0xf1, 0x32, 0x9c, 0x60, 0x9f, 0x62, 0x86, 0x8e, - 0x9f, 0xa0, 0x19, 0xfa, 0x0a, 0x14, 0x9a, 0xd6, 0x83, 0x6a, 0xdb, 0x6f, 0x1c, 0xdf, 0xdc, 0x15, - 0xf7, 0x8c, 0x9c, 0x0a, 0x56, 0xf4, 0xd0, 0xc7, 0x0d, 0x6d, 0x71, 0x4d, 0x32, 0xe2, 0x77, 0xb2, - 0x5d, 0x5c, 0x4a, 0x8b, 0xf7, 0x5d, 0x66, 0x3d, 0x46, 0x61, 0xe1, 0x91, 0x1b, 0x85, 0xd4, 0xc0, - 0xe1, 0x0b, 0x44, 0x19, 0x38, 0x53, 0x27, 0x6a, 0xe0, 0xac, 0xc6, 0x98, 0xe1, 0x04, 0x73, 0xd6, - 0x1e, 0xbe, 0xe6, 0x54, 0x7b, 0xe0, 0x44, 0xdb, 0x53, 0x8d, 0x31, 0xc3, 0x09, 0xe6, 0xfd, 0x4f, - 0x42, 0xc5, 0x93, 0x39, 0x09, 0x4d, 0x67, 0x70, 0x12, 0x3a, 0xdc, 0x48, 0x9c, 0x19, 0xd5, 0x48, - 0x44, 0xd7, 0x00, 0xd5, 0x0f, 0x5c, 0xab, 0x69, 0xd7, 0x84, 0xb2, 0x64, 0x1b, 0xc4, 0x2c, 0x3b, - 0x29, 0x2f, 0x0b, 0x45, 0x86, 0xd6, 0x7a, 0x30, 0x70, 0x4a, 0x2d, 0xf3, 0x3f, 0x0c, 0x98, 0x5f, - 0x75, 0xbc, 0x76, 0xfd, 0x8e, 0x15, 0xd6, 0xf6, 0xb8, 0xf7, 0x1c, 0xbd, 0x08, 0x05, 0xdb, 0x0d, - 0x89, 0xbf, 0x6f, 0x39, 0x42, 0xb7, 0x9b, 0xf2, 0x82, 0x61, 0x5d, 0x94, 0x3f, 0xec, 0x94, 0x66, - 0xd7, 0xda, 0x3e, 0x8b, 0x3f, 0xe0, 0x2b, 0x1d, 0xab, 0x3a, 0xe8, 0x2b, 0x06, 0x2c, 0x70, 0xff, - 0xfb, 
0x9a, 0x15, 0x5a, 0x37, 0xdb, 0xc4, 0xb7, 0x89, 0xf4, 0xc0, 0x8f, 0xb8, 0xc8, 0x93, 0x6d, - 0x95, 0x0c, 0x0e, 0x22, 0xf3, 0x6b, 0x33, 0xc9, 0x19, 0xf7, 0x36, 0xc6, 0xfc, 0x42, 0x1e, 0x1e, - 0xef, 0x4b, 0x0b, 0x2d, 0x43, 0xce, 0xae, 0x8b, 0xae, 0x83, 0xa0, 0x9b, 0x5b, 0xaf, 0xe3, 0x9c, - 0x5d, 0x47, 0x2b, 0xcc, 0x32, 0xf1, 0x49, 0x10, 0x48, 0x0f, 0xf5, 0x94, 0x32, 0x22, 0x44, 0x29, - 0xd6, 0x30, 0x50, 0x09, 0xc6, 0x1d, 0x6b, 0x87, 0x38, 0xc2, 0x4a, 0x64, 0xb6, 0xce, 0x06, 0x2d, - 0xc0, 0xbc, 0x1c, 0xfd, 0xa2, 0x01, 0xc0, 0x1b, 0x48, 0x6d, 0x4c, 0xb1, 0xc3, 0xe0, 0x6c, 0x87, - 0x89, 0x52, 0xe6, 0xad, 0x8c, 0xfe, 0x63, 0x8d, 0x2b, 0xda, 0x86, 0x09, 0x6a, 0xf6, 0x78, 0xf5, - 0x63, 0x6f, 0x28, 0xd0, 0xed, 0x94, 0x26, 0xb6, 0x18, 0x0d, 0x2c, 0x68, 0xd1, 0xb1, 0xf2, 0x49, - 0xd8, 0xf6, 0x5d, 0x3a, 0xb4, 0x6c, 0x0b, 0x29, 0xf0, 0x56, 0x60, 0x55, 0x8a, 0x35, 0x0c, 0xf3, - 0x8f, 0x72, 0xb0, 0x98, 0xd6, 0x74, 0xaa, 0xa9, 0x27, 0x78, 0x6b, 0xc5, 0x81, 0xe7, 0xdd, 0xd9, - 0x8f, 0x8f, 0xb8, 0x4a, 0x52, 0xc1, 0x1b, 0xe2, 0xaa, 0x57, 0xf0, 0x45, 0xef, 0x56, 0x23, 0x94, - 0x3b, 0xe6, 0x08, 0x29, 0xca, 0x89, 0x51, 0x3a, 0x0f, 0x63, 0x01, 0x9d, 0xf9, 0x7c, 0xfc, 0x02, - 0x80, 0xcd, 0x11, 0x83, 0x50, 0x8c, 0xb6, 0x6b, 0x87, 0x22, 0x28, 0x48, 0x61, 0xdc, 0x72, 0xed, - 0x10, 0x33, 0x88, 0xf9, 0xa5, 0x1c, 0x2c, 0xf7, 0xef, 0x14, 0xfa, 0x92, 0x01, 0x50, 0xa7, 0x46, - 0x2d, 0x15, 0x49, 0x79, 0xf5, 0x66, 0x9d, 0xd4, 0x18, 0xae, 0x49, 0x4e, 0xd1, 0x3d, 0xac, 0x2a, - 0x0a, 0xb0, 0xd6, 0x10, 0x74, 0x49, 0x8a, 0xfe, 0x75, 0xab, 0x29, 0x4d, 0x41, 0x55, 0x67, 0x53, - 0x41, 0xb0, 0x86, 0x45, 0x4f, 0x2d, 0xae, 0xd5, 0x24, 0x41, 0xcb, 0x52, 0x21, 0x56, 0xec, 0xd4, - 0x72, 0x5d, 0x16, 0xe2, 0x08, 0x6e, 0x3a, 0xf0, 0xd4, 0x00, 0xed, 0xcc, 0x28, 0x02, 0xc7, 0xfc, - 0xa1, 0x01, 0x67, 0x56, 0x9d, 0x76, 0x10, 0x12, 0xff, 0xff, 0xcc, 0xb5, 0xf6, 0x7f, 0x1a, 0xf0, - 0x44, 0x9f, 0x3e, 0x3f, 0x82, 0xdb, 0xed, 0xd7, 0xe2, 0xb7, 0xdb, 0xb7, 0x46, 0x15, 0xe9, 0xd4, - 0x7e, 0xf4, 0xb9, 0xe4, 0x0e, 0x61, 0x86, 0x6a, 0xad, 0xba, 0xd7, 0xc8, 0x68, 0xdf, 0x7c, 0x0a, - 0xc6, 0x3f, 0x48, 0xf7, 0x9f, 0xa4, 0x8c, 0xb1, 0x4d, 0x09, 0x73, 0x98, 0xf9, 0xf7, 0x39, 0xd0, - 0xce, 0xab, 0x8f, 0x40, 0xac, 0xdc, 0x98, 0x58, 0x8d, 0x78, 0x02, 0xd5, 0x4e, 0xdf, 0xfd, 0xc2, - 0xf3, 0xf6, 0x13, 0xe1, 0x79, 0xd7, 0x33, 0xe3, 0x78, 0x78, 0x74, 0xde, 0xeb, 0x06, 0x3c, 0x11, - 0x21, 0xf7, 0xba, 0x7e, 0x8e, 0xd6, 0x11, 0xcf, 0x41, 0xd1, 0x8a, 0xaa, 0x89, 0x59, 0x54, 0xe1, - 0x9f, 0x1a, 0x45, 0xac, 0xe3, 0x45, 0x11, 0x52, 0xf9, 0x63, 0x46, 0x48, 0x8d, 0x1d, 0x1e, 0x21, - 0x65, 0xfe, 0x28, 0x07, 0x67, 0x7b, 0x7b, 0x26, 0xa5, 0x1b, 0x93, 0xdd, 0x01, 0xfa, 0xf6, 0x3c, - 0x4c, 0x87, 0xa2, 0x82, 0xa6, 0xab, 0x17, 0x05, 0xe6, 0xf4, 0xb6, 0x06, 0xc3, 0x31, 0x4c, 0x5a, - 0xb3, 0xc6, 0xd7, 0x55, 0xb5, 0xe6, 0xb5, 0x64, 0x28, 0x99, 0xaa, 0xb9, 0xaa, 0xc1, 0x70, 0x0c, - 0x53, 0xc5, 0x6e, 0x8c, 0x9d, 0x78, 0xac, 0x5b, 0x15, 0x4e, 0xcb, 0x2b, 0xfc, 0x2b, 0x9e, 0xbf, - 0xea, 0x35, 0x5b, 0x0e, 0x61, 0x11, 0x08, 0xe3, 0xac, 0xb1, 0x67, 0x45, 0x95, 0xd3, 0x38, 0x0d, - 0x09, 0xa7, 0xd7, 0x35, 0x5f, 0xcf, 0xc3, 0xa9, 0x68, 0xd8, 0x57, 0x3d, 0xb7, 0x6e, 0xb3, 0x40, - 0x88, 0x17, 0x60, 0x2c, 0x3c, 0x68, 0xc9, 0xc1, 0xfe, 0x29, 0xd9, 0x9c, 0xed, 0x83, 0x16, 0x9d, - 0xed, 0x33, 0x29, 0x55, 0x28, 0x08, 0xb3, 0x4a, 0x68, 0x43, 0xad, 0x0e, 0x3e, 0x03, 0xcf, 0xc6, - 0xa5, 0xf9, 0x61, 0xa7, 0x94, 0x12, 0xf4, 0xbd, 0xa2, 0x28, 0xc5, 0x65, 0x1e, 0xdd, 0x85, 0x59, - 0xc7, 0x0a, 0xc2, 0x5b, 0xad, 0xba, 0x15, 0x92, 0x6d, 0xbb, 0x49, 0xc4, 0x9a, 0x1b, 0x26, 0x6c, - 0x4d, 0xdd, 0x16, 0x6e, 0xc4, 
0x28, 0xe1, 0x04, 0x65, 0xb4, 0x0f, 0x88, 0x96, 0x6c, 0xfb, 0x96, - 0x1b, 0xf0, 0x5e, 0x51, 0x7e, 0xc3, 0x87, 0xc9, 0xa9, 0x23, 0xce, 0x46, 0x0f, 0x35, 0x9c, 0xc2, - 0x01, 0x3d, 0x0d, 0x13, 0x3e, 0xb1, 0x02, 0x31, 0x99, 0x53, 0xd1, 0xfa, 0xc7, 0xac, 0x14, 0x0b, - 0xa8, 0xbe, 0xa0, 0x26, 0x8e, 0x58, 0x50, 0xdf, 0x35, 0x60, 0x36, 0x9a, 0xa6, 0x47, 0xb0, 0xcd, - 0x35, 0xe3, 0xdb, 0xdc, 0xd5, 0xac, 0x54, 0x62, 0x9f, 0x9d, 0xed, 0x6b, 0x63, 0x7a, 0xff, 0x58, - 0xe0, 0xd6, 0x87, 0x60, 0x4a, 0xae, 0x6a, 0x69, 0x3f, 0x8e, 0xe8, 0x27, 0x89, 0x59, 0x16, 0x5a, - 0x64, 0xa9, 0x60, 0x82, 0x23, 0x7e, 0x74, 0x63, 0xad, 0x8b, 0x4d, 0x53, 0x88, 0xbd, 0xda, 0x58, - 0xe5, 0x66, 0x9a, 0xb6, 0xb1, 0xca, 0x3a, 0xe8, 0x16, 0x9c, 0x69, 0xf9, 0x1e, 0x0b, 0xed, 0x5f, - 0x23, 0x56, 0xdd, 0xb1, 0x5d, 0x22, 0x8f, 0xe3, 0xfc, 0xb2, 0xfa, 0x89, 0x6e, 0xa7, 0x74, 0x66, - 0x2b, 0x1d, 0x05, 0xf7, 0xab, 0x1b, 0x8f, 0x90, 0x1d, 0x3b, 0x3a, 0x42, 0x16, 0xfd, 0xb2, 0x72, - 0x7a, 0x91, 0x60, 0x69, 0x9c, 0x0d, 0xe2, 0x7b, 0xb3, 0x9a, 0xca, 0x14, 0xb5, 0x1e, 0x89, 0x54, - 0x59, 0x30, 0xc5, 0x8a, 0x7d, 0x7f, 0xcf, 0xca, 0xc4, 0xf1, 0x3c, 0x2b, 0xe6, 0x27, 0xc7, 0x61, - 0x3e, 0xb9, 0xd9, 0x9e, 0x7c, 0xf4, 0xef, 0xaf, 0x19, 0x30, 0x2f, 0x05, 0x85, 0xf3, 0x24, 0xd2, - 0x3d, 0xbc, 0x91, 0x91, 0x7c, 0x72, 0xb3, 0x41, 0x3d, 0xc5, 0xd8, 0x4e, 0x70, 0xc3, 0x3d, 0xfc, - 0xd1, 0xab, 0x50, 0x54, 0x5e, 0xd4, 0x63, 0x85, 0x02, 0xcf, 0x31, 0x83, 0x21, 0x22, 0x81, 0x75, - 0x7a, 0xe8, 0x93, 0x06, 0x40, 0x4d, 0x6a, 0x74, 0x29, 0x48, 0x37, 0xb3, 0x12, 0x24, 0xb5, 0x57, - 0x44, 0x76, 0xa1, 0x2a, 0x0a, 0xb0, 0xc6, 0x18, 0x7d, 0x81, 0xf9, 0x4f, 0x95, 0x21, 0x43, 0x45, - 0x87, 0xb6, 0xe4, 0x3d, 0x59, 0x8b, 0x74, 0x74, 0x95, 0xa7, 0xac, 0x06, 0x0d, 0x14, 0xe0, 0x58, - 0x23, 0xcc, 0x17, 0x40, 0x85, 0x6e, 0xd1, 0x15, 0xca, 0x82, 0xb7, 0xb6, 0xac, 0x70, 0x4f, 0x88, - 0xa0, 0x5a, 0xa1, 0x57, 0x24, 0x00, 0x47, 0x38, 0xe6, 0x07, 0x60, 0xf6, 0x25, 0xdf, 0x6a, 0xed, - 0xd9, 0xcc, 0x4f, 0x49, 0x8d, 0xfa, 0x37, 0xc3, 0xa4, 0x55, 0xaf, 0xa7, 0xbd, 0x1a, 0x2a, 0xf3, - 0x62, 0x2c, 0xe1, 0x83, 0xd9, 0xef, 0x7f, 0x61, 0xc0, 0xe2, 0x7a, 0x10, 0xda, 0xde, 0x1a, 0x09, - 0x42, 0xaa, 0x16, 0xa8, 0x05, 0xd1, 0x76, 0xc8, 0x00, 0x36, 0xd8, 0x1a, 0xcc, 0x8b, 0xcb, 0x94, - 0xf6, 0x4e, 0x40, 0x42, 0xcd, 0x0e, 0x53, 0xc2, 0xb9, 0x9a, 0x80, 0xe3, 0x9e, 0x1a, 0x94, 0x8a, - 0xb8, 0x55, 0x89, 0xa8, 0xe4, 0xe3, 0x54, 0xaa, 0x09, 0x38, 0xee, 0xa9, 0x61, 0x7e, 0x2b, 0x0f, - 0xa7, 0x58, 0x37, 0x12, 0x2f, 0x8d, 0x3e, 0x6b, 0xc0, 0xec, 0xbe, 0xed, 0x87, 0x6d, 0xcb, 0xd1, - 0xaf, 0x87, 0x46, 0x96, 0x4f, 0xc6, 0xeb, 0x76, 0x8c, 0x30, 0x77, 0x20, 0xc7, 0xcb, 0x70, 0x82, - 0x39, 0xfa, 0x55, 0x03, 0xe6, 0xea, 0xf1, 0x91, 0xce, 0xe6, 0x80, 0x9c, 0x36, 0x87, 0x3c, 0xaa, - 0x21, 0x51, 0x88, 0x93, 0xfc, 0xd1, 0x17, 0x0d, 0x98, 0x8b, 0x37, 0x53, 0xaa, 0xac, 0x13, 0x18, - 0x24, 0x15, 0x86, 0x18, 0x2f, 0x0f, 0x70, 0xb2, 0x09, 0xe6, 0xdf, 0x1a, 0x62, 0x4a, 0xe3, 0x98, - 0x03, 0x08, 0xa6, 0x09, 0x13, 0xbe, 0xd7, 0x0e, 0x85, 0x93, 0x77, 0x8a, 0xfb, 0x02, 0x31, 0x2b, - 0xc1, 0x02, 0x82, 0xee, 0xc3, 0x54, 0xe8, 0x04, 0xbc, 0x50, 0xf4, 0x76, 0x44, 0x8b, 0x7e, 0x7b, - 0xa3, 0xca, 0xc8, 0x69, 0x9b, 0xae, 0x28, 0xa1, 0xc6, 0x83, 0xe4, 0x65, 0x7e, 0xd5, 0x80, 0xa9, - 0x6b, 0xde, 0x8e, 0x58, 0xce, 0xef, 0xcf, 0xe0, 0xbc, 0xac, 0xb6, 0x55, 0x75, 0x6d, 0x11, 0x59, - 0x6a, 0x2f, 0xc6, 0x4e, 0xcb, 0x4f, 0x6a, 0xb4, 0x57, 0xd8, 0x33, 0x4d, 0x4a, 0xea, 0x9a, 0xb7, - 0xd3, 0xd7, 0x9d, 0xf2, 0x5b, 0xe3, 0x30, 0xf3, 0xb2, 0x75, 0x40, 0xdc, 0xd0, 0x1a, 0x5e, 0x01, - 0xd1, 0x03, 0x68, 0x8b, 0x45, 0xd5, 0x69, 0xa6, 0x52, 
0x74, 0x00, 0x8d, 0x40, 0x58, 0xc7, 0x8b, - 0xf4, 0xca, 0xaa, 0xe7, 0xee, 0xda, 0x8d, 0x34, 0x8d, 0xb0, 0x9a, 0x80, 0xe3, 0x9e, 0x1a, 0xe8, - 0x1a, 0x20, 0x11, 0x74, 0x5f, 0xae, 0xd5, 0xbc, 0xb6, 0xcb, 0x35, 0x0b, 0x3f, 0x9b, 0x2a, 0x9b, - 0x7d, 0xb3, 0x07, 0x03, 0xa7, 0xd4, 0x42, 0xef, 0x83, 0xa5, 0x1a, 0xa3, 0x2c, 0x2c, 0x38, 0x9d, - 0x22, 0xb7, 0xe2, 0x55, 0x44, 0xeb, 0x6a, 0x1f, 0x3c, 0xdc, 0x97, 0x02, 0x6d, 0x69, 0x10, 0x7a, - 0xbe, 0xd5, 0x20, 0x3a, 0xdd, 0x89, 0x78, 0x4b, 0xab, 0x3d, 0x18, 0x38, 0xa5, 0x16, 0xfa, 0x28, - 0x4c, 0x85, 0x7b, 0x3e, 0x09, 0xf6, 0x3c, 0xa7, 0x2e, 0xee, 0x31, 0x47, 0x74, 0x58, 0x88, 0xd9, - 0xdf, 0x96, 0x54, 0x35, 0xf1, 0x96, 0x45, 0x38, 0xe2, 0x89, 0x7c, 0x98, 0x08, 0xe8, 0x69, 0x39, - 0x58, 0x2a, 0x64, 0x61, 0x95, 0x0b, 0xee, 0xec, 0x00, 0xae, 0xb9, 0x4a, 0x18, 0x07, 0x2c, 0x38, - 0x99, 0xdf, 0xc8, 0xc1, 0xb4, 0x8e, 0x38, 0x80, 0x8a, 0xf8, 0x84, 0x01, 0xd3, 0x35, 0xcf, 0x0d, - 0x7d, 0xcf, 0xe1, 0x6e, 0x00, 0xbe, 0x40, 0x46, 0x7c, 0xb4, 0xc7, 0x48, 0xad, 0x91, 0xd0, 0xb2, - 0x1d, 0xcd, 0xa3, 0xa0, 0xb1, 0xc1, 0x31, 0xa6, 0xe8, 0x33, 0x06, 0xcc, 0x45, 0x01, 0x1e, 0x91, - 0x3f, 0x22, 0xd3, 0x86, 0x28, 0x8d, 0x7b, 0x39, 0xce, 0x09, 0x27, 0x59, 0x9b, 0x3b, 0x30, 0x9f, - 0x9c, 0x6d, 0x3a, 0x94, 0x2d, 0x4b, 0xac, 0xf5, 0x7c, 0x34, 0x94, 0x5b, 0x56, 0x10, 0x60, 0x06, - 0x41, 0x6f, 0x81, 0x42, 0xd3, 0xf2, 0x1b, 0xb6, 0x6b, 0x39, 0x6c, 0x14, 0xf3, 0x9a, 0x42, 0x12, - 0xe5, 0x58, 0x61, 0x98, 0x3f, 0x18, 0x83, 0xe2, 0x26, 0xb1, 0x82, 0xb6, 0x4f, 0x98, 0xc3, 0xf0, - 0xc4, 0x2d, 0xf2, 0xd8, 0x2b, 0xb8, 0x7c, 0x76, 0xaf, 0xe0, 0xd0, 0x2b, 0x00, 0xbb, 0xb6, 0x6b, - 0x07, 0x7b, 0xc7, 0x7c, 0x5f, 0xc7, 0x6e, 0x9e, 0xae, 0x28, 0x0a, 0x58, 0xa3, 0x16, 0xb9, 0xf7, - 0xc7, 0x0f, 0x79, 0x60, 0xfb, 0x49, 0x43, 0xdb, 0x3c, 0x26, 0xb2, 0xb8, 0xce, 0xd4, 0x26, 0x66, - 0x45, 0x6e, 0x26, 0x97, 0xdd, 0xd0, 0x3f, 0x38, 0x74, 0x8f, 0xd9, 0x86, 0x82, 0x4f, 0x82, 0x76, - 0x93, 0x9e, 0x2d, 0x26, 0x87, 0x1e, 0x06, 0x16, 0x0d, 0x81, 0x45, 0x7d, 0xac, 0x28, 0x2d, 0xbf, - 0x00, 0x33, 0xb1, 0x26, 0xa0, 0x79, 0xc8, 0xdf, 0x23, 0x07, 0x5c, 0x4e, 0x30, 0xfd, 0x89, 0x16, - 0x63, 0x97, 0x20, 0x62, 0x58, 0xde, 0x99, 0x7b, 0xde, 0x30, 0x7f, 0x34, 0x01, 0xe2, 0xc2, 0x6c, - 0x00, 0x5d, 0xa0, 0xfb, 0xc9, 0x73, 0xc7, 0xf0, 0x93, 0x5f, 0x83, 0x69, 0xdb, 0xb5, 0x43, 0xdb, - 0x72, 0xd8, 0x01, 0x54, 0xec, 0x55, 0x4f, 0xcb, 0xf5, 0xbf, 0xae, 0xc1, 0x52, 0xe8, 0xc4, 0xea, - 0xa2, 0x9b, 0x30, 0xce, 0x94, 0xb9, 0x90, 0xa7, 0xe1, 0x6f, 0xf5, 0xd8, 0x85, 0x2e, 0x8f, 0x6c, - 0xe7, 0x94, 0x98, 0x81, 0xcd, 0x9f, 0x3c, 0xaa, 0x73, 0x93, 0x10, 0xab, 0xc8, 0xc0, 0x4e, 0xc0, - 0x71, 0x4f, 0x0d, 0x4a, 0x65, 0xd7, 0xb2, 0x9d, 0xb6, 0x4f, 0x22, 0x2a, 0x13, 0x71, 0x2a, 0x57, - 0x12, 0x70, 0xdc, 0x53, 0x03, 0xed, 0xc2, 0xb4, 0x28, 0xe3, 0xf1, 0x0d, 0x93, 0xc7, 0xec, 0x25, - 0x8b, 0x63, 0xb9, 0xa2, 0x51, 0xc2, 0x31, 0xba, 0xa8, 0x0d, 0x0b, 0xb6, 0x5b, 0xf3, 0xdc, 0x9a, - 0xd3, 0x0e, 0xec, 0x7d, 0x12, 0x85, 0x95, 0x1f, 0x87, 0xd9, 0xe9, 0x6e, 0xa7, 0xb4, 0xb0, 0x9e, - 0x24, 0x87, 0x7b, 0x39, 0xa0, 0x8f, 0x1b, 0x70, 0xba, 0xe6, 0xb9, 0x01, 0x7b, 0x58, 0xb5, 0x4f, - 0x2e, 0xfb, 0xbe, 0xe7, 0x73, 0xde, 0x53, 0xc7, 0xe4, 0xcd, 0xfc, 0x1e, 0xab, 0x69, 0x24, 0x71, - 0x3a, 0x27, 0xf4, 0x1a, 0x14, 0x5a, 0xbe, 0xb7, 0x6f, 0xd7, 0x89, 0x2f, 0x62, 0x65, 0x36, 0xb2, - 0x78, 0xd3, 0xb8, 0x25, 0x68, 0x46, 0x9a, 0x40, 0x96, 0x60, 0xc5, 0xcf, 0xfc, 0xbd, 0x02, 0xcc, - 0xc6, 0xd1, 0xd1, 0x47, 0x00, 0x5a, 0xbe, 0xd7, 0x24, 0xe1, 0x1e, 0x51, 0xe1, 0xc1, 0xd7, 0x47, - 0x7d, 0x4f, 0x28, 0xe9, 0xc9, 0x3b, 0x72, 0xaa, 0x49, 0xa3, 0x52, 0xac, 0x71, 
0x44, 0x3e, 0x4c, - 0xde, 0xe3, 0x7b, 0x9a, 0xd8, 0xe2, 0x5f, 0xce, 0xc4, 0x20, 0x11, 0x9c, 0x59, 0x5c, 0xab, 0x28, - 0xc2, 0x92, 0x11, 0xda, 0x81, 0xfc, 0x7d, 0xb2, 0x93, 0xcd, 0xcb, 0xb7, 0x3b, 0x44, 0x1c, 0x15, - 0x2a, 0x93, 0xdd, 0x4e, 0x29, 0x7f, 0x87, 0xec, 0x60, 0x4a, 0x9c, 0xf6, 0xab, 0xce, 0x6f, 0xfb, - 0x84, 0xaa, 0x18, 0xb1, 0x5f, 0xb1, 0xab, 0x43, 0xde, 0x2f, 0x51, 0x84, 0x25, 0x23, 0xf4, 0x1a, - 0x4c, 0xdd, 0xb7, 0xf6, 0xc9, 0xae, 0xef, 0xb9, 0xa1, 0x08, 0xcc, 0x18, 0x31, 0x02, 0xf5, 0x8e, - 0x24, 0x27, 0xf8, 0xb2, 0xdd, 0x56, 0x15, 0xe2, 0x88, 0x1d, 0xda, 0x87, 0x82, 0x4b, 0xee, 0x63, - 0xe2, 0xd8, 0x35, 0x11, 0xfc, 0x37, 0xa2, 0x58, 0x5f, 0x17, 0xd4, 0x04, 0x67, 0xb6, 0x0d, 0xc9, - 0x32, 0xac, 0x78, 0xd1, 0xb9, 0xbc, 0xeb, 0xed, 0x08, 0x45, 0x35, 0xe2, 0x5c, 0xaa, 0x63, 0x1f, - 0x9f, 0xcb, 0x6b, 0xde, 0x0e, 0xa6, 0xc4, 0xe9, 0x1a, 0xa9, 0xa9, 0xa8, 0x00, 0xa1, 0xa6, 0xae, - 0x67, 0x1b, 0x0d, 0xc1, 0xd7, 0x48, 0x54, 0x8a, 0x35, 0x8e, 0x74, 0x6c, 0x1b, 0xc2, 0xcb, 0x24, - 0x14, 0xd5, 0x88, 0x63, 0x1b, 0xf7, 0x59, 0xf1, 0xb1, 0x95, 0x65, 0x58, 0xf1, 0x32, 0xff, 0x78, - 0x0c, 0xa6, 0xf5, 0x0c, 0x06, 0x03, 0xec, 0xd5, 0xca, 0x5c, 0xcc, 0x0d, 0x63, 0x2e, 0x52, 0x6b, - 0xbf, 0x19, 0xd9, 0x36, 0xf2, 0xc0, 0xbf, 0x9e, 0x99, 0xb5, 0x14, 0x59, 0xfb, 0x5a, 0x61, 0x80, - 0x63, 0x4c, 0x87, 0xb8, 0x22, 0xa5, 0xf6, 0x1f, 0x37, 0x03, 0xf8, 0x13, 0x31, 0x65, 0xff, 0xc5, - 0x36, 0xf6, 0x4b, 0x00, 0x51, 0x2e, 0x03, 0xe1, 0x27, 0x57, 0x3e, 0x52, 0x2d, 0xc7, 0x82, 0x86, - 0x85, 0x9e, 0x86, 0x09, 0xba, 0x51, 0x92, 0xba, 0x78, 0xbb, 0xa5, 0x8e, 0x54, 0x57, 0x58, 0x29, - 0x16, 0x50, 0xf4, 0x3c, 0xb5, 0x69, 0xa2, 0xed, 0x4d, 0x3c, 0xc9, 0x5a, 0x8c, 0x6c, 0x9a, 0x08, - 0x86, 0x63, 0x98, 0xb4, 0xe9, 0x84, 0xee, 0x46, 0x4c, 0x92, 0xb4, 0xa6, 0xb3, 0x2d, 0x0a, 0x73, - 0x18, 0x3b, 0xe2, 0x27, 0x76, 0x2f, 0xb6, 0x59, 0x8d, 0x6b, 0x47, 0xfc, 0x04, 0x1c, 0xf7, 0xd4, - 0x30, 0x3f, 0x00, 0xb3, 0xf1, 0x55, 0x4c, 0x87, 0xb8, 0xe5, 0x7b, 0xbb, 0xb6, 0x43, 0x92, 0xce, - 0x89, 0x2d, 0x5e, 0x8c, 0x25, 0x7c, 0x30, 0xef, 0xe8, 0x5f, 0xe6, 0xe1, 0xd4, 0xf5, 0x86, 0xed, - 0x3e, 0x48, 0xb8, 0x15, 0xd3, 0x52, 0x24, 0x19, 0xc3, 0xa6, 0x48, 0x8a, 0x82, 0xcf, 0x45, 0xc2, - 0xa7, 0xf4, 0xe0, 0x73, 0x99, 0x0d, 0x2a, 0x8e, 0x8b, 0xbe, 0x6b, 0xc0, 0x93, 0x56, 0x9d, 0xdb, - 0x55, 0x96, 0x23, 0x4a, 0x23, 0xa6, 0x52, 0xc6, 0x83, 0x11, 0xb5, 0x64, 0x6f, 0xe7, 0x57, 0xca, - 0x87, 0x70, 0xe5, 0xa7, 0x85, 0x37, 0x89, 0x1e, 0x3c, 0x79, 0x18, 0x2a, 0x3e, 0xb4, 0xf9, 0xcb, - 0x37, 0xe0, 0x8d, 0x47, 0x32, 0x1a, 0xea, 0x4c, 0xf0, 0x09, 0x03, 0xa6, 0xb8, 0xd7, 0x0c, 0x93, - 0x5d, 0xba, 0x78, 0xac, 0x96, 0x7d, 0x9b, 0xf8, 0x81, 0xcc, 0x73, 0xa0, 0x85, 0x7a, 0x95, 0xb7, - 0xd6, 0x05, 0x04, 0x6b, 0x58, 0x54, 0x3d, 0xdd, 0xb3, 0xdd, 0xba, 0x98, 0x26, 0xa5, 0x9e, 0x5e, - 0xb6, 0xdd, 0x3a, 0x66, 0x10, 0xa5, 0xc0, 0xf2, 0xfd, 0x14, 0x98, 0xf9, 0xdb, 0x06, 0xcc, 0xb2, - 0xb7, 0x25, 0x91, 0x51, 0xfc, 0x9c, 0xba, 0x11, 0xe6, 0xcd, 0x38, 0x1b, 0xbf, 0x11, 0x7e, 0xd8, - 0x29, 0x15, 0xf9, 0x6b, 0x94, 0xf8, 0x05, 0xf1, 0x7b, 0xc5, 0xc1, 0x96, 0xdd, 0x5b, 0xe7, 0x86, - 0x3e, 0x77, 0x29, 0x37, 0x4e, 0x55, 0x12, 0xc1, 0x11, 0x3d, 0xf3, 0x0f, 0xf2, 0x70, 0x2a, 0x25, - 0x48, 0x9a, 0x9e, 0x39, 0x27, 0x58, 0x9c, 0xa8, 0xbc, 0x75, 0x7d, 0x35, 0xf3, 0x40, 0xec, 0x15, - 0x16, 0x8e, 0x2a, 0x24, 0x49, 0xe9, 0x27, 0x5e, 0x88, 0x05, 0x73, 0xf4, 0xeb, 0x06, 0x14, 0x2d, - 0x4d, 0xd8, 0xf9, 0x45, 0xf4, 0x4e, 0xf6, 0x8d, 0xe9, 0x91, 0x6d, 0x2d, 0x80, 0x26, 0x12, 0x65, - 0xbd, 0x2d, 0xcb, 0xef, 0x80, 0xa2, 0xd6, 0x85, 0x61, 0x64, 0x74, 0xf9, 0x45, 0x98, 0x1f, 0x49, - 0xc6, 
0xdf, 0x03, 0xc3, 0x26, 0xce, 0xa0, 0x3b, 0xc2, 0x7d, 0xfd, 0xc9, 0x95, 0x1a, 0x71, 0xf1, - 0xe6, 0x4a, 0x40, 0xcd, 0x1d, 0x98, 0x4f, 0x1a, 0xde, 0x99, 0x5f, 0x46, 0xbd, 0x0d, 0x86, 0x4c, - 0x75, 0x61, 0xfe, 0x55, 0x0e, 0x26, 0xc5, 0x4b, 0x8b, 0x47, 0x10, 0x7b, 0x76, 0x2f, 0xe6, 0x4d, - 0x5f, 0xcf, 0xe4, 0x81, 0x48, 0xdf, 0xc0, 0xb3, 0x20, 0x11, 0x78, 0xf6, 0x72, 0x36, 0xec, 0x0e, - 0x8f, 0x3a, 0xfb, 0x5c, 0x0e, 0xe6, 0x12, 0x2f, 0x57, 0xd0, 0xa7, 0x8c, 0xde, 0x60, 0x8b, 0x5b, - 0x99, 0x3e, 0x8e, 0x51, 0x91, 0x8d, 0x87, 0xc7, 0x5d, 0x04, 0xb1, 0xe4, 0x39, 0x37, 0x33, 0x4b, - 0xb0, 0x76, 0x68, 0x1e, 0x9d, 0x7f, 0x36, 0xe0, 0xf1, 0xbe, 0x6f, 0x79, 0xd8, 0x23, 0x65, 0x3f, - 0x0e, 0x15, 0xb2, 0x97, 0xf1, 0xdb, 0x3c, 0xe5, 0xc5, 0x4d, 0xbe, 0x2b, 0x4d, 0xb2, 0x47, 0xcf, - 0xc2, 0x34, 0xd3, 0xe3, 0x74, 0xf9, 0x84, 0xa4, 0x25, 0x32, 0x63, 0x31, 0x8f, 0x49, 0x55, 0x2b, - 0xc7, 0x31, 0x2c, 0xf3, 0x2b, 0x06, 0x2c, 0xf5, 0x7b, 0xb2, 0x3a, 0x80, 0x5d, 0xfe, 0x73, 0x89, - 0x38, 0xb0, 0x52, 0x4f, 0x1c, 0x58, 0xc2, 0x32, 0x97, 0x21, 0x5f, 0x9a, 0x51, 0x9c, 0x3f, 0x22, - 0xcc, 0xe9, 0xb3, 0x06, 0x9c, 0xe9, 0x23, 0x38, 0x3d, 0xf1, 0x80, 0xc6, 0xb1, 0xe3, 0x01, 0x73, - 0x83, 0xc6, 0x03, 0x9a, 0x7f, 0x93, 0x87, 0x79, 0xd1, 0x9e, 0x68, 0x33, 0x7f, 0x3e, 0x16, 0x4d, - 0xf7, 0xa6, 0x44, 0x34, 0xdd, 0x62, 0x12, 0xff, 0xff, 0x43, 0xe9, 0x7e, 0xb2, 0x42, 0xe9, 0x7e, - 0x9c, 0x83, 0xd3, 0xa9, 0x2f, 0x73, 0xd1, 0xa7, 0x53, 0xb4, 0xe0, 0x9d, 0x8c, 0x9f, 0x00, 0x0f, - 0xa8, 0x07, 0x47, 0x8d, 0x3f, 0xfb, 0xa2, 0x1e, 0xf7, 0xc5, 0x8f, 0x09, 0xbb, 0x27, 0xf0, 0x98, - 0x79, 0xc8, 0x10, 0x30, 0xf3, 0x57, 0xf2, 0x70, 0x61, 0x50, 0x42, 0x3f, 0xa1, 0x21, 0xc2, 0x41, - 0x2c, 0x44, 0xf8, 0xd1, 0xec, 0x50, 0x27, 0x13, 0x2d, 0xfc, 0xd5, 0xbc, 0xda, 0xf6, 0x7a, 0xe5, - 0x73, 0xa0, 0x4b, 0x95, 0x49, 0x6a, 0xc5, 0xc8, 0x64, 0x58, 0x91, 0x2a, 0x9c, 0xac, 0xf2, 0xe2, - 0x87, 0x9d, 0xd2, 0x82, 0xc8, 0xb9, 0x53, 0x25, 0xa1, 0x28, 0xc4, 0xb2, 0x12, 0xba, 0x00, 0x05, - 0x9f, 0x43, 0x65, 0x50, 0xa4, 0xb8, 0x28, 0xe2, 0x65, 0x58, 0x41, 0xd1, 0x47, 0x35, 0xb3, 0x6f, - 0xec, 0xa4, 0x1e, 0x87, 0x1e, 0x76, 0xff, 0xf5, 0x2a, 0x14, 0x02, 0x99, 0x29, 0x8b, 0x7b, 0x45, - 0x9f, 0x19, 0x30, 0xd6, 0x96, 0x9e, 0x12, 0x64, 0xda, 0x2c, 0xde, 0x3f, 0x95, 0x54, 0x4b, 0x91, - 0x44, 0xa6, 0x32, 0xd0, 0xb9, 0x8b, 0x07, 0x52, 0x8c, 0xf3, 0xd7, 0x0d, 0x28, 0x8a, 0xd9, 0x7a, - 0x04, 0xe1, 0xbf, 0x77, 0xe3, 0xe1, 0xbf, 0x97, 0x33, 0xd1, 0x1d, 0x7d, 0x62, 0x7f, 0xef, 0xc2, - 0xb4, 0x9e, 0x9c, 0x01, 0xbd, 0xa2, 0xe9, 0x3e, 0x63, 0x94, 0x47, 0xe0, 0x52, 0x3b, 0x46, 0x7a, - 0xd1, 0xfc, 0x72, 0x41, 0x8d, 0x22, 0x0b, 0x32, 0xd6, 0x65, 0xd0, 0x38, 0x54, 0x06, 0x75, 0x11, - 0xc8, 0x65, 0x2f, 0x02, 0x37, 0xa1, 0x20, 0x15, 0x94, 0xd8, 0xc6, 0x9f, 0xd2, 0x23, 0x79, 0xa8, - 0x2d, 0x40, 0x89, 0x69, 0x82, 0xcb, 0x4e, 0x15, 0x6a, 0x0e, 0x95, 0xe2, 0x54, 0x64, 0xd0, 0x6b, - 0x50, 0xbc, 0xef, 0xf9, 0xf7, 0x1c, 0xcf, 0x62, 0x09, 0xeb, 0x20, 0x0b, 0xff, 0xb6, 0xf2, 0xae, - 0xf0, 0x80, 0xd1, 0x3b, 0x11, 0x7d, 0xac, 0x33, 0x43, 0x65, 0x98, 0x6b, 0xda, 0x2e, 0x26, 0x56, - 0x5d, 0x45, 0xf9, 0x8e, 0xf1, 0x24, 0x5d, 0xd2, 0xc8, 0xdd, 0x8c, 0x83, 0x71, 0x12, 0x1f, 0x7d, - 0x08, 0x0a, 0x81, 0x48, 0x00, 0x91, 0xcd, 0x4d, 0x84, 0x3a, 0x1e, 0x71, 0xa2, 0xd1, 0xd8, 0xc9, - 0x12, 0xac, 0x18, 0xa2, 0x0d, 0x58, 0xf4, 0xc5, 0x13, 0xeb, 0x58, 0x0e, 0x5a, 0xbe, 0x3e, 0x59, - 0x2e, 0x28, 0x9c, 0x02, 0xc7, 0xa9, 0xb5, 0xa8, 0x15, 0xc3, 0xb2, 0x8c, 0x70, 0x97, 0x6c, 0x41, - 0x7b, 0x97, 0xc9, 0x4a, 0xb1, 0x80, 0x1e, 0x16, 0x35, 0x5e, 0x18, 0x21, 0x6a, 0xbc, 0x0a, 0xa7, - 0x93, 0x20, 0xf6, 0x10, 0x9c, 
0xbd, 0x3d, 0xd7, 0x76, 0x8f, 0xad, 0x34, 0x24, 0x9c, 0x5e, 0x17, - 0xdd, 0x81, 0x29, 0x9f, 0xb0, 0xf3, 0x45, 0x59, 0xde, 0x7d, 0x0e, 0x1d, 0x74, 0x81, 0x25, 0x01, - 0x1c, 0xd1, 0xa2, 0xf3, 0x6e, 0xc5, 0xf3, 0x54, 0xdd, 0xcc, 0x30, 0x5d, 0xba, 0x98, 0xfb, 0x3e, - 0x09, 0x1a, 0xcc, 0xff, 0x9a, 0x81, 0x99, 0xd8, 0x31, 0x1a, 0x3d, 0x05, 0xe3, 0xec, 0x65, 0x3c, - 0x53, 0x0f, 0x85, 0x48, 0x85, 0xf1, 0xc1, 0xe1, 0x30, 0xf4, 0x39, 0x03, 0xe6, 0x5a, 0x31, 0x97, - 0x9f, 0xd4, 0x9c, 0x23, 0x5e, 0xb3, 0xc4, 0xfd, 0x88, 0x5a, 0x86, 0xc7, 0x38, 0x33, 0x9c, 0xe4, - 0x4e, 0x17, 0xa0, 0x88, 0x43, 0x72, 0x88, 0xcf, 0xb0, 0x85, 0x8d, 0xa3, 0x48, 0xac, 0xc6, 0xc1, - 0x38, 0x89, 0x4f, 0x67, 0x98, 0xf5, 0x6e, 0x94, 0xe4, 0xd2, 0x65, 0x49, 0x00, 0x47, 0xb4, 0xd0, - 0x8b, 0x30, 0x2b, 0xd2, 0x13, 0x6d, 0x79, 0xf5, 0xab, 0x56, 0xb0, 0x27, 0x8c, 0x7b, 0x75, 0x18, - 0x59, 0x8d, 0x41, 0x71, 0x02, 0x9b, 0xf5, 0x2d, 0xca, 0x01, 0xc5, 0x08, 0x4c, 0xc4, 0x13, 0x60, - 0xae, 0xc6, 0xc1, 0x38, 0x89, 0x8f, 0xde, 0xa2, 0xe9, 0x7d, 0x7e, 0x4d, 0xa2, 0xb4, 0x41, 0x8a, - 0xee, 0x2f, 0xc3, 0x5c, 0x9b, 0x9d, 0x85, 0xea, 0x12, 0x28, 0xd6, 0xa3, 0x62, 0x78, 0x2b, 0x0e, - 0xc6, 0x49, 0x7c, 0xf4, 0x02, 0xcc, 0xf8, 0x54, 0xbb, 0x29, 0x02, 0xfc, 0xee, 0x44, 0x5d, 0x04, - 0x60, 0x1d, 0x88, 0xe3, 0xb8, 0xe8, 0x25, 0x58, 0x88, 0x72, 0xa6, 0x48, 0x02, 0xfc, 0x32, 0x45, - 0x25, 0x21, 0x28, 0x27, 0x11, 0x70, 0x6f, 0x1d, 0xf4, 0xf3, 0x30, 0xaf, 0x8d, 0xc4, 0xba, 0x5b, - 0x27, 0x0f, 0x44, 0x5e, 0x8b, 0x45, 0x76, 0x21, 0x93, 0x80, 0xe1, 0x1e, 0x6c, 0xf4, 0x4e, 0x98, - 0xad, 0x79, 0x8e, 0xc3, 0x74, 0x1c, 0x4f, 0xbe, 0xc8, 0x13, 0x58, 0xf0, 0x54, 0x1f, 0x31, 0x08, - 0x4e, 0x60, 0xa2, 0x6b, 0x80, 0xbc, 0x9d, 0x80, 0xf8, 0xfb, 0xa4, 0xfe, 0x12, 0xff, 0x7e, 0x06, - 0xdd, 0xe2, 0x67, 0xe2, 0x51, 0x90, 0x37, 0x7a, 0x30, 0x70, 0x4a, 0x2d, 0xb4, 0x03, 0xcb, 0x72, - 0xbf, 0xe9, 0xad, 0xb1, 0xb4, 0x14, 0x3b, 0x32, 0x2d, 0xdf, 0xe9, 0x8b, 0x89, 0x0f, 0xa1, 0xc2, - 0xf2, 0x24, 0x68, 0xaf, 0x1e, 0x66, 0xb3, 0x48, 0x67, 0x9d, 0xf4, 0x0e, 0x1c, 0xf9, 0xe4, 0xc1, - 0x87, 0x09, 0x1e, 0xf8, 0xba, 0x34, 0x97, 0x45, 0xae, 0x18, 0x3d, 0xd7, 0x5b, 0xb4, 0x0f, 0xf1, - 0x52, 0x2c, 0x38, 0xa1, 0x8f, 0xc0, 0xd4, 0x8e, 0x4c, 0xfc, 0xb9, 0x34, 0x9f, 0xc5, 0xde, 0x9b, - 0xc8, 0x61, 0x1b, 0x9d, 0x7e, 0x15, 0x00, 0x47, 0x2c, 0xd1, 0xd3, 0x50, 0xbc, 0xba, 0x55, 0x56, - 0x92, 0xbe, 0xc0, 0x24, 0x6c, 0x8c, 0x56, 0xc1, 0x3a, 0x80, 0xae, 0x62, 0x65, 0x93, 0x21, 0x36, - 0xe5, 0xd1, 0x9e, 0xde, 0x6b, 0x62, 0x51, 0x6c, 0x76, 0xbf, 0x86, 0xab, 0x4b, 0xa7, 0x12, 0xd8, - 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x14, 0xc5, 0x9e, 0xc4, 0xf4, 0xdf, 0xe2, 0xf1, 0x5e, 0xd4, - 0xe0, 0x88, 0x04, 0xd6, 0xe9, 0xa1, 0xe7, 0xa0, 0xd8, 0x62, 0xf9, 0x10, 0xc9, 0x95, 0xb6, 0xe3, - 0x2c, 0x9d, 0x66, 0xba, 0x59, 0x5d, 0x3c, 0x6c, 0x45, 0x20, 0xac, 0xe3, 0xa1, 0x67, 0xe4, 0xe5, - 0xf8, 0x1b, 0x62, 0xf7, 0x48, 0xea, 0x72, 0x5c, 0x59, 0xd2, 0x7d, 0x42, 0x29, 0xcf, 0x1c, 0xe1, - 0x1c, 0xf9, 0x78, 0xe4, 0x1c, 0x56, 0xd9, 0xb7, 0x3e, 0xac, 0x4b, 0x83, 0x91, 0xc5, 0x57, 0x3e, - 0x7a, 0xb2, 0xca, 0xf2, 0xcd, 0x22, 0x55, 0x16, 0x5a, 0x4a, 0xfe, 0x33, 0x79, 0x0e, 0x1e, 0xcf, - 0x2c, 0xc6, 0x4f, 0x5a, 0x71, 0xe9, 0x37, 0xbf, 0x37, 0xa6, 0x1c, 0x44, 0x89, 0x3b, 0x61, 0x1f, - 0xc6, 0xed, 0x20, 0xb4, 0xbd, 0x0c, 0x1f, 0x98, 0x24, 0x52, 0x72, 0xb1, 0xd8, 0x3e, 0x06, 0xc0, - 0x9c, 0x15, 0xe5, 0xe9, 0x36, 0x6c, 0xf7, 0x81, 0xe8, 0xfe, 0xcd, 0xcc, 0x2f, 0x7b, 0x39, 0x4f, - 0x06, 0xc0, 0x9c, 0x15, 0xba, 0x0b, 0x79, 0xcb, 0xd9, 0xc9, 0xe8, 0x8b, 0x2e, 0xc9, 0xaf, 0x22, - 0xf1, 0xc8, 0x98, 0xf2, 0x46, 0x05, 0x53, 0x26, 0x94, 
0x57, 0xd0, 0xb4, 0x85, 0x7d, 0x31, 0x22, - 0xaf, 0xea, 0xe6, 0x7a, 0x1a, 0xaf, 0xea, 0xe6, 0x3a, 0xa6, 0x4c, 0xd0, 0xa7, 0x0c, 0x00, 0x4b, - 0x7d, 0xb1, 0x28, 0x9b, 0x14, 0xcc, 0xfd, 0xbe, 0x80, 0xc4, 0xc3, 0x71, 0x22, 0x28, 0xd6, 0x38, - 0x9b, 0x9f, 0x37, 0x60, 0xa1, 0xa7, 0xb1, 0xc9, 0x8f, 0x39, 0x19, 0x83, 0x7f, 0xcc, 0x49, 0x24, - 0x6d, 0xab, 0xb6, 0x1c, 0x3b, 0xf5, 0x91, 0xd6, 0x76, 0x02, 0x8e, 0x7b, 0x6a, 0x98, 0x7f, 0x6a, - 0x40, 0x51, 0x8b, 0x29, 0xa7, 0x76, 0x2f, 0x8b, 0xbd, 0x17, 0xcd, 0x88, 0xf2, 0xd5, 0x31, 0x9f, - 0x18, 0x87, 0x71, 0xf7, 0x6c, 0x43, 0x4b, 0x4b, 0x14, 0xb9, 0x67, 0x69, 0x29, 0x16, 0x50, 0x9e, - 0x70, 0x86, 0xb4, 0x98, 0x44, 0xe5, 0xf5, 0x84, 0x33, 0xa4, 0x85, 0x19, 0x84, 0xb1, 0xa3, 0xca, - 0x51, 0xc4, 0xcd, 0x68, 0xe9, 0xf1, 0x2c, 0x6a, 0x66, 0x33, 0x18, 0x3a, 0x0b, 0x79, 0xe2, 0xd6, - 0x85, 0xb5, 0x58, 0x14, 0x28, 0xf9, 0xcb, 0x6e, 0x1d, 0xd3, 0x72, 0xf3, 0x06, 0x4c, 0x57, 0x49, - 0xcd, 0x27, 0xe1, 0xcb, 0xe4, 0x60, 0x30, 0x07, 0xe2, 0x59, 0x7e, 0xf1, 0x9a, 0x8b, 0x13, 0xa4, - 0xd5, 0x69, 0xb9, 0xf9, 0xbb, 0x06, 0x24, 0xb2, 0x15, 0x6a, 0xae, 0x1a, 0xa3, 0x9f, 0xab, 0x26, - 0xe6, 0x54, 0xc8, 0x1d, 0xea, 0x54, 0xb8, 0x06, 0xa8, 0x69, 0x85, 0xb5, 0xbd, 0x58, 0x2e, 0x4d, - 0x61, 0xa8, 0x47, 0x2f, 0x58, 0x7a, 0x30, 0x70, 0x4a, 0x2d, 0xf3, 0x36, 0x14, 0xe4, 0x6b, 0x24, - 0x16, 0xd2, 0x2f, 0xcf, 0x2c, 0x7a, 0x48, 0x3f, 0x3d, 0xb2, 0x30, 0x08, 0x6d, 0x63, 0xe0, 0xda, - 0x57, 0xbd, 0x20, 0x94, 0x4f, 0xa8, 0xb8, 0x67, 0xe2, 0xfa, 0x3a, 0x2b, 0xc3, 0x0a, 0x6a, 0x2e, - 0xc0, 0x9c, 0x72, 0x39, 0x70, 0x89, 0x33, 0xbf, 0x91, 0x87, 0xe9, 0xd8, 0x47, 0x36, 0x8e, 0x1e, - 0xe9, 0xc1, 0xc7, 0x24, 0xc5, 0x75, 0x90, 0x1f, 0xd2, 0x75, 0xa0, 0xfb, 0x6a, 0xc6, 0x4e, 0xd6, - 0x57, 0x33, 0x9e, 0x8d, 0xaf, 0x26, 0x84, 0x49, 0xf1, 0x59, 0x37, 0x11, 0xfa, 0xb8, 0x99, 0xd1, - 0x53, 0x62, 0xf1, 0x26, 0x8f, 0x45, 0x7b, 0x4a, 0xed, 0x21, 0x59, 0x99, 0x5f, 0x1f, 0x87, 0xd9, - 0xf8, 0xe3, 0xe2, 0x01, 0x66, 0xf2, 0x2d, 0x3d, 0x33, 0x39, 0xe4, 0xd1, 0x29, 0x3f, 0xea, 0xd1, - 0x69, 0x6c, 0xd4, 0xa3, 0xd3, 0xf8, 0x31, 0x8e, 0x4e, 0xbd, 0x07, 0x9f, 0x89, 0x81, 0x0f, 0x3e, - 0xef, 0x52, 0xf7, 0x7e, 0x93, 0x31, 0x47, 0x79, 0x74, 0xef, 0x87, 0xe2, 0xd3, 0xb0, 0xea, 0xd5, - 0x53, 0xef, 0x4f, 0x0b, 0x47, 0x04, 0x15, 0xfa, 0xa9, 0xd7, 0x74, 0xc3, 0x7b, 0x67, 0xde, 0x30, - 0xc4, 0x15, 0x5d, 0xf4, 0xe5, 0x42, 0xb6, 0xf3, 0x40, 0x7c, 0xd7, 0xaa, 0x46, 0x20, 0xac, 0xe3, - 0xb1, 0xaf, 0x58, 0xc4, 0xbf, 0xb1, 0xc1, 0x4e, 0xa2, 0xfa, 0x57, 0x2c, 0x12, 0xdf, 0xe4, 0x48, - 0xe2, 0x9b, 0x5f, 0xcb, 0xc3, 0x6c, 0x3c, 0x0b, 0x31, 0xba, 0xaf, 0xac, 0xc5, 0x4c, 0x0c, 0x55, - 0x4e, 0x56, 0x7b, 0x5e, 0xdb, 0xf7, 0xc8, 0xc4, 0x3f, 0xf1, 0xb7, 0xa3, 0xde, 0xfa, 0x9e, 0x1c, - 0x63, 0x71, 0x56, 0x11, 0xec, 0x58, 0xe2, 0xe2, 0x28, 0xca, 0x4e, 0xdc, 0xf5, 0x65, 0xce, 0x3d, - 0x8a, 0x9b, 0x53, 0xac, 0xb0, 0xc6, 0x96, 0xaa, 0xf7, 0x7d, 0xe2, 0xdb, 0xbb, 0xb6, 0xfa, 0x82, - 0x02, 0x53, 0x9e, 0xb7, 0x45, 0x19, 0x56, 0x50, 0xf3, 0x63, 0x39, 0x88, 0x3e, 0xee, 0xc2, 0x12, - 0xa2, 0x06, 0xda, 0x9e, 0x2d, 0xa6, 0xed, 0xda, 0xa8, 0x59, 0x87, 0x23, 0x8a, 0x22, 0x2c, 0x42, - 0x2b, 0xc1, 0x31, 0x8e, 0xff, 0x03, 0x1f, 0x75, 0xb1, 0x60, 0x2e, 0x11, 0x15, 0x9f, 0x79, 0x98, - 0xd5, 0x97, 0xf3, 0x30, 0xa5, 0xde, 0x15, 0xa0, 0x77, 0xb0, 0x5c, 0x86, 0x7b, 0x9e, 0xcc, 0x30, - 0xf9, 0x46, 0x2d, 0xe3, 0xe0, 0x9e, 0x57, 0x7f, 0xd8, 0x29, 0xcd, 0x29, 0x64, 0x5e, 0x84, 0x45, - 0x05, 0x6a, 0x21, 0xb5, 0x7d, 0x27, 0x69, 0x21, 0xdd, 0xc2, 0x1b, 0x98, 0x96, 0xa3, 0x07, 0x30, - 0xb9, 0x47, 0xac, 0x3a, 0xf1, 0xe5, 0x2d, 0xf3, 0x66, 0x46, 0x6f, 0x21, 0xae, 
0x32, 0xaa, 0xd1, - 0x30, 0xf0, 0xff, 0x01, 0x96, 0xec, 0xe8, 0x46, 0xb5, 0xe3, 0xd5, 0x0f, 0x92, 0x19, 0x0a, 0x2b, - 0x5e, 0xfd, 0x00, 0x33, 0x08, 0x7a, 0x11, 0x66, 0x43, 0xbb, 0x49, 0xe8, 0x51, 0x56, 0xfb, 0x1a, - 0x47, 0x3e, 0x72, 0x33, 0x6e, 0xc7, 0xa0, 0x38, 0x81, 0x4d, 0x37, 0xba, 0xbb, 0x81, 0xe7, 0xb2, - 0x4c, 0x0e, 0x13, 0x71, 0x7f, 0xc1, 0xb5, 0xea, 0x8d, 0xeb, 0x2c, 0x91, 0x83, 0xc2, 0xa0, 0xd8, - 0x36, 0x0b, 0x5e, 0xf6, 0x89, 0xf0, 0xf2, 0xcf, 0x47, 0x4f, 0xcc, 0x78, 0x39, 0x56, 0x18, 0xe6, - 0x2d, 0x98, 0x4b, 0x74, 0x55, 0xda, 0xa2, 0x46, 0xba, 0x2d, 0x3a, 0x58, 0x3a, 0xc0, 0x3f, 0x34, - 0x60, 0xa1, 0x67, 0xf1, 0x0e, 0x1a, 0xff, 0x97, 0xd4, 0xe4, 0xb9, 0xe3, 0x6b, 0xf2, 0xfc, 0x70, - 0x9a, 0xbc, 0xb2, 0xf2, 0xcd, 0xef, 0x9f, 0x7b, 0xec, 0x5b, 0xdf, 0x3f, 0xf7, 0xd8, 0xb7, 0xbf, - 0x7f, 0xee, 0xb1, 0x8f, 0x75, 0xcf, 0x19, 0xdf, 0xec, 0x9e, 0x33, 0xbe, 0xd5, 0x3d, 0x67, 0x7c, - 0xbb, 0x7b, 0xce, 0xf8, 0x5e, 0xf7, 0x9c, 0xf1, 0xf9, 0x1f, 0x9c, 0x7b, 0xec, 0x95, 0x82, 0x14, - 0x93, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xe9, 0xbc, 0xfd, 0x47, 0x79, 0x00, 0x00, -} - -func (m *ALBTrafficRouting) Marshal() (dAtA []byte, err error) { + // 7541 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x23, 0xd7, + 0x75, 0xb0, 0x87, 0x14, 0x25, 0xf2, 0xe8, 0xff, 0xae, 0x36, 0x2b, 0xcb, 0xde, 0xa5, 0x33, 0x0e, + 0xfc, 0x39, 0x5f, 0x13, 0x29, 0xf1, 0x4f, 0xeb, 0xc4, 0x86, 0x5b, 0x52, 0xda, 0xf5, 0x6a, 0x2d, + 0x69, 0xb5, 0x97, 0xda, 0xdd, 0xc4, 0x89, 0x93, 0x8c, 0xc8, 0x2b, 0x6a, 0x56, 0xe4, 0x0c, 0x33, + 0x33, 0xd4, 0xae, 0x1c, 0x23, 0xb1, 0x1b, 0xd8, 0x4d, 0x8b, 0x04, 0x71, 0x9b, 0x04, 0x45, 0x51, + 0xa4, 0x08, 0x0a, 0x03, 0xfd, 0x49, 0x9f, 0x82, 0x16, 0x7d, 0x09, 0xd0, 0xa2, 0xf9, 0x69, 0xfa, + 0xd0, 0x22, 0x29, 0xda, 0x26, 0x29, 0x10, 0xb6, 0x56, 0xfa, 0xd2, 0xa2, 0x45, 0x50, 0x20, 0x45, + 0x91, 0x7d, 0x2a, 0xee, 0xef, 0xdc, 0x19, 0x0e, 0xb5, 0xa4, 0x38, 0xda, 0x18, 0x6d, 0xde, 0xc8, + 0x7b, 0xce, 0x3d, 0xe7, 0xdc, 0xdf, 0x73, 0xee, 0xb9, 0xe7, 0x9e, 0x81, 0xb5, 0xba, 0x1d, 0xec, + 0xb6, 0xb7, 0x17, 0xab, 0x6e, 0x73, 0xc9, 0xf2, 0xea, 0x6e, 0xcb, 0x73, 0x6f, 0xb0, 0x1f, 0xef, + 0xf4, 0xdc, 0x46, 0xc3, 0x6d, 0x07, 0xfe, 0x52, 0x6b, 0xaf, 0xbe, 0x64, 0xb5, 0x6c, 0x7f, 0x49, + 0x95, 0xec, 0xbf, 0xdb, 0x6a, 0xb4, 0x76, 0xad, 0x77, 0x2f, 0xd5, 0x89, 0x43, 0x3c, 0x2b, 0x20, + 0xb5, 0xc5, 0x96, 0xe7, 0x06, 0x2e, 0x7a, 0x2a, 0xa4, 0xb6, 0x28, 0xa9, 0xb1, 0x1f, 0x1f, 0x96, + 0x75, 0x17, 0x5b, 0x7b, 0xf5, 0x45, 0x4a, 0x6d, 0x51, 0x95, 0x48, 0x6a, 0x0b, 0xef, 0xd4, 0x64, + 0xa9, 0xbb, 0x75, 0x77, 0x89, 0x11, 0xdd, 0x6e, 0xef, 0xb0, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x99, + 0x2d, 0x3c, 0xb8, 0xf7, 0x84, 0xbf, 0x68, 0xbb, 0x54, 0xb6, 0xa5, 0x6d, 0x2b, 0xa8, 0xee, 0x2e, + 0xed, 0x77, 0x49, 0xb4, 0x60, 0x6a, 0x48, 0x55, 0xd7, 0x23, 0x49, 0x38, 0x8f, 0x85, 0x38, 0x4d, + 0xab, 0xba, 0x6b, 0x3b, 0xc4, 0x3b, 0x08, 0x5b, 0xdd, 0x24, 0x81, 0x95, 0x54, 0x6b, 0xa9, 0x57, + 0x2d, 0xaf, 0xed, 0x04, 0x76, 0x93, 0x74, 0x55, 0xf8, 0xf9, 0x3b, 0x55, 0xf0, 0xab, 0xbb, 0xa4, + 0x69, 0x75, 0xd5, 0x7b, 0xb4, 0x57, 0xbd, 0x76, 0x60, 0x37, 0x96, 0x6c, 0x27, 0xf0, 0x03, 0x2f, + 0x5e, 0xc9, 0xfc, 0x7a, 0x16, 0x0a, 0xa5, 0xb5, 0x72, 0x25, 0xb0, 0x82, 0xb6, 0x8f, 0x5e, 0x35, + 0x60, 0xa2, 0xe1, 0x5a, 0xb5, 0xb2, 0xd5, 0xb0, 0x9c, 0x2a, 0xf1, 0xe6, 0x8d, 0x07, 0x8c, 0x87, + 0xc7, 0x1f, 0x59, 0x5b, 0x1c, 0x66, 0xbc, 0x16, 0x4b, 0x37, 0x7d, 0x4c, 0x7c, 0xb7, 0xed, 0x55, + 0x09, 0x26, 0x3b, 0xe5, 0xb9, 0x6f, 0x75, 0x8a, 0xf7, 0x1c, 0x76, 0x8a, 0x13, 0x6b, 
0x1a, 0x27, + 0x1c, 0xe1, 0x8b, 0xbe, 0x60, 0xc0, 0x6c, 0xd5, 0x72, 0x2c, 0xef, 0x60, 0xcb, 0xf2, 0xea, 0x24, + 0x78, 0xc6, 0x73, 0xdb, 0xad, 0xf9, 0xcc, 0x09, 0x48, 0x73, 0xaf, 0x90, 0x66, 0x76, 0x39, 0xce, + 0x0e, 0x77, 0x4b, 0xc0, 0xe4, 0xf2, 0x03, 0x6b, 0xbb, 0x41, 0x74, 0xb9, 0xb2, 0x27, 0x29, 0x57, + 0x25, 0xce, 0x0e, 0x77, 0x4b, 0x60, 0xbe, 0x92, 0x85, 0xd9, 0xd2, 0x5a, 0x79, 0xcb, 0xb3, 0x76, + 0x76, 0xec, 0x2a, 0x76, 0xdb, 0x81, 0xed, 0xd4, 0xd1, 0xdb, 0x61, 0xcc, 0x76, 0xea, 0x1e, 0xf1, + 0x7d, 0x36, 0x90, 0x85, 0xf2, 0xb4, 0x20, 0x3a, 0xb6, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0xc7, 0x61, + 0xdc, 0x27, 0xde, 0xbe, 0x5d, 0x25, 0x9b, 0xae, 0x17, 0xb0, 0x9e, 0xce, 0x95, 0x4f, 0x09, 0xf4, + 0xf1, 0x4a, 0x08, 0xc2, 0x3a, 0x1e, 0xad, 0xe6, 0xb9, 0x6e, 0x20, 0xe0, 0xac, 0x23, 0x0a, 0x61, + 0x35, 0x1c, 0x82, 0xb0, 0x8e, 0x87, 0x5e, 0x33, 0x60, 0xc6, 0x0f, 0xec, 0xea, 0x9e, 0xed, 0x10, + 0xdf, 0x5f, 0x76, 0x9d, 0x1d, 0xbb, 0x3e, 0x9f, 0x63, 0xbd, 0xb8, 0x31, 0x5c, 0x2f, 0x56, 0x62, + 0x54, 0xcb, 0x73, 0x87, 0x9d, 0xe2, 0x4c, 0xbc, 0x14, 0x77, 0x71, 0x47, 0x2b, 0x30, 0x63, 0x39, + 0x8e, 0x1b, 0x58, 0x81, 0xed, 0x3a, 0x9b, 0x1e, 0xd9, 0xb1, 0x6f, 0xcd, 0x8f, 0xb0, 0xe6, 0xcc, + 0x8b, 0xe6, 0xcc, 0x94, 0x62, 0x70, 0xdc, 0x55, 0xc3, 0x5c, 0x81, 0xf9, 0x52, 0x73, 0xdb, 0xf2, + 0x7d, 0xab, 0xe6, 0x7a, 0xb1, 0xd1, 0x78, 0x18, 0xf2, 0x4d, 0xab, 0xd5, 0xb2, 0x9d, 0x3a, 0x1d, + 0x8e, 0xec, 0xc3, 0x85, 0xf2, 0xc4, 0x61, 0xa7, 0x98, 0x5f, 0x17, 0x65, 0x58, 0x41, 0xcd, 0xef, + 0x67, 0x60, 0xbc, 0xe4, 0x58, 0x8d, 0x03, 0xdf, 0xf6, 0x71, 0xdb, 0x41, 0x1f, 0x81, 0x3c, 0xdd, + 0x5d, 0x6a, 0x56, 0x60, 0x89, 0x15, 0xf9, 0xae, 0x45, 0xbe, 0xd8, 0x17, 0xf5, 0xc5, 0x1e, 0xf6, + 0x0b, 0xc5, 0x5e, 0xdc, 0x7f, 0xf7, 0xe2, 0xe5, 0xed, 0x1b, 0xa4, 0x1a, 0xac, 0x93, 0xc0, 0x2a, + 0x23, 0xd1, 0x0a, 0x08, 0xcb, 0xb0, 0xa2, 0x8a, 0x5c, 0x18, 0xf1, 0x5b, 0xa4, 0x2a, 0x56, 0xd8, + 0xfa, 0x90, 0x33, 0x39, 0x14, 0xbd, 0xd2, 0x22, 0xd5, 0xf2, 0x84, 0x60, 0x3d, 0x42, 0xff, 0x61, + 0xc6, 0x08, 0xdd, 0x84, 0x51, 0x9f, 0xed, 0x39, 0x62, 0xf1, 0x5c, 0x4e, 0x8f, 0x25, 0x23, 0x5b, + 0x9e, 0x12, 0x4c, 0x47, 0xf9, 0x7f, 0x2c, 0xd8, 0x99, 0xff, 0x68, 0xc0, 0x29, 0x0d, 0xbb, 0xe4, + 0xd5, 0xdb, 0x4d, 0xe2, 0x04, 0xe8, 0x01, 0x18, 0x71, 0xac, 0x26, 0x11, 0x0b, 0x45, 0x89, 0xbc, + 0x61, 0x35, 0x09, 0x66, 0x10, 0xf4, 0x20, 0xe4, 0xf6, 0xad, 0x46, 0x9b, 0xb0, 0x4e, 0x2a, 0x94, + 0x27, 0x05, 0x4a, 0xee, 0x1a, 0x2d, 0xc4, 0x1c, 0x86, 0x5e, 0x84, 0x02, 0xfb, 0x71, 0xc1, 0x73, + 0x9b, 0x29, 0x35, 0x4d, 0x48, 0x78, 0x4d, 0x92, 0x2d, 0x4f, 0x1e, 0x76, 0x8a, 0x05, 0xf5, 0x17, + 0x87, 0x0c, 0xcd, 0x7f, 0x32, 0x60, 0x5a, 0x6b, 0xdc, 0x9a, 0xed, 0x07, 0xe8, 0x83, 0x5d, 0x93, + 0x67, 0xb1, 0xbf, 0xc9, 0x43, 0x6b, 0xb3, 0xa9, 0x33, 0x23, 0x5a, 0x9a, 0x97, 0x25, 0xda, 0xc4, + 0x71, 0x20, 0x67, 0x07, 0xa4, 0xe9, 0xcf, 0x67, 0x1e, 0xc8, 0x3e, 0x3c, 0xfe, 0xc8, 0x6a, 0x6a, + 0xc3, 0x18, 0xf6, 0xef, 0x2a, 0xa5, 0x8f, 0x39, 0x1b, 0xf3, 0x2b, 0x23, 0x91, 0x16, 0xd2, 0x19, + 0x85, 0x5c, 0x18, 0x6b, 0x92, 0xc0, 0xb3, 0xab, 0x7c, 0x5d, 0x8d, 0x3f, 0xb2, 0x32, 0x9c, 0x14, + 0xeb, 0x8c, 0x58, 0xb8, 0x59, 0xf2, 0xff, 0x3e, 0x96, 0x5c, 0xd0, 0x2e, 0x8c, 0x58, 0x5e, 0x5d, + 0xb6, 0xf9, 0x42, 0x3a, 0xe3, 0x1b, 0xce, 0xb9, 0x92, 0x57, 0xf7, 0x31, 0xe3, 0x80, 0x96, 0xa0, + 0x10, 0x10, 0xaf, 0x69, 0x3b, 0x56, 0xc0, 0x77, 0xd7, 0x7c, 0x79, 0x56, 0xa0, 0x15, 0xb6, 0x24, + 0x00, 0x87, 0x38, 0xa8, 0x01, 0xa3, 0x35, 0xef, 0x00, 0xb7, 0x9d, 0xf9, 0x91, 0x34, 0xba, 0x62, + 0x85, 0xd1, 0x0a, 0x17, 0x13, 0xff, 0x8f, 0x05, 0x0f, 0xf4, 0xba, 0x01, 0x73, 0x4d, 0x62, 0xf9, + 0x6d, 0x8f, 
0xd0, 0x26, 0x60, 0x12, 0x10, 0x87, 0xee, 0x86, 0xf3, 0x39, 0xc6, 0x1c, 0x0f, 0x3b, + 0x0e, 0xdd, 0x94, 0xcb, 0xf7, 0x0b, 0x51, 0xe6, 0x92, 0xa0, 0x38, 0x51, 0x1a, 0xf3, 0xfb, 0x23, + 0x30, 0xdb, 0xb5, 0x43, 0xa0, 0xc7, 0x20, 0xd7, 0xda, 0xb5, 0x7c, 0xb9, 0xe4, 0xcf, 0xc9, 0xf9, + 0xb6, 0x49, 0x0b, 0x6f, 0x77, 0x8a, 0x93, 0xb2, 0x0a, 0x2b, 0xc0, 0x1c, 0x99, 0xea, 0xd4, 0x26, + 0xf1, 0x7d, 0xab, 0x2e, 0xf7, 0x01, 0x6d, 0x9a, 0xb0, 0x62, 0x2c, 0xe1, 0xe8, 0x57, 0x0c, 0x98, + 0xe4, 0x53, 0x06, 0x13, 0xbf, 0xdd, 0x08, 0xe8, 0x5e, 0x47, 0xbb, 0xe5, 0x52, 0x1a, 0xd3, 0x93, + 0x93, 0x2c, 0x9f, 0x16, 0xdc, 0x27, 0xf5, 0x52, 0x1f, 0x47, 0xf9, 0xa2, 0xeb, 0x50, 0xf0, 0x03, + 0xcb, 0x0b, 0x48, 0xad, 0x14, 0x30, 0xad, 0x36, 0xfe, 0xc8, 0xff, 0xef, 0x6f, 0x13, 0xd8, 0xb2, + 0x9b, 0x84, 0x6f, 0x38, 0x15, 0x49, 0x00, 0x87, 0xb4, 0xd0, 0x8b, 0x00, 0x5e, 0xdb, 0xa9, 0xb4, + 0x9b, 0x4d, 0xcb, 0x3b, 0x10, 0x1a, 0xfc, 0xe2, 0x70, 0xcd, 0xc3, 0x8a, 0x5e, 0xa8, 0xb3, 0xc2, + 0x32, 0xac, 0xf1, 0x43, 0x2f, 0x1b, 0x30, 0xc9, 0x67, 0xa2, 0x94, 0x60, 0x34, 0x65, 0x09, 0x66, + 0x69, 0xd7, 0xae, 0xe8, 0x2c, 0x70, 0x94, 0xa3, 0xf9, 0xf7, 0x51, 0x7d, 0x52, 0x09, 0xa8, 0x75, + 0x5d, 0x3f, 0x40, 0x1f, 0x80, 0x7b, 0xfd, 0x76, 0xb5, 0x4a, 0x7c, 0x7f, 0xa7, 0xdd, 0xc0, 0x6d, + 0xe7, 0xa2, 0xed, 0x07, 0xae, 0x77, 0xb0, 0x66, 0x37, 0xed, 0x80, 0xcd, 0xb8, 0x5c, 0xf9, 0xec, + 0x61, 0xa7, 0x78, 0x6f, 0xa5, 0x17, 0x12, 0xee, 0x5d, 0x1f, 0x59, 0x70, 0x5f, 0xdb, 0xe9, 0x4d, + 0x9e, 0x5b, 0x6f, 0xc5, 0xc3, 0x4e, 0xf1, 0xbe, 0xab, 0xbd, 0xd1, 0xf0, 0x51, 0x34, 0xcc, 0x7f, + 0x33, 0x60, 0x46, 0xb6, 0x6b, 0x8b, 0x34, 0x5b, 0x0d, 0xba, 0xbb, 0x9c, 0xbc, 0x21, 0x12, 0x44, + 0x0c, 0x11, 0x9c, 0x8e, 0x3a, 0x91, 0xf2, 0xf7, 0xb2, 0x46, 0xcc, 0x7f, 0x35, 0x60, 0x2e, 0x8e, + 0x7c, 0x17, 0x94, 0xa7, 0x1f, 0x55, 0x9e, 0x1b, 0xe9, 0xb6, 0xb6, 0x87, 0x06, 0x7d, 0x75, 0xa4, + 0xbb, 0xad, 0xff, 0xdb, 0xd5, 0x68, 0xa8, 0x15, 0xb3, 0x3f, 0x4d, 0xad, 0x38, 0xf2, 0xa6, 0xd2, + 0x8a, 0xbf, 0x3f, 0x02, 0x13, 0x25, 0x27, 0xb0, 0x4b, 0x3b, 0x3b, 0xb6, 0x63, 0x07, 0x07, 0xe8, + 0xd3, 0x19, 0x58, 0x6a, 0x79, 0x64, 0x87, 0x78, 0x1e, 0xa9, 0xad, 0xb4, 0x3d, 0xdb, 0xa9, 0x57, + 0xaa, 0xbb, 0xa4, 0xd6, 0x6e, 0xd8, 0x4e, 0x7d, 0xb5, 0xee, 0xb8, 0xaa, 0xf8, 0xfc, 0x2d, 0x52, + 0x6d, 0xb3, 0x26, 0xf1, 0x45, 0xd1, 0x1c, 0xae, 0x49, 0x9b, 0x83, 0x31, 0x2d, 0x3f, 0x7a, 0xd8, + 0x29, 0x2e, 0x0d, 0x58, 0x09, 0x0f, 0xda, 0x34, 0xf4, 0xa9, 0x0c, 0x2c, 0x7a, 0xe4, 0xa3, 0x6d, + 0xbb, 0xff, 0xde, 0xe0, 0xbb, 0x56, 0x63, 0x48, 0xf5, 0x33, 0x10, 0xcf, 0xf2, 0x23, 0x87, 0x9d, + 0xe2, 0x80, 0x75, 0xf0, 0x80, 0xed, 0x32, 0xbf, 0x96, 0x81, 0xd3, 0xa5, 0x56, 0x6b, 0x9d, 0xf8, + 0xbb, 0xb1, 0x43, 0xed, 0x67, 0x0d, 0x98, 0xda, 0xb7, 0xbd, 0xa0, 0x6d, 0x35, 0xa4, 0x13, 0x80, + 0x4f, 0x89, 0xca, 0x90, 0xcb, 0x99, 0x73, 0xbb, 0x16, 0x21, 0x5d, 0x46, 0x87, 0x9d, 0xe2, 0x54, + 0xb4, 0x0c, 0xc7, 0xd8, 0xa3, 0xdf, 0x34, 0x60, 0x46, 0x14, 0x6d, 0xb8, 0x35, 0xa2, 0x7b, 0x8e, + 0xae, 0xa6, 0x29, 0x93, 0x22, 0xce, 0x5d, 0x0c, 0xf1, 0x52, 0xdc, 0x25, 0x84, 0xf9, 0x1f, 0x19, + 0x38, 0xd3, 0x83, 0x06, 0xfa, 0x3d, 0x03, 0xe6, 0xb8, 0xbb, 0x49, 0x03, 0x61, 0xb2, 0x23, 0x7a, + 0xf3, 0xfd, 0x69, 0x4b, 0x8e, 0xe9, 0x5a, 0x20, 0x4e, 0x95, 0x94, 0xe7, 0xe9, 0xb6, 0xb1, 0x9c, + 0xc0, 0x1a, 0x27, 0x0a, 0xc4, 0x24, 0xe5, 0x0e, 0xa8, 0x98, 0xa4, 0x99, 0xbb, 0x22, 0x69, 0x25, + 0x81, 0x35, 0x4e, 0x14, 0xc8, 0xfc, 0x45, 0xb8, 0xef, 0x08, 0x72, 0x77, 0x3e, 0xf1, 0x9b, 0xcf, + 0xab, 0x59, 0x1f, 0x9d, 0x73, 0x7d, 0x38, 0x0b, 0x4c, 0x18, 0xf5, 0xdc, 0x76, 0x40, 0xb8, 0x76, + 0x2b, 0x94, 0x81, 0xea, 0x09, 0xcc, 
0x4a, 0xb0, 0x80, 0x98, 0x5f, 0x33, 0x20, 0x3f, 0x80, 0xff, + 0xa1, 0x18, 0xf5, 0x3f, 0x14, 0xba, 0x7c, 0x0f, 0x41, 0xb7, 0xef, 0xe1, 0x99, 0xe1, 0x46, 0xa3, + 0x1f, 0x9f, 0xc3, 0x8f, 0x0c, 0x98, 0xed, 0xf2, 0x51, 0xa0, 0x5d, 0x98, 0x6b, 0xb9, 0x35, 0x69, + 0x5f, 0x5c, 0xb4, 0xfc, 0x5d, 0x06, 0x13, 0xcd, 0x7b, 0x8c, 0x8e, 0xe4, 0x66, 0x02, 0xfc, 0x76, + 0xa7, 0x38, 0xaf, 0x88, 0xc4, 0x10, 0x70, 0x22, 0x45, 0xd4, 0x82, 0xfc, 0x8e, 0x4d, 0x1a, 0xb5, + 0x70, 0x0a, 0x0e, 0x69, 0x49, 0x5c, 0x10, 0xd4, 0xb8, 0x7b, 0x4e, 0xfe, 0xc3, 0x8a, 0x8b, 0x79, + 0x05, 0xa6, 0xa2, 0xce, 0xda, 0x3e, 0x06, 0xef, 0x2c, 0x64, 0x2d, 0xcf, 0x11, 0x43, 0x37, 0x2e, + 0x10, 0xb2, 0x25, 0xbc, 0x81, 0x69, 0xb9, 0xf9, 0x93, 0x11, 0x98, 0x2e, 0x37, 0xda, 0xe4, 0x19, + 0x8f, 0x10, 0x79, 0x3e, 0x2d, 0xc1, 0x74, 0xcb, 0x23, 0xfb, 0x36, 0xb9, 0x59, 0x21, 0x0d, 0x52, + 0x0d, 0x5c, 0x4f, 0xd0, 0x3f, 0x23, 0xaa, 0x4f, 0x6f, 0x46, 0xc1, 0x38, 0x8e, 0x8f, 0x9e, 0x86, + 0x29, 0xab, 0x1a, 0xd8, 0xfb, 0x44, 0x51, 0xe0, 0x02, 0xbc, 0x45, 0x50, 0x98, 0x2a, 0x45, 0xa0, + 0x38, 0x86, 0x8d, 0x3e, 0x08, 0xf3, 0x7e, 0xd5, 0x6a, 0x90, 0xab, 0x2d, 0xc1, 0x6a, 0x79, 0x97, + 0x54, 0xf7, 0x36, 0x5d, 0xdb, 0x09, 0x84, 0x37, 0xe2, 0x01, 0x41, 0x69, 0xbe, 0xd2, 0x03, 0x0f, + 0xf7, 0xa4, 0x80, 0xfe, 0xcc, 0x80, 0xb3, 0x2d, 0x8f, 0x6c, 0x7a, 0x6e, 0xd3, 0xa5, 0x6a, 0xa6, + 0xeb, 0x88, 0x2e, 0x8e, 0xaa, 0xd7, 0x86, 0xd4, 0xa7, 0xbc, 0xa4, 0xdb, 0x45, 0xf8, 0xd6, 0xc3, + 0x4e, 0xf1, 0xec, 0xe6, 0x51, 0x02, 0xe0, 0xa3, 0xe5, 0x43, 0x7f, 0x61, 0xc0, 0xb9, 0x96, 0xeb, + 0x07, 0x47, 0x34, 0x21, 0x77, 0xa2, 0x4d, 0x30, 0x0f, 0x3b, 0xc5, 0x73, 0x9b, 0x47, 0x4a, 0x80, + 0xef, 0x20, 0xa1, 0x79, 0x38, 0x0e, 0xb3, 0xda, 0xdc, 0x13, 0xe7, 0xd7, 0x27, 0x61, 0x52, 0x4e, + 0x86, 0x50, 0xad, 0x17, 0x42, 0x7f, 0x43, 0x49, 0x07, 0xe2, 0x28, 0x2e, 0x9d, 0x77, 0x6a, 0x2a, + 0xf2, 0xda, 0xb1, 0x79, 0xb7, 0x19, 0x81, 0xe2, 0x18, 0x36, 0x5a, 0x85, 0x53, 0xa2, 0x04, 0x93, + 0x56, 0xc3, 0xae, 0x5a, 0xcb, 0x6e, 0x5b, 0x4c, 0xb9, 0x5c, 0xf9, 0xcc, 0x61, 0xa7, 0x78, 0x6a, + 0xb3, 0x1b, 0x8c, 0x93, 0xea, 0xa0, 0x35, 0x98, 0xb3, 0xda, 0x81, 0xab, 0xda, 0x7f, 0xde, 0xa1, + 0x9a, 0xa2, 0xc6, 0xa6, 0x56, 0x9e, 0xab, 0x94, 0x52, 0x02, 0x1c, 0x27, 0xd6, 0x42, 0x9b, 0x31, + 0x6a, 0x15, 0x52, 0x75, 0x9d, 0x1a, 0x1f, 0xe5, 0x5c, 0x68, 0x85, 0x97, 0x12, 0x70, 0x70, 0x62, + 0x4d, 0xd4, 0x80, 0xa9, 0xa6, 0x75, 0xeb, 0xaa, 0x63, 0xed, 0x5b, 0x76, 0x83, 0x32, 0x11, 0x3e, + 0x8c, 0xde, 0x07, 0xeb, 0x76, 0x60, 0x37, 0x16, 0xf9, 0x75, 0xde, 0xe2, 0xaa, 0x13, 0x5c, 0xf6, + 0x2a, 0x01, 0xb5, 0xd6, 0xb8, 0x71, 0xb4, 0x1e, 0xa1, 0x85, 0x63, 0xb4, 0xd1, 0x65, 0x38, 0xcd, + 0x96, 0xe3, 0x8a, 0x7b, 0xd3, 0x59, 0x21, 0x0d, 0xeb, 0x40, 0x36, 0x60, 0x8c, 0x35, 0xe0, 0xde, + 0xc3, 0x4e, 0xf1, 0x74, 0x25, 0x09, 0x01, 0x27, 0xd7, 0x43, 0x16, 0xdc, 0x17, 0x05, 0x60, 0xb2, + 0x6f, 0xfb, 0xb6, 0xeb, 0x70, 0x4f, 0x44, 0x3e, 0xf4, 0x44, 0x54, 0x7a, 0xa3, 0xe1, 0xa3, 0x68, + 0xa0, 0xdf, 0x36, 0x60, 0x2e, 0x69, 0x19, 0xce, 0x17, 0xd2, 0xb8, 0xac, 0x88, 0x2d, 0x2d, 0x3e, + 0x23, 0x12, 0x37, 0x85, 0x44, 0x21, 0xd0, 0x4b, 0x06, 0x4c, 0x58, 0xda, 0x29, 0x6a, 0x1e, 0x98, + 0x54, 0x97, 0x86, 0x3d, 0xcb, 0x87, 0x14, 0xcb, 0x33, 0x87, 0x9d, 0x62, 0xe4, 0xa4, 0x86, 0x23, + 0x1c, 0xd1, 0xef, 0x18, 0x70, 0x3a, 0x71, 0x8d, 0xcf, 0x8f, 0x9f, 0x44, 0x0f, 0xb1, 0x49, 0x92, + 0xbc, 0xe7, 0x24, 0x8b, 0x81, 0x5e, 0x33, 0x94, 0x2a, 0x5b, 0x97, 0xde, 0x94, 0x09, 0x26, 0xda, + 0x95, 0x21, 0x0f, 0x8e, 0xa1, 0x41, 0x20, 0x09, 0x97, 0x4f, 0x69, 0x9a, 0x51, 0x16, 0xe2, 0x38, + 0x7b, 0xf4, 0x19, 0x43, 0xaa, 0x46, 0x25, 0xd1, 0xe4, 0x49, 
0x49, 0x84, 0x42, 0x4d, 0xab, 0x04, + 0x8a, 0x31, 0x47, 0x1f, 0x82, 0x05, 0x6b, 0xdb, 0xf5, 0x82, 0xc4, 0xc5, 0x37, 0x3f, 0xc5, 0x96, + 0xd1, 0xb9, 0xc3, 0x4e, 0x71, 0xa1, 0xd4, 0x13, 0x0b, 0x1f, 0x41, 0xc1, 0xfc, 0xa3, 0x1c, 0x4c, + 0x70, 0x23, 0x5f, 0xa8, 0xae, 0xaf, 0x1a, 0x70, 0x7f, 0xb5, 0xed, 0x79, 0xc4, 0x09, 0x2a, 0x01, + 0x69, 0x75, 0x2b, 0x2e, 0xe3, 0x44, 0x15, 0xd7, 0x03, 0x87, 0x9d, 0xe2, 0xfd, 0xcb, 0x47, 0xf0, + 0xc7, 0x47, 0x4a, 0x87, 0xfe, 0xc6, 0x00, 0x53, 0x20, 0x94, 0xad, 0xea, 0x5e, 0xdd, 0x73, 0xdb, + 0x4e, 0xad, 0xbb, 0x11, 0x99, 0x13, 0x6d, 0xc4, 0x43, 0x87, 0x9d, 0xa2, 0xb9, 0x7c, 0x47, 0x29, + 0x70, 0x1f, 0x92, 0xa2, 0x67, 0x60, 0x56, 0x60, 0x9d, 0xbf, 0xd5, 0x22, 0x9e, 0x4d, 0xcd, 0x69, + 0x71, 0x9f, 0x1e, 0x86, 0x28, 0xc4, 0x11, 0x70, 0x77, 0x1d, 0xe4, 0xc3, 0xd8, 0x4d, 0x62, 0xd7, + 0x77, 0x03, 0x69, 0x3e, 0x0d, 0x19, 0x97, 0x20, 0x0e, 0xfc, 0xd7, 0x39, 0xcd, 0xf2, 0xf8, 0x61, + 0xa7, 0x38, 0x26, 0xfe, 0x60, 0xc9, 0x09, 0x6d, 0xc0, 0x14, 0x3f, 0x82, 0x6d, 0xda, 0x4e, 0x7d, + 0xd3, 0x75, 0xf8, 0x6d, 0x7e, 0xa1, 0xfc, 0x90, 0x54, 0xf8, 0x95, 0x08, 0xf4, 0x76, 0xa7, 0x38, + 0x21, 0x7f, 0x6f, 0x1d, 0xb4, 0x08, 0x8e, 0xd5, 0x36, 0xbf, 0x39, 0x0a, 0x20, 0xa7, 0x2b, 0x69, + 0xa1, 0x9f, 0x83, 0x82, 0x4f, 0x02, 0xce, 0x55, 0x38, 0xcf, 0xf9, 0x9d, 0x84, 0x2c, 0xc4, 0x21, + 0x1c, 0xed, 0x41, 0xae, 0x65, 0xb5, 0x7d, 0x22, 0x06, 0xff, 0x52, 0x2a, 0x83, 0xbf, 0x49, 0x29, + 0xf2, 0x33, 0x17, 0xfb, 0x89, 0x39, 0x0f, 0xf4, 0x49, 0x03, 0x80, 0x44, 0x07, 0x6c, 0x68, 0xdf, + 0x87, 0x60, 0x19, 0x8e, 0x29, 0xed, 0x83, 0xf2, 0xd4, 0x61, 0xa7, 0x08, 0xda, 0xd0, 0x6b, 0x6c, + 0xd1, 0x4d, 0xc8, 0x5b, 0x72, 0xcf, 0x1f, 0x39, 0x89, 0x3d, 0x9f, 0x1d, 0x85, 0xd4, 0xa4, 0x55, + 0xcc, 0xd0, 0xa7, 0x0c, 0x98, 0xf2, 0x49, 0x20, 0x86, 0x8a, 0xee, 0x3c, 0xc2, 0xe0, 0x1d, 0x72, + 0xd2, 0x55, 0x22, 0x34, 0xf9, 0x0e, 0x1a, 0x2d, 0xc3, 0x31, 0xbe, 0x52, 0x94, 0x8b, 0xc4, 0xaa, + 0x11, 0x8f, 0x9d, 0xb4, 0x85, 0x25, 0x35, 0xbc, 0x28, 0x1a, 0x4d, 0x25, 0x8a, 0x56, 0x86, 0x63, + 0x7c, 0xa5, 0x28, 0xeb, 0xb6, 0xe7, 0xb9, 0x42, 0x94, 0x7c, 0x4a, 0xa2, 0x68, 0x34, 0x95, 0x28, + 0x5a, 0x19, 0x8e, 0xf1, 0x35, 0xff, 0x76, 0x02, 0xa6, 0xe4, 0x42, 0x0a, 0x2d, 0x7b, 0xee, 0xd8, + 0xe9, 0x61, 0xd9, 0x2f, 0xeb, 0x40, 0x1c, 0xc5, 0xa5, 0x95, 0xf9, 0x52, 0x8d, 0x1a, 0xf6, 0xaa, + 0x72, 0x45, 0x07, 0xe2, 0x28, 0x2e, 0x6a, 0x42, 0xce, 0x0f, 0x48, 0x4b, 0xde, 0x83, 0x0e, 0x79, + 0x4d, 0x17, 0xee, 0x0f, 0xe1, 0x4d, 0x07, 0xfd, 0xe7, 0x63, 0xce, 0x85, 0xf9, 0x26, 0x83, 0x88, + 0xbb, 0x52, 0x2c, 0x8e, 0x74, 0xd6, 0x67, 0xd4, 0x13, 0xca, 0x47, 0x23, 0x5a, 0x86, 0x63, 0xec, + 0x13, 0x8c, 0xfd, 0xdc, 0x09, 0x1a, 0xfb, 0xcf, 0x41, 0xbe, 0x69, 0xdd, 0xaa, 0xb4, 0xbd, 0xfa, + 0xf1, 0x0f, 0x15, 0x22, 0x44, 0x89, 0x53, 0xc1, 0x8a, 0x1e, 0x7a, 0xd9, 0xd0, 0xb6, 0x9c, 0x31, + 0x46, 0xfc, 0x7a, 0xba, 0x5b, 0x8e, 0xd2, 0x95, 0x3d, 0x37, 0x9f, 0x2e, 0xd3, 0x3b, 0x7f, 0xd7, + 0x4d, 0x6f, 0x6a, 0x46, 0xf2, 0x05, 0xa2, 0xcc, 0xc8, 0xc2, 0x89, 0x9a, 0x91, 0xcb, 0x11, 0x66, + 0x38, 0xc6, 0x9c, 0xc9, 0xc3, 0xd7, 0x9c, 0x92, 0x07, 0x4e, 0x54, 0x9e, 0x4a, 0x84, 0x19, 0x8e, + 0x31, 0xef, 0x7d, 0xde, 0x1c, 0x3f, 0x99, 0xf3, 0xe6, 0x44, 0x0a, 0xe7, 0xcd, 0xa3, 0x4d, 0xf1, + 0xc9, 0x61, 0x4d, 0x71, 0x74, 0x09, 0x50, 0xed, 0xc0, 0xb1, 0x9a, 0x76, 0x55, 0x6c, 0x96, 0x4c, + 0x6d, 0x4e, 0x31, 0x7f, 0xc4, 0x82, 0xd8, 0xc8, 0xd0, 0x4a, 0x17, 0x06, 0x4e, 0xa8, 0x85, 0x02, + 0xc8, 0xb7, 0xa4, 0xc5, 0x35, 0x9d, 0xc6, 0xec, 0x97, 0x16, 0x18, 0xbf, 0x2a, 0xa7, 0x0b, 0x4f, + 0x96, 0x60, 0xc5, 0xc9, 0xfc, 0x2f, 0x03, 0x66, 0x96, 0x1b, 0x6e, 0xbb, 0x76, 0xdd, 
0x0a, 0xaa, + 0xbb, 0xfc, 0x5e, 0x17, 0x3d, 0x0d, 0x79, 0xdb, 0x09, 0x88, 0xb7, 0x6f, 0x35, 0x84, 0x46, 0x31, + 0xe5, 0xd5, 0xf7, 0xaa, 0x28, 0xbf, 0xdd, 0x29, 0x4e, 0xad, 0xb4, 0x3d, 0x16, 0x30, 0xc9, 0xf7, + 0x17, 0xac, 0xea, 0xa0, 0x2f, 0x19, 0x30, 0xcb, 0x6f, 0x86, 0x57, 0xac, 0xc0, 0xba, 0xd2, 0x26, + 0x9e, 0x4d, 0xe4, 0xdd, 0xf0, 0x90, 0x5b, 0x4b, 0x5c, 0x56, 0xc9, 0xe0, 0x20, 0x34, 0xad, 0xd7, + 0xe3, 0x9c, 0x71, 0xb7, 0x30, 0xe6, 0xe7, 0xb2, 0x70, 0x6f, 0x4f, 0x5a, 0x68, 0x01, 0x32, 0x76, + 0x4d, 0x34, 0x1d, 0x04, 0xdd, 0xcc, 0x6a, 0x0d, 0x67, 0xec, 0x1a, 0x5a, 0x64, 0x56, 0xa2, 0x47, + 0x7c, 0x5f, 0x5e, 0x13, 0x16, 0x94, 0x41, 0x27, 0x4a, 0xb1, 0x86, 0x81, 0x8a, 0x90, 0x6b, 0x58, + 0xdb, 0xa4, 0x21, 0x4e, 0x00, 0xcc, 0xee, 0x5c, 0xa3, 0x05, 0x98, 0x97, 0xa3, 0x5f, 0x36, 0x00, + 0xb8, 0x80, 0xf4, 0xfc, 0x20, 0xf4, 0x1a, 0x4e, 0xb7, 0x9b, 0x28, 0x65, 0x2e, 0x65, 0xf8, 0x1f, + 0x6b, 0x5c, 0xd1, 0x16, 0x8c, 0x52, 0x13, 0xd4, 0xad, 0x1d, 0x5b, 0x8d, 0xb1, 0x6b, 0x91, 0x4d, + 0x46, 0x03, 0x0b, 0x5a, 0xb4, 0xaf, 0x3c, 0x12, 0xb4, 0x3d, 0x87, 0x76, 0x2d, 0x53, 0x5c, 0x79, + 0x2e, 0x05, 0x56, 0xa5, 0x58, 0xc3, 0x30, 0xff, 0x34, 0x03, 0x73, 0x49, 0xa2, 0x53, 0xfd, 0x30, + 0xca, 0xa5, 0x15, 0x87, 0xd9, 0xf7, 0xa5, 0xdf, 0x3f, 0x22, 0xc8, 0x41, 0x85, 0x02, 0x88, 0x30, + 0x2c, 0xc1, 0x17, 0xbd, 0x4f, 0xf5, 0x50, 0xe6, 0x98, 0x3d, 0xa4, 0x28, 0xc7, 0x7a, 0xe9, 0x01, + 0x18, 0xf1, 0xe9, 0xc8, 0x67, 0xa3, 0x57, 0x0e, 0x6c, 0x8c, 0x18, 0x84, 0x62, 0xb4, 0x1d, 0x3b, + 0x10, 0x51, 0xcc, 0x0a, 0xe3, 0xaa, 0x63, 0x07, 0x98, 0x41, 0xcc, 0x2f, 0x64, 0x60, 0xa1, 0x77, + 0xa3, 0xd0, 0x17, 0x0c, 0x80, 0x1a, 0x3d, 0x60, 0xd0, 0x29, 0x29, 0x83, 0x42, 0xac, 0x93, 0xea, + 0xc3, 0x15, 0xc9, 0x29, 0x8c, 0x10, 0x52, 0x45, 0x3e, 0xd6, 0x04, 0x41, 0x8f, 0xc8, 0xa9, 0xbf, + 0x61, 0x35, 0xa5, 0x01, 0xaa, 0xea, 0xac, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0x09, 0xd2, 0xb1, 0x9a, + 0xc4, 0x6f, 0x59, 0x2a, 0x4c, 0x9d, 0x9d, 0x20, 0x37, 0x64, 0x21, 0x0e, 0xe1, 0x66, 0x03, 0x1e, + 0xec, 0x43, 0xce, 0x94, 0x42, 0x86, 0xcd, 0xff, 0x34, 0xe0, 0xcc, 0x72, 0xa3, 0xed, 0x07, 0xc4, + 0xfb, 0x3f, 0x13, 0x70, 0xf5, 0xdf, 0x06, 0xdc, 0xd7, 0xa3, 0xcd, 0x77, 0x21, 0xee, 0xea, 0x85, + 0x68, 0xdc, 0xd5, 0xd5, 0x61, 0xa7, 0x74, 0x62, 0x3b, 0x7a, 0x84, 0x5f, 0x05, 0x30, 0x49, 0x77, + 0xad, 0x9a, 0x5b, 0x4f, 0x49, 0x6f, 0x3e, 0x08, 0xb9, 0x8f, 0x52, 0xfd, 0x13, 0x9f, 0x63, 0x4c, + 0x29, 0x61, 0x0e, 0x33, 0x9f, 0x02, 0x11, 0xa4, 0x14, 0x5b, 0x3c, 0x46, 0x3f, 0x8b, 0xc7, 0xfc, + 0x87, 0x0c, 0x68, 0x9e, 0x87, 0xbb, 0x30, 0x29, 0x9d, 0xc8, 0xa4, 0x1c, 0xf2, 0xd4, 0xac, 0xf9, + 0x51, 0x7a, 0xbd, 0x46, 0xd8, 0x8f, 0xbd, 0x46, 0xd8, 0x48, 0x8d, 0xe3, 0xd1, 0x8f, 0x11, 0xbe, + 0x6b, 0xc0, 0x7d, 0x21, 0x72, 0xb7, 0x53, 0xf0, 0xce, 0x3b, 0xcc, 0xe3, 0x30, 0x6e, 0x85, 0xd5, + 0xc4, 0x1c, 0x50, 0x0f, 0x70, 0x34, 0x8a, 0x58, 0xc7, 0x0b, 0x63, 0x9f, 0xb3, 0xc7, 0x8c, 0x7d, + 0x1e, 0x39, 0x3a, 0xf6, 0xd9, 0xfc, 0x71, 0x06, 0xce, 0x76, 0xb7, 0x4c, 0xae, 0x8d, 0xfe, 0xee, + 0xcc, 0x9f, 0x80, 0x89, 0x40, 0x54, 0xd0, 0x76, 0x7a, 0xf5, 0x7c, 0x6c, 0x4b, 0x83, 0xe1, 0x08, + 0x26, 0xad, 0x59, 0xe5, 0xab, 0xb2, 0x52, 0x75, 0x5b, 0x32, 0x72, 0x5e, 0xd5, 0x5c, 0xd6, 0x60, + 0x38, 0x82, 0xa9, 0x62, 0x12, 0x47, 0x4e, 0x3c, 0x26, 0xb1, 0x02, 0xa7, 0x65, 0x14, 0xd6, 0x05, + 0xd7, 0x5b, 0x76, 0x9b, 0xad, 0x06, 0x11, 0xb1, 0xf3, 0x54, 0xd8, 0xb3, 0xa2, 0xca, 0x69, 0x9c, + 0x84, 0x84, 0x93, 0xeb, 0x9a, 0xdf, 0xcd, 0xc2, 0xa9, 0xb0, 0xdb, 0x97, 0x5d, 0xa7, 0x66, 0xb3, + 0x58, 0xb6, 0x27, 0x61, 0x24, 0x38, 0x68, 0xc9, 0xce, 0xfe, 0x7f, 0x52, 0x9c, 0xad, 0x83, 0x16, + 0x1d, 0xed, 
0x33, 0x09, 0x55, 0x98, 0x5b, 0x96, 0x55, 0x42, 0x6b, 0x6a, 0x75, 0xf0, 0x11, 0x78, + 0x2c, 0x3a, 0x9b, 0x6f, 0x77, 0x8a, 0x09, 0xaf, 0x27, 0x17, 0x15, 0xa5, 0xe8, 0x9c, 0x47, 0x37, + 0x60, 0xaa, 0x61, 0xf9, 0xc1, 0xd5, 0x56, 0xcd, 0x0a, 0xc8, 0x96, 0xdd, 0x24, 0x62, 0xcd, 0x0d, + 0x12, 0x90, 0xae, 0xee, 0x91, 0xd7, 0x22, 0x94, 0x70, 0x8c, 0x32, 0xda, 0x07, 0x44, 0x4b, 0xb6, + 0x3c, 0xcb, 0xf1, 0x79, 0xab, 0x28, 0xbf, 0xc1, 0x03, 0xe0, 0xd5, 0xb1, 0x6c, 0xad, 0x8b, 0x1a, + 0x4e, 0xe0, 0x80, 0x1e, 0x82, 0x51, 0x8f, 0x58, 0xbe, 0x18, 0xcc, 0x42, 0xb8, 0xfe, 0x31, 0x2b, + 0xc5, 0x02, 0xaa, 0x2f, 0xa8, 0xd1, 0x3b, 0x2c, 0xa8, 0x1f, 0x18, 0x30, 0x15, 0x0e, 0xd3, 0x5d, + 0x50, 0x92, 0xcd, 0xa8, 0x92, 0xbc, 0x98, 0xd6, 0x96, 0xd8, 0x43, 0x2f, 0xfe, 0xe5, 0xa8, 0xde, + 0x3e, 0x16, 0x90, 0xfc, 0x31, 0x28, 0xc8, 0x55, 0x2d, 0xad, 0xcf, 0x21, 0x4f, 0xb7, 0x11, 0xbb, + 0x44, 0x7b, 0x48, 0x23, 0x98, 0xe0, 0x90, 0x1f, 0x55, 0xcb, 0x35, 0xa1, 0x72, 0xc5, 0xb4, 0x57, + 0x6a, 0x59, 0xaa, 0xe2, 0x24, 0xb5, 0x2c, 0xeb, 0xa0, 0xab, 0x70, 0xa6, 0xe5, 0xb9, 0xec, 0x71, + 0xe5, 0x0a, 0xb1, 0x6a, 0x0d, 0xdb, 0x21, 0xd2, 0x85, 0xc0, 0xc3, 0x18, 0xee, 0x3b, 0xec, 0x14, + 0xcf, 0x6c, 0x26, 0xa3, 0xe0, 0x5e, 0x75, 0xa3, 0x0f, 0x82, 0x46, 0xfa, 0x78, 0x10, 0xf4, 0xab, + 0xca, 0x51, 0x47, 0x7c, 0xf1, 0x2c, 0xe7, 0x03, 0x69, 0x0d, 0x65, 0xc2, 0xb6, 0x1e, 0x4e, 0xa9, + 0x92, 0x60, 0x8a, 0x15, 0xfb, 0xde, 0xde, 0xa0, 0xd1, 0x63, 0x7a, 0x83, 0xc2, 0xb8, 0xee, 0xb1, + 0x9f, 0x66, 0x5c, 0x77, 0xfe, 0x4d, 0x15, 0xd7, 0xfd, 0x4a, 0x0e, 0x66, 0xe2, 0x16, 0xc8, 0xc9, + 0x3f, 0x76, 0xfa, 0x0d, 0x03, 0x66, 0xe4, 0xea, 0xe1, 0x3c, 0x89, 0xf4, 0xf3, 0xaf, 0xa5, 0xb4, + 0x68, 0xb9, 0x2d, 0xa5, 0x9e, 0xe3, 0x6e, 0xc5, 0xb8, 0xe1, 0x2e, 0xfe, 0xe8, 0x79, 0x18, 0x57, + 0xee, 0xf0, 0x63, 0xbd, 0x7c, 0x9a, 0x66, 0x56, 0x54, 0x48, 0x02, 0xeb, 0xf4, 0xd0, 0x2b, 0x06, + 0x40, 0x55, 0xaa, 0x39, 0xb9, 0xba, 0xae, 0xa4, 0xb5, 0xba, 0x94, 0x02, 0x0d, 0x8d, 0x65, 0x55, + 0xe4, 0x63, 0x8d, 0x31, 0xfa, 0x1c, 0x73, 0x84, 0x2b, 0xeb, 0x8e, 0xae, 0xa7, 0xec, 0xf0, 0xa1, + 0xb8, 0x47, 0x18, 0xa6, 0xa1, 0x29, 0xa5, 0x81, 0x7c, 0x1c, 0x11, 0xc2, 0x7c, 0x12, 0x54, 0xf0, + 0x24, 0xdd, 0xb6, 0x58, 0xf8, 0xe4, 0xa6, 0x15, 0xec, 0x8a, 0x29, 0xa8, 0xb6, 0xad, 0x0b, 0x12, + 0x80, 0x43, 0x1c, 0xf3, 0x23, 0x30, 0xf5, 0x8c, 0x67, 0xb5, 0x76, 0x6d, 0xe6, 0x70, 0xa6, 0xe7, + 0xa4, 0xb7, 0xc3, 0x98, 0x55, 0xab, 0x25, 0x3d, 0x66, 0x2f, 0xf1, 0x62, 0x2c, 0xe1, 0xfd, 0x1d, + 0x89, 0xbe, 0x69, 0x00, 0x0a, 0x2f, 0xed, 0x6c, 0xa7, 0xbe, 0x4e, 0x4f, 0xfb, 0xf4, 0x7c, 0xb4, + 0xcb, 0x4a, 0x93, 0xce, 0x47, 0x17, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x45, 0x18, 0xe7, 0xff, 0xae, + 0xa9, 0xc3, 0xfe, 0xd0, 0x4f, 0x61, 0xb9, 0x42, 0x61, 0x32, 0xf1, 0x59, 0x78, 0x31, 0xe4, 0x80, + 0x75, 0x76, 0xb4, 0xab, 0x56, 0x9d, 0x9d, 0x46, 0xfb, 0x56, 0x6d, 0x3b, 0xec, 0xaa, 0x96, 0xe7, + 0xee, 0xd8, 0x0d, 0x12, 0xef, 0xaa, 0x4d, 0x5e, 0x8c, 0x25, 0xbc, 0xbf, 0xae, 0xfa, 0xba, 0x01, + 0x73, 0xab, 0x7e, 0x60, 0xbb, 0x2b, 0xc4, 0x0f, 0xa8, 0x5a, 0xa1, 0x9b, 0x4f, 0xbb, 0xd1, 0x4f, + 0x1c, 0xf4, 0x0a, 0xcc, 0x88, 0x0b, 0xc4, 0xf6, 0xb6, 0x4f, 0x02, 0xcd, 0x8e, 0x57, 0xeb, 0x78, + 0x39, 0x06, 0xc7, 0x5d, 0x35, 0x28, 0x15, 0x71, 0x93, 0x18, 0x52, 0xc9, 0x46, 0xa9, 0x54, 0x62, + 0x70, 0xdc, 0x55, 0xc3, 0xfc, 0x76, 0x16, 0x4e, 0xb1, 0x66, 0xc4, 0xde, 0x30, 0x7c, 0xa6, 0xd7, + 0x1b, 0x86, 0x21, 0x97, 0x32, 0xe3, 0x75, 0x8c, 0x17, 0x0c, 0xbf, 0x6e, 0xc0, 0x74, 0x2d, 0xda, + 0xd3, 0xe9, 0xb8, 0x67, 0x92, 0xc6, 0x90, 0xc7, 0x4b, 0xc5, 0x0a, 0x71, 0x9c, 0x3f, 0xfa, 0xbc, + 0x01, 0xd3, 0x51, 0x31, 0xe5, 0xee, 
0x7e, 0x02, 0x9d, 0xa4, 0x02, 0x9c, 0xa3, 0xe5, 0x3e, 0x8e, + 0x8b, 0x60, 0xfe, 0x9d, 0x21, 0x86, 0xf4, 0x24, 0x02, 0xf4, 0xd1, 0x4d, 0x28, 0x04, 0x0d, 0x9f, + 0x17, 0x8a, 0xd6, 0x0e, 0x79, 0x22, 0xdc, 0x5a, 0xab, 0xf0, 0xbb, 0xfb, 0xd0, 0x68, 0x13, 0x25, + 0xd4, 0xf8, 0x94, 0xbc, 0xcc, 0x2f, 0x1b, 0x50, 0xb8, 0xe4, 0xca, 0xe5, 0xfc, 0xa1, 0x14, 0xfc, + 0x2d, 0xca, 0x2c, 0x53, 0x57, 0x75, 0xa1, 0xa5, 0xff, 0x74, 0xc4, 0xdb, 0x72, 0xbf, 0x46, 0x7b, + 0x91, 0xe5, 0xcb, 0xa1, 0xa4, 0x2e, 0xb9, 0xdb, 0x3d, 0x9d, 0x79, 0xbf, 0x9b, 0x83, 0xc9, 0x67, + 0xad, 0x03, 0xe2, 0x04, 0xd6, 0xe0, 0x7b, 0xf5, 0xe3, 0x30, 0x6e, 0xb5, 0x58, 0xbc, 0xae, 0x66, + 0x6a, 0x87, 0x0e, 0x8c, 0x10, 0x84, 0x75, 0xbc, 0x70, 0x5f, 0xe1, 0xe9, 0x3b, 0x92, 0x76, 0x84, + 0xe5, 0x18, 0x1c, 0x77, 0xd5, 0x40, 0x97, 0x00, 0x89, 0xc7, 0x88, 0xa5, 0x6a, 0xd5, 0x6d, 0x3b, + 0x7c, 0x67, 0xe1, 0xbe, 0x0d, 0x75, 0xe6, 0x5b, 0xef, 0xc2, 0xc0, 0x09, 0xb5, 0xd0, 0x07, 0x61, + 0xbe, 0xca, 0x28, 0x8b, 0x13, 0x80, 0x4e, 0x91, 0x9f, 0x02, 0x55, 0xac, 0xfc, 0x72, 0x0f, 0x3c, + 0xdc, 0x93, 0x02, 0x95, 0xd4, 0x0f, 0x5c, 0xcf, 0xaa, 0x13, 0x9d, 0xee, 0x68, 0x54, 0xd2, 0x4a, + 0x17, 0x06, 0x4e, 0xa8, 0x85, 0x3e, 0x01, 0x85, 0x60, 0xd7, 0x23, 0xfe, 0xae, 0xdb, 0xa8, 0x89, + 0xbb, 0xfb, 0x21, 0x1d, 0x5e, 0x62, 0xf4, 0xb7, 0x24, 0x55, 0x6d, 0x7a, 0xcb, 0x22, 0x1c, 0xf2, + 0x44, 0x1e, 0x8c, 0xfa, 0x55, 0xb7, 0x45, 0x7c, 0x61, 0x39, 0x5f, 0x4a, 0x85, 0x3b, 0x73, 0xe0, + 0x68, 0xae, 0x36, 0xc6, 0x01, 0x0b, 0x4e, 0xe6, 0x37, 0x32, 0x30, 0xa1, 0x23, 0xf6, 0xb1, 0x45, + 0x7c, 0xd2, 0x80, 0x89, 0xaa, 0xeb, 0x04, 0x9e, 0xdb, 0xe0, 0x6e, 0xa4, 0x74, 0x14, 0x3b, 0x25, + 0xb5, 0x42, 0x02, 0xcb, 0x6e, 0x68, 0x1e, 0x29, 0x8d, 0x0d, 0x8e, 0x30, 0x45, 0x9f, 0x36, 0x60, + 0x3a, 0x0c, 0xf5, 0x0a, 0xfd, 0x59, 0xa9, 0x0a, 0xa2, 0x76, 0xdc, 0xf3, 0x51, 0x4e, 0x38, 0xce, + 0xda, 0xdc, 0x86, 0x99, 0xf8, 0x68, 0xd3, 0xae, 0x6c, 0x59, 0x62, 0xad, 0x67, 0xc3, 0xae, 0xdc, + 0xb4, 0x7c, 0x1f, 0x33, 0x08, 0x7a, 0x07, 0xe4, 0x9b, 0x96, 0x57, 0xb7, 0x1d, 0xab, 0xc1, 0x7a, + 0x31, 0xab, 0x6d, 0x48, 0xa2, 0x1c, 0x2b, 0x0c, 0xf3, 0x5d, 0x30, 0xb1, 0x6e, 0x39, 0x75, 0x52, + 0xe3, 0xdb, 0x61, 0x1f, 0x2f, 0xb5, 0x7e, 0x38, 0x02, 0xe3, 0xda, 0x11, 0xe9, 0xe4, 0x8f, 0x3b, + 0x91, 0x8c, 0x0a, 0xd9, 0x14, 0x33, 0x2a, 0x3c, 0x07, 0xb0, 0x63, 0x3b, 0xb6, 0xbf, 0x7b, 0xcc, + 0x5c, 0x0d, 0xec, 0xa6, 0xf4, 0x82, 0xa2, 0x80, 0x35, 0x6a, 0xe1, 0x75, 0x54, 0xee, 0x88, 0x0c, + 0x36, 0xaf, 0x18, 0x9a, 0xba, 0x19, 0x4d, 0xe3, 0xfa, 0x5d, 0x1b, 0x98, 0x45, 0xa9, 0x7e, 0xce, + 0x3b, 0x81, 0x77, 0x70, 0xa4, 0x56, 0xda, 0x82, 0xbc, 0x47, 0xfc, 0x76, 0x93, 0x1e, 0xdc, 0xc6, + 0x06, 0xee, 0x06, 0x16, 0xba, 0x80, 0x45, 0x7d, 0xac, 0x28, 0x2d, 0x3c, 0x09, 0x93, 0x11, 0x11, + 0xd0, 0x0c, 0x64, 0xf7, 0xc8, 0x01, 0x9f, 0x27, 0x98, 0xfe, 0x44, 0x73, 0x91, 0x4b, 0x3b, 0xd1, + 0x2d, 0xef, 0xcd, 0x3c, 0x61, 0x98, 0x2e, 0x24, 0x9e, 0xc3, 0x8f, 0x73, 0xa7, 0x42, 0xc7, 0xa2, + 0xa1, 0x25, 0x6b, 0x50, 0x63, 0xc1, 0x03, 0x54, 0x38, 0xcc, 0xfc, 0xf1, 0x28, 0x88, 0x1b, 0xe5, + 0x3e, 0xb6, 0x2b, 0xfd, 0x22, 0x29, 0x73, 0x8c, 0x8b, 0xa4, 0x4b, 0x30, 0x61, 0x3b, 0x76, 0x60, + 0x5b, 0x0d, 0xe6, 0x63, 0x11, 0xea, 0x54, 0x46, 0xf0, 0x4e, 0xac, 0x6a, 0xb0, 0x04, 0x3a, 0x91, + 0xba, 0xe8, 0x0a, 0xe4, 0x98, 0xbe, 0x11, 0x13, 0x78, 0xf0, 0x6b, 0x6f, 0x16, 0xf1, 0xc0, 0x9f, + 0xf5, 0x70, 0x4a, 0xec, 0x0c, 0xc0, 0xb3, 0x55, 0xa8, 0x53, 0xb0, 0x98, 0xc7, 0xe1, 0x19, 0x20, + 0x06, 0xc7, 0x5d, 0x35, 0x28, 0x95, 0x1d, 0xcb, 0x6e, 0xb4, 0x3d, 0x12, 0x52, 0x19, 0x8d, 0x52, + 0xb9, 0x10, 0x83, 0xe3, 0xae, 0x1a, 0x68, 0x07, 0x26, 0x44, 
0x19, 0x0f, 0x3b, 0x1a, 0x3b, 0x66, + 0x2b, 0x59, 0x78, 0xd9, 0x05, 0x8d, 0x12, 0x8e, 0xd0, 0x45, 0x6d, 0x98, 0xb5, 0x9d, 0xaa, 0xeb, + 0x54, 0x1b, 0x6d, 0xdf, 0xde, 0x27, 0xe1, 0x9b, 0x9a, 0xe3, 0x30, 0x3b, 0x7d, 0xd8, 0x29, 0xce, + 0xae, 0xc6, 0xc9, 0xe1, 0x6e, 0x0e, 0xe8, 0x65, 0x03, 0x4e, 0x57, 0x5d, 0xc7, 0x67, 0xcf, 0xbf, + 0xf7, 0xc9, 0x79, 0xcf, 0x73, 0x3d, 0xce, 0xbb, 0x70, 0x4c, 0xde, 0xcc, 0xb5, 0xb7, 0x9c, 0x44, + 0x12, 0x27, 0x73, 0x42, 0x2f, 0x40, 0xbe, 0xe5, 0xb9, 0xfb, 0x76, 0x8d, 0x78, 0x22, 0x84, 0x6d, + 0x2d, 0x8d, 0x74, 0x14, 0x9b, 0x82, 0x66, 0xb8, 0xf5, 0xc8, 0x12, 0xac, 0xf8, 0x99, 0xaf, 0x17, + 0x60, 0x2a, 0x8a, 0x8e, 0x3e, 0x0e, 0xd0, 0xf2, 0xdc, 0x26, 0x09, 0x76, 0x89, 0x7a, 0x1b, 0xb1, + 0x31, 0x6c, 0xd6, 0x03, 0x49, 0x4f, 0x06, 0x91, 0xd0, 0xed, 0x22, 0x2c, 0xc5, 0x1a, 0x47, 0xe4, + 0xc1, 0xd8, 0x1e, 0x57, 0xbb, 0xc2, 0x0a, 0x79, 0x36, 0x15, 0x9b, 0x49, 0x70, 0x66, 0x41, 0xfd, + 0xa2, 0x08, 0x4b, 0x46, 0x68, 0x1b, 0xb2, 0x37, 0xc9, 0x76, 0x3a, 0x2f, 0x89, 0xaf, 0x13, 0x71, + 0x9a, 0x29, 0x8f, 0x1d, 0x76, 0x8a, 0xd9, 0xeb, 0x64, 0x1b, 0x53, 0xe2, 0xb4, 0x5d, 0x35, 0x7e, + 0x1d, 0x2e, 0xb6, 0x8a, 0x21, 0xdb, 0x15, 0xb9, 0x5b, 0xe7, 0xed, 0x12, 0x45, 0x58, 0x32, 0x42, + 0x2f, 0x40, 0xe1, 0xa6, 0xb5, 0x4f, 0x76, 0x3c, 0xd7, 0x09, 0x44, 0xe4, 0xd2, 0x90, 0xe1, 0xf2, + 0xd7, 0x25, 0x39, 0xc1, 0x97, 0xa9, 0x77, 0x55, 0x88, 0x43, 0x76, 0x68, 0x1f, 0xf2, 0x0e, 0xb9, + 0x89, 0x49, 0xc3, 0xae, 0xa6, 0x13, 0x9e, 0xbe, 0x21, 0xa8, 0x09, 0xce, 0x4c, 0xef, 0xc9, 0x32, + 0xac, 0x78, 0xd1, 0xb1, 0xbc, 0xe1, 0x6e, 0x8b, 0x8d, 0x6a, 0xc8, 0xb1, 0x54, 0x27, 0x53, 0x3e, + 0x96, 0x97, 0xdc, 0x6d, 0x4c, 0x89, 0xd3, 0x35, 0x52, 0x55, 0x61, 0x33, 0x62, 0x9b, 0xda, 0x48, + 0x37, 0x5c, 0x88, 0xaf, 0x91, 0xb0, 0x14, 0x6b, 0x1c, 0x69, 0xdf, 0xd6, 0x85, 0xcf, 0x50, 0x6c, + 0x54, 0x43, 0xf6, 0x6d, 0xd4, 0x03, 0xc9, 0xfb, 0x56, 0x96, 0x61, 0xc5, 0x8b, 0xf2, 0xb5, 0x85, + 0x03, 0x2e, 0x9d, 0xad, 0x2a, 0xea, 0xce, 0xe3, 0x7c, 0x65, 0x19, 0x56, 0xbc, 0xcc, 0x2f, 0x8f, + 0xc2, 0x84, 0x9e, 0xf6, 0xab, 0x0f, 0x1b, 0x41, 0xd9, 0xc5, 0x99, 0x41, 0xec, 0x62, 0x7a, 0x10, + 0xd2, 0xae, 0x1a, 0xa4, 0x2f, 0x64, 0x35, 0x35, 0xb3, 0x30, 0x3c, 0x08, 0x69, 0x85, 0x3e, 0x8e, + 0x30, 0x1d, 0x20, 0xfa, 0x80, 0x1a, 0x57, 0xdc, 0xfc, 0xc8, 0x45, 0x8d, 0xab, 0x88, 0x41, 0xf1, + 0x08, 0x40, 0x98, 0xfe, 0x4a, 0x5c, 0x41, 0x29, 0xab, 0x4d, 0x4b, 0xcb, 0xa5, 0x61, 0xa1, 0x87, + 0x60, 0x94, 0x2a, 0x68, 0x52, 0x13, 0x0f, 0x66, 0xd5, 0x69, 0xf3, 0x02, 0x2b, 0xc5, 0x02, 0x8a, + 0x9e, 0xa0, 0xb6, 0x54, 0xa8, 0x56, 0xc5, 0x3b, 0xd8, 0xb9, 0xd0, 0x96, 0x0a, 0x61, 0x38, 0x82, + 0x49, 0x45, 0x27, 0x54, 0x0b, 0xb2, 0x19, 0xac, 0x89, 0xce, 0x54, 0x23, 0xe6, 0x30, 0xe6, 0xfd, + 0x88, 0x69, 0x4d, 0x36, 0xf3, 0x72, 0x9a, 0xf7, 0x23, 0x06, 0xc7, 0x5d, 0x35, 0x68, 0x63, 0xc4, + 0xed, 0xd9, 0x38, 0x0f, 0xb2, 0xec, 0x71, 0xef, 0xf5, 0xaa, 0x7e, 0x22, 0x98, 0x60, 0x43, 0xff, + 0xbe, 0xf4, 0x52, 0xd8, 0xf5, 0x7f, 0x24, 0x18, 0xce, 0x78, 0xff, 0x08, 0x4c, 0x45, 0xf7, 0xca, + 0xd4, 0xdd, 0xe4, 0x7f, 0x95, 0x85, 0x53, 0x1b, 0x75, 0xdb, 0xb9, 0x15, 0xf3, 0x2f, 0x27, 0xa5, + 0x96, 0x35, 0x06, 0x4d, 0x2d, 0x1b, 0xbe, 0xbc, 0x11, 0xb9, 0x7b, 0x93, 0x5f, 0xde, 0xc8, 0xc4, + 0xbe, 0x51, 0x5c, 0xf4, 0x03, 0x03, 0xee, 0xb7, 0x6a, 0xdc, 0x7a, 0xb5, 0x1a, 0xa2, 0x34, 0x64, + 0x2a, 0x57, 0xb4, 0x3f, 0xa4, 0x2e, 0xea, 0x6e, 0xfc, 0x62, 0xe9, 0x08, 0xae, 0x7c, 0xc4, 0xdf, + 0x26, 0x5a, 0x70, 0xff, 0x51, 0xa8, 0xf8, 0x48, 0xf1, 0x17, 0x2e, 0xc3, 0x5b, 0xef, 0xc8, 0x68, + 0xa0, 0xd9, 0xf2, 0x49, 0x03, 0x0a, 0xdc, 0x7d, 0x8a, 0xc9, 0x0e, 0xdd, 0x2a, 0xac, 
0x96, 0x7d, + 0x8d, 0x78, 0xbe, 0xcc, 0x79, 0xa5, 0x1d, 0xf0, 0x4a, 0x9b, 0xab, 0x02, 0x82, 0x35, 0x2c, 0xba, + 0x19, 0xef, 0xd9, 0x4e, 0x4d, 0x0c, 0x93, 0xda, 0x8c, 0x9f, 0xb5, 0x9d, 0x1a, 0x66, 0x10, 0xb5, + 0x5d, 0x67, 0x7b, 0xba, 0x35, 0x5e, 0x37, 0x60, 0x8a, 0x3d, 0x37, 0x0c, 0x8f, 0x1e, 0x8f, 0xab, + 0xd0, 0x12, 0x2e, 0xc6, 0xd9, 0x68, 0x68, 0xc9, 0xed, 0x4e, 0x71, 0x9c, 0x3f, 0x50, 0x8c, 0x46, + 0x9a, 0x7c, 0x40, 0xf8, 0x2b, 0x58, 0x00, 0x4c, 0x66, 0xe0, 0xe3, 0xb4, 0xf2, 0xe7, 0x55, 0x24, + 0x11, 0x1c, 0xd2, 0x33, 0x5f, 0x84, 0x09, 0xfd, 0xdd, 0x00, 0x7a, 0x1c, 0xc6, 0x5b, 0xb6, 0x53, + 0x8f, 0xbe, 0x2f, 0x53, 0x3e, 0xdd, 0xcd, 0x10, 0x84, 0x75, 0x3c, 0x56, 0xcd, 0x0d, 0xab, 0xc5, + 0x5c, 0xc1, 0x9b, 0xae, 0x5e, 0x2d, 0xfc, 0x63, 0xfe, 0x71, 0x16, 0x4e, 0x25, 0xbc, 0x4f, 0x41, + 0xaf, 0x18, 0x30, 0xca, 0x82, 0xe5, 0x65, 0xf0, 0xc8, 0xf3, 0xa9, 0xbf, 0x81, 0x59, 0x64, 0x31, + 0xf9, 0x62, 0x1e, 0xab, 0xed, 0x93, 0x17, 0x62, 0xc1, 0x1c, 0xfd, 0x96, 0x01, 0xe3, 0x96, 0xb6, + 0xd4, 0x78, 0x3c, 0xcd, 0x76, 0xfa, 0xc2, 0x74, 0xad, 0x2c, 0x2d, 0x0e, 0x30, 0x5c, 0x48, 0xba, + 0x2c, 0x0b, 0xef, 0x81, 0x71, 0xad, 0x09, 0x83, 0xac, 0x90, 0x85, 0xa7, 0x61, 0x66, 0xa8, 0x15, + 0xf6, 0x7e, 0x18, 0x34, 0x85, 0x1b, 0x55, 0x58, 0x37, 0xf5, 0x37, 0xc0, 0xaa, 0xc7, 0xc5, 0x23, + 0x60, 0x01, 0x35, 0xb7, 0x61, 0x26, 0x7e, 0xb8, 0x4a, 0xfd, 0xfa, 0xf8, 0x5d, 0x30, 0x60, 0xd2, + 0x35, 0xf3, 0xaf, 0x33, 0x30, 0x26, 0x1e, 0xb9, 0xdd, 0x85, 0x10, 0xda, 0xbd, 0xc8, 0xa5, 0xce, + 0x6a, 0x2a, 0x6f, 0xf3, 0x7a, 0xc6, 0xcf, 0xfa, 0xb1, 0xf8, 0xd9, 0x67, 0xd3, 0x61, 0x77, 0x74, + 0xf0, 0xec, 0xeb, 0x23, 0x30, 0x1d, 0x7b, 0x34, 0x48, 0x4d, 0x95, 0xae, 0x98, 0xb1, 0xab, 0xa9, + 0xbe, 0x4b, 0x54, 0xe1, 0xdd, 0x47, 0x87, 0x8f, 0xf9, 0x91, 0xdc, 0x96, 0x57, 0x52, 0x4b, 0x8b, + 0xfd, 0xb3, 0x34, 0x97, 0x83, 0x86, 0x43, 0xfd, 0x8b, 0x01, 0xf7, 0xf6, 0x7c, 0x5b, 0xca, 0x52, + 0x93, 0x78, 0x51, 0xa8, 0x58, 0x90, 0x29, 0xbf, 0xa0, 0x57, 0x37, 0x2c, 0xf1, 0x6c, 0x12, 0x71, + 0xf6, 0xe8, 0x31, 0x98, 0x60, 0xaa, 0x95, 0xee, 0x29, 0x01, 0x69, 0x09, 0x07, 0x31, 0x73, 0x15, + 0x56, 0xb4, 0x72, 0x1c, 0xc1, 0x32, 0xbf, 0x64, 0xc0, 0x7c, 0xaf, 0x44, 0x15, 0x7d, 0x1c, 0x0c, + 0x7f, 0x21, 0x16, 0xe3, 0x5b, 0xec, 0x8a, 0xf1, 0x8d, 0x1d, 0x0d, 0x65, 0x38, 0xaf, 0x76, 0x2a, + 0xcb, 0xde, 0x21, 0x84, 0xf5, 0x33, 0x06, 0x9c, 0xe9, 0xb1, 0x9a, 0xba, 0x62, 0xbd, 0x8d, 0x63, + 0xc7, 0x7a, 0x67, 0xfa, 0x8d, 0xf5, 0x36, 0xbf, 0x93, 0x85, 0x19, 0x21, 0x4f, 0x68, 0x5f, 0x3d, + 0x11, 0x89, 0x94, 0x7e, 0x5b, 0x2c, 0x52, 0x7a, 0x2e, 0x8e, 0xff, 0xb3, 0x30, 0xe9, 0x37, 0x57, + 0x98, 0xf4, 0x4f, 0x32, 0x70, 0x3a, 0x31, 0x7f, 0x06, 0xfa, 0x54, 0x82, 0x6a, 0xb8, 0x9e, 0x72, + 0xa2, 0x8e, 0x3e, 0x95, 0xc3, 0xb0, 0xb1, 0xc5, 0x9f, 0xd7, 0x63, 0x7a, 0xf9, 0x56, 0xbf, 0x73, + 0x02, 0x29, 0x47, 0x06, 0x0c, 0xef, 0x35, 0x7f, 0x2d, 0x0b, 0x0f, 0xf7, 0x4b, 0xe8, 0x4d, 0xfa, + 0xfc, 0xc3, 0x8f, 0x3c, 0xff, 0xb8, 0x4b, 0x6a, 0xfb, 0x44, 0x5e, 0x82, 0x7c, 0x39, 0xab, 0xd4, + 0x5e, 0xf7, 0xfc, 0xec, 0xeb, 0x36, 0x71, 0x8c, 0x9a, 0x76, 0x32, 0xab, 0x66, 0xb8, 0x15, 0x8e, + 0x55, 0x78, 0xf1, 0xed, 0x4e, 0x71, 0x56, 0x64, 0xda, 0xab, 0x90, 0x40, 0x14, 0x62, 0x59, 0x09, + 0x3d, 0x0c, 0x79, 0x8f, 0x43, 0x65, 0xc0, 0xbb, 0xb8, 0x92, 0xe5, 0x65, 0x58, 0x41, 0xd1, 0x27, + 0x34, 0x5b, 0x78, 0xe4, 0xa4, 0x92, 0x15, 0x1c, 0x75, 0xd3, 0xfc, 0x3c, 0xe4, 0x7d, 0x99, 0x1f, + 0x93, 0x5f, 0x07, 0x3c, 0xda, 0xe7, 0x3b, 0x0a, 0x7a, 0x74, 0x92, 0xc9, 0x32, 0x79, 0xfb, 0x54, + 0x2a, 0x4d, 0x45, 0x12, 0x99, 0xea, 0xd4, 0xc2, 0x7d, 0x8c, 0x90, 0x70, 0x62, 0xf9, 0xae, 0x01, + 0xe3, 0x62, 
0xb4, 0xee, 0xc2, 0xd3, 0x8e, 0x1b, 0xd1, 0xa7, 0x1d, 0xe7, 0x53, 0xd9, 0x3b, 0x7a, + 0xbc, 0xeb, 0xb8, 0x01, 0x13, 0x7a, 0x0a, 0x25, 0xf4, 0x9c, 0xb6, 0xf7, 0x19, 0xc3, 0x24, 0x25, + 0x91, 0xbb, 0x63, 0xb8, 0x2f, 0x9a, 0x5f, 0xcc, 0xab, 0x5e, 0x64, 0x7e, 0x08, 0x7d, 0x0e, 0x1a, + 0x47, 0xce, 0x41, 0x7d, 0x0a, 0x64, 0xd2, 0x9f, 0x02, 0x57, 0x20, 0x2f, 0x37, 0x28, 0xa1, 0xc6, + 0x1f, 0xd4, 0xa3, 0xec, 0xa8, 0x2d, 0x40, 0x89, 0x69, 0x13, 0x97, 0x1d, 0xb5, 0xd4, 0x18, 0xaa, + 0x8d, 0x53, 0x91, 0x41, 0x2f, 0xc0, 0xf8, 0x4d, 0xd7, 0xdb, 0x6b, 0xb8, 0x16, 0xcb, 0x7c, 0x0b, + 0x69, 0x5c, 0xec, 0x28, 0x87, 0x17, 0x8f, 0x38, 0xbe, 0x1e, 0xd2, 0xc7, 0x3a, 0x33, 0x54, 0x82, + 0xe9, 0xa6, 0xed, 0x60, 0x62, 0xd5, 0xd4, 0x0b, 0x8e, 0x11, 0x9e, 0x9a, 0x53, 0x1a, 0xb9, 0xeb, + 0x51, 0x30, 0x8e, 0xe3, 0xa3, 0x8f, 0x41, 0xde, 0x17, 0x09, 0x89, 0xd2, 0xb9, 0x82, 0x53, 0x67, + 0x46, 0x4e, 0x34, 0xec, 0x3b, 0x59, 0x82, 0x15, 0x43, 0xb4, 0x06, 0x73, 0x9e, 0x48, 0xf9, 0x11, + 0xf9, 0x6e, 0x06, 0x5f, 0x9f, 0x2c, 0x03, 0x24, 0x4e, 0x80, 0xe3, 0xc4, 0x5a, 0xd4, 0x8a, 0x61, + 0xb9, 0xc0, 0xf8, 0x9d, 0x80, 0xe6, 0x46, 0x67, 0x13, 0xbe, 0x86, 0x05, 0xf4, 0xa8, 0x17, 0x41, + 0xf9, 0x21, 0x5e, 0x04, 0x55, 0xe0, 0x74, 0x1c, 0xc4, 0x12, 0x93, 0xb0, 0x5c, 0x28, 0x9a, 0xf6, + 0xd8, 0x4c, 0x42, 0xc2, 0xc9, 0x75, 0xd1, 0x75, 0x28, 0x78, 0x84, 0x9d, 0x2f, 0x4a, 0xf2, 0xd2, + 0x7f, 0xe0, 0xf0, 0x26, 0x2c, 0x09, 0xe0, 0x90, 0x16, 0x1d, 0x77, 0x2b, 0x9a, 0x9d, 0xf2, 0x4a, + 0x8a, 0x5f, 0xfe, 0x12, 0x63, 0xdf, 0x23, 0x61, 0x90, 0xf9, 0xc6, 0x14, 0x4c, 0x46, 0x7c, 0x0b, + 0xe8, 0x41, 0xc8, 0xb1, 0x4c, 0x2d, 0x6c, 0x7b, 0xc8, 0x87, 0x5b, 0x18, 0xef, 0x1c, 0x0e, 0x43, + 0x9f, 0x35, 0x60, 0xba, 0x15, 0xf1, 0xc2, 0xca, 0x9d, 0x73, 0xc8, 0x7b, 0xbe, 0xa8, 0x6b, 0x57, + 0xcb, 0xeb, 0x1c, 0x65, 0x86, 0xe3, 0xdc, 0xe9, 0x02, 0x14, 0x31, 0x82, 0x0d, 0xe2, 0x31, 0x6c, + 0x61, 0xe3, 0x28, 0x12, 0xcb, 0x51, 0x30, 0x8e, 0xe3, 0xd3, 0x11, 0x66, 0xad, 0x1b, 0xe6, 0x93, + 0x40, 0x25, 0x49, 0x00, 0x87, 0xb4, 0xd0, 0xd3, 0x30, 0x25, 0x92, 0x12, 0x6e, 0xba, 0xb5, 0x8b, + 0x96, 0xbf, 0x2b, 0x8c, 0x7b, 0x75, 0x18, 0x59, 0x8e, 0x40, 0x71, 0x0c, 0x9b, 0xb5, 0x2d, 0xcc, + 0xfc, 0xc8, 0x08, 0x8c, 0x46, 0xd3, 0x5e, 0x2f, 0x47, 0xc1, 0x38, 0x8e, 0x8f, 0xde, 0xa1, 0xed, + 0xfb, 0xfc, 0x9e, 0x4e, 0xed, 0x06, 0x09, 0x7b, 0x7f, 0x09, 0xa6, 0xdb, 0xec, 0x2c, 0x54, 0x93, + 0x40, 0xb1, 0x1e, 0x15, 0xc3, 0xab, 0x51, 0x30, 0x8e, 0xe3, 0xa3, 0x27, 0x61, 0xd2, 0xa3, 0xbb, + 0x9b, 0x22, 0xc0, 0x2f, 0xef, 0xd4, 0xdd, 0x0c, 0xd6, 0x81, 0x38, 0x8a, 0x8b, 0x9e, 0x81, 0xd9, + 0x30, 0x87, 0x97, 0x24, 0xc0, 0x6f, 0xf3, 0x54, 0x7a, 0x9a, 0x52, 0x1c, 0x01, 0x77, 0xd7, 0x41, + 0xbf, 0x04, 0x33, 0x5a, 0x4f, 0xac, 0x3a, 0x35, 0x72, 0x4b, 0xe4, 0x59, 0x62, 0x5f, 0x28, 0x58, + 0x8e, 0xc1, 0x70, 0x17, 0x36, 0x7a, 0x2f, 0x4c, 0x55, 0xdd, 0x46, 0x83, 0xed, 0x71, 0x3c, 0xe5, + 0x32, 0x4f, 0xa8, 0xc4, 0x53, 0x4f, 0x45, 0x20, 0x38, 0x86, 0x89, 0x2e, 0x01, 0x72, 0xb7, 0x7d, + 0xe2, 0xed, 0x93, 0xda, 0x33, 0xfc, 0x23, 0xa3, 0x54, 0xc5, 0x4f, 0x46, 0x23, 0x94, 0x2f, 0x77, + 0x61, 0xe0, 0x84, 0x5a, 0x2c, 0xbb, 0x8d, 0xf6, 0xb0, 0x6a, 0x2a, 0x8d, 0xcf, 0xe3, 0xc4, 0x4f, + 0xee, 0x77, 0x7c, 0x55, 0xe5, 0xc1, 0x28, 0x0f, 0x18, 0x4f, 0x27, 0xb3, 0x92, 0x9e, 0x7d, 0x35, + 0xd4, 0x11, 0xbc, 0x14, 0x0b, 0x4e, 0xe8, 0xe3, 0x50, 0xd8, 0x96, 0xa9, 0xb8, 0xe7, 0x67, 0xd2, + 0xd0, 0x8b, 0xb1, 0xac, 0xf2, 0xe1, 0xc9, 0x54, 0x01, 0x70, 0xc8, 0x12, 0x3d, 0x04, 0xe3, 0x17, + 0x37, 0x4b, 0x6a, 0x16, 0xce, 0xb2, 0xd1, 0x1f, 0xa1, 0x55, 0xb0, 0x0e, 0xa0, 0x2b, 0x4c, 0xd9, + 0x4b, 0x88, 0x0d, 0x71, 0xa8, 0x6f, 
0xbb, 0xcd, 0x1f, 0x8a, 0xcd, 0xae, 0x23, 0x71, 0x65, 0xfe, + 0x54, 0x0c, 0x5b, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x87, 0x71, 0xa1, 0x2f, 0xd8, 0xde, 0x34, 0x77, + 0xbc, 0x47, 0x7b, 0x38, 0x24, 0x81, 0x75, 0x7a, 0xec, 0x96, 0x89, 0x65, 0x28, 0x26, 0x17, 0xda, + 0x8d, 0xc6, 0xfc, 0x69, 0xb6, 0x6f, 0x86, 0xb7, 0x4c, 0x21, 0x08, 0xeb, 0x78, 0xe8, 0x51, 0x19, + 0x39, 0xf1, 0x96, 0xc8, 0xb5, 0x9b, 0x8a, 0x9c, 0x50, 0x56, 0x6e, 0x8f, 0x80, 0xe2, 0x33, 0x77, + 0x08, 0x59, 0xd8, 0x86, 0x05, 0x69, 0x62, 0x75, 0x2f, 0x92, 0xf9, 0xf9, 0x88, 0x97, 0x60, 0xe1, + 0x7a, 0x4f, 0x4c, 0x7c, 0x04, 0x15, 0xb4, 0x0d, 0x59, 0xab, 0xb1, 0x3d, 0x7f, 0x6f, 0x1a, 0xb6, + 0xa2, 0xfa, 0x68, 0x30, 0x0f, 0x02, 0x2a, 0xad, 0x95, 0x31, 0x25, 0x6e, 0xbe, 0x9c, 0x51, 0x5e, + 0x79, 0x95, 0x71, 0xf2, 0x45, 0x7d, 0x56, 0x1b, 0x69, 0x7c, 0x14, 0xb3, 0x2b, 0x5f, 0x3d, 0x57, + 0x48, 0x89, 0x73, 0xba, 0xa5, 0xd6, 0x71, 0x2a, 0xe9, 0x44, 0xa2, 0xd9, 0x34, 0xf9, 0x69, 0x2e, + 0xba, 0x8a, 0xcd, 0xc3, 0x31, 0xe5, 0x84, 0x8a, 0x85, 0x02, 0x78, 0x90, 0xb3, 0xfd, 0xc0, 0x76, + 0x53, 0x7c, 0x60, 0x16, 0x4b, 0x43, 0xc9, 0x02, 0x67, 0x19, 0x00, 0x73, 0x56, 0x94, 0xa7, 0x53, + 0xb7, 0x9d, 0x5b, 0xa2, 0xf9, 0x57, 0x52, 0xbf, 0xe3, 0xe7, 0x3c, 0x19, 0x00, 0x73, 0x56, 0xe8, + 0x06, 0x9f, 0x69, 0xe9, 0x7c, 0x00, 0x35, 0xfe, 0x5d, 0xe3, 0xe8, 0x8c, 0xa3, 0xbc, 0xfc, 0xa6, + 0x2d, 0x6c, 0x98, 0x21, 0x79, 0x55, 0xd6, 0x57, 0x93, 0x78, 0x55, 0xd6, 0x57, 0x31, 0x65, 0x82, + 0x5e, 0x35, 0x00, 0x2c, 0xf5, 0x81, 0xdf, 0x74, 0x3e, 0xee, 0xd0, 0xeb, 0x83, 0xc1, 0x3c, 0xd6, + 0x2d, 0x84, 0x62, 0x8d, 0x33, 0x7a, 0x01, 0xc6, 0x2c, 0xfe, 0x69, 0x1a, 0x11, 0x46, 0x98, 0xce, + 0xf7, 0x96, 0x62, 0x12, 0xb0, 0xf8, 0x49, 0x01, 0xc2, 0x92, 0x21, 0xe5, 0x1d, 0x78, 0x16, 0xd9, + 0xb1, 0xf7, 0x44, 0x3c, 0x61, 0x65, 0xe8, 0x0c, 0xd3, 0x94, 0x58, 0x12, 0x6f, 0x01, 0xc2, 0x92, + 0x21, 0xff, 0xa6, 0xa6, 0xe5, 0x58, 0xea, 0x71, 0x48, 0x3a, 0x4f, 0x88, 0xf4, 0xe7, 0x26, 0xda, + 0x37, 0x35, 0x75, 0x46, 0x38, 0xca, 0xd7, 0xfc, 0x51, 0x16, 0x80, 0xfd, 0xe4, 0xef, 0x86, 0x9b, + 0x2c, 0xd7, 0xdc, 0xae, 0x5b, 0x13, 0x4b, 0x3b, 0xc5, 0xe7, 0xbf, 0x20, 0x12, 0xcb, 0xed, 0xba, + 0x35, 0x2c, 0x98, 0xa0, 0x3a, 0x8c, 0xb4, 0xac, 0x60, 0x37, 0xfd, 0xb7, 0xc6, 0x79, 0xfe, 0x72, + 0x27, 0xd8, 0xc5, 0x8c, 0x01, 0x7a, 0xc9, 0x80, 0x31, 0xfe, 0xda, 0x58, 0xba, 0x9a, 0x87, 0xbe, + 0x4f, 0x95, 0x7d, 0xb6, 0xc8, 0x9f, 0x34, 0x8b, 0x60, 0x05, 0xa5, 0x1a, 0x45, 0x29, 0x96, 0x6c, + 0x17, 0x5e, 0x31, 0x60, 0x42, 0x47, 0x4d, 0x08, 0x33, 0xf8, 0xb0, 0x1e, 0x66, 0x90, 0x66, 0x7f, + 0xe8, 0x11, 0x0b, 0xff, 0x6e, 0x80, 0xf6, 0x25, 0xd2, 0x30, 0xc8, 0xd0, 0xe8, 0x3b, 0xc8, 0x30, + 0x33, 0x60, 0x90, 0x61, 0x76, 0xa0, 0x20, 0xc3, 0x91, 0xc1, 0x83, 0x0c, 0x73, 0xbd, 0x83, 0x0c, + 0xcd, 0xd7, 0x0c, 0x98, 0xed, 0xda, 0x0f, 0xe3, 0x5f, 0x7c, 0x37, 0xfa, 0xfc, 0xe2, 0xfb, 0x0a, + 0xcc, 0x88, 0x5c, 0xc8, 0x95, 0x56, 0xc3, 0x4e, 0x7c, 0x07, 0xbe, 0x15, 0x83, 0xe3, 0xae, 0x1a, + 0xe6, 0x9f, 0x1b, 0x30, 0xae, 0x3d, 0x5b, 0xa3, 0xed, 0x60, 0xcf, 0xfb, 0x84, 0x18, 0x61, 0x1a, + 0x68, 0xe6, 0xda, 0xe7, 0x30, 0x7e, 0xcb, 0x54, 0xd7, 0xf2, 0x6e, 0x86, 0xb7, 0x4c, 0xb4, 0x14, + 0x0b, 0x28, 0xcf, 0xa8, 0x48, 0xf8, 0xd7, 0xfc, 0xb3, 0x7a, 0x46, 0x45, 0xd2, 0xc2, 0x0c, 0xc2, + 0xd8, 0x51, 0x3b, 0x52, 0xc4, 0x9f, 0x6a, 0x59, 0xa7, 0x2d, 0x2f, 0xc0, 0x1c, 0x86, 0xce, 0x42, + 0x96, 0x38, 0x35, 0x71, 0xe8, 0x55, 0x5f, 0x7a, 0x3a, 0xef, 0xd4, 0x30, 0x2d, 0x37, 0x2f, 0xc3, + 0x44, 0x85, 0x54, 0x3d, 0x12, 0x3c, 0x4b, 0x0e, 0xfa, 0xfe, 0x74, 0x14, 0x9d, 0xed, 0xb1, 0x4f, + 0x47, 0xd1, 0xea, 0xb4, 0xdc, 0xfc, 0x43, 0x03, 0x62, 0xa9, 
0xd1, 0x35, 0x8f, 0xb3, 0xd1, 0xcb, + 0xe3, 0x1c, 0xf1, 0x8d, 0x66, 0x8e, 0xf4, 0x8d, 0x5e, 0x02, 0xd4, 0xa4, 0x4b, 0x21, 0xf2, 0x21, + 0x00, 0xe1, 0x6f, 0x08, 0x1f, 0xc9, 0x76, 0x61, 0xe0, 0x84, 0x5a, 0xe6, 0x1f, 0x70, 0x61, 0xf5, + 0x64, 0xe9, 0x77, 0xee, 0x80, 0x36, 0xe4, 0x18, 0x29, 0xe1, 0x74, 0xd9, 0x1c, 0x6e, 0x71, 0x77, + 0xe7, 0x7c, 0x08, 0x07, 0x52, 0x2c, 0x79, 0xc6, 0xcd, 0xfc, 0x0e, 0x97, 0x55, 0xcb, 0xa6, 0xde, + 0x87, 0xac, 0xcd, 0xa8, 0xac, 0x17, 0xd3, 0xda, 0x2b, 0x93, 0x65, 0x44, 0x8b, 0x00, 0x2d, 0xe2, + 0x55, 0x89, 0x13, 0xc8, 0xb0, 0xe8, 0x9c, 0x78, 0x46, 0xa2, 0x4a, 0xb1, 0x86, 0x61, 0xbe, 0x64, + 0xc0, 0x4c, 0x25, 0xb0, 0xab, 0x7b, 0xb6, 0xc3, 0x9f, 0x45, 0xed, 0xd8, 0x75, 0x7a, 0x4a, 0x21, + 0xe2, 0xab, 0x48, 0xdc, 0x0d, 0xa6, 0xb6, 0x62, 0xf9, 0x31, 0x24, 0x09, 0x47, 0x25, 0x98, 0x96, + 0xde, 0x76, 0xe9, 0xbb, 0xe4, 0xcf, 0x39, 0x95, 0xaf, 0x64, 0x25, 0x0a, 0xc6, 0x71, 0x7c, 0xf3, + 0x13, 0x30, 0xae, 0xed, 0xaf, 0x6c, 0x2b, 0xba, 0x65, 0x55, 0x83, 0xf8, 0x12, 0x3e, 0x4f, 0x0b, + 0x31, 0x87, 0x31, 0x17, 0x2b, 0x8f, 0x9b, 0x8d, 0x2d, 0x61, 0x11, 0x2d, 0x2b, 0xa0, 0x94, 0x98, + 0x47, 0xea, 0xe4, 0x96, 0xcc, 0xf0, 0x29, 0x89, 0x61, 0x5a, 0x88, 0x39, 0xcc, 0xbc, 0x06, 0x79, + 0xf9, 0xe8, 0x9e, 0xbd, 0x5c, 0x95, 0xee, 0x3f, 0xfd, 0xe5, 0xaa, 0xeb, 0x05, 0x98, 0x41, 0xe8, + 0x3a, 0xf1, 0x1d, 0xfb, 0xa2, 0xeb, 0x07, 0x32, 0x53, 0x00, 0x77, 0xf2, 0x6f, 0xac, 0xb2, 0x32, + 0xac, 0xa0, 0xe6, 0x2c, 0x4c, 0x2b, 0xef, 0xbd, 0x08, 0x4d, 0xfc, 0x46, 0x16, 0x26, 0x22, 0xdf, + 0xd8, 0xbd, 0xf3, 0x04, 0xea, 0x7f, 0x5d, 0x26, 0x78, 0xe1, 0xb3, 0x03, 0x7a, 0xe1, 0xf5, 0x6b, + 0x8f, 0x91, 0x93, 0xbd, 0xf6, 0xc8, 0xa5, 0x73, 0xed, 0x11, 0xc0, 0x98, 0x2f, 0x54, 0xcf, 0x68, + 0x1a, 0xee, 0x91, 0xd8, 0x88, 0x71, 0xab, 0x53, 0x6a, 0x30, 0xc9, 0xca, 0xfc, 0x6a, 0x0e, 0xa6, + 0xa2, 0xe9, 0x86, 0xfa, 0x18, 0xc9, 0x77, 0x74, 0x8d, 0xe4, 0x80, 0x5e, 0xc8, 0xec, 0xb0, 0x5e, + 0xc8, 0x91, 0x61, 0xbd, 0x90, 0xb9, 0x63, 0x78, 0x21, 0xbb, 0x7d, 0x88, 0xa3, 0x7d, 0xfb, 0x10, + 0x9f, 0x52, 0x21, 0x34, 0x63, 0x91, 0x3b, 0xe7, 0x30, 0x84, 0x06, 0x45, 0x87, 0x61, 0xd9, 0xad, + 0x25, 0x86, 0x22, 0xe5, 0xef, 0xe0, 0x6d, 0xf1, 0x12, 0x23, 0x5e, 0x06, 0xbf, 0xe8, 0x78, 0xcb, + 0x00, 0xd1, 0x2e, 0x8f, 0xc3, 0xb8, 0x98, 0x4f, 0xcc, 0xfa, 0x81, 0xa8, 0xe5, 0x54, 0x09, 0x41, + 0x58, 0xc7, 0x63, 0x9f, 0x81, 0x8c, 0x7e, 0xf7, 0x92, 0x39, 0x75, 0xf5, 0xcf, 0x40, 0xc6, 0xbe, + 0x93, 0x19, 0xc7, 0x37, 0x3f, 0x06, 0xa7, 0x13, 0xcf, 0x58, 0xcc, 0xe9, 0xc4, 0x14, 0x33, 0xa9, + 0x09, 0x04, 0x4d, 0x8c, 0x58, 0x36, 0xda, 0x85, 0xeb, 0x3d, 0x31, 0xf1, 0x11, 0x54, 0xcc, 0xaf, + 0x64, 0x61, 0x2a, 0xfa, 0x0d, 0x21, 0x74, 0x53, 0x79, 0x64, 0x52, 0x71, 0x06, 0x71, 0xb2, 0x5a, + 0x0a, 0x9b, 0x9e, 0xee, 0xd5, 0x9b, 0x6c, 0x7e, 0x6d, 0xab, 0x7c, 0x3a, 0x27, 0xc7, 0x58, 0xf8, + 0x35, 0x05, 0x3b, 0xf6, 0x99, 0xa0, 0xf0, 0x01, 0x83, 0x38, 0x48, 0xa5, 0xce, 0x3d, 0x7c, 0x92, + 0xa0, 0x58, 0x61, 0x8d, 0x2d, 0xd5, 0x2d, 0xfb, 0xc4, 0xb3, 0x77, 0x6c, 0xf5, 0xfd, 0x43, 0xb6, + 0x73, 0x5f, 0x13, 0x65, 0x58, 0x41, 0xcd, 0x97, 0x32, 0x10, 0x7e, 0xed, 0x95, 0x7d, 0x68, 0xc3, + 0xd7, 0x8c, 0x56, 0x31, 0x6c, 0x97, 0x86, 0xfd, 0x9a, 0x4d, 0x48, 0x51, 0x84, 0x37, 0x6a, 0x25, + 0x38, 0xc2, 0xf1, 0xa7, 0xf0, 0x95, 0x57, 0x0b, 0xa6, 0x63, 0xcf, 0x3a, 0x53, 0x8f, 0x21, 0xff, + 0x62, 0x16, 0x0a, 0xea, 0x61, 0x2c, 0x7a, 0x4f, 0xc4, 0x83, 0x50, 0x28, 0xbf, 0x55, 0xcb, 0x29, + 0xbf, 0xeb, 0xd6, 0x6e, 0x77, 0x8a, 0xd3, 0x0a, 0x39, 0xe6, 0x0d, 0x38, 0x0b, 0xd9, 0xb6, 0xd7, + 0x88, 0x1f, 0x11, 0xae, 0xe2, 0x35, 0x4c, 0xcb, 0xd1, 0xad, 0xf8, 0x11, 0x7e, 0x3d, 
0xa5, 0xc7, + 0xbc, 0xdc, 0x96, 0xee, 0x7d, 0x74, 0xa7, 0x5a, 0x72, 0xdb, 0xad, 0x1d, 0xc4, 0x73, 0xd0, 0x97, + 0xdd, 0xda, 0x01, 0x66, 0x10, 0xf4, 0x34, 0x4c, 0x05, 0x76, 0x93, 0xb8, 0xed, 0x40, 0xff, 0x96, + 0x66, 0x36, 0xbc, 0x2e, 0xdc, 0x8a, 0x40, 0x71, 0x0c, 0x9b, 0x6a, 0xd9, 0x1b, 0xbe, 0xeb, 0xb0, + 0xc4, 0x72, 0xa3, 0xd1, 0xbb, 0x85, 0x4b, 0x95, 0xcb, 0x1b, 0xcc, 0x93, 0xa1, 0x30, 0x28, 0xb6, + 0xcd, 0x5e, 0xc1, 0x79, 0x44, 0xdc, 0xd6, 0xcf, 0x84, 0x39, 0x12, 0x78, 0x39, 0x56, 0x18, 0xe6, + 0x55, 0x98, 0x8e, 0x35, 0x55, 0x1e, 0xc6, 0x8c, 0xe4, 0xc3, 0x58, 0x7f, 0x09, 0xdf, 0xff, 0xc4, + 0x80, 0xd9, 0xae, 0xc5, 0xdb, 0xef, 0xe3, 0x86, 0xb8, 0x1a, 0xc9, 0x1c, 0x5f, 0x8d, 0x64, 0x07, + 0x53, 0x23, 0xe5, 0xed, 0x6f, 0xbd, 0x71, 0xee, 0x9e, 0x6f, 0xbf, 0x71, 0xee, 0x9e, 0xef, 0xbd, + 0x71, 0xee, 0x9e, 0x97, 0x0e, 0xcf, 0x19, 0xdf, 0x3a, 0x3c, 0x67, 0x7c, 0xfb, 0xf0, 0x9c, 0xf1, + 0xbd, 0xc3, 0x73, 0xc6, 0x3f, 0x1f, 0x9e, 0x33, 0x5e, 0xfb, 0xe1, 0xb9, 0x7b, 0x9e, 0x7b, 0x2a, + 0x9c, 0x5a, 0x4b, 0x72, 0x6a, 0xb1, 0x1f, 0xef, 0x94, 0x13, 0x69, 0xa9, 0xb5, 0x57, 0x5f, 0xa2, + 0x53, 0x6b, 0x49, 0x95, 0xc8, 0xa9, 0xf5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcd, 0x19, 0xc7, + 0x30, 0x68, 0x92, 0x00, 0x00, +} + +func (m *ALBStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2919,23 +3540,88 @@ func (m *ALBTrafficRouting) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ALBTrafficRouting) MarshalTo(dAtA []byte) (int, error) { +func (m *ALBStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ALBTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ALBStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.AnnotationPrefix) - copy(dAtA[i:], m.AnnotationPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AnnotationPrefix))) - i-- - dAtA[i] = 0x22 - i -= len(m.RootService) - copy(dAtA[i:], m.RootService) + { + size, err := m.StableTargetGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.CanaryTargetGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ALBTrafficRouting) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ALBTrafficRouting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ALBTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StickinessConfig != nil { + { + size, err := m.StickinessConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.AnnotationPrefix) + copy(dAtA[i:], m.AnnotationPrefix) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.AnnotationPrefix))) + i-- + dAtA[i] = 0x22 + i -= len(m.RootService) + copy(dAtA[i:], m.RootService) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RootService))) i-- dAtA[i] = 0x1a @@ -3147,6 +3833,34 @@ func (m *AnalysisRunSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MeasurementRetention) > 0 { + for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MeasurementRetention[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DryRun[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } i-- if m.Terminate { dAtA[i] = 1 @@ -3206,6 +3920,28 @@ func (m *AnalysisRunStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.DryRunSummary != nil { + { + size, err := m.DryRunSummary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.RunSummary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.StartedAt != nil { { size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) @@ -3388,6 +4124,34 @@ func (m *AnalysisTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MeasurementRetention) > 0 { + for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MeasurementRetention[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DryRun[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Args) > 0 { for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { { @@ -3466,7 +4230,7 @@ func (m *AntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Argument) Marshal() (dAtA []byte, err error) { +func (m *AppMeshTrafficRouting) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3476,19 +4240,19 @@ func (m *Argument) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Argument) MarshalTo(dAtA []byte) (int, error) { +func (m *AppMeshTrafficRouting) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Argument) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AppMeshTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ValueFrom != nil { + if m.VirtualNodeGroup != nil { { - size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.VirtualNodeGroup.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3496,24 +4260,24 @@ func (m *Argument) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } - if m.Value != nil { - i -= len(*m.Value) - copy(dAtA[i:], *m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + if m.VirtualService != nil { + { + size, err := m.VirtualService.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ArgumentValueFrom) Marshal() (dAtA []byte, err error) { +func (m *AppMeshVirtualNodeGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3523,19 +4287,19 @@ func (m *ArgumentValueFrom) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ArgumentValueFrom) MarshalTo(dAtA []byte) (int, error) { +func (m *AppMeshVirtualNodeGroup) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ArgumentValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AppMeshVirtualNodeGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.FieldRef != nil { + if m.StableVirtualNodeRef != nil { { - size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.StableVirtualNodeRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3545,17 +4309,22 @@ func (m *ArgumentValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.PodTemplateHashValue != nil { - i -= len(*m.PodTemplateHashValue) - copy(dAtA[i:], *m.PodTemplateHashValue) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PodTemplateHashValue))) + if m.CanaryVirtualNodeRef != nil { + { + size, err := m.CanaryVirtualNodeRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BlueGreenStatus) Marshal() (dAtA []byte, err error) { +func (m *AppMeshVirtualNodeReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3565,62 +4334,62 @@ func (m *BlueGreenStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlueGreenStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *AppMeshVirtualNodeReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlueGreenStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AppMeshVirtualNodeReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.PostPromotionAnalysisRunStatus != nil { - { - size, err := m.PostPromotionAnalysisRunStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AppMeshVirtualService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.PrePromotionAnalysisRunStatus != nil { - { - size, err := 
m.PrePromotionAnalysisRunStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *AppMeshVirtualService) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppMeshVirtualService) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Routes[iNdEx]) + copy(dAtA[i:], m.Routes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Routes[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x22 - } - i-- - if m.ScaleUpPreviewCheckPoint { - dAtA[i] = 1 - } else { - dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.ActiveSelector) - copy(dAtA[i:], m.ActiveSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ActiveSelector))) - i-- - dAtA[i] = 0x12 - i -= len(m.PreviewSelector) - copy(dAtA[i:], m.PreviewSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreviewSelector))) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *BlueGreenStrategy) Marshal() (dAtA []byte, err error) { +func (m *Argument) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3630,19 +4399,206 @@ func (m *BlueGreenStrategy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlueGreenStrategy) MarshalTo(dAtA []byte) (int, error) { +func (m *Argument) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlueGreenStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Argument) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.AbortScaleDownDelaySeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.AbortScaleDownDelaySeconds)) - i-- + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ArgumentValueFrom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArgumentValueFrom) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArgumentValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PodTemplateHashValue != nil { + i -= len(*m.PodTemplateHashValue) + copy(dAtA[i:], *m.PodTemplateHashValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PodTemplateHashValue))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *AwsResourceRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AwsResourceRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AwsResourceRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ARN) + copy(dAtA[i:], m.ARN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ARN))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BlueGreenStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlueGreenStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlueGreenStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PostPromotionAnalysisRunStatus != nil { + { + size, err := m.PostPromotionAnalysisRunStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.PrePromotionAnalysisRunStatus != nil { + { + size, err := m.PrePromotionAnalysisRunStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.ScaleUpPreviewCheckPoint { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.ActiveSelector) + copy(dAtA[i:], m.ActiveSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ActiveSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.PreviewSelector) + copy(dAtA[i:], m.PreviewSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreviewSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BlueGreenStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlueGreenStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlueGreenStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AbortScaleDownDelaySeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AbortScaleDownDelaySeconds)) + i-- dAtA[i] = 0x70 } if m.ActiveMetadata != nil { @@ -3778,6 +4734,11 @@ func (m *CanaryStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.StablePingPong) + copy(dAtA[i:], m.StablePingPong) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StablePingPong))) + i-- + dAtA[i] = 0x2a if m.Weights != nil { { size, err := m.Weights.MarshalToSizedBuffer(dAtA[:i]) @@ -3842,6 +4803,30 @@ func (m *CanaryStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SetMirrorRoute != nil { + { + size, err := m.SetMirrorRoute.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.SetHeaderRoute != nil { + { + size, err := m.SetHeaderRoute.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if m.SetCanaryScale != nil { { size, err := m.SetCanaryScale.MarshalToSizedBuffer(dAtA[:i]) @@ -3918,6 +4903,18 @@ func (m *CanaryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PingPong != nil { + { + size, err := m.PingPong.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } i-- if m.DynamicStableScale { dAtA[i] = 1 @@ -4428,6 +5425,34 @@ func (m *DatadogMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *DryRun) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DryRun) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DryRun) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Experiment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4717,15 +5742,10 @@ func (m *ExperimentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ScaleDownDelaySeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleDownDelaySeconds)) - i-- - dAtA[i] = 0x30 - } - if len(m.Analyses) > 0 { - for iNdEx := len(m.Analyses) - 1; iNdEx >= 0; iNdEx-- { + if len(m.MeasurementRetention) > 0 { + for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Analyses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MeasurementRetention[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4733,15 +5753,48 @@ func (m *ExperimentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x42 } } - i-- - if m.Terminate { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DryRun[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.ScaleDownDelaySeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleDownDelaySeconds)) + i-- + dAtA[i] = 0x30 + } + if len(m.Analyses) > 0 { + for iNdEx := len(m.Analyses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Analyses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.Terminate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- dAtA[i] = 0x20 if m.ProgressDeadlineSeconds != nil { @@ -4919,6 +5972,79 @@ func (m *GraphiteMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HeaderRoutingMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderRoutingMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeaderRoutingMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HeaderValue != nil { + { + size, err := m.HeaderValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *InfluxdbMetric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfluxdbMetric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfluxdbMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x12 + i -= len(m.Profile) + copy(dAtA[i:], m.Profile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Profile))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *IstioDestinationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5266,6 +6392,34 @@ func (m *KayentaThreshold) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MangedRoutes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MangedRoutes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MangedRoutes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Measurement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5364,6 +6518,37 @@ func (m *Measurement) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MeasurementRetention) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MeasurementRetention) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MeasurementRetention) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Metric) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5490,6 +6675,18 @@ func (m *MetricProvider) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Influxdb != nil { + { + size, err := m.Influxdb.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.Graphite != nil { { size, err := m.Graphite.MarshalToSizedBuffer(dAtA[:i]) @@ -5621,6 +6818,38 @@ func (m *MetricResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 i = encodeVarintGenerated(dAtA, i, uint64(m.ConsecutiveError)) i-- dAtA[i] = 0x50 @@ -5837,6 +7066,39 @@ func (m *PauseCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PingPongSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingPongSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingPongSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PongService) + copy(dAtA[i:], m.PongService) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PongService))) + i-- + dAtA[i] = 0x12 + i -= len(m.PingService) + copy(dAtA[i:], m.PingService) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PingService))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodTemplateMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6063,6 +7325,34 @@ func (m *RolloutAnalysis) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MeasurementRetention) > 0 { + for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MeasurementRetention[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DryRun[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Args) > 0 { for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { { @@ -6680,6 +7970,20 @@ func (m *RolloutStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ALB != nil { + { + size, err := m.ALB.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] 
= 0xca + } i -= len(m.WorkloadObservedGeneration) copy(dAtA[i:], m.WorkloadObservedGeneration) i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkloadObservedGeneration))) @@ -6929,15 +8233,53 @@ func (m *RolloutTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Ambassador != nil { - { - size, err := m.Ambassador.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if len(m.ManagedRoutes) > 0 { + for iNdEx := len(m.ManagedRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Traefik != nil { + { + size, err := m.Traefik.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.AppMesh != nil { + { + size, err := m.AppMesh.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Ambassador != nil { + { + size, err := m.Ambassador.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x2a } @@ -6992,6 +8334,120 @@ func (m *RolloutTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *RouteMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Headers) > 0 { + keysForHeaders := make([]string, 0, len(m.Headers)) + for k := range m.Headers { + keysForHeaders = append(keysForHeaders, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHeaders) + for iNdEx := len(keysForHeaders) - 1; iNdEx >= 0; iNdEx-- { + v := m.Headers[string(keysForHeaders[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHeaders[iNdEx]) + copy(dAtA[i:], keysForHeaders[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHeaders[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.Path != nil { + { + size, err := m.Path.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Method != nil { + { + size, err := m.Method.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunSummary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunSummary) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunSummary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Error)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Inconclusive)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Successful)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *SMITrafficRouting) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7145,7 +8601,7 @@ func (m *SetCanaryScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TLSRoute) Marshal() (dAtA []byte, err error) { +func (m *SetHeaderRoute) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7155,32 +8611,39 @@ func (m *TLSRoute) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TLSRoute) MarshalTo(dAtA []byte) (int, error) { +func (m *SetHeaderRoute) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TLSRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SetHeaderRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.SNIHosts) > 0 { - for iNdEx := len(m.SNIHosts) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SNIHosts[iNdEx]) - copy(dAtA[i:], m.SNIHosts[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SNIHosts[iNdEx]))) + if len(m.Match) > 0 { + for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Match[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TemplateService) Marshal() (dAtA []byte, err error) { +func (m *SetMirrorRoute) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7190,20 +8653,44 @@ func (m *TemplateService) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TemplateService) MarshalTo(dAtA []byte) (int, error) { +func (m *SetMirrorRoute) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TemplateService) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SetMirrorRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Percentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Percentage)) + i-- + dAtA[i] = 0x20 + } + if len(m.Match) > 0 { + for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Match[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TemplateSpec) Marshal() (dAtA 
[]byte, err error) { +func (m *StickinessConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7213,67 +8700,31 @@ func (m *TemplateSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TemplateSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *StickinessConfig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StickinessConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i = encodeVarintGenerated(dAtA, i, uint64(m.DurationSeconds)) i-- - dAtA[i] = 0x2a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + dAtA[i] = 0x10 i-- - dAtA[i] = 0x18 - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x10 + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *TemplateStatus) Marshal() (dAtA []byte, err error) { +func (m *StringMatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7283,41 +8734,207 @@ func (m *TemplateStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TemplateStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *StringMatch) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TemplateStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StringMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.PodTemplateHash) - copy(dAtA[i:], m.PodTemplateHash) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodTemplateHash))) + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Regex))) i-- - dAtA[i] = 0x5a - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + dAtA[i] = 0x1a + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) i-- - dAtA[i] = 0x52 - if m.LastTransitionTime != nil { - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + dAtA[i] = 0x12 + i -= len(m.Exact) + copy(dAtA[i:], m.Exact) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Exact))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TLSRoute) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SNIHosts) > 0 { + for iNdEx := len(m.SNIHosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SNIHosts[iNdEx]) + copy(dAtA[i:], m.SNIHosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SNIHosts[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *TemplateService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateService) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateService) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *TemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x18 + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PodTemplateHash) + copy(dAtA[i:], m.PodTemplateHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodTemplateHash))) + i-- + dAtA[i] = 0x5a + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x52 + if 
m.LastTransitionTime != nil { + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x42 i -= len(m.Status) @@ -7350,6 +8967,34 @@ func (m *TemplateStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TraefikTrafficRouting) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraefikTrafficRouting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraefikTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.WeightedTraefikServiceName) + copy(dAtA[i:], m.WeightedTraefikServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WeightedTraefikServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TrafficWeights) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7645,6 +9290,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ALBStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.CanaryTargetGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StableTargetGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ALBTrafficRouting) Size() (n int) { if m == nil { return 0 @@ -7658,6 +9318,10 @@ func (m *ALBTrafficRouting) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.AnnotationPrefix) n += 1 + l + sovGenerated(uint64(l)) + if m.StickinessConfig != nil { + l = m.StickinessConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -7744,6 +9408,18 @@ func (m *AnalysisRunSpec) Size() (n int) { } } n += 2 + if len(m.DryRun) > 0 { + for _, e := range m.DryRun { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MeasurementRetention) > 0 { + for _, e := range m.MeasurementRetention { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -7767,6 +9443,12 @@ func (m *AnalysisRunStatus) Size() (n int) { l = m.StartedAt.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = m.RunSummary.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRunSummary != nil { + l = m.DryRunSummary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -7833,6 +9515,18 @@ func (m *AnalysisTemplateSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.DryRun) > 0 { + for _, e := range m.DryRun { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MeasurementRetention) > 0 { + for _, e := range m.MeasurementRetention { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -7853,57 +9547,132 @@ func (m *AntiAffinity) Size() (n int) { return n } -func (m *Argument) Size() (n int) { +func (m *AppMeshTrafficRouting) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = len(*m.Value) + if m.VirtualService != nil { + l = m.VirtualService.Size() n += 1 
+ l + sovGenerated(uint64(l)) } - if m.ValueFrom != nil { - l = m.ValueFrom.Size() + if m.VirtualNodeGroup != nil { + l = m.VirtualNodeGroup.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ArgumentValueFrom) Size() (n int) { +func (m *AppMeshVirtualNodeGroup) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.PodTemplateHashValue != nil { - l = len(*m.PodTemplateHashValue) + if m.CanaryVirtualNodeRef != nil { + l = m.CanaryVirtualNodeRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.FieldRef != nil { - l = m.FieldRef.Size() + if m.StableVirtualNodeRef != nil { + l = m.StableVirtualNodeRef.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *BlueGreenStatus) Size() (n int) { +func (m *AppMeshVirtualNodeReference) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PreviewSelector) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ActiveSelector) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.PrePromotionAnalysisRunStatus != nil { - l = m.PrePromotionAnalysisRunStatus.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + return n +} + +func (m *AppMeshVirtualService) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Routes) > 0 { + for _, s := range m.Routes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Argument) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ArgumentValueFrom) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodTemplateHashValue != nil { + l = len(*m.PodTemplateHashValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AwsResourceRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ARN) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BlueGreenStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PreviewSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ActiveSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.PrePromotionAnalysisRunStatus != nil { + l = m.PrePromotionAnalysisRunStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.PostPromotionAnalysisRunStatus != nil { l = m.PostPromotionAnalysisRunStatus.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -7984,6 +9753,8 @@ func (m *CanaryStatus) Size() (n int) { l = m.Weights.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.StablePingPong) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -8012,6 +9783,14 @@ func (m *CanaryStep) Size() (n int) { l = m.SetCanaryScale.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.SetHeaderRoute != nil { + l = m.SetHeaderRoute.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SetMirrorRoute != nil { + l = m.SetMirrorRoute.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -8069,6 +9848,10 @@ func (m *CanaryStrategy) Size() (n int) { n += 1 + sovGenerated(uint64(*m.AbortScaleDownDelaySeconds)) } n += 2 + if m.PingPong != nil { + l = m.PingPong.Size() + n += 
1 + l + sovGenerated(uint64(l)) + } return n } @@ -8213,6 +9996,17 @@ func (m *DatadogMetric) Size() (n int) { return n } +func (m *DryRun) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Experiment) Size() (n int) { if m == nil { return 0 @@ -8331,6 +10125,18 @@ func (m *ExperimentSpec) Size() (n int) { if m.ScaleDownDelaySeconds != nil { n += 1 + sovGenerated(uint64(*m.ScaleDownDelaySeconds)) } + if len(m.DryRun) > 0 { + for _, e := range m.DryRun { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MeasurementRetention) > 0 { + for _, e := range m.MeasurementRetention { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -8393,6 +10199,34 @@ func (m *GraphiteMetric) Size() (n int) { return n } +func (m *HeaderRoutingMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + n += 1 + l + sovGenerated(uint64(l)) + if m.HeaderValue != nil { + l = m.HeaderValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *InfluxdbMetric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Profile) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Query) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *IstioDestinationRule) Size() (n int) { if m == nil { return 0 @@ -8522,6 +10356,17 @@ func (m *KayentaThreshold) Size() (n int) { return n } +func (m *MangedRoutes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Measurement) Size() (n int) { if m == nil { return 0 @@ -8557,6 +10402,18 @@ func (m *Measurement) Size() (n int) { return n } +func (m *MeasurementRetention) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Limit)) + return n +} + func (m *Metric) Size() (n int) { if m == nil { return 0 @@ -8636,6 +10493,10 @@ func (m *MetricProvider) Size() (n int) { l = m.Graphite.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Influxdb != nil { + l = m.Influxdb.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -8663,6 +10524,15 @@ func (m *MetricResult) Size() (n int) { n += 1 + sovGenerated(uint64(m.Inconclusive)) n += 1 + sovGenerated(uint64(m.Error)) n += 1 + sovGenerated(uint64(m.ConsecutiveError)) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -8728,6 +10598,19 @@ func (m *PauseCondition) Size() (n int) { return n } +func (m *PingPongSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PingService) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PongService) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodTemplateMetadata) Size() (n int) { if m == nil { return 0 @@ -8818,6 +10701,18 @@ func (m *RolloutAnalysis) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.DryRun) > 0 { + for _, e := range m.DryRun { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MeasurementRetention) > 0 { + for _, e := range m.MeasurementRetention { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } 
@@ -9083,6 +10978,10 @@ func (m *RolloutStatus) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) l = len(m.WorkloadObservedGeneration) n += 2 + l + sovGenerated(uint64(l)) + if m.ALB != nil { + l = m.ALB.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -9129,6 +11028,60 @@ func (m *RolloutTrafficRouting) Size() (n int) { l = m.Ambassador.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.AppMesh != nil { + l = m.AppMesh.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Traefik != nil { + l = m.Traefik.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ManagedRoutes) > 0 { + for _, e := range m.ManagedRoutes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RouteMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Method != nil { + l = m.Method.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Path != nil { + l = m.Path.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Headers) > 0 { + for k, v := range m.Headers { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RunSummary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + n += 1 + sovGenerated(uint64(m.Successful)) + n += 1 + sovGenerated(uint64(m.Failed)) + n += 1 + sovGenerated(uint64(m.Inconclusive)) + n += 1 + sovGenerated(uint64(m.Error)) return n } @@ -9192,57 +11145,120 @@ func (m *SetCanaryScale) Size() (n int) { return n } -func (m *TLSRoute) Size() (n int) { +func (m *SetHeaderRoute) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.Port)) - if len(m.SNIHosts) > 0 { - for _, s := range m.SNIHosts { - l = len(s) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Match) > 0 { + for _, e := range m.Match { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } -func (m *TemplateService) Size() (n int) { +func (m *SetMirrorRoute) Size() (n int) { if m == nil { return 0 } var l int _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Match) > 0 { + for _, e := range m.Match { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Percentage != nil { + n += 1 + sovGenerated(uint64(*m.Percentage)) + } return n } -func (m *TemplateSpec) Size() (n int) { +func (m *StickinessConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 2 + n += 1 + sovGenerated(uint64(m.DurationSeconds)) return n } -func (m *TemplateStatus) Size() (n int) { +func (m *StringMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Exact) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Regex) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TLSRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + if 
len(m.SNIHosts) > 0 { + for _, s := range m.SNIHosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TemplateService) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TemplateStatus) Size() (n int) { if m == nil { return 0 } @@ -9272,6 +11288,17 @@ func (m *TemplateStatus) Size() (n int) { return n } +func (m *TraefikTrafficRouting) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WeightedTraefikServiceName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TrafficWeights) Size() (n int) { if m == nil { return 0 @@ -9382,6 +11409,18 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ALBStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ALBStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "AwsResourceRef", "AwsResourceRef", 1), `&`, ``, 1) + `,`, + `CanaryTargetGroup:` + strings.Replace(strings.Replace(this.CanaryTargetGroup.String(), "AwsResourceRef", "AwsResourceRef", 1), `&`, ``, 1) + `,`, + `StableTargetGroup:` + strings.Replace(strings.Replace(this.StableTargetGroup.String(), "AwsResourceRef", "AwsResourceRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ALBTrafficRouting) String() string { if this == nil { return "nil" @@ -9391,6 +11430,7 @@ func (this *ALBTrafficRouting) String() string { `ServicePort:` + fmt.Sprintf("%v", this.ServicePort) + `,`, `RootService:` + fmt.Sprintf("%v", this.RootService) + `,`, `AnnotationPrefix:` + fmt.Sprintf("%v", this.AnnotationPrefix) + `,`, + `StickinessConfig:` + strings.Replace(this.StickinessConfig.String(), "StickinessConfig", "StickinessConfig", 1) + `,`, `}`, }, "") return s @@ -9459,10 +11499,22 @@ func (this *AnalysisRunSpec) String() string { repeatedStringForArgs += strings.Replace(strings.Replace(f.String(), "Argument", "Argument", 1), `&`, ``, 1) + "," } repeatedStringForArgs += "}" + repeatedStringForDryRun := "[]DryRun{" + for _, f := range this.DryRun { + repeatedStringForDryRun += strings.Replace(strings.Replace(f.String(), "DryRun", "DryRun", 1), `&`, ``, 1) + "," + } + repeatedStringForDryRun += "}" + repeatedStringForMeasurementRetention := "[]MeasurementRetention{" + for _, f := range this.MeasurementRetention { + repeatedStringForMeasurementRetention += strings.Replace(strings.Replace(f.String(), "MeasurementRetention", "MeasurementRetention", 1), `&`, ``, 1) + "," + } + repeatedStringForMeasurementRetention += "}" s := strings.Join([]string{`&AnalysisRunSpec{`, `Metrics:` + repeatedStringForMetrics + `,`, `Args:` + repeatedStringForArgs + `,`, `Terminate:` + fmt.Sprintf("%v", this.Terminate) + `,`, + `DryRun:` + repeatedStringForDryRun + `,`, + `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, `}`, }, "") return s @@ -9481,6 +11533,8 @@ func 
(this *AnalysisRunStatus) String() string { `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `MetricResults:` + repeatedStringForMetricResults + `,`, `StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1) + `,`, + `RunSummary:` + strings.Replace(strings.Replace(this.RunSummary.String(), "RunSummary", "RunSummary", 1), `&`, ``, 1) + `,`, + `DryRunSummary:` + strings.Replace(this.DryRunSummary.String(), "RunSummary", "RunSummary", 1) + `,`, `}`, }, "") return s @@ -9537,9 +11591,21 @@ func (this *AnalysisTemplateSpec) String() string { repeatedStringForArgs += strings.Replace(strings.Replace(f.String(), "Argument", "Argument", 1), `&`, ``, 1) + "," } repeatedStringForArgs += "}" + repeatedStringForDryRun := "[]DryRun{" + for _, f := range this.DryRun { + repeatedStringForDryRun += strings.Replace(strings.Replace(f.String(), "DryRun", "DryRun", 1), `&`, ``, 1) + "," + } + repeatedStringForDryRun += "}" + repeatedStringForMeasurementRetention := "[]MeasurementRetention{" + for _, f := range this.MeasurementRetention { + repeatedStringForMeasurementRetention += strings.Replace(strings.Replace(f.String(), "MeasurementRetention", "MeasurementRetention", 1), `&`, ``, 1) + "," + } + repeatedStringForMeasurementRetention += "}" s := strings.Join([]string{`&AnalysisTemplateSpec{`, `Metrics:` + repeatedStringForMetrics + `,`, `Args:` + repeatedStringForArgs + `,`, + `DryRun:` + repeatedStringForDryRun + `,`, + `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, `}`, }, "") return s @@ -9555,6 +11621,49 @@ func (this *AntiAffinity) String() string { }, "") return s } +func (this *AppMeshTrafficRouting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppMeshTrafficRouting{`, + `VirtualService:` + strings.Replace(this.VirtualService.String(), "AppMeshVirtualService", "AppMeshVirtualService", 1) + `,`, + `VirtualNodeGroup:` + strings.Replace(this.VirtualNodeGroup.String(), "AppMeshVirtualNodeGroup", "AppMeshVirtualNodeGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AppMeshVirtualNodeGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppMeshVirtualNodeGroup{`, + `CanaryVirtualNodeRef:` + strings.Replace(this.CanaryVirtualNodeRef.String(), "AppMeshVirtualNodeReference", "AppMeshVirtualNodeReference", 1) + `,`, + `StableVirtualNodeRef:` + strings.Replace(this.StableVirtualNodeRef.String(), "AppMeshVirtualNodeReference", "AppMeshVirtualNodeReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AppMeshVirtualNodeReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppMeshVirtualNodeReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *AppMeshVirtualService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppMeshVirtualService{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `}`, + }, "") + return s +} func (this *Argument) String() string { if this == nil { return "nil" @@ -9578,6 +11687,17 @@ func (this *ArgumentValueFrom) String() string { }, "") return s } +func (this *AwsResourceRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AwsResourceRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ARN:` + fmt.Sprintf("%v", this.ARN) + `,`, + `}`, + }, "") + return s +} func (this *BlueGreenStatus) String() string { if this 
== nil { return "nil" @@ -9624,6 +11744,7 @@ func (this *CanaryStatus) String() string { `CurrentBackgroundAnalysisRunStatus:` + strings.Replace(this.CurrentBackgroundAnalysisRunStatus.String(), "RolloutAnalysisRunStatus", "RolloutAnalysisRunStatus", 1) + `,`, `CurrentExperiment:` + fmt.Sprintf("%v", this.CurrentExperiment) + `,`, `Weights:` + strings.Replace(this.Weights.String(), "TrafficWeights", "TrafficWeights", 1) + `,`, + `StablePingPong:` + fmt.Sprintf("%v", this.StablePingPong) + `,`, `}`, }, "") return s @@ -9638,6 +11759,8 @@ func (this *CanaryStep) String() string { `Experiment:` + strings.Replace(this.Experiment.String(), "RolloutExperimentStep", "RolloutExperimentStep", 1) + `,`, `Analysis:` + strings.Replace(this.Analysis.String(), "RolloutAnalysis", "RolloutAnalysis", 1) + `,`, `SetCanaryScale:` + strings.Replace(this.SetCanaryScale.String(), "SetCanaryScale", "SetCanaryScale", 1) + `,`, + `SetHeaderRoute:` + strings.Replace(this.SetHeaderRoute.String(), "SetHeaderRoute", "SetHeaderRoute", 1) + `,`, + `SetMirrorRoute:` + strings.Replace(this.SetMirrorRoute.String(), "SetMirrorRoute", "SetMirrorRoute", 1) + `,`, `}`, }, "") return s @@ -9666,6 +11789,7 @@ func (this *CanaryStrategy) String() string { `ScaleDownDelayRevisionLimit:` + valueToStringGenerated(this.ScaleDownDelayRevisionLimit) + `,`, `AbortScaleDownDelaySeconds:` + valueToStringGenerated(this.AbortScaleDownDelaySeconds) + `,`, `DynamicStableScale:` + fmt.Sprintf("%v", this.DynamicStableScale) + `,`, + `PingPong:` + strings.Replace(this.PingPong.String(), "PingPongSpec", "PingPongSpec", 1) + `,`, `}`, }, "") return s @@ -9780,6 +11904,16 @@ func (this *DatadogMetric) String() string { }, "") return s } +func (this *DryRun) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DryRun{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `}`, + }, "") + return s +} func (this *Experiment) String() string { if this == nil { return "nil" @@ -9869,6 +12003,16 @@ func (this *ExperimentSpec) String() string { repeatedStringForAnalyses += strings.Replace(strings.Replace(f.String(), "ExperimentAnalysisTemplateRef", "ExperimentAnalysisTemplateRef", 1), `&`, ``, 1) + "," } repeatedStringForAnalyses += "}" + repeatedStringForDryRun := "[]DryRun{" + for _, f := range this.DryRun { + repeatedStringForDryRun += strings.Replace(strings.Replace(f.String(), "DryRun", "DryRun", 1), `&`, ``, 1) + "," + } + repeatedStringForDryRun += "}" + repeatedStringForMeasurementRetention := "[]MeasurementRetention{" + for _, f := range this.MeasurementRetention { + repeatedStringForMeasurementRetention += strings.Replace(strings.Replace(f.String(), "MeasurementRetention", "MeasurementRetention", 1), `&`, ``, 1) + "," + } + repeatedStringForMeasurementRetention += "}" s := strings.Join([]string{`&ExperimentSpec{`, `Templates:` + repeatedStringForTemplates + `,`, `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, @@ -9876,6 +12020,8 @@ func (this *ExperimentSpec) String() string { `Terminate:` + fmt.Sprintf("%v", this.Terminate) + `,`, `Analyses:` + repeatedStringForAnalyses + `,`, `ScaleDownDelaySeconds:` + valueToStringGenerated(this.ScaleDownDelaySeconds) + `,`, + `DryRun:` + repeatedStringForDryRun + `,`, + `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, `}`, }, "") return s @@ -9931,6 +12077,28 @@ func (this *GraphiteMetric) String() string { }, "") return s } +func (this *HeaderRoutingMatch) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&HeaderRoutingMatch{`, + `HeaderName:` + fmt.Sprintf("%v", this.HeaderName) + `,`, + `HeaderValue:` + strings.Replace(this.HeaderValue.String(), "StringMatch", "StringMatch", 1) + `,`, + `}`, + }, "") + return s +} +func (this *InfluxdbMetric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfluxdbMetric{`, + `Profile:` + fmt.Sprintf("%v", this.Profile) + `,`, + `Query:` + fmt.Sprintf("%v", this.Query) + `,`, + `}`, + }, "") + return s +} func (this *IstioDestinationRule) String() string { if this == nil { return "nil" @@ -10033,6 +12201,16 @@ func (this *KayentaThreshold) String() string { }, "") return s } +func (this *MangedRoutes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MangedRoutes{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *Measurement) String() string { if this == nil { return "nil" @@ -10059,6 +12237,17 @@ func (this *Measurement) String() string { }, "") return s } +func (this *MeasurementRetention) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MeasurementRetention{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} func (this *Metric) String() string { if this == nil { return "nil" @@ -10092,6 +12281,7 @@ func (this *MetricProvider) String() string { `Job:` + strings.Replace(this.Job.String(), "JobMetric", "JobMetric", 1) + `,`, `CloudWatch:` + strings.Replace(this.CloudWatch.String(), "CloudWatchMetric", "CloudWatchMetric", 1) + `,`, `Graphite:` + strings.Replace(this.Graphite.String(), "GraphiteMetric", "GraphiteMetric", 1) + `,`, + `Influxdb:` + strings.Replace(this.Influxdb.String(), "InfluxdbMetric", "InfluxdbMetric", 1) + `,`, `}`, }, "") return s @@ -10105,6 +12295,16 @@ func (this *MetricResult) String() string { repeatedStringForMeasurements += strings.Replace(strings.Replace(f.String(), "Measurement", "Measurement", 1), `&`, ``, 1) + "," } repeatedStringForMeasurements += "}" + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" s := strings.Join([]string{`&MetricResult{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, @@ -10116,6 +12316,8 @@ func (this *MetricResult) String() string { `Inconclusive:` + fmt.Sprintf("%v", this.Inconclusive) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, `ConsecutiveError:` + fmt.Sprintf("%v", this.ConsecutiveError) + `,`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `Metadata:` + mapStringForMetadata + `,`, `}`, }, "") return s @@ -10176,6 +12378,17 @@ func (this *PauseCondition) String() string { }, "") return s } +func (this *PingPongSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PingPongSpec{`, + `PingService:` + fmt.Sprintf("%v", this.PingService) + `,`, + `PongService:` + fmt.Sprintf("%v", this.PongService) + `,`, + `}`, + }, "") + return s +} func (this *PodTemplateMetadata) String() string { if this == nil { return "nil" @@ -10263,9 +12476,21 @@ func (this *RolloutAnalysis) String() string { 
repeatedStringForArgs += strings.Replace(strings.Replace(f.String(), "AnalysisRunArgument", "AnalysisRunArgument", 1), `&`, ``, 1) + "," } repeatedStringForArgs += "}" + repeatedStringForDryRun := "[]DryRun{" + for _, f := range this.DryRun { + repeatedStringForDryRun += strings.Replace(strings.Replace(f.String(), "DryRun", "DryRun", 1), `&`, ``, 1) + "," + } + repeatedStringForDryRun += "}" + repeatedStringForMeasurementRetention := "[]MeasurementRetention{" + for _, f := range this.MeasurementRetention { + repeatedStringForMeasurementRetention += strings.Replace(strings.Replace(f.String(), "MeasurementRetention", "MeasurementRetention", 1), `&`, ``, 1) + "," + } + repeatedStringForMeasurementRetention += "}" s := strings.Join([]string{`&RolloutAnalysis{`, `Templates:` + repeatedStringForTemplates + `,`, `Args:` + repeatedStringForArgs + `,`, + `DryRun:` + repeatedStringForDryRun + `,`, + `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, `}`, }, "") return s @@ -10461,6 +12686,7 @@ func (this *RolloutStatus) String() string { `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `WorkloadObservedGeneration:` + fmt.Sprintf("%v", this.WorkloadObservedGeneration) + `,`, + `ALB:` + strings.Replace(this.ALB.String(), "ALBStatus", "ALBStatus", 1) + `,`, `}`, }, "") return s @@ -10480,12 +12706,56 @@ func (this *RolloutTrafficRouting) String() string { if this == nil { return "nil" } + repeatedStringForManagedRoutes := "[]MangedRoutes{" + for _, f := range this.ManagedRoutes { + repeatedStringForManagedRoutes += strings.Replace(strings.Replace(f.String(), "MangedRoutes", "MangedRoutes", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedRoutes += "}" s := strings.Join([]string{`&RolloutTrafficRouting{`, `Istio:` + strings.Replace(this.Istio.String(), "IstioTrafficRouting", "IstioTrafficRouting", 1) + `,`, `Nginx:` + strings.Replace(this.Nginx.String(), "NginxTrafficRouting", "NginxTrafficRouting", 1) + `,`, `ALB:` + strings.Replace(this.ALB.String(), "ALBTrafficRouting", "ALBTrafficRouting", 1) + `,`, `SMI:` + strings.Replace(this.SMI.String(), "SMITrafficRouting", "SMITrafficRouting", 1) + `,`, `Ambassador:` + strings.Replace(this.Ambassador.String(), "AmbassadorTrafficRouting", "AmbassadorTrafficRouting", 1) + `,`, + `AppMesh:` + strings.Replace(this.AppMesh.String(), "AppMeshTrafficRouting", "AppMeshTrafficRouting", 1) + `,`, + `Traefik:` + strings.Replace(this.Traefik.String(), "TraefikTrafficRouting", "TraefikTrafficRouting", 1) + `,`, + `ManagedRoutes:` + repeatedStringForManagedRoutes + `,`, + `}`, + }, "") + return s +} +func (this *RouteMatch) String() string { + if this == nil { + return "nil" + } + keysForHeaders := make([]string, 0, len(this.Headers)) + for k := range this.Headers { + keysForHeaders = append(keysForHeaders, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHeaders) + mapStringForHeaders := "map[string]StringMatch{" + for _, k := range keysForHeaders { + mapStringForHeaders += fmt.Sprintf("%v: %v,", k, this.Headers[k]) + } + mapStringForHeaders += "}" + s := strings.Join([]string{`&RouteMatch{`, + `Method:` + strings.Replace(this.Method.String(), "StringMatch", "StringMatch", 1) + `,`, + `Path:` + strings.Replace(this.Path.String(), "StringMatch", "StringMatch", 1) + `,`, + `Headers:` + mapStringForHeaders + `,`, + `}`, + }, "") + return s +} +func (this *RunSummary) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunSummary{`, + `Count:` + 
fmt.Sprintf("%v", this.Count) + `,`, + `Successful:` + fmt.Sprintf("%v", this.Successful) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `Inconclusive:` + fmt.Sprintf("%v", this.Inconclusive) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, `}`, }, "") return s @@ -10538,6 +12808,62 @@ func (this *SetCanaryScale) String() string { }, "") return s } +func (this *SetHeaderRoute) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatch := "[]HeaderRoutingMatch{" + for _, f := range this.Match { + repeatedStringForMatch += strings.Replace(strings.Replace(f.String(), "HeaderRoutingMatch", "HeaderRoutingMatch", 1), `&`, ``, 1) + "," + } + repeatedStringForMatch += "}" + s := strings.Join([]string{`&SetHeaderRoute{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Match:` + repeatedStringForMatch + `,`, + `}`, + }, "") + return s +} +func (this *SetMirrorRoute) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatch := "[]RouteMatch{" + for _, f := range this.Match { + repeatedStringForMatch += strings.Replace(strings.Replace(f.String(), "RouteMatch", "RouteMatch", 1), `&`, ``, 1) + "," + } + repeatedStringForMatch += "}" + s := strings.Join([]string{`&SetMirrorRoute{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Match:` + repeatedStringForMatch + `,`, + `Percentage:` + valueToStringGenerated(this.Percentage) + `,`, + `}`, + }, "") + return s +} +func (this *StickinessConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StickinessConfig{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `DurationSeconds:` + fmt.Sprintf("%v", this.DurationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *StringMatch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringMatch{`, + `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `Regex:` + fmt.Sprintf("%v", this.Regex) + `,`, + `}`, + }, "") + return s +} func (this *TLSRoute) String() string { if this == nil { return "nil" @@ -10593,6 +12919,16 @@ func (this *TemplateStatus) String() string { }, "") return s } +func (this *TraefikTrafficRouting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TraefikTrafficRouting{`, + `WeightedTraefikServiceName:` + fmt.Sprintf("%v", this.WeightedTraefikServiceName) + `,`, + `}`, + }, "") + return s +} func (this *TrafficWeights) String() string { if this == nil { return "nil" @@ -10685,7 +13021,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ALBTrafficRouting) Unmarshal(dAtA []byte) error { +func (m *ALBStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10708,17 +13044,17 @@ func (m *ALBTrafficRouting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ALBTrafficRouting: wiretype end group for non-group") + return fmt.Errorf("proto: ALBStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ALBTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ALBStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
LoadBalancer", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10728,16 +13064,165 @@ func (m *ALBTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CanaryTargetGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CanaryTargetGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StableTargetGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StableTargetGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ALBTrafficRouting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ALBTrafficRouting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ALBTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } @@ -10829,6 +13314,42 @@ func (m *ALBTrafficRouting) Unmarshal(dAtA []byte) error { } m.AnnotationPrefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StickinessConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StickinessConfig == nil { + m.StickinessConfig = &StickinessConfig{} + } + if err := m.StickinessConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11465,6 +13986,74 @@ func (m *AnalysisRunSpec) Unmarshal(dAtA []byte) error { } } m.Terminate = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, DryRun{}) + if err := m.DryRun[len(m.DryRun)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MeasurementRetention", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MeasurementRetention = append(m.MeasurementRetention, MeasurementRetention{}) + if err := m.MeasurementRetention[len(m.MeasurementRetention)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11649,21 +14238,90 @@ func (m *AnalysisRunStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunSummary", wireType) } - iNdEx += skippy - } - } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunSummary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRunSummary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DryRunSummary == nil { + m.DryRunSummary = &RunSummary{} + } + if err := m.DryRunSummary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } if iNdEx > l { return io.ErrUnexpectedEOF @@ -12090,6 +14748,74 @@ func (m *AnalysisTemplateSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, DryRun{}) + if err := m.DryRun[len(m.DryRun)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MeasurementRetention", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MeasurementRetention = append(m.MeasurementRetention, MeasurementRetention{}) + if err := m.MeasurementRetention[len(m.MeasurementRetention)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12233,7 +14959,7 @@ func (m *AntiAffinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *Argument) Unmarshal(dAtA []byte) error { +func (m *AppMeshTrafficRouting) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12256,17 +14982,17 @@ func (m 
*Argument) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Argument: wiretype end group for non-group") + return fmt.Errorf("proto: AppMeshTrafficRouting: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Argument: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AppMeshTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VirtualService", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12276,60 +15002,31 @@ func (m *Argument) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.VirtualService == nil { + m.VirtualService = &AppMeshVirtualService{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.VirtualService.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := string(dAtA[iNdEx:postIndex]) - m.Value = &s iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VirtualNodeGroup", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12356,10 +15053,10 @@ func (m *Argument) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &ValueFrom{} + if m.VirtualNodeGroup == nil { + m.VirtualNodeGroup = &AppMeshVirtualNodeGroup{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.VirtualNodeGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12384,7 +15081,7 @@ func (m *Argument) Unmarshal(dAtA []byte) error { } return nil } -func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { +func (m *AppMeshVirtualNodeGroup) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12407,17 +15104,17 @@ func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ArgumentValueFrom: wiretype end group for non-group") + return fmt.Errorf("proto: AppMeshVirtualNodeGroup: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
ArgumentValueFrom: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AppMeshVirtualNodeGroup: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodTemplateHashValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CanaryVirtualNodeRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12427,28 +15124,31 @@ func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := ValueFromPodTemplateHash(dAtA[iNdEx:postIndex]) - m.PodTemplateHashValue = &s + if m.CanaryVirtualNodeRef == nil { + m.CanaryVirtualNodeRef = &AppMeshVirtualNodeReference{} + } + if err := m.CanaryVirtualNodeRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StableVirtualNodeRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12475,10 +15175,10 @@ func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &FieldRef{} + if m.StableVirtualNodeRef == nil { + m.StableVirtualNodeRef = &AppMeshVirtualNodeReference{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StableVirtualNodeRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12503,7 +15203,7 @@ func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { +func (m *AppMeshVirtualNodeReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12526,15 +15226,15 @@ func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlueGreenStatus: wiretype end group for non-group") + return fmt.Errorf("proto: AppMeshVirtualNodeReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlueGreenStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AppMeshVirtualNodeReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreviewSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12562,65 +15262,63 @@ func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PreviewSelector = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.ActiveSelector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleUpPreviewCheckPoint", wireType) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AppMeshVirtualService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.ScaleUpPreviewCheckPoint = bool(v != 0) - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppMeshVirtualService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppMeshVirtualService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrePromotionAnalysisRunStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12630,33 +15328,29 @@ func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PrePromotionAnalysisRunStatus == nil { - m.PrePromotionAnalysisRunStatus = &RolloutAnalysisRunStatus{} - } - if err := m.PrePromotionAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostPromotionAnalysisRunStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12666,27 +15360,23 @@ func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 
{ + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PostPromotionAnalysisRunStatus == nil { - m.PostPromotionAnalysisRunStatus = &RolloutAnalysisRunStatus{} - } - if err := m.PostPromotionAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Routes = append(m.Routes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -12709,7 +15399,7 @@ func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { +func (m *Argument) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12732,15 +15422,15 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlueGreenStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: Argument: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlueGreenStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Argument: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12768,11 +15458,11 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ActiveService = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreviewService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12800,13 +15490,14 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PreviewService = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PreviewReplicaCount", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12816,38 +15507,83 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.PreviewReplicaCount = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoPromotionEnabled", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - b := bool(v != 0) - m.AutoPromotionEnabled = &b - case 5: - if wireType != 0 { - return fmt.Errorf("proto: 
wrong wireType = %d for field AutoPromotionSeconds", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.AutoPromotionSeconds = 0 + if m.ValueFrom == nil { + m.ValueFrom = &ValueFrom{} + } + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArgumentValueFrom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArgumentValueFrom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodTemplateHashValue", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12857,14 +15593,28 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AutoPromotionSeconds |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 6: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ValueFromPodTemplateHash(dAtA[iNdEx:postIndex]) + m.PodTemplateHashValue = &s + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12891,38 +15641,68 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.FieldRef == nil { + m.FieldRef = &FieldRef{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelaySeconds", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ScaleDownDelaySeconds = &v - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ScaleDownDelayRevisionLimit", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int32 + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AwsResourceRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AwsResourceRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12932,17 +15712,29 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ScaleDownDelayRevisionLimit = &v - case 9: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrePromotionAnalysis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ARN", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12952,33 +15744,79 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PrePromotionAnalysis == nil { - m.PrePromotionAnalysis = &RolloutAnalysis{} - } - if err := m.PrePromotionAnalysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ARN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 10: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: BlueGreenStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlueGreenStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AntiAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreviewSelector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12988,33 +15826,29 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.AntiAffinity == nil { - m.AntiAffinity = &AntiAffinity{} - } - if err := m.AntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PreviewSelector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostPromotionAnalysis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActiveSelector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13024,31 +15858,47 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PostPromotionAnalysis == nil { - m.PostPromotionAnalysis = &RolloutAnalysis{} + m.ActiveSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleUpPreviewCheckPoint", wireType) } - if err := m.PostPromotionAnalysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 12: + m.ScaleUpPreviewCheckPoint = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreviewMetadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrePromotionAnalysisRunStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13075,16 +15925,16 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PreviewMetadata == nil { - m.PreviewMetadata = &PodTemplateMetadata{} + if m.PrePromotionAnalysisRunStatus == nil { + m.PrePromotionAnalysisRunStatus = &RolloutAnalysisRunStatus{} } - if err := m.PreviewMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PrePromotionAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } 
iNdEx = postIndex - case 13: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveMetadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PostPromotionAnalysisRunStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13111,33 +15961,13 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ActiveMetadata == nil { - m.ActiveMetadata = &PodTemplateMetadata{} + if m.PostPromotionAnalysisRunStatus == nil { + m.PostPromotionAnalysisRunStatus = &RolloutAnalysisRunStatus{} } - if err := m.ActiveMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PostPromotionAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AbortScaleDownDelaySeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AbortScaleDownDelaySeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13159,7 +15989,7 @@ func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *CanaryStatus) Unmarshal(dAtA []byte) error { +func (m *BlueGreenStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13182,17 +16012,17 @@ func (m *CanaryStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CanaryStatus: wiretype end group for non-group") + return fmt.Errorf("proto: BlueGreenStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CanaryStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlueGreenStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentStepAnalysisRunStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActiveService", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13202,33 +16032,29 @@ func (m *CanaryStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CurrentStepAnalysisRunStatus == nil { - m.CurrentStepAnalysisRunStatus = &RolloutAnalysisRunStatus{} - } - if err := m.CurrentStepAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ActiveService = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentBackgroundAnalysisRunStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreviewService", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated 
@@ -13238,33 +16064,29 @@ func (m *CanaryStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CurrentBackgroundAnalysisRunStatus == nil { - m.CurrentBackgroundAnalysisRunStatus = &RolloutAnalysisRunStatus{} - } - if err := m.CurrentBackgroundAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PreviewService = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentExperiment", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviewReplicaCount", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13274,27 +16096,55 @@ func (m *CanaryStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + m.PreviewReplicaCount = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoPromotionEnabled", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + b := bool(v != 0) + m.AutoPromotionEnabled = &b + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoPromotionSeconds", wireType) } - m.CurrentExperiment = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + m.AutoPromotionSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AutoPromotionSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Weights", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13321,66 +16171,36 @@ func (m *CanaryStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Weights == nil { - m.Weights = &TrafficWeights{} + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} } - if err := m.Weights.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*CanaryStep) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelaySeconds", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CanaryStep: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CanaryStep: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ScaleDownDelaySeconds = &v + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SetWeight", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayRevisionLimit", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -13397,10 +16217,10 @@ func (m *CanaryStep) Unmarshal(dAtA []byte) error { break } } - m.SetWeight = &v - case 2: + m.ScaleDownDelayRevisionLimit = &v + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pause", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrePromotionAnalysis", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13427,16 +16247,16 @@ func (m *CanaryStep) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Pause == nil { - m.Pause = &RolloutPause{} + if m.PrePromotionAnalysis == nil { + m.PrePromotionAnalysis = &RolloutAnalysis{} } - if err := m.Pause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PrePromotionAnalysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Experiment", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AntiAffinity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13463,16 +16283,16 @@ func (m *CanaryStep) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Experiment == nil { - m.Experiment = &RolloutExperimentStep{} + if m.AntiAffinity == nil { + m.AntiAffinity = &AntiAffinity{} } - if err := m.Experiment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Analysis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PostPromotionAnalysis", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13499,16 +16319,16 @@ func (m *CanaryStep) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Analysis == nil { - m.Analysis = &RolloutAnalysis{} + if m.PostPromotionAnalysis == nil { + m.PostPromotionAnalysis = &RolloutAnalysis{} } - if err := m.Analysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PostPromotionAnalysis.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { return err } iNdEx = postIndex - case 5: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SetCanaryScale", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreviewMetadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13535,13 +16355,69 @@ func (m *CanaryStep) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SetCanaryScale == nil { - m.SetCanaryScale = &SetCanaryScale{} + if m.PreviewMetadata == nil { + m.PreviewMetadata = &PodTemplateMetadata{} } - if err := m.SetCanaryScale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PreviewMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActiveMetadata == nil { + m.ActiveMetadata = &PodTemplateMetadata{} + } + if err := m.ActiveMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AbortScaleDownDelaySeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AbortScaleDownDelaySeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13563,7 +16439,7 @@ func (m *CanaryStep) Unmarshal(dAtA []byte) error { } return nil } -func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { +func (m *CanaryStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13586,17 +16462,17 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CanaryStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: CanaryStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CanaryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CanaryStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CanaryService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentStepAnalysisRunStatus", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13606,27 +16482,67 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CanaryService = string(dAtA[iNdEx:postIndex]) + if m.CurrentStepAnalysisRunStatus == nil { + m.CurrentStepAnalysisRunStatus = &RolloutAnalysisRunStatus{} + } + if err := m.CurrentStepAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StableService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentBackgroundAnalysisRunStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CurrentBackgroundAnalysisRunStatus == nil { + m.CurrentBackgroundAnalysisRunStatus = &RolloutAnalysisRunStatus{} + } + if err := m.CurrentBackgroundAnalysisRunStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentExperiment", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13654,11 +16570,11 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.StableService = string(dAtA[iNdEx:postIndex]) + m.CurrentExperiment = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Weights", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13685,16 +16601,18 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Steps = append(m.Steps, CanaryStep{}) - if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Weights == nil { + m.Weights = &TrafficWeights{} + } + if err := m.Weights.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TrafficRouting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StablePingPong", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13704,31 +16622,97 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TrafficRouting == nil { - m.TrafficRouting = &RolloutTrafficRouting{} - } - if err := m.TrafficRouting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.StablePingPong = PingPongType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - 
iNdEx = postIndex - case 5: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanaryStep) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanaryStep: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanaryStep: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SetWeight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SetWeight = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pause", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13755,16 +16739,16 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Pause == nil { + m.Pause = &RolloutPause{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Pause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Experiment", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13791,14 +16775,14 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} + if m.Experiment == nil { + m.Experiment = &RolloutExperimentStep{} } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Experiment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Analysis", wireType) } @@ -13828,15 +16812,15 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Analysis == nil { - m.Analysis = &RolloutAnalysisBackground{} + m.Analysis = &RolloutAnalysis{} } if err := m.Analysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AntiAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SetCanaryScale", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13863,16 +16847,16 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AntiAffinity == nil { - m.AntiAffinity 
= &AntiAffinity{} + if m.SetCanaryScale == nil { + m.SetCanaryScale = &SetCanaryScale{} } - if err := m.AntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SetCanaryScale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CanaryMetadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SetHeaderRoute", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13899,16 +16883,16 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CanaryMetadata == nil { - m.CanaryMetadata = &PodTemplateMetadata{} + if m.SetHeaderRoute == nil { + m.SetHeaderRoute = &SetHeaderRoute{} } - if err := m.CanaryMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SetHeaderRoute.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StableMetadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SetMirrorRoute", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13935,18 +16919,68 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StableMetadata == nil { - m.StableMetadata = &PodTemplateMetadata{} + if m.SetMirrorRoute == nil { + m.SetMirrorRoute = &SetMirrorRoute{} } - if err := m.StableMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SetMirrorRoute.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelaySeconds", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int32 + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanaryStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanaryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CanaryService", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13956,125 +16990,27 @@ func (m *CanaryStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ScaleDownDelaySeconds = &v - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayRevisionLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ScaleDownDelayRevisionLimit = &v - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AbortScaleDownDelaySeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AbortScaleDownDelaySeconds = &v - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DynamicStableScale", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.DynamicStableScale = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloudWatchMetric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloudWatchMetric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloudWatchMetric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.CanaryService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StableService", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14102,11 +17038,11 @@ func (m *CloudWatchMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Interval = DurationString(dAtA[iNdEx:postIndex]) + m.StableService = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricDataQueries", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14133,66 +17069,16 @@ func (m *CloudWatchMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricDataQueries = append(m.MetricDataQueries, CloudWatchMetricDataQuery{}) - if err := m.MetricDataQueries[len(m.MetricDataQueries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Steps = append(m.Steps, CanaryStep{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloudWatchMetricDataQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloudWatchMetricDataQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TrafficRouting", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14202,29 +17088,33 @@ func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + if m.TrafficRouting == nil { + m.TrafficRouting = &RolloutTrafficRouting{} + } + if err := m.TrafficRouting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14234,30 +17124,33 @@ func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Expression = &s + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14267,28 +17160,31 @@ func (m 
*CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Label = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricStat", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Analysis", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14315,16 +17211,16 @@ func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MetricStat == nil { - m.MetricStat = &CloudWatchMetricStat{} + if m.Analysis == nil { + m.Analysis = &RolloutAnalysisBackground{} } - if err := m.MetricStat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Analysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AntiAffinity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14351,18 +17247,18 @@ func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Period == nil { - m.Period = &intstr.IntOrString{} + if m.AntiAffinity == nil { + m.AntiAffinity = &AntiAffinity{} } - if err := m.Period.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReturnData", wireType) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CanaryMetadata", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14372,66 +17268,31 @@ func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReturnData = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.CanaryMetadata == nil { + m.CanaryMetadata = &PodTemplateMetadata{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break + if err := m.CanaryMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloudWatchMetricStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloudWatchMetricStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StableMetadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14458,15 +17319,18 @@ func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.StableMetadata == nil { + m.StableMetadata = &PodTemplateMetadata{} + } + if err := m.StableMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelaySeconds", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14476,30 +17340,37 @@ func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.ScaleDownDelaySeconds = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayRevisionLimit", wireType) } - if err := m.Period.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) + m.ScaleDownDelayRevisionLimit = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AbortScaleDownDelaySeconds", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14509,29 +17380,37 @@ func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.AbortScaleDownDelaySeconds = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DynamicStableScale", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Stat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + m.DynamicStableScale = bool(v != 0) + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PingPong", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14541,23 +17420,27 @@ func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Unit = string(dAtA[iNdEx:postIndex]) + if m.PingPong == nil { + m.PingPong = &PingPongSpec{} + } + if err := m.PingPong.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14580,7 +17463,7 @@ func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { } return nil } -func (m *CloudWatchMetricStatMetric) Unmarshal(dAtA []byte) error { +func (m *CloudWatchMetric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14603,49 +17486,15 @@ func (m *CloudWatchMetricStatMetric) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CloudWatchMetricStatMetric: wiretype end group for non-group") + return fmt.Errorf("proto: CloudWatchMetric: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CloudWatchMetricStatMetric: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CloudWatchMetric: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dimensions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Dimensions = append(m.Dimensions, CloudWatchMetricStatMetricDimension{}) - if err := m.Dimensions[len(m.Dimensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14673,13 +17522,13 @@ func (m *CloudWatchMetricStatMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricName = string(dAtA[iNdEx:postIndex]) + m.Interval = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricDataQueries", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14689,24 +17538,25 @@ func (m *CloudWatchMetricStatMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + m.MetricDataQueries = append(m.MetricDataQueries, CloudWatchMetricDataQuery{}) + if err := m.MetricDataQueries[len(m.MetricDataQueries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14729,7 +17579,7 @@ func (m *CloudWatchMetricStatMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *CloudWatchMetricStatMetricDimension) Unmarshal(dAtA []byte) error { +func (m *CloudWatchMetricDataQuery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14752,15 +17602,15 @@ func (m *CloudWatchMetricStatMetricDimension) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CloudWatchMetricStatMetricDimension: wiretype end group for non-group") + return fmt.Errorf("proto: CloudWatchMetricDataQuery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CloudWatchMetricStatMetricDimension: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CloudWatchMetricDataQuery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14788,11 +17638,11 @@ func (m *CloudWatchMetricStatMetricDimension) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Id = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14820,61 +17670,45 @@ func (m *CloudWatchMetricStatMetricDimension) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Expression = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx 
+= skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterAnalysisTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterAnalysisTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterAnalysisTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.Label = &s + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricStat", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14901,13 +17735,16 @@ func (m *ClusterAnalysisTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MetricStat == nil { + m.MetricStat = &CloudWatchMetricStat{} + } + if err := m.MetricStat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14934,10 +17771,34 @@ func (m *ClusterAnalysisTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Period == nil { + m.Period = &intstr.IntOrString{} + } + if err := m.Period.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReturnData", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReturnData = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14959,7 +17820,7 @@ func (m *ClusterAnalysisTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterAnalysisTemplateList) Unmarshal(dAtA []byte) error { +func (m *CloudWatchMetricStat) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14982,15 +17843,15 @@ func (m *ClusterAnalysisTemplateList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterAnalysisTemplateList: wiretype end group for non-group") + return fmt.Errorf("proto: CloudWatchMetricStat: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterAnalysisTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CloudWatchMetricStat: illegal tag %d (wire type %d)", fieldNum, wire) } 
switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15017,13 +17878,13 @@ func (m *ClusterAnalysisTemplateList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15050,64 +17911,13 @@ func (m *ClusterAnalysisTemplateList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterAnalysisTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Period.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DatadogMetric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DatadogMetric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DatadogMetric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15135,11 +17945,11 @@ func (m *DatadogMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Interval = DurationString(dAtA[iNdEx:postIndex]) + m.Stat = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15167,7 +17977,7 @@ func (m *DatadogMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + m.Unit = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -15190,7 +18000,7 @@ func (m *DatadogMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *Experiment) Unmarshal(dAtA []byte) error { +func (m *CloudWatchMetricStatMetric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15213,15 +18023,15 @@ func (m *Experiment) Unmarshal(dAtA []byte) error { fieldNum 
:= int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Experiment: wiretype end group for non-group") + return fmt.Errorf("proto: CloudWatchMetricStatMetric: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Experiment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CloudWatchMetricStatMetric: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dimensions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15248,15 +18058,16 @@ func (m *Experiment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Dimensions = append(m.Dimensions, CloudWatchMetricStatMetricDimension{}) + if err := m.Dimensions[len(m.Dimensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15266,30 +18077,29 @@ func (m *Experiment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.MetricName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15299,24 +18109,24 @@ func (m *Experiment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -15339,7 +18149,7 @@ func (m *Experiment) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { +func (m *CloudWatchMetricStatMetricDimension) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15362,10 +18172,10 @@ func (m *ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExperimentAnalysisRunStatus: wiretype end group for 
non-group") + return fmt.Errorf("proto: CloudWatchMetricStatMetricDimension: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExperimentAnalysisRunStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CloudWatchMetricStatMetricDimension: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15402,7 +18212,7 @@ func (m *ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnalysisRun", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15430,45 +18240,96 @@ func (m *ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AnalysisRun = string(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterAnalysisTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterAnalysisTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterAnalysisTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15478,23 +18339,24 @@ func (m 
*ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15517,7 +18379,7 @@ func (m *ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExperimentAnalysisTemplateRef) Unmarshal(dAtA []byte) error { +func (m *ClusterAnalysisTemplateList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15540,17 +18402,17 @@ func (m *ExperimentAnalysisTemplateRef) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExperimentAnalysisTemplateRef: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterAnalysisTemplateList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExperimentAnalysisTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterAnalysisTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15560,79 +18422,28 @@ func (m *ExperimentAnalysisTemplateRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TemplateName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
m.ClusterScope = bool(v != 0) - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15659,31 +18470,11 @@ func (m *ExperimentAnalysisTemplateRef) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, Argument{}) - if err := m.Args[len(m.Args)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ClusterAnalysisTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredForCompletion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RequiredForCompletion = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15705,7 +18496,7 @@ func (m *ExperimentAnalysisTemplateRef) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { +func (m *DatadogMetric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15728,15 +18519,15 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExperimentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: DatadogMetric: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExperimentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DatadogMetric: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15764,11 +18555,11 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ExperimentConditionType(dAtA[iNdEx:postIndex]) + m.Interval = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15796,13 +18587,63 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DryRun) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DryRun: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DryRun: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15812,28 +18653,77 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Experiment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Experiment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Experiment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15860,15 +18750,15 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15878,29 +18768,30 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) 
<< shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15910,23 +18801,24 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15949,7 +18841,7 @@ func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExperimentList) Unmarshal(dAtA []byte) error { +func (m *ExperimentAnalysisRunStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15972,17 +18864,17 @@ func (m *ExperimentList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExperimentList: wiretype end group for non-group") + return fmt.Errorf("proto: ExperimentAnalysisRunStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExperimentList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExperimentAnalysisRunStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15992,30 +18884,29 @@ func (m *ExperimentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AnalysisRun", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16025,25 +18916,87 @@ func 
(m *ExperimentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Experiment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.AnalysisRun = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -16066,7 +19019,7 @@ func (m *ExperimentList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { +func (m *ExperimentAnalysisTemplateRef) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16089,17 +19042,17 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExperimentSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ExperimentAnalysisTemplateRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExperimentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExperimentAnalysisTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16109,29 +19062,27 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + 
msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Templates = append(m.Templates, TemplateSpec{}) - if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16159,31 +19110,11 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Duration = DurationString(dAtA[iNdEx:postIndex]) + m.TemplateName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -16200,10 +19131,10 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { break } } - m.Terminate = bool(v != 0) - case 5: + m.ClusterScope = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Analyses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16230,16 +19161,16 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Analyses = append(m.Analyses, ExperimentAnalysisTemplateRef{}) - if err := m.Analyses[len(m.Analyses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Args = append(m.Args, Argument{}) + if err := m.Args[len(m.Args)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelaySeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequiredForCompletion", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16249,12 +19180,12 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ScaleDownDelaySeconds = &v + m.RequiredForCompletion = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -16276,7 +19207,7 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { +func (m *ExperimentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16299,15 +19230,15 @@ func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExperimentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: 
ExperimentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExperimentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExperimentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16335,11 +19266,11 @@ func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) + m.Type = ExperimentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16367,11 +19298,11 @@ func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateStatuses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16398,14 +19329,13 @@ func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TemplateStatuses = append(m.TemplateStatuses, TemplateStatus{}) - if err := m.TemplateStatuses[len(m.TemplateStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16432,18 +19362,15 @@ func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AvailableAt == nil { - m.AvailableAt = &v1.Time{} - } - if err := m.AvailableAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16453,113 +19380,27 @@ func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ExperimentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + 
m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnalysisRuns", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AnalysisRuns = append(m.AnalysisRuns, ExperimentAnalysisRunStatus{}) - if err := m.AnalysisRuns[len(m.AnalysisRuns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FieldRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FieldRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FieldRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16587,7 +19428,7 @@ func (m *FieldRef) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldPath = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -16610,7 +19451,7 @@ func (m *FieldRef) Unmarshal(dAtA []byte) error { } return nil } -func (m *GraphiteMetric) Unmarshal(dAtA []byte) error { +func (m *ExperimentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16633,17 +19474,17 @@ func (m *GraphiteMetric) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GraphiteMetric: wiretype end group for non-group") + return fmt.Errorf("proto: ExperimentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GraphiteMetric: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExperimentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16653,29 +19494,30 @@ func (m *GraphiteMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Address = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16685,23 +19527,25 @@ func (m *GraphiteMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, Experiment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -16724,7 +19568,7 @@ func (m *GraphiteMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *IstioDestinationRule) Unmarshal(dAtA []byte) error { +func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16747,17 +19591,17 @@ func (m *IstioDestinationRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IstioDestinationRule: wiretype end group for non-group") + return fmt.Errorf("proto: ExperimentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IstioDestinationRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExperimentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16767,27 +19611,29 @@ func (m *IstioDestinationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Templates = append(m.Templates, TemplateSpec{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CanarySubsetName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
Duration", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16815,13 +19661,53 @@ func (m *IstioDestinationRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CanarySubsetName = string(dAtA[iNdEx:postIndex]) + m.Duration = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminate = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StableSubsetName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Analyses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16831,79 +19717,31 @@ func (m *IstioDestinationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.StableSubsetName = string(dAtA[iNdEx:postIndex]) + m.Analyses = append(m.Analyses, ExperimentAnalysisTemplateRef{}) + if err := m.Analyses[len(m.Analyses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IstioTrafficRouting) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IstioTrafficRouting: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IstioTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VirtualService", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelaySeconds", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { 
if shift >= 64 { return ErrIntOverflowGenerated @@ -16913,31 +19751,15 @@ func (m *IstioTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VirtualService == nil { - m.VirtualService = &IstioVirtualService{} - } - if err := m.VirtualService.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: + m.ScaleDownDelaySeconds = &v + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DestinationRule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16964,16 +19786,14 @@ func (m *IstioTrafficRouting) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DestinationRule == nil { - m.DestinationRule = &IstioDestinationRule{} - } - if err := m.DestinationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.DryRun = append(m.DryRun, DryRun{}) + if err := m.DryRun[len(m.DryRun)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VirtualServices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MeasurementRetention", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17000,8 +19820,8 @@ func (m *IstioTrafficRouting) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VirtualServices = append(m.VirtualServices, IstioVirtualService{}) - if err := m.VirtualServices[len(m.VirtualServices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.MeasurementRetention = append(m.MeasurementRetention, MeasurementRetention{}) + if err := m.MeasurementRetention[len(m.MeasurementRetention)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17026,7 +19846,7 @@ func (m *IstioTrafficRouting) Unmarshal(dAtA []byte) error { } return nil } -func (m *IstioVirtualService) Unmarshal(dAtA []byte) error { +func (m *ExperimentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17049,15 +19869,15 @@ func (m *IstioVirtualService) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IstioVirtualService: wiretype end group for non-group") + return fmt.Errorf("proto: ExperimentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IstioVirtualService: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExperimentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17085,11 +19905,11 @@ func (m *IstioVirtualService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Routes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17117,11 +19937,11 @@ func (m *IstioVirtualService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Routes = append(m.Routes, string(dAtA[iNdEx:postIndex])) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSRoutes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateStatuses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17148,64 +19968,50 @@ func (m *IstioVirtualService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLSRoutes = append(m.TLSRoutes, TLSRoute{}) - if err := m.TLSRoutes[len(m.TLSRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TemplateStatuses = append(m.TemplateStatuses, TemplateStatus{}) + if err := m.TemplateStatuses[len(m.TemplateStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableAt", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobMetric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.AvailableAt == nil { + m.AvailableAt = &v1.Time{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobMetric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobMetric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.AvailableAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17232,13 +20038,14 @@ func (m *JobMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ExperimentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AnalysisRuns", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17265,7 +20072,8 @@ func (m *JobMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AnalysisRuns = append(m.AnalysisRuns, ExperimentAnalysisRunStatus{}) + if err := m.AnalysisRuns[len(m.AnalysisRuns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17290,7 +20098,7 @@ func (m *JobMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *KayentaMetric) Unmarshal(dAtA []byte) error { +func (m *FieldRef) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17313,15 +20121,15 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KayentaMetric: wiretype end group for non-group") + return fmt.Errorf("proto: FieldRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KayentaMetric: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17349,43 +20157,61 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Address = string(dAtA[iNdEx:postIndex]) + m.FieldPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Application", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GraphiteMetric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Application = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GraphiteMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GraphiteMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field CanaryConfigName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17413,11 +20239,11 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CanaryConfigName = string(dAtA[iNdEx:postIndex]) + m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricsAccountName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17445,11 +20271,61 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricsAccountName = string(dAtA[iNdEx:postIndex]) + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeaderRoutingMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeaderRoutingMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeaderRoutingMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigurationAccountName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HeaderName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17477,13 +20353,13 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ConfigurationAccountName = string(dAtA[iNdEx:postIndex]) + m.HeaderName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageAccountName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HeaderValue", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17493,29 +20369,83 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.StorageAccountName = string(dAtA[iNdEx:postIndex]) + if m.HeaderValue == nil { + m.HeaderValue = &StringMatch{} + } + if err := m.HeaderValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = 
postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfluxdbMetric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfluxdbMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfluxdbMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17525,30 +20455,29 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Threshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Profile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17558,25 +20487,23 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Scopes = append(m.Scopes, KayentaScope{}) - if err := m.Scopes[len(m.Scopes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -17599,7 +20526,7 @@ func (m *KayentaMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *KayentaScope) Unmarshal(dAtA []byte) error { +func (m *IstioDestinationRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17622,10 +20549,10 @@ func (m *KayentaScope) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KayentaScope: wiretype end group for non-group") + return fmt.Errorf("proto: 
IstioDestinationRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KayentaScope: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IstioDestinationRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -17662,9 +20589,9 @@ func (m *KayentaScope) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControlScope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CanarySubsetName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17674,30 +20601,29 @@ func (m *KayentaScope) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ControlScope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CanarySubsetName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExperimentScope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StableSubsetName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17707,24 +20633,23 @@ func (m *KayentaScope) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ExperimentScope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.StableSubsetName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -17747,7 +20672,7 @@ func (m *KayentaScope) Unmarshal(dAtA []byte) error { } return nil } -func (m *KayentaThreshold) Unmarshal(dAtA []byte) error { +func (m *IstioTrafficRouting) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17770,17 +20695,17 @@ func (m *KayentaThreshold) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KayentaThreshold: wiretype end group for non-group") + return fmt.Errorf("proto: IstioTrafficRouting: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KayentaThreshold: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IstioTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pass", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualService", wireType) } - m.Pass = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17790,16 +20715,33 @@ func (m *KayentaThreshold) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pass |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VirtualService == nil { + m.VirtualService = &IstioVirtualService{} + } + if err := m.VirtualService.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Marginal", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationRule", wireType) } - m.Marginal = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17809,13 +20751,64 @@ func (m *KayentaThreshold) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Marginal |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DestinationRule == nil { + m.DestinationRule = &IstioDestinationRule{} + } + if err := m.DestinationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualServices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VirtualServices = append(m.VirtualServices, IstioVirtualService{}) + if err := m.VirtualServices[len(m.VirtualServices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err @@ -17835,7 +20828,7 @@ func (m *KayentaThreshold) Unmarshal(dAtA []byte) error { } return nil } -func (m *Measurement) Unmarshal(dAtA []byte) error { +func (m *IstioVirtualService) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17858,15 +20851,15 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Measurement: wiretype end group for non-group") + return fmt.Errorf("proto: IstioVirtualService: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Measurement: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IstioVirtualService: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17894,11 +20887,11 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17926,11 +20919,11 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Routes = append(m.Routes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLSRoutes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17957,16 +20950,64 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartedAt == nil { - m.StartedAt = &v1.Time{} - } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TLSRoutes = append(m.TLSRoutes, TLSRoute{}) + if err := m.TLSRoutes[len(m.TLSRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobMetric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17993,16 +21034,96 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FinishedAt == nil { - m.FinishedAt = &v1.Time{} + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KayentaMetric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KayentaMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KayentaMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18030,13 +21151,13 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Application", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18046,124 +21167,29 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + m.Application = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CanaryConfigName", wireType) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if 
postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Metadata[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResumeAt", wireType) - } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18173,81 +21199,27 @@ func (m *Measurement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResumeAt == nil { - m.ResumeAt = &v1.Time{} - } - if err := m.ResumeAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CanaryConfigName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricsAccountName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18275,11 +21247,11 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = 
string(dAtA[iNdEx:postIndex]) + m.MetricsAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigurationAccountName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18307,11 +21279,11 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Interval = DurationString(dAtA[iNdEx:postIndex]) + m.ConfigurationAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialDelay", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StorageAccountName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18339,11 +21311,11 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.InitialDelay = DurationString(dAtA[iNdEx:postIndex]) + m.StorageAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18370,18 +21342,15 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Count == nil { - m.Count = &intstr.IntOrString{} - } - if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Threshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessCondition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18391,27 +21360,79 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SuccessCondition = string(dAtA[iNdEx:postIndex]) + m.Scopes = append(m.Scopes, KayentaScope{}) + if err := m.Scopes[len(m.Scopes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KayentaScope) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KayentaScope: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KayentaScope: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailureCondition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18439,11 +21460,11 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FailureCondition = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailureLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ControlScope", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18470,16 +21491,13 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FailureLimit == nil { - m.FailureLimit = &intstr.IntOrString{} - } - if err := m.FailureLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ControlScope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InconclusiveLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExperimentScope", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18506,18 +21524,65 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.InconclusiveLimit == nil { - m.InconclusiveLimit = &intstr.IntOrString{} - } - if err := m.InconclusiveLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ExperimentScope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsecutiveErrorLimit", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KayentaThreshold) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KayentaThreshold: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KayentaThreshold: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pass", wireType) + } + m.Pass = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18527,33 +21592,85 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) 
<< shift + m.Pass |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Marginal", wireType) } - postIndex := iNdEx + msglen - if postIndex < 0 { + m.Marginal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Marginal |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ConsecutiveErrorLimit == nil { - m.ConsecutiveErrorLimit = &intstr.IntOrString{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MangedRoutes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.ConsecutiveErrorLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 10: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MangedRoutes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MangedRoutes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18563,24 +21680,23 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -18603,7 +21719,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } return nil } -func (m *MetricProvider) Unmarshal(dAtA []byte) error { +func (m *Measurement) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18626,17 +21742,17 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MetricProvider: wiretype end group for non-group") + return fmt.Errorf("proto: Measurement: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MetricProvider: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Measurement: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field Prometheus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18646,33 +21762,29 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Prometheus == nil { - m.Prometheus = &PrometheusMetric{} - } - if err := m.Prometheus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kayenta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18682,31 +21794,27 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Kayenta == nil { - m.Kayenta = &KayentaMetric{} - } - if err := m.Kayenta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Web", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18733,16 +21841,16 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Web == nil { - m.Web = &WebMetric{} + if m.StartedAt == nil { + m.StartedAt = &v1.Time{} } - if err := m.Web.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Datadog", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18769,18 +21877,18 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Datadog == nil { - m.Datadog = &DatadogMetric{} + if m.FinishedAt == nil { + m.FinishedAt = &v1.Time{} } - if err := m.Datadog.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Wavefront", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 
{ return ErrIntOverflowGenerated @@ -18790,31 +21898,27 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Wavefront == nil { - m.Wavefront = &WavefrontMetric{} - } - if err := m.Wavefront.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewRelic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18841,52 +21945,107 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NewRelic == nil { - m.NewRelic = &NewRelicMetric{} - } - if err := m.NewRelic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType) + if m.Metadata == nil { + m.Metadata = make(map[string]string) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Job == nil { - m.Job = &JobMetric{} - } - if err := m.Job.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Metadata[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CloudWatch", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResumeAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18913,18 +22072,68 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CloudWatch == nil { - m.CloudWatch = &CloudWatchMetric{} + if m.ResumeAt == nil { + m.ResumeAt = &v1.Time{} } - if err := m.CloudWatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResumeAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MeasurementRetention) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MeasurementRetention: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MeasurementRetention: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Graphite", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18934,28 +22143,43 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Graphite == nil { - m.Graphite = &GraphiteMetric{} + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - if err := m.Graphite.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18977,7 +22201,7 @@ func (m *MetricProvider) Unmarshal(dAtA []byte) error { } return nil } -func (m *MetricResult) Unmarshal(dAtA []byte) error { +func (m *Metric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19000,10 +22224,10 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MetricResult: wiretype end group for non-group") + return fmt.Errorf("proto: Metric: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MetricResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19040,7 +22264,7 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19068,13 +22292,13 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) + m.Interval = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Measurements", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelay", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19084,31 +22308,29 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Measurements = append(m.Measurements, Measurement{}) - if err := m.Measurements[len(m.Measurements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.InitialDelay = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19118,29 +22340,33 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if m.Count == nil { + m.Count = &intstr.IntOrString{} + 
} + if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessCondition", wireType) } - m.Count = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19150,16 +22376,29 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Successful", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.Successful = 0 + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SuccessCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureCondition", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19169,16 +22408,29 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Successful |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureLimit", wireType) } - m.Failed = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19188,54 +22440,33 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Failed |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Inconclusive", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.Inconclusive = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Inconclusive |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.Error = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Error |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if m.FailureLimit == nil { + m.FailureLimit = &intstr.IntOrString{} } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsecutiveError", wireType) + if err := 
m.FailureLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ConsecutiveError = 0 + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InconclusiveLimit", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19245,66 +22476,33 @@ func (m *MetricResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ConsecutiveError |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NewRelicMetric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.InconclusiveLimit == nil { + m.InconclusiveLimit = &intstr.IntOrString{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.InconclusiveLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NewRelicMetric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NewRelicMetric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsecutiveErrorLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19314,29 +22512,33 @@ func (m *NewRelicMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Profile = string(dAtA[iNdEx:postIndex]) + if m.ConsecutiveErrorLimit == nil { + m.ConsecutiveErrorLimit = &intstr.IntOrString{} + } + if err := m.ConsecutiveErrorLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19346,23 +22548,24 @@ func (m *NewRelicMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if 
msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -19385,7 +22588,7 @@ func (m *NewRelicMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { +func (m *MetricProvider) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19408,17 +22611,17 @@ func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NginxTrafficRouting: wiretype end group for non-group") + return fmt.Errorf("proto: MetricProvider: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NginxTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MetricProvider: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnnotationPrefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prometheus", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19428,29 +22631,33 @@ func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AnnotationPrefix = string(dAtA[iNdEx:postIndex]) + if m.Prometheus == nil { + m.Prometheus = &PrometheusMetric{} + } + if err := m.Prometheus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StableIngress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kayenta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19460,27 +22667,31 @@ func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.StableIngress = string(dAtA[iNdEx:postIndex]) + if m.Kayenta == nil { + m.Kayenta = &KayentaMetric{} + } + if err := m.Kayenta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalIngressAnnotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Web", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19507,27 +22718,1663 @@ func (m 
*NginxTrafficRouting) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AdditionalIngressAnnotations == nil { - m.AdditionalIngressAnnotations = make(map[string]string) + if m.Web == nil { + m.Web = &WebMetric{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Web.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Datadog", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Datadog == nil { + m.Datadog = &DatadogMetric{} + } + if err := m.Datadog.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Wavefront", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Wavefront == nil { + m.Wavefront = &WavefrontMetric{} + } + if err := m.Wavefront.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewRelic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewRelic == nil { + m.NewRelic = &NewRelicMetric{} + } + if err := m.NewRelic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Job == nil { + m.Job = &JobMetric{} + } + if err := 
m.Job.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudWatch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CloudWatch == nil { + m.CloudWatch = &CloudWatchMetric{} + } + if err := m.CloudWatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Graphite", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Graphite == nil { + m.Graphite = &GraphiteMetric{} + } + if err := m.Graphite.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Influxdb", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Influxdb == nil { + m.Influxdb = &InfluxdbMetric{} + } + if err := m.Influxdb.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = AnalysisPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Measurements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Measurements = append(m.Measurements, Measurement{}) + if err := m.Measurements[len(m.Measurements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Successful", wireType) + } + m.Successful = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Successful |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Inconclusive", wireType) + } + m.Inconclusive = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Inconclusive |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + m.Error = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Error |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsecutiveError", wireType) + } + m.ConsecutiveError = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConsecutiveError |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NewRelicMetric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NewRelicMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NewRelicMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Profile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NginxTrafficRouting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NginxTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnnotationPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnnotationPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StableIngress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StableIngress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalIngressAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalIngressAnnotations == nil { + m.AdditionalIngressAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AdditionalIngressAnnotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PauseCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PauseCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PauseCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = PauseReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingPongSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingPongSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingPongSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PingService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PongService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PongService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } fieldNum := int32(wire >> 3) if fieldNum == 1 { @@ -19603,8 +24450,241 @@ func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.AdditionalIngressAnnotations[mapkey] = mapvalue - iNdEx = postIndex + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferredDuringSchedulingIgnoredDuringExecution: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferredDuringSchedulingIgnoredDuringExecution: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrometheusMetric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrometheusMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrometheusMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequiredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequiredDuringSchedulingIgnoredDuringExecution: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequiredDuringSchedulingIgnoredDuringExecution: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19626,7 +24706,7 @@ func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ObjectRef) Unmarshal(dAtA []byte) error { +func (m *Rollout) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19649,17 +24729,17 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ObjectRef: wiretype end group for non-group") + return fmt.Errorf("proto: Rollout: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectRef: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Rollout: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19669,29 +24749,30 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = 
string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19701,29 +24782,30 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19733,23 +24815,24 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -19772,7 +24855,7 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { } return nil } -func (m *PauseCondition) Unmarshal(dAtA []byte) error { +func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19795,17 +24878,17 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PauseCondition: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutAnalysis: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PauseCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutAnalysis: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19815,27 +24898,181 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = PauseReason(dAtA[iNdEx:postIndex]) + m.Templates = append(m.Templates, RolloutAnalysisTemplate{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, AnalysisRunArgument{}) + if err := m.Args[len(m.Args)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, DryRun{}) + if err := m.DryRun[len(m.DryRun)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MeasurementRetention", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MeasurementRetention = append(m.MeasurementRetention, MeasurementRetention{}) + if err := m.MeasurementRetention[len(m.MeasurementRetention)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RolloutAnalysisBackground) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RolloutAnalysisBackground: wiretype end group for non-group") 
+ } + if fieldNum <= 0 { + return fmt.Errorf("proto: RolloutAnalysisBackground: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RolloutAnalysis", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19862,10 +25099,30 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RolloutAnalysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingStep", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingStep = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19887,7 +25144,7 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { +func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19910,17 +25167,17 @@ func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodTemplateMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutAnalysisRunStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutAnalysisRunStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19930,124 +25187,29 @@ func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - 
return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20057,118 +25219,55 @@ func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = AnalysisPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Annotations[mapkey] = mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -20191,7 +25290,7 @@ func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { } return nil } -func (m *PreferredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) error { +func (m *RolloutAnalysisTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20214,17 +25313,49 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PreferredDuringSchedulingIgnoredDuringExecution: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutAnalysisTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PreferredDuringSchedulingIgnoredDuringExecution: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutAnalysisTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Weight", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) } - m.Weight = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20234,11 +25365,12 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) } b := dAtA[iNdEx] iNdEx++ - m.Weight |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.ClusterScope = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20260,7 +25392,7 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) } return nil } -func (m *PrometheusMetric) Unmarshal(dAtA []byte) error { +func (m *RolloutCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20283,15 +25415,15 @@ func (m *PrometheusMetric) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrometheusMetric: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusMetric: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20319,11 +25451,11 @@ func (m *PrometheusMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Address = string(dAtA[iNdEx:postIndex]) + m.Type = RolloutConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20351,58 +25483,138 @@ func (m *PrometheusMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequiredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequiredDuringSchedulingIgnoredDuringExecution: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequiredDuringSchedulingIgnoredDuringExecution: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20424,7 +25636,7 @@ func (m *RequiredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) } return nil } -func (m *Rollout) Unmarshal(dAtA []byte) error { +func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20447,15 +25659,15 @@ func (m *Rollout) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rollout: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutExperimentStep: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rollout: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutExperimentStep: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20482,15 +25694,16 @@ func (m *Rollout) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Templates = append(m.Templates, RolloutExperimentTemplate{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20500,28 +25713,27 @@ func (m *Rollout) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Duration = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Analyses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20548,7 +25760,8 @@ func (m *Rollout) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Analyses = append(m.Analyses, RolloutExperimentStepAnalysisTemplateRef{}) + if err := m.Analyses[len(m.Analyses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20573,7 +25786,7 @@ func (m *Rollout) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { +func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20596,17 +25809,17 @@ func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutAnalysis: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutExperimentStepAnalysisTemplateRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutAnalysis: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutExperimentStepAnalysisTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20616,27 +25829,77 @@ func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Templates = append(m.Templates, RolloutAnalysisTemplate{}) - if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } @@ -20670,6 +25933,26 @@ func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredForCompletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequiredForCompletion = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20691,7 +25974,7 @@ func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutAnalysisBackground) Unmarshal(dAtA []byte) error { +func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20710,19 +25993,136 @@ func (m *RolloutAnalysisBackground) Unmarshal(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutAnalysisBackground: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutAnalysisBackground: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RolloutExperimentTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RolloutExperimentTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecRef = ReplicaSetSpecRef(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RolloutAnalysis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20749,13 +26149,16 @@ func (m *RolloutAnalysisBackground) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RolloutAnalysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartingStep", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -20772,7 +26175,7 @@ func (m *RolloutAnalysisBackground) Unmarshal(dAtA []byte) error { break } } - m.StartingStep = &v + m.Weight = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20794,7 +26197,7 @@ func (m *RolloutAnalysisBackground) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { +func (m *RolloutList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20817,17 
+26220,17 @@ func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutAnalysisRunStatus: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutAnalysisRunStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20837,29 +26240,30 @@ func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20869,55 +26273,25 @@ func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = AnalysisPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, Rollout{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -20940,7 +26314,7 @@ func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutAnalysisTemplate) Unmarshal(dAtA []byte) error { +func (m *RolloutPause) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20963,17 +26337,17 @@ func (m *RolloutAnalysisTemplate) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutAnalysisTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutPause: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutAnalysisTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutPause: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20983,44 +26357,28 @@ func (m *RolloutAnalysisTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.TemplateName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + if m.Duration == nil { + m.Duration = &intstr.IntOrString{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ClusterScope = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21042,7 +26400,7 @@ func (m *RolloutAnalysisTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutCondition) Unmarshal(dAtA []byte) error { +func (m *RolloutSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21065,17 +26423,17 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutCondition: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21085,29 +26443,17 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Type = RolloutConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Replicas = &v case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21117,27 +26463,31 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21164,13 +26514,32 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21197,15 +26566,55 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - var stringLen uint64 + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21215,29 +26624,17 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + m.ProgressDeadlineSeconds = &v + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartAt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21247,77 +26644,31 @@ func (m *RolloutCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.RestartAt == nil { + m.RestartAt = &v1.Time{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.RestartAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutExperimentStep: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutExperimentStep: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkloadRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21344,16 +26695,18 @@ func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Templates = append(m.Templates, RolloutExperimentTemplate{}) - if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.WorkloadRef == nil { + m.WorkloadRef = &ObjectRef{} + } + if err := 
m.WorkloadRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Analysis", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21363,29 +26716,33 @@ func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Duration = DurationString(dAtA[iNdEx:postIndex]) + if m.Analysis == nil { + m.Analysis = &AnalysisRunStrategy{} + } + if err := m.Analysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Analyses", wireType) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineAbort", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21395,26 +26752,12 @@ func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Analyses = append(m.Analyses, RolloutExperimentStepAnalysisTemplateRef{}) - if err := m.Analyses[len(m.Analyses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ProgressDeadlineAbort = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21436,7 +26779,7 @@ func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error { +func (m *RolloutStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21459,17 +26802,17 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutExperimentStepAnalysisTemplateRef: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutExperimentStepAnalysisTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Abort", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21479,29 +26822,17 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Abort = bool(v != 0) case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PauseConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21511,27 +26842,29 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.TemplateName = string(dAtA[iNdEx:postIndex]) + m.PauseConditions = append(m.PauseConditions, PauseCondition{}) + if err := m.PauseConditions[len(m.PauseConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPause", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -21548,10 +26881,10 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error break } } - m.ClusterScope = bool(v != 0) + m.ControllerPause = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AbortedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21578,84 +26911,16 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Unmarshal(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, AnalysisRunArgument{}) - if err := m.Args[len(m.Args)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredForCompletion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.AbortedAt == nil { + m.AbortedAt = &v1.Time{} } - m.RequiredForCompletion = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.AbortedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutExperimentTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutExperimentTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentPodHash", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21683,11 +26948,11 @@ func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.CurrentPodHash = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpecRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentStepHash", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21715,13 +26980,13 @@ func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SpecRef = ReplicaSetSpecRef(dAtA[iNdEx:postIndex]) + m.CurrentStepHash = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var v int32 + m.Replicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21731,17 +26996,16 @@ func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) } - var msglen int + m.UpdatedReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21751,30 +27015,16 @@ func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var msglen int + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21784,31 +27034,53 @@ func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentStepIndex", wireType) } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 6: + m.CurrentStepIndex = &v + case 12: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -21825,60 +27097,42 @@ func (m *RolloutExperimentTemplate) Unmarshal(dAtA []byte) error { break } } - m.Weight = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + m.CollisionCount = &v + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RolloutList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ObservedGeneration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21905,13 +27159,14 @@ func (m *RolloutList) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, RolloutCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Canary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21938,64 +27193,13 @@ func (m *RolloutList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Rollout{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Canary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RolloutPause) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutPause: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutPause: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlueGreen", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22022,68 +27226,15 @@ func (m *RolloutPause) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Duration == nil { - m.Duration = &intstr.IntOrString{} - } - if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.BlueGreen.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RolloutSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: 
+ case 17: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HPAReplicas", wireType) } - var v int32 + m.HPAReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22093,17 +27244,16 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.HPAReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v - case 2: + case 18: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22113,31 +27263,59 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StableRS", wireType) } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.StableRS = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22164,15 +27342,18 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RestartedAt == nil { + m.RestartedAt = &v1.Time{} + } + if err := m.RestartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 21: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PromoteFull", wireType) } - m.MinReadySeconds = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22182,16 +27363,17 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 5: + m.PromoteFull = bool(v != 0) + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } - var msglen int + 
var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22201,30 +27383,29 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Phase = RolloutPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22234,37 +27415,29 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.Paused = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int32 + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkloadObservedGeneration", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22274,15 +27447,27 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ProgressDeadlineSeconds = &v - case 9: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkloadObservedGeneration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ALB", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22309,16 +27494,66 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RestartAt == nil { - m.RestartAt = &v1.Time{} + if m.ALB == nil { + m.ALB = &ALBStatus{} } - if err := m.RestartAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ALB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 10: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RolloutStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RolloutStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RolloutStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkloadRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlueGreen", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22345,16 +27580,16 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WorkloadRef == nil { - m.WorkloadRef = &ObjectRef{} + if m.BlueGreen == nil { + m.BlueGreen = &BlueGreenStrategy{} } - if err := m.WorkloadRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.BlueGreen.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Analysis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Canary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22381,33 +27616,13 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Analysis == nil { - m.Analysis = &AnalysisRunStrategy{} + if m.Canary == nil { + m.Canary = &CanaryStrategy{} } - if err := m.Analysis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Canary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineAbort", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineAbort = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22429,7 +27644,7 @@ func (m *RolloutSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutStatus) Unmarshal(dAtA []byte) error { +func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22452,35 +27667,15 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutStatus: wiretype end group for non-group") + return fmt.Errorf("proto: RolloutTrafficRouting: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
RolloutStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RolloutTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Abort", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Abort = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PauseConditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Istio", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22507,34 +27702,16 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PauseConditions = append(m.PauseConditions, PauseCondition{}) - if err := m.PauseConditions[len(m.PauseConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Istio == nil { + m.Istio = &IstioTrafficRouting{} + } + if err := m.Istio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPause", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ControllerPause = bool(v != 0) - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AbortedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Nginx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22561,18 +27738,18 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AbortedAt == nil { - m.AbortedAt = &v1.Time{} + if m.Nginx == nil { + m.Nginx = &NginxTrafficRouting{} } - if err := m.AbortedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Nginx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentPodHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ALB", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22582,29 +27759,33 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CurrentPodHash = string(dAtA[iNdEx:postIndex]) + if m.ALB == nil { + m.ALB = &ALBTrafficRouting{} + } + if err := m.ALB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentStepHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SMI", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22614,48 +27795,33 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CurrentStepHash = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if m.SMI == nil { + m.SMI = &SMITrafficRouting{} } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.SMI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ambassador", wireType) } - m.UpdatedReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22665,74 +27831,33 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if postIndex > l { + return io.ErrUnexpectedEOF } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentStepIndex", wireType) + if m.Ambassador == nil { + m.Ambassador = &AmbassadorTrafficRouting{} } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Ambassador.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.CurrentStepIndex = &v - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppMesh", wireType) } - var v 
int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22742,17 +27867,33 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.CollisionCount = &v - case 13: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppMesh == nil { + m.AppMesh = &AppMeshTrafficRouting{} + } + if err := m.AppMesh.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Traefik", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22762,27 +27903,31 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ObservedGeneration = string(dAtA[iNdEx:postIndex]) + if m.Traefik == nil { + m.Traefik = &TraefikTrafficRouting{} + } + if err := m.Traefik.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 14: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedRoutes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22809,14 +27954,64 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, RolloutCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ManagedRoutes = append(m.ManagedRoutes, MangedRoutes{}) + if err := m.ManagedRoutes[len(m.ManagedRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 15: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Canary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22843,13 +28038,16 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Canary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Method == nil { + m.Method = &StringMatch{} + } + if err := m.Method.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 16: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlueGreen", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22876,15 +28074,18 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.BlueGreen.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Path == nil { + m.Path = &StringMatch{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HPAReplicas", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) } - m.HPAReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22894,16 +28095,176 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HPAReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]StringMatch) + } + var mapkey string + mapvalue := &StringMatch{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen 
+ if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StringMatch{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Headers[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunSummary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunSummary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunSummary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - var stringLen uint64 + m.Count = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22913,29 +28274,16 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StableRS", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Successful", wireType) } - var stringLen uint64 + m.Successful = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22945,29 +28293,16 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Successful |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StableRS = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartedAt", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) } - var msglen int + m.Failed = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 
{ return ErrIntOverflowGenerated @@ -22977,33 +28312,16 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Failed |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RestartedAt == nil { - m.RestartedAt = &v1.Time{} - } - if err := m.RestartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PromoteFull", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Inconclusive", wireType) } - var v int + m.Inconclusive = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23013,17 +28331,16 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Inconclusive |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.PromoteFull = bool(v != 0) - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var stringLen uint64 + m.Error = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23033,27 +28350,64 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Error |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Phase = RolloutPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 23: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SMITrafficRouting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SMITrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RootService", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23081,11 +28435,11 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.RootService = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field WorkloadObservedGeneration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TrafficSplitName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23113,7 +28467,7 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkloadObservedGeneration = string(dAtA[iNdEx:postIndex]) + m.TrafficSplitName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -23136,7 +28490,7 @@ func (m *RolloutStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutStrategy) Unmarshal(dAtA []byte) error { +func (m *ScopeDetail) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23159,17 +28513,17 @@ func (m *RolloutStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: ScopeDetail: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ScopeDetail: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlueGreen", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23179,33 +28533,112 @@ func (m *RolloutStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BlueGreen == nil { - m.BlueGreen = &BlueGreenStrategy{} + m.Scope = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) } - if err := m.BlueGreen.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Region = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Start = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Canary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23215,27 +28648,23 @@ func (m *RolloutStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Canary == nil { - m.Canary = &CanaryStrategy{} - } - if err := m.Canary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.End = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -23258,7 +28687,7 @@ func (m *RolloutStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { +func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23281,17 +28710,17 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RolloutTrafficRouting: wiretype end group for non-group") + return fmt.Errorf("proto: SecretKeyRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretKeyRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Istio", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23301,33 +28730,29 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Istio == nil { - m.Istio = &IstioTrafficRouting{} - } - if err := m.Istio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nginx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23337,33 +28762,79 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Nginx == nil { - m.Nginx = &NginxTrafficRouting{} - } - if err := m.Nginx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ALB", wireType) + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - var msglen int + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetCanaryScale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetCanaryScale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23373,33 +28844,17 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ALB == nil { - m.ALB = &ALBTrafficRouting{} - } - if err := m.ALB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SMI", wireType) + m.Weight = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23409,33 +28864,17 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SMI == nil { - m.SMI = &SMITrafficRouting{} - } - if err := m.SMI.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ambassador", wireType) + m.Replicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchTrafficWeight", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23445,28 +28884,12 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Ambassador == nil { - m.Ambassador = &AmbassadorTrafficRouting{} - } - if err := m.Ambassador.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.MatchTrafficWeight = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23488,7 +28911,7 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } return nil } -func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { +func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23511,15 +28934,15 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SMITrafficRouting: wiretype end group for non-group") + return fmt.Errorf("proto: SetHeaderRoute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SMITrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetHeaderRoute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23547,13 +28970,13 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RootService = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TrafficSplitName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23563,23 +28986,25 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.TrafficSplitName = string(dAtA[iNdEx:postIndex]) + m.Match = append(m.Match, HeaderRoutingMatch{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -23602,7 +29027,7 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { } return nil } -func 
(m *ScopeDetail) Unmarshal(dAtA []byte) error { +func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23617,55 +29042,23 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeDetail: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeDetail: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetMirrorRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetMirrorRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23693,32 +29086,13 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Region = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) - } - m.Step = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Step |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23728,29 +29102,31 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Start = string(dAtA[iNdEx:postIndex]) + m.Match = append(m.Match, RouteMatch{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Percentage", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23760,24 +29136,12 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.End = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Percentage = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23799,7 +29163,7 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { +func (m *StickinessConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23822,17 +29186,17 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretKeyRef: wiretype end group for non-group") + return fmt.Errorf("proto: StickinessConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretKeyRef: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StickinessConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23842,29 +29206,17 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Enabled = bool(v != 0) case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationSeconds", wireType) } - var stringLen uint64 + m.DurationSeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23874,24 +29226,11 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.DurationSeconds |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23913,7 +29252,7 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { } 
return nil } -func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { +func (m *StringMatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23936,17 +29275,17 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetCanaryScale: wiretype end group for non-group") + return fmt.Errorf("proto: StringMatch: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetCanaryScale: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StringMatch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23956,17 +29295,29 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Weight = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exact = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23976,17 +29327,29 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchTrafficWeight", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regex", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23996,12 +29359,24 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.MatchTrafficWeight = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Regex = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24742,6 +30117,88 @@ func (m *TemplateStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *TraefikTrafficRouting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraefikTrafficRouting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraefikTrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WeightedTraefikServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WeightedTraefikServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TrafficWeights) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/apis/rollouts/v1alpha1/generated.proto b/pkg/apis/rollouts/v1alpha1/generated.proto index 817372450e..2bdc40c2ec 100644 --- a/pkg/apis/rollouts/v1alpha1/generated.proto +++ b/pkg/apis/rollouts/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2021 The Kubernetes sample-controller Authors. +Copyright 2022 The Kubernetes sample-controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,7 +29,15 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"; + +message ALBStatus { + optional AwsResourceRef loadBalancer = 1; + + optional AwsResourceRef canaryTargetGroup = 2; + + optional AwsResourceRef stableTargetGroup = 3; +} // ALBTrafficRouting configuration for ALB ingress controller to control traffic routing message ALBTrafficRouting { @@ -42,6 +50,10 @@ message ALBTrafficRouting { // RootService references the service in the ingress to the controller should add the action to optional string rootService = 3; + // AdditionalForwardConfig allows to specify further settings on the ForwaredConfig + // +optional + optional StickinessConfig stickinessConfig = 5; + // AnnotationPrefix has to match the configured annotation prefix on the alb ingress controller // +optional optional string annotationPrefix = 4; @@ -104,6 +116,18 @@ message AnalysisRunSpec { // Terminate is used to prematurely stop the run (e.g. 
rollout completed and analysis is no longer desired) optional bool terminate = 3; + + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated DryRun dryRun = 4; + + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated MeasurementRetention measurementRetention = 5; } // AnalysisRunStatus is the status for a AnalysisRun resource @@ -119,6 +143,12 @@ message AnalysisRunStatus { // StartedAt indicates when the analysisRun first started optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 4; + + // RunSummary contains the final results from the metric executions + optional RunSummary runSummary = 5; + + // DryRunSummary contains the final results from the metric executions in the dry-run mode + optional RunSummary dryRunSummary = 6; } // AnalysisRunStrategy configuration for the analysis runs and experiments to retain @@ -162,6 +192,18 @@ message AnalysisTemplateSpec { // +patchStrategy=merge // +optional repeated Argument args = 2; + + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated DryRun dryRun = 3; + + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated MeasurementRetention measurementRetention = 4; } // AntiAffinity defines which inter-pod scheduling rule to use for anti-affinity injection @@ -173,6 +215,39 @@ message AntiAffinity { optional RequiredDuringSchedulingIgnoredDuringExecution requiredDuringSchedulingIgnoredDuringExecution = 2; } +// AppMeshTrafficRouting configuration for AWS AppMesh service mesh to enable fine grain configuration +message AppMeshTrafficRouting { + // VirtualService references an AppMesh VirtualService and VirtualRouter to modify to shape traffic + optional AppMeshVirtualService virtualService = 1; + + // VirtualNodeGroup references an AppMesh Route targets that are formed by a set of VirtualNodes that are used to shape traffic + optional AppMeshVirtualNodeGroup virtualNodeGroup = 2; +} + +// AppMeshVirtualNodeGroup holds information about targets used for routing traffic to a virtual service +message AppMeshVirtualNodeGroup { + // CanaryVirtualNodeRef is the virtual node ref to modify labels with canary ReplicaSet pod template hash value + optional AppMeshVirtualNodeReference canaryVirtualNodeRef = 1; + + // StableVirtualNodeRef is the virtual node name to modify labels with stable ReplicaSet pod template hash value + optional AppMeshVirtualNodeReference stableVirtualNodeRef = 2; +} + +// AppMeshVirtualNodeReference holds a reference to VirtualNode.appmesh.k8s.aws +message AppMeshVirtualNodeReference { + // Name is the name of VirtualNode CR + optional string name = 1; +} + +// AppMeshVirtualService holds information on the virtual service the rollout needs to modify +message AppMeshVirtualService { + // Name is the name of virtual service + optional string name = 1; + + // Routes is list of HTTP routes within virtual router associated with virtual service to edit. If omitted, virtual service must have a single route of this type. 
+ repeated string routes = 2; +} + // Argument is an argument to an AnalysisRun message Argument { // Name is the name of the argument @@ -196,6 +271,12 @@ message ArgumentValueFrom { optional FieldRef fieldRef = 2; } +message AwsResourceRef { + optional string name = 1; + + optional string arn = 2; +} + // BlueGreenStatus status fields that only pertain to the blueGreen rollout message BlueGreenStatus { // PreviewSelector indicates which replicas set the preview service is serving traffic to @@ -297,6 +378,9 @@ message CanaryStatus { // Weights records the weights which have been set on traffic provider. Only valid when using traffic routing optional TrafficWeights weights = 4; + + // StablePingPong For the ping-pong feature holds the current stable service, ping or pong + optional string stablePingPong = 5; } // CanaryStep defines a step of a canary deployment. @@ -318,6 +402,14 @@ message CanaryStep { // SetCanaryScale defines how to scale the newRS without changing traffic weight // +optional optional SetCanaryScale setCanaryScale = 5; + + // SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service + // +optional + optional SetHeaderRoute setHeaderRoute = 6; + + // SetMirrorRoutes Mirrors traffic that matches rules to a particular destination + // +optional + optional SetMirrorRoute setMirrorRoute = 8; } // CanaryStrategy defines parameters for a Replica Based Canary @@ -402,6 +494,9 @@ message CanaryStrategy { // scaling down the stable as traffic is increased to canary. When disabled (the default behavior) // the stable ReplicaSet remains fully scaled to support instantaneous aborts. optional bool dynamicStableScale = 14; + + // PingPongSpec holds the ping and pong services + optional PingPongSpec pingPong = 15; } // CloudWatchMetric defines the cloudwatch query to perform canary analysis @@ -454,7 +549,7 @@ message CloudWatchMetricStatMetricDimension { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:path=clusteranalysistemplates,shortName=cat +// +kubebuilder:resource:path=clusteranalysistemplates,shortName=cat,scope=Cluster // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time since resource was created" message ClusterAnalysisTemplate { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -476,6 +571,13 @@ message DatadogMetric { optional string query = 2; } +// DryRun defines the settings for running the analysis in Dry-Run mode. +message DryRun { + // Name of the metric which needs to be evaluated in the Dry-Run mode. Wildcard '*' is supported and denotes all + // the available metrics. 
+ optional string metricName = 1; +} + // Experiment is a specification for an Experiment resource // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -588,6 +690,18 @@ message ExperimentSpec { // more information // +optional optional int32 scaleDownDelaySeconds = 6; + + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated DryRun dryRun = 7; + + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated MeasurementRetention measurementRetention = 8; } // ExperimentStatus is the status for a Experiment resource @@ -632,6 +746,23 @@ message GraphiteMetric { optional string query = 2; } +message HeaderRoutingMatch { + // HeaderName the name of the request header + optional string headerName = 1; + + // HeaderValue the value of the header + optional StringMatch headerValue = 2; +} + +// InfluxdbMetric defines the InfluxDB Flux query to perform canary analysis +message InfluxdbMetric { + // Profile is the name of the secret holding InfluxDB account configuration + optional string profile = 1; + + // Query is a raw InfluxDB flux query to perform + optional string query = 2; +} + // IstioDestinationRule is a reference to an Istio DestinationRule to modify and shape traffic message IstioDestinationRule { // Name holds the name of the DestinationRule @@ -707,6 +838,10 @@ message KayentaThreshold { optional int64 marginal = 2; } +message MangedRoutes { + optional string name = 1; +} + // Measurement is a point in time result value of a single metric, and the time it was measured message Measurement { // Phase is the status of this single measurement @@ -732,6 +867,15 @@ message Measurement { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time resumeAt = 7; } +// MeasurementRetention defines the settings for retaining the number of measurements during the analysis. +message MeasurementRetention { + // MetricName is the name of the metric on which this retention policy should be applied. + optional string metricName = 1; + + // Limit is the maximum number of measurements to be retained for this given metric. + optional int32 limit = 2; +} + // Metric defines a metric in which to perform analysis message Metric { // Name is the name of the metric @@ -807,6 +951,9 @@ message MetricProvider { // Graphite specifies the Graphite metric to query optional GraphiteMetric graphite = 9; + + // Influxdb specifies the influxdb metric to query + optional InfluxdbMetric influxdb = 10; } // MetricResult contain a list of the most recent measurements for a single metric along with @@ -843,6 +990,14 @@ message MetricResult { // ConsecutiveError is the number of times an error was encountered during measurement in succession // Resets to zero when non-errors are encountered optional int32 consecutiveError = 10; + + // DryRun indicates whether this metric is running in a dry-run mode or not + optional bool dryRun = 11; + + // Metadata stores additional metadata about this metric. It is used by different providers to store + // the final state which gets used while taking measurements. For example, Prometheus uses this field + // to store the final resolved query after substituting the template arguments. 
+ map<string, string> metadata = 12; } // NewRelicMetric defines the newrelic query to perform canary analysis @@ -886,6 +1041,15 @@ message PauseCondition { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; } +// PingPongSpec holds the ping and pong service name. +message PingPongSpec { + // name of the ping service + optional string pingService = 1; + + // name of the pong service + optional string pongService = 2; +} + // PodTemplateMetadata extra labels to add to the template message PodTemplateMetadata { // Labels Additional labels to add to the experiment @@ -934,6 +1098,18 @@ message RolloutAnalysis { // +patchMergeKey=name // +patchStrategy=merge repeated AnalysisRunArgument args = 2; + + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated DryRun dryRun = 3; + + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated MeasurementRetention measurementRetention = 4; } // RolloutAnalysisBackground defines a template that is used to create a background analysisRun @@ -1106,7 +1282,7 @@ message RolloutSpec { optional int32 progressDeadlineSeconds = 8; // ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds - // is exceeded if analysis is not used. Default is false. + // is exceeded. // +optional optional bool progressDeadlineAbort = 12; @@ -1122,7 +1298,11 @@ message RolloutStatus { // Abort cancel the current rollout progression optional bool abort = 1; - // PauseConditions indicates why the rollout is currently paused + // PauseConditions is a list of reasons why rollout became automatically paused (e.g. + // CanaryPauseStep, BlueGreenPause, InconclusiveAnalysis). The items in this list are populated + // by the controller but are cleared by the user (e.g. plugin, argo-cd resume action) when they + // wish to unpause. If pause conditions is empty, but controllerPause is true, it indicates + // the user manually unpaused the Rollout repeated PauseCondition pauseConditions = 2; // ControllerPause indicates the controller has paused the rollout. It is set to true when @@ -1178,10 +1358,6 @@ message RolloutStatus { // +optional optional string observedGeneration = 13; - // The generation of referenced workload observed by the rollout controller - // +optional - optional string workloadObservedGeneration = 24; - // Conditions a list of conditions a rollout can have.
// +optional repeated RolloutCondition conditions = 14; @@ -1217,6 +1393,13 @@ message RolloutStatus { // Message provides details on why the rollout is in its current phase optional string message = 23; + + // The generation of referenced workload observed by the rollout controller + // +optional + optional string workloadObservedGeneration = 24; + + // / ALB keeps information regarding the ALB and TargetGroups + optional ALBStatus alb = 25; } // RolloutStrategy defines strategy to apply during next rollout @@ -1244,6 +1427,48 @@ message RolloutTrafficRouting { // Ambassador holds specific configuration to use Ambassador to route traffic optional AmbassadorTrafficRouting ambassador = 5; + + // AppMesh holds specific configuration to use AppMesh to route traffic + optional AppMeshTrafficRouting appMesh = 6; + + // Traefik holds specific configuration to use Traefik to route traffic + optional TraefikTrafficRouting traefik = 7; + + // A list of HTTP routes that Argo Rollouts manages, the order of this array also becomes the precedence in the upstream + // traffic router. + repeated MangedRoutes managedRoutes = 8; +} + +message RouteMatch { + // Method What http methods should be mirrored + // +optional + optional StringMatch method = 1; + + // Path What url paths should be mirrored + // +optional + optional StringMatch path = 2; + + // Headers What request with matching headers should be mirrored + // +optional + map<string, StringMatch> headers = 3; +} + +// RunSummary contains the final results from the metric executions +message RunSummary { + // This is equal to the sum of Successful, Failed, Inconclusive + optional int32 count = 1; + + // Successful is the number of times the metric was measured Successful + optional int32 successful = 2; + + // Failed is the number of times the metric was measured Failed + optional int32 failed = 3; + + // Inconclusive is the number of times the metric was measured Inconclusive + optional int32 inconclusive = 4; + + // Error is the number of times an error was encountered during measurement + optional int32 error = 5; } // SMITrafficRouting configuration for TrafficSplit Custom Resource to control traffic routing @@ -1292,6 +1517,48 @@ message SetCanaryScale { optional bool matchTrafficWeight = 3; } +// SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service +message SetHeaderRoute { + // Name this is the name of the route to use for the mirroring of traffic this also needs + // to be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field + optional string name = 1; + + repeated HeaderRoutingMatch match = 2; +} + +message SetMirrorRoute { + // Name this is the name of the route to use for the mirroring of traffic this also needs + // to be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field + optional string name = 1; + + // Match Contains a list of rules that if mated will mirror the traffic to the services + // +optional + repeated RouteMatch match = 2; + + // Services The list of services to mirror the traffic to if the method, path, headers match + // Service string `json:"service" protobuf:"bytes,3,opt,name=service"` + // Percentage What percent of the traffic that matched the rules should be mirrored + optional int32 percentage = 4; +} + +message StickinessConfig { + optional bool enabled = 1; + + optional int64 durationSeconds = 2; +} + +// StringMatch Used to define what type of matching we will use exact, prefix, or regular expression +message StringMatch { + // Exact The
string must match exactly + optional string exact = 1; + + // Prefix The string will be prefixed matched + optional string prefix = 2; + + // Regex The string will be regular expression matched + optional string regex = 3; +} + // TLSRoute holds the information on the virtual service's TLS/HTTPS routes that are desired to be matched for changing weights. message TLSRoute { // Port number of the TLS Route desired to be matched in the given Istio VirtualService. @@ -1371,6 +1638,12 @@ message TemplateStatus { optional string podTemplateHash = 11; } +// TraefikTrafficRouting defines the configuration required to use Traefik as traffic router +message TraefikTrafficRouting { + // TraefikServiceName refer to the name of the Traefik service used to route traffic to the service + optional string weightedTraefikServiceName = 1; +} + // TrafficWeights describes the current status of how traffic has been split message TrafficWeights { // Canary is the current traffic weight split to canary ReplicaSet diff --git a/pkg/apis/rollouts/v1alpha1/openapi_generated.go b/pkg/apis/rollouts/v1alpha1/openapi_generated.go index dcae450417..fa929300aa 100644 --- a/pkg/apis/rollouts/v1alpha1/openapi_generated.go +++ b/pkg/apis/rollouts/v1alpha1/openapi_generated.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2021 The Kubernetes sample-controller Authors. +Copyright 2022 The Kubernetes sample-controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,12 +24,13 @@ limitations under the License. package v1alpha1 import ( - spec "github.com/go-openapi/spec" common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" ) func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ALBStatus": schema_pkg_apis_rollouts_v1alpha1_ALBStatus(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ALBTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_ALBTrafficRouting(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AmbassadorTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_AmbassadorTrafficRouting(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRun": schema_pkg_apis_rollouts_v1alpha1_AnalysisRun(ref), @@ -41,8 +43,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateList": schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateList(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateSpec": schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateSpec(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AntiAffinity": schema_pkg_apis_rollouts_v1alpha1_AntiAffinity(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_AppMeshTrafficRouting(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeGroup": schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualNodeGroup(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeReference": schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualNodeReference(ref), + 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualService": schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualService(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument": schema_pkg_apis_rollouts_v1alpha1_Argument(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ArgumentValueFrom": schema_pkg_apis_rollouts_v1alpha1_ArgumentValueFrom(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AwsResourceRef": schema_pkg_apis_rollouts_v1alpha1_AwsResourceRef(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.BlueGreenStatus": schema_pkg_apis_rollouts_v1alpha1_BlueGreenStatus(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.BlueGreenStrategy": schema_pkg_apis_rollouts_v1alpha1_BlueGreenStrategy(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CanaryStatus": schema_pkg_apis_rollouts_v1alpha1_CanaryStatus(ref), @@ -56,6 +63,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ClusterAnalysisTemplate": schema_pkg_apis_rollouts_v1alpha1_ClusterAnalysisTemplate(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ClusterAnalysisTemplateList": schema_pkg_apis_rollouts_v1alpha1_ClusterAnalysisTemplateList(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DatadogMetric": schema_pkg_apis_rollouts_v1alpha1_DatadogMetric(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun": schema_pkg_apis_rollouts_v1alpha1_DryRun(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Experiment": schema_pkg_apis_rollouts_v1alpha1_Experiment(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentAnalysisRunStatus": schema_pkg_apis_rollouts_v1alpha1_ExperimentAnalysisRunStatus(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentAnalysisTemplateRef": schema_pkg_apis_rollouts_v1alpha1_ExperimentAnalysisTemplateRef(ref), @@ -65,6 +73,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentStatus": schema_pkg_apis_rollouts_v1alpha1_ExperimentStatus(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.FieldRef": schema_pkg_apis_rollouts_v1alpha1_FieldRef(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.GraphiteMetric": schema_pkg_apis_rollouts_v1alpha1_GraphiteMetric(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.HeaderRoutingMatch": schema_pkg_apis_rollouts_v1alpha1_HeaderRoutingMatch(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.InfluxdbMetric": schema_pkg_apis_rollouts_v1alpha1_InfluxdbMetric(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.IstioDestinationRule": schema_pkg_apis_rollouts_v1alpha1_IstioDestinationRule(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.IstioTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_IstioTrafficRouting(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.IstioVirtualService": schema_pkg_apis_rollouts_v1alpha1_IstioVirtualService(ref), @@ -72,7 +82,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.KayentaMetric": schema_pkg_apis_rollouts_v1alpha1_KayentaMetric(ref), 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.KayentaScope": schema_pkg_apis_rollouts_v1alpha1_KayentaScope(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.KayentaThreshold": schema_pkg_apis_rollouts_v1alpha1_KayentaThreshold(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MangedRoutes": schema_pkg_apis_rollouts_v1alpha1_MangedRoutes(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Measurement": schema_pkg_apis_rollouts_v1alpha1_Measurement(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention": schema_pkg_apis_rollouts_v1alpha1_MeasurementRetention(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric": schema_pkg_apis_rollouts_v1alpha1_Metric(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MetricProvider": schema_pkg_apis_rollouts_v1alpha1_MetricProvider(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MetricResult": schema_pkg_apis_rollouts_v1alpha1_MetricResult(ref), @@ -80,6 +92,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NginxTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_NginxTrafficRouting(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ObjectRef": schema_pkg_apis_rollouts_v1alpha1_ObjectRef(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PauseCondition": schema_pkg_apis_rollouts_v1alpha1_PauseCondition(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PingPongSpec": schema_pkg_apis_rollouts_v1alpha1_PingPongSpec(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PodTemplateMetadata": schema_pkg_apis_rollouts_v1alpha1_PodTemplateMetadata(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PreferredDuringSchedulingIgnoredDuringExecution": schema_pkg_apis_rollouts_v1alpha1_PreferredDuringSchedulingIgnoredDuringExecution(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusMetric": schema_pkg_apis_rollouts_v1alpha1_PrometheusMetric(ref), @@ -99,14 +112,21 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutStatus": schema_pkg_apis_rollouts_v1alpha1_RolloutStatus(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutStrategy": schema_pkg_apis_rollouts_v1alpha1_RolloutStrategy(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_RolloutTrafficRouting(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RouteMatch": schema_pkg_apis_rollouts_v1alpha1_RouteMatch(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RunSummary": schema_pkg_apis_rollouts_v1alpha1_RunSummary(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SMITrafficRouting": schema_pkg_apis_rollouts_v1alpha1_SMITrafficRouting(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ScopeDetail": schema_pkg_apis_rollouts_v1alpha1_ScopeDetail(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SecretKeyRef": schema_pkg_apis_rollouts_v1alpha1_SecretKeyRef(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetCanaryScale": schema_pkg_apis_rollouts_v1alpha1_SetCanaryScale(ref), + 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetHeaderRoute": schema_pkg_apis_rollouts_v1alpha1_SetHeaderRoute(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetMirrorRoute": schema_pkg_apis_rollouts_v1alpha1_SetMirrorRoute(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StickinessConfig": schema_pkg_apis_rollouts_v1alpha1_StickinessConfig(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch": schema_pkg_apis_rollouts_v1alpha1_StringMatch(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TLSRoute": schema_pkg_apis_rollouts_v1alpha1_TLSRoute(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateService": schema_pkg_apis_rollouts_v1alpha1_TemplateService(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateSpec": schema_pkg_apis_rollouts_v1alpha1_TemplateSpec(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateStatus": schema_pkg_apis_rollouts_v1alpha1_TemplateStatus(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TraefikTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_TraefikTrafficRouting(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TrafficWeights": schema_pkg_apis_rollouts_v1alpha1_TrafficWeights(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ValueFrom": schema_pkg_apis_rollouts_v1alpha1_ValueFrom(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WavefrontMetric": schema_pkg_apis_rollouts_v1alpha1_WavefrontMetric(ref), @@ -116,6 +136,38 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA } } +func schema_pkg_apis_rollouts_v1alpha1_ALBStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "loadBalancer": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AwsResourceRef"), + }, + }, + "canaryTargetGroup": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AwsResourceRef"), + }, + }, + "stableTargetGroup": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AwsResourceRef"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AwsResourceRef"}, + } +} + func schema_pkg_apis_rollouts_v1alpha1_ALBTrafficRouting(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -146,6 +198,12 @@ func schema_pkg_apis_rollouts_v1alpha1_ALBTrafficRouting(ref common.ReferenceCal Format: "", }, }, + "stickinessConfig": { + SchemaProps: spec.SchemaProps{ + Description: "AdditionalForwardConfig allows to specify further settings on the ForwaredConfig", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StickinessConfig"), + }, + }, "annotationPrefix": { SchemaProps: spec.SchemaProps{ Description: "AnnotationPrefix has to match the configured annotation prefix on the alb ingress controller", @@ -157,6 +215,8 @@ func schema_pkg_apis_rollouts_v1alpha1_ALBTrafficRouting(ref common.ReferenceCal Required: []string{"ingress", "servicePort"}, }, }, + 
Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StickinessConfig"}, } } @@ -377,12 +437,52 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisRunSpec(ref common.ReferenceCallb Format: "", }, }, + "dryRun": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "DryRun object contains the settings for running the analysis in Dry-Run mode", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun"), + }, + }, + }, + }, + }, + "measurementRetention": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"), + }, + }, + }, + }, + }, }, Required: []string{"metrics"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, } } @@ -428,12 +528,25 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisRunStatus(ref common.ReferenceCal Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, + "runSummary": { + SchemaProps: spec.SchemaProps{ + Description: "RunSummary contains the final results from the metric executions", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RunSummary"), + }, + }, + "dryRunSummary": { + SchemaProps: spec.SchemaProps{ + Description: "DryRunSummary contains the final results from the metric executions in the dry-run mode", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RunSummary"), + }, + }, }, Required: []string{"phase"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MetricResult", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MetricResult", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RunSummary", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -602,12 +715,52 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateSpec(ref common.Reference }, }, }, + "dryRun": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "DryRun object contains the settings for running the analysis in Dry-Run mode", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + 
SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun"), + }, + }, + }, + }, + }, + "measurementRetention": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"), + }, + }, + }, + }, + }, }, Required: []string{"metrics"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, } } @@ -636,6 +789,120 @@ func schema_pkg_apis_rollouts_v1alpha1_AntiAffinity(ref common.ReferenceCallback } } +func schema_pkg_apis_rollouts_v1alpha1_AppMeshTrafficRouting(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AppMeshTrafficRouting configuration for AWS AppMesh service mesh to enable fine grain configuration", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "virtualService": { + SchemaProps: spec.SchemaProps{ + Description: "VirtualService references an AppMesh VirtualService and VirtualRouter to modify to shape traffic", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualService"), + }, + }, + "virtualNodeGroup": { + SchemaProps: spec.SchemaProps{ + Description: "VirtualNodeGroup references an AppMesh Route targets that are formed by a set of VirtualNodes that are used to shape traffic", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeGroup"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeGroup", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualService"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualNodeGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AppMeshVirtualNodeGroup holds information about targets used for routing traffic to a virtual service", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "canaryVirtualNodeRef": { + SchemaProps: spec.SchemaProps{ + Description: "CanaryVirtualNodeRef is the virtual node ref to modify labels with canary ReplicaSet pod template hash value", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeReference"), + }, + }, + "stableVirtualNodeRef": { + SchemaProps: spec.SchemaProps{ + Description: "StableVirtualNodeRef is the virtual node name to modify labels with stable ReplicaSet pod template hash value", + Ref: 
ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeReference"), + }, + }, + }, + Required: []string{"canaryVirtualNodeRef", "stableVirtualNodeRef"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualNodeReference"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualNodeReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AppMeshVirtualNodeReference holds a reference to VirtualNode.appmesh.k8s.aws", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of VirtualNode CR", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualService(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AppMeshVirtualService holds information on the virtual service the rollout needs to modify", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of virtual service", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "routes": { + SchemaProps: spec.SchemaProps{ + Description: "Routes is list of HTTP routes within virtual router associated with virtual service to edit. If omitted, virtual service must have a single route of this type.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_Argument(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -701,6 +968,33 @@ func schema_pkg_apis_rollouts_v1alpha1_ArgumentValueFrom(ref common.ReferenceCal } } +func schema_pkg_apis_rollouts_v1alpha1_AwsResourceRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "arn": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "arn"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_BlueGreenStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -890,6 +1184,13 @@ func schema_pkg_apis_rollouts_v1alpha1_CanaryStatus(ref common.ReferenceCallback Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TrafficWeights"), }, }, + "stablePingPong": { + SchemaProps: spec.SchemaProps{ + Description: "StablePingPong For the ping-pong feature holds the current stable service, ping or pong", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -936,11 +1237,23 @@ func schema_pkg_apis_rollouts_v1alpha1_CanaryStep(ref common.ReferenceCallback) Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetCanaryScale"), }, }, + 
"setHeaderRoute": { + SchemaProps: spec.SchemaProps{ + Description: "SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetHeaderRoute"), + }, + }, + "setMirrorRoute": { + SchemaProps: spec.SchemaProps{ + Description: "SetMirrorRoutes Mirrors traffic that matches rules to a particular destination", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetMirrorRoute"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysis", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentStep", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutPause", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetCanaryScale"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysis", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentStep", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutPause", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetCanaryScale", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetHeaderRoute", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SetMirrorRoute"}, } } @@ -1049,11 +1362,17 @@ func schema_pkg_apis_rollouts_v1alpha1_CanaryStrategy(ref common.ReferenceCallba Format: "", }, }, + "pingPong": { + SchemaProps: spec.SchemaProps{ + Description: "PingPongSpec holds the ping and pong services", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PingPongSpec"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AntiAffinity", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CanaryStep", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PodTemplateMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisBackground", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutTrafficRouting", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AntiAffinity", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CanaryStep", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PingPongSpec", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PodTemplateMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisBackground", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutTrafficRouting", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, } } @@ -1359,6 +1678,28 @@ func schema_pkg_apis_rollouts_v1alpha1_DatadogMetric(ref common.ReferenceCallbac } } +func schema_pkg_apis_rollouts_v1alpha1_DryRun(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DryRun defines the settings for running the analysis in Dry-Run mode.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metricName": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the metric which needs to be evaluated in the Dry-Run mode. 
Wildcard '*' is supported and denotes all the available metrics.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"metricName"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_Experiment(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1702,19 +2043,59 @@ func schema_pkg_apis_rollouts_v1alpha1_ExperimentSpec(ref common.ReferenceCallba Format: "int32", }, }, - }, - Required: []string{"templates"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentAnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateSpec"}, - } -} - -func schema_pkg_apis_rollouts_v1alpha1_ExperimentStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ + "dryRun": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "DryRun object contains the settings for running the analysis in Dry-Run mode", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun"), + }, + }, + }, + }, + }, + "measurementRetention": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"), + }, + }, + }, + }, + }, + }, + Required: []string{"templates"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentAnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateSpec"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_ExperimentStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ Description: "ExperimentStatus is the status for a Experiment resource", Type: []string{"object"}, Properties: map[string]spec.Schema{ @@ -1836,6 +2217,62 @@ func schema_pkg_apis_rollouts_v1alpha1_GraphiteMetric(ref common.ReferenceCallba } } +func schema_pkg_apis_rollouts_v1alpha1_HeaderRoutingMatch(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "headerName": { + SchemaProps: spec.SchemaProps{ + Description: "HeaderName the name of the request header", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "headerValue": { + SchemaProps: spec.SchemaProps{ + Description: "HeaderValue 
the value of the header", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch"), + }, + }, + }, + Required: []string{"headerName", "headerValue"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_InfluxdbMetric(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "InfluxdbMetric defines the InfluxDB Flux query to perform canary analysis", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "profile": { + SchemaProps: spec.SchemaProps{ + Description: "Profile is the name of the secret holding InfluxDB account configuration", + Type: []string{"string"}, + Format: "", + }, + }, + "query": { + SchemaProps: spec.SchemaProps{ + Description: "Query is a raw InfluxDB flux query to perform", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_IstioDestinationRule(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2133,6 +2570,26 @@ func schema_pkg_apis_rollouts_v1alpha1_KayentaThreshold(ref common.ReferenceCall } } +func schema_pkg_apis_rollouts_v1alpha1_MangedRoutes(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_Measurement(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2205,6 +2662,36 @@ func schema_pkg_apis_rollouts_v1alpha1_Measurement(ref common.ReferenceCallback) } } +func schema_pkg_apis_rollouts_v1alpha1_MeasurementRetention(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MeasurementRetention defines the settings for retaining the number of measurements during the analysis.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metricName": { + SchemaProps: spec.SchemaProps{ + Description: "MetricName is the name of the metric on which this retention policy should be applied.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "limit": { + SchemaProps: spec.SchemaProps{ + Description: "Limit is the maximum number of measurements to be retained for this given metric.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"metricName", "limit"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_Metric(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2349,11 +2836,17 @@ func schema_pkg_apis_rollouts_v1alpha1_MetricProvider(ref common.ReferenceCallba Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.GraphiteMetric"), }, }, + "influxdb": { + SchemaProps: spec.SchemaProps{ + Description: "Influxdb specifies the influxdb metric to query", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.InfluxdbMetric"), + }, + }, }, }, }, Dependencies: []string{ - 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CloudWatchMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DatadogMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.GraphiteMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.JobMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.KayentaMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NewRelicMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WavefrontMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WebMetric"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CloudWatchMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DatadogMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.GraphiteMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.InfluxdbMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.JobMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.KayentaMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NewRelicMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WavefrontMetric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WebMetric"}, } } @@ -2443,6 +2936,29 @@ func schema_pkg_apis_rollouts_v1alpha1_MetricResult(ref common.ReferenceCallback Format: "int32", }, }, + "dryRun": { + SchemaProps: spec.SchemaProps{ + Description: "DryRun indicates whether this metric is running in a dry-run mode or not", + Type: []string{"boolean"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata stores additional metadata about this metric. It is used by different providers to store the final state which gets used while taking measurements. 
For example, Prometheus uses this field to store the final resolved query after substituting the template arguments.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"name", "phase"}, }, @@ -2588,6 +3104,36 @@ func schema_pkg_apis_rollouts_v1alpha1_PauseCondition(ref common.ReferenceCallba } } +func schema_pkg_apis_rollouts_v1alpha1_PingPongSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PingPongSpec holds the ping and pong service name.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pingService": { + SchemaProps: spec.SchemaProps{ + Description: "name of the ping service", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "pongService": { + SchemaProps: spec.SchemaProps{ + Description: "name of the pong service", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"pingService", "pongService"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_PodTemplateMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2782,11 +3328,51 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysis(ref common.ReferenceCallb }, }, }, + "dryRun": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "DryRun object contains the settings for running the analysis in Dry-Run mode", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun"), + }, + }, + }, + }, + }, + "measurementRetention": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"}, } } @@ -2831,6 +3417,46 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisBackground(ref common.Refe }, }, }, + "dryRun": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": 
"merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "DryRun object contains the settings for running the analysis in Dry-Run mode", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun"), + }, + }, + }, + }, + }, + "measurementRetention": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"), + }, + }, + }, + }, + }, "startingStep": { SchemaProps: spec.SchemaProps{ Description: "StartingStep indicates which step the background analysis should start on If not listed, controller defaults to 0", @@ -2842,7 +3468,7 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisBackground(ref common.Refe }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"}, } } @@ -3295,7 +3921,7 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutSpec(ref common.ReferenceCallback) }, "progressDeadlineAbort": { SchemaProps: spec.SchemaProps{ - Description: "ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds is exceeded if analysis is not used. Default is false.", + Description: "ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds is exceeded.", Type: []string{"boolean"}, Format: "", }, @@ -3336,7 +3962,7 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutStatus(ref common.ReferenceCallbac }, "pauseConditions": { SchemaProps: spec.SchemaProps{ - Description: "PauseConditions indicates why the rollout is currently paused", + Description: "PauseConditions is a list of reasons why rollout became automatically paused (e.g. CanaryPauseStep, BlueGreenPause, InconclusiveAnalysis). The items in this list are populated by the controller but are cleared by the user (e.g. plugin, argo-cd resume action) when they wish to unpause. 
If pause conditions is empty, but controllerPause is true, it indicates the user manually unpaused the Rollout", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3424,13 +4050,6 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutStatus(ref common.ReferenceCallbac Format: "", }, }, - "workloadObservedGeneration": { - SchemaProps: spec.SchemaProps{ - Description: "The generation of referenced workload observed by the rollout controller", - Type: []string{"string"}, - Format: "", - }, - }, "conditions": { SchemaProps: spec.SchemaProps{ Description: "Conditions a list of conditions a rollout can have.", @@ -3507,11 +4126,24 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutStatus(ref common.ReferenceCallbac Format: "", }, }, + "workloadObservedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "The generation of referenced workload observed by the rollout controller", + Type: []string{"string"}, + Format: "", + }, + }, + "alb": { + SchemaProps: spec.SchemaProps{ + Description: "/ ALB keeps information regarding the ALB and TargetGroups", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ALBStatus"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.BlueGreenStatus", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CanaryStatus", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PauseCondition", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ALBStatus", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.BlueGreenStatus", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.CanaryStatus", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PauseCondition", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -3577,11 +4209,126 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutTrafficRouting(ref common.Referenc Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AmbassadorTrafficRouting"), }, }, + "appMesh": { + SchemaProps: spec.SchemaProps{ + Description: "AppMesh holds specific configuration to use AppMesh to route traffic", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshTrafficRouting"), + }, + }, + "traefik": { + SchemaProps: spec.SchemaProps{ + Description: "Traefik holds specific configuration to use Traefik to route traffic", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TraefikTrafficRouting"), + }, + }, + "managedRoutes": { + SchemaProps: spec.SchemaProps{ + Description: "A list of HTTP routes that Argo Rollouts manages, the order of this array also becomes the precedence in the upstream traffic router.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MangedRoutes"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ALBTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AmbassadorTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.IstioTrafficRouting", 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NginxTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SMITrafficRouting"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ALBTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AmbassadorTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.IstioTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MangedRoutes", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NginxTrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.SMITrafficRouting", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TraefikTrafficRouting"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_RouteMatch(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "method": { + SchemaProps: spec.SchemaProps{ + Description: "Method What http methods should be mirrored", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch"), + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path What url paths should be mirrored", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch"), + }, + }, + "headers": { + SchemaProps: spec.SchemaProps{ + Description: "Headers What request with matching headers should be mirrored", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_RunSummary(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RunSummary contains the final results from the metric executions", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "count": { + SchemaProps: spec.SchemaProps{ + Description: "This is equal to the sum of Successful, Failed, Inconclusive", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "successful": { + SchemaProps: spec.SchemaProps{ + Description: "Successful is the number of times the metric was measured Successful", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failed": { + SchemaProps: spec.SchemaProps{ + Description: "Failed is the number of times the metric was measured Failed", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "inconclusive": { + SchemaProps: spec.SchemaProps{ + Description: "Inconclusive is the number of times the metric was measured Inconclusive", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "error": { + SchemaProps: spec.SchemaProps{ + Description: "Error is the number of times an error was encountered during measurement", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, } } @@ -3723,6 +4470,146 @@ func schema_pkg_apis_rollouts_v1alpha1_SetCanaryScale(ref common.ReferenceCallba } } +func 
schema_pkg_apis_rollouts_v1alpha1_SetHeaderRoute(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name this is the name of the route to use for the mirroring of traffic this also needs to be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field", + Type: []string{"string"}, + Format: "", + }, + }, + "match": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.HeaderRoutingMatch"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.HeaderRoutingMatch"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_SetMirrorRoute(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name this is the name of the route to use for the mirroring of traffic this also needs to be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "match": { + SchemaProps: spec.SchemaProps{ + Description: "Match Contains a list of rules that, if matched, will mirror the traffic to the services", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RouteMatch"), + }, + }, + }, + }, + }, + "percentage": { + SchemaProps: spec.SchemaProps{ + Description: "Services The list of services to mirror the traffic to if the method, path, headers match Service string `json:\"service\" protobuf:\"bytes,3,opt,name=service\"` Percentage What percent of the traffic that matched the rules should be mirrored", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RouteMatch"}, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_StickinessConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "enabled": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "durationSeconds": { + SchemaProps: spec.SchemaProps{ + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + Required: []string{"enabled", "durationSeconds"}, + }, + }, + } +} + +func schema_pkg_apis_rollouts_v1alpha1_StringMatch(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StringMatch Used to define what type of matching we will use exact, prefix, or regular expression", + Type: 
[]string{"object"}, + Properties: map[string]spec.Schema{ + "exact": { + SchemaProps: spec.SchemaProps{ + Description: "Exact The string must match exactly", + Type: []string{"string"}, + Format: "", + }, + }, + "prefix": { + SchemaProps: spec.SchemaProps{ + Description: "Prefix The string will be prefixed matched", + Type: []string{"string"}, + Format: "", + }, + }, + "regex": { + SchemaProps: spec.SchemaProps{ + Description: "Regex The string will be regular expression matched", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_TLSRoute(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3921,6 +4808,28 @@ func schema_pkg_apis_rollouts_v1alpha1_TemplateStatus(ref common.ReferenceCallba } } +func schema_pkg_apis_rollouts_v1alpha1_TraefikTrafficRouting(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TraefikTrafficRouting defines the configuration required to use Traefik as traffic router", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "weightedTraefikServiceName": { + SchemaProps: spec.SchemaProps{ + Description: "TraefikServiceName refer to the name of the Traefik service used to route traffic to the service", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"weightedTraefikServiceName"}, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_TrafficWeights(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/rollouts/v1alpha1/types.go b/pkg/apis/rollouts/v1alpha1/types.go index 4cb87b334b..7ffef7b75c 100644 --- a/pkg/apis/rollouts/v1alpha1/types.go +++ b/pkg/apis/rollouts/v1alpha1/types.go @@ -71,7 +71,7 @@ type RolloutSpec struct { // Defaults to 600s. ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=progressDeadlineSeconds"` // ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds - // is exceeded if analysis is not used. Default is false. + // is exceeded. // +optional ProgressDeadlineAbort bool `json:"progressDeadlineAbort,omitempty" protobuf:"varint,12,opt,name=progressDeadlineAbort"` // RestartAt indicates when all the pods of a Rollout should be restarted @@ -305,6 +305,19 @@ type CanaryStrategy struct { // scaling down the stable as traffic is increased to canary. When disabled (the default behavior) // the stable ReplicaSet remains fully scaled to support instantaneous aborts. DynamicStableScale bool `json:"dynamicStableScale,omitempty" protobuf:"varint,14,opt,name=dynamicStableScale"` + // PingPongSpec holds the ping and pong services + PingPong *PingPongSpec `json:"pingPong,omitempty" protobuf:"varint,15,opt,name=pingPong"` + // Assuming the desired number of pods in a stable or canary ReplicaSet is not zero, then make sure it is at least + // MinPodsPerRS for High Availability. Only applicable for TrafficRoutedCanary + MinPodsPerReplicaSet *int32 `json:"minPodsPerRS,omitempty" protobuf:"varint,16,opt,name=minPodsPerRS"` +} + +// PingPongSpec holds the ping and pong service name. 
+type PingPongSpec struct { + // name of the ping service + PingService string `json:"pingService" protobuf:"bytes,1,opt,name=pingService"` + // name of the pong service + PongService string `json:"pongService" protobuf:"bytes,2,opt,name=pongService"` } // AnalysisRunStrategy configuration for the analysis runs and experiments to retain @@ -324,11 +337,19 @@ type ALBTrafficRouting struct { ServicePort int32 `json:"servicePort" protobuf:"varint,2,opt,name=servicePort"` // RootService references the service in the ingress to the controller should add the action to RootService string `json:"rootService,omitempty" protobuf:"bytes,3,opt,name=rootService"` + // StickinessConfig allows specifying further settings on the ForwardConfig + // +optional + StickinessConfig *StickinessConfig `json:"stickinessConfig,omitempty" protobuf:"bytes,5,opt,name=stickinessConfig"` // AnnotationPrefix has to match the configured annotation prefix on the alb ingress controller // +optional AnnotationPrefix string `json:"annotationPrefix,omitempty" protobuf:"bytes,4,opt,name=annotationPrefix"` } +type StickinessConfig struct { + Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"` + DurationSeconds int64 `json:"durationSeconds" protobuf:"varint,2,opt,name=durationSeconds"` +} + // RolloutTrafficRouting hosts all the different configuration for supported service meshes to enable more fine-grained traffic routing type RolloutTrafficRouting struct { // Istio holds Istio specific configuration to route traffic @@ -341,6 +362,25 @@ type RolloutTrafficRouting struct { SMI *SMITrafficRouting `json:"smi,omitempty" protobuf:"bytes,4,opt,name=smi"` // Ambassador holds specific configuration to use Ambassador to route traffic Ambassador *AmbassadorTrafficRouting `json:"ambassador,omitempty" protobuf:"bytes,5,opt,name=ambassador"` + // AppMesh holds specific configuration to use AppMesh to route traffic + AppMesh *AppMeshTrafficRouting `json:"appMesh,omitempty" protobuf:"bytes,6,opt,name=appMesh"` + // Traefik holds specific configuration to use Traefik to route traffic + Traefik *TraefikTrafficRouting `json:"traefik,omitempty" protobuf:"bytes,7,opt,name=traefik"` + // A list of HTTP routes that Argo Rollouts manages, the order of this array also becomes the precedence in the upstream + // traffic router. 
+ ManagedRoutes []MangedRoutes `json:"managedRoutes,omitempty" protobuf:"bytes,8,rep,name=managedRoutes"` +} + +type MangedRoutes struct { + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + //Possibly name for future use + //canaryRoute bool +} + +// TraefikTrafficRouting defines the configuration required to use Traefik as traffic router +type TraefikTrafficRouting struct { + // TraefikServiceName refer to the name of the Traefik service used to route traffic to the service + WeightedTraefikServiceName string `json:"weightedTraefikServiceName" protobuf:"bytes,1,name=weightedTraefikServiceName"` } // AmbassadorTrafficRouting defines the configuration required to use Ambassador as traffic @@ -370,6 +410,9 @@ type NginxTrafficRouting struct { StableIngress string `json:"stableIngress" protobuf:"bytes,2,opt,name=stableIngress"` // +optional AdditionalIngressAnnotations map[string]string `json:"additionalIngressAnnotations,omitempty" protobuf:"bytes,3,rep,name=additionalIngressAnnotations"` + // AdditionalStableIngresses refers to the names of `Ingress` resources in the same namespace as the `Rollout` in a multi ingress scenario + // +optional + AdditionalStableIngresses []string `json:"additionalStableIngresses,omitempty" protobuf:"bytes,4,rep,name=additionalStableIngresses"` } // IstioTrafficRouting configuration for Istio service mesh to enable fine grain configuration @@ -410,6 +453,36 @@ type IstioDestinationRule struct { StableSubsetName string `json:"stableSubsetName" protobuf:"bytes,3,opt,name=stableSubsetName"` } +// AppMeshTrafficRouting configuration for AWS AppMesh service mesh to enable fine grain configuration +type AppMeshTrafficRouting struct { + // VirtualService references an AppMesh VirtualService and VirtualRouter to modify to shape traffic + VirtualService *AppMeshVirtualService `json:"virtualService,omitempty" protobuf:"bytes,1,opt,name=virtualService"` + // VirtualNodeGroup references an AppMesh Route targets that are formed by a set of VirtualNodes that are used to shape traffic + VirtualNodeGroup *AppMeshVirtualNodeGroup `json:"virtualNodeGroup,omitempty" protobuf:"bytes,2,opt,name=virtualNodeGroup"` +} + +// AppMeshVirtualService holds information on the virtual service the rollout needs to modify +type AppMeshVirtualService struct { + // Name is the name of virtual service + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Routes is list of HTTP routes within virtual router associated with virtual service to edit. If omitted, virtual service must have a single route of this type. 
+ Routes []string `json:"routes,omitempty" protobuf:"bytes,2,rep,name=routes"` +} + +// AppMeshVirtualNodeGroup holds information about targets used for routing traffic to a virtual service +type AppMeshVirtualNodeGroup struct { + // CanaryVirtualNodeRef is the virtual node ref to modify labels with canary ReplicaSet pod template hash value + CanaryVirtualNodeRef *AppMeshVirtualNodeReference `json:"canaryVirtualNodeRef" protobuf:"bytes,1,opt,name=canaryVirtualNodeRef"` + // StableVirtualNodeRef is the virtual node name to modify labels with stable ReplicaSet pod template hash value + StableVirtualNodeRef *AppMeshVirtualNodeReference `json:"stableVirtualNodeRef" protobuf:"bytes,2,opt,name=stableVirtualNodeRef"` +} + +// AppMeshVirtualNodeReference holds a reference to VirtualNode.appmesh.k8s.aws +type AppMeshVirtualNodeReference struct { + // Name is the name of VirtualNode CR + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // RolloutExperimentStep defines a template that is used to create a experiment for a step type RolloutExperimentStep struct { // Templates what templates that should be added to the experiment. Should be non-nil @@ -496,6 +569,63 @@ type CanaryStep struct { // SetCanaryScale defines how to scale the newRS without changing traffic weight // +optional SetCanaryScale *SetCanaryScale `json:"setCanaryScale,omitempty" protobuf:"bytes,5,opt,name=setCanaryScale"` + // SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service + // +optional + SetHeaderRoute *SetHeaderRoute `json:"setHeaderRoute,omitempty" protobuf:"bytes,6,opt,name=setHeaderRoute"` + // SetMirrorRoutes Mirrors traffic that matches rules to a particular destination + // +optional + SetMirrorRoute *SetMirrorRoute `json:"setMirrorRoute,omitempty" protobuf:"bytes,8,opt,name=setMirrorRoute"` +} + +type SetMirrorRoute struct { + // Name this is the name of the route to use for the mirroring of traffic this also needs + // to be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Match Contains a list of rules that, if matched, will mirror the traffic to the services + // +optional + Match []RouteMatch `json:"match,omitempty" protobuf:"bytes,2,opt,name=match"` + + // Services The list of services to mirror the traffic to if the method, path, headers match + //Service string `json:"service" protobuf:"bytes,3,opt,name=service"` + // Percentage What percent of the traffic that matched the rules should be mirrored + Percentage *int32 `json:"percentage,omitempty" protobuf:"varint,4,opt,name=percentage"` +} + +type RouteMatch struct { + // Method What http methods should be mirrored + // +optional + Method *StringMatch `json:"method,omitempty" protobuf:"bytes,1,opt,name=method"` + // Path What url paths should be mirrored + // +optional + Path *StringMatch `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Headers What request with matching headers should be mirrored + // +optional + Headers map[string]StringMatch `json:"headers,omitempty" protobuf:"bytes,3,opt,name=headers"` +} + +// StringMatch Used to define what type of matching we will use exact, prefix, or regular expression +type StringMatch struct { + // Exact The string must match exactly + Exact string `json:"exact,omitempty" protobuf:"bytes,1,opt,name=exact"` + // Prefix The string will be prefixed matched + Prefix string `json:"prefix,omitempty" protobuf:"bytes,2,opt,name=prefix"` + // Regex The string 
will be regular expression matched + Regex string `json:"regex,omitempty" protobuf:"bytes,3,opt,name=regex"` +} + +// SetHeaderRoute defines the route with specified header name to send 100% of traffic to the canary service +type SetHeaderRoute struct { + // Name this is the name of the route to use for the mirroring of traffic this also needs + // to be included in the `spec.strategy.canary.trafficRouting.managedRoutes` field + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + Match []HeaderRoutingMatch `json:"match,omitempty" protobuf:"bytes,2,rep,name=match"` +} + +type HeaderRoutingMatch struct { + // HeaderName the name of the request header + HeaderName string `json:"headerName" protobuf:"bytes,1,opt,name=headerName"` + // HeaderValue the value of the header + HeaderValue *StringMatch `json:"headerValue" protobuf:"bytes,2,opt,name=headerValue"` } // SetCanaryScale defines how to scale the newRS without changing traffic weight @@ -527,6 +657,16 @@ type RolloutAnalysis struct { // +patchMergeKey=name // +patchStrategy=merge Args []AnalysisRunArgument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=args"` + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + DryRun []DryRun `json:"dryRun,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,3,rep,name=dryRun"` + // MeasurementRetention object contains the settings for retaining the number of measurements during the analysis + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,4,rep,name=measurementRetention"` } type RolloutAnalysisTemplate struct { @@ -666,7 +806,11 @@ const ( type RolloutStatus struct { // Abort cancel the current rollout progression Abort bool `json:"abort,omitempty" protobuf:"varint,1,opt,name=abort"` - // PauseConditions indicates why the rollout is currently paused + // PauseConditions is a list of reasons why rollout became automatically paused (e.g. + // CanaryPauseStep, BlueGreenPause, InconclusiveAnalysis). The items in this list are populated + // by the controller but are cleared by the user (e.g. plugin, argo-cd resume action) when they + // wish to unpause. If pause conditions is empty, but controllerPause is true, it indicates + // the user manually unpaused the Rollout PauseConditions []PauseCondition `json:"pauseConditions,omitempty" protobuf:"bytes,2,rep,name=pauseConditions"` // ControllerPause indicates the controller has paused the rollout. It is set to true when // the controller adds a pause condition. This field helps to discern the scenario where a @@ -740,7 +884,7 @@ type RolloutStatus struct { // +optional WorkloadObservedGeneration string `json:"workloadObservedGeneration,omitempty" protobuf:"bytes,24,opt,name=workloadObservedGeneration"` /// ALB keeps information regarding the ALB and TargetGroups - ALB ALBStatus `json:"alb,omitempty" protobuf:"bytes,25,opt,name=alb"` + ALB *ALBStatus `json:"alb,omitempty" protobuf:"bytes,25,opt,name=alb"` } // BlueGreenStatus status fields that only pertain to the blueGreen rollout @@ -770,8 +914,17 @@ type CanaryStatus struct { CurrentExperiment string `json:"currentExperiment,omitempty" protobuf:"bytes,3,opt,name=currentExperiment"` // Weights records the weights which have been set on traffic provider. 
Only valid when using traffic routing Weights *TrafficWeights `json:"weights,omitempty" protobuf:"bytes,4,opt,name=weights"` + // StablePingPong For the ping-pong feature holds the current stable service, ping or pong + StablePingPong PingPongType `json:"stablePingPong,omitempty" protobuf:"bytes,5,opt,name=stablePingPong"` } +type PingPongType string + +const ( + PPPing PingPongType = "ping" + PPPong PingPongType = "pong" +) + // TrafficWeights describes the current status of how traffic has been split type TrafficWeights struct { // Canary is the current traffic weight split to canary ReplicaSet @@ -832,8 +985,13 @@ const ( RolloutReplicaFailure RolloutConditionType = "ReplicaFailure" // RolloutPaused means that rollout is in a paused state. It is still progressing at this point. RolloutPaused RolloutConditionType = "Paused" - // RolloutCompleted means that rollout is in a completed state. It is still progressing at this point. + // RolloutCompleted indicates that the rollout completed its update to the desired revision and is not in the middle + // of any update. Note that a Completed rollout could also be considered Progressing or Degraded, if its Pods become + // unavailable sometime after the update completes. RolloutCompleted RolloutConditionType = "Completed" + // RolloutHealthy means that rollout is in a completed state and is healthy. Which means that all the pods have been updated + // and are passing their health checks and are ready to serve traffic. + RolloutHealthy RolloutConditionType = "Healthy" ) // RolloutCondition describes the state of a rollout at a certain point. diff --git a/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go index 3f6ae73cf8..e16f370a91 100644 --- a/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -48,6 +49,11 @@ func (in *ALBStatus) DeepCopy() *ALBStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ALBTrafficRouting) DeepCopyInto(out *ALBTrafficRouting) { *out = *in + if in.StickinessConfig != nil { + in, out := &in.StickinessConfig, &out.StickinessConfig + *out = new(StickinessConfig) + **out = **in + } return } @@ -181,6 +187,16 @@ func (in *AnalysisRunSpec) DeepCopyInto(out *AnalysisRunSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]DryRun, len(*in)) + copy(*out, *in) + } + if in.MeasurementRetention != nil { + in, out := &in.MeasurementRetention, &out.MeasurementRetention + *out = make([]MeasurementRetention, len(*in)) + copy(*out, *in) + } return } @@ -208,6 +224,12 @@ func (in *AnalysisRunStatus) DeepCopyInto(out *AnalysisRunStatus) { in, out := &in.StartedAt, &out.StartedAt *out = (*in).DeepCopy() } + out.RunSummary = in.RunSummary + if in.DryRunSummary != nil { + in, out := &in.DryRunSummary, &out.DryRunSummary + *out = new(RunSummary) + **out = **in + } return } @@ -324,6 +346,16 @@ func (in *AnalysisTemplateSpec) DeepCopyInto(out *AnalysisTemplateSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]DryRun, len(*in)) + copy(*out, *in) + } + if in.MeasurementRetention != nil { + in, out := &in.MeasurementRetention, &out.MeasurementRetention + *out = make([]MeasurementRetention, len(*in)) + copy(*out, *in) + } return } @@ -363,6 +395,95 @@ func (in *AntiAffinity) DeepCopy() *AntiAffinity { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMeshTrafficRouting) DeepCopyInto(out *AppMeshTrafficRouting) { + *out = *in + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(AppMeshVirtualService) + (*in).DeepCopyInto(*out) + } + if in.VirtualNodeGroup != nil { + in, out := &in.VirtualNodeGroup, &out.VirtualNodeGroup + *out = new(AppMeshVirtualNodeGroup) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMeshTrafficRouting. +func (in *AppMeshTrafficRouting) DeepCopy() *AppMeshTrafficRouting { + if in == nil { + return nil + } + out := new(AppMeshTrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMeshVirtualNodeGroup) DeepCopyInto(out *AppMeshVirtualNodeGroup) { + *out = *in + if in.CanaryVirtualNodeRef != nil { + in, out := &in.CanaryVirtualNodeRef, &out.CanaryVirtualNodeRef + *out = new(AppMeshVirtualNodeReference) + **out = **in + } + if in.StableVirtualNodeRef != nil { + in, out := &in.StableVirtualNodeRef, &out.StableVirtualNodeRef + *out = new(AppMeshVirtualNodeReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMeshVirtualNodeGroup. +func (in *AppMeshVirtualNodeGroup) DeepCopy() *AppMeshVirtualNodeGroup { + if in == nil { + return nil + } + out := new(AppMeshVirtualNodeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMeshVirtualNodeReference) DeepCopyInto(out *AppMeshVirtualNodeReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMeshVirtualNodeReference. 
+func (in *AppMeshVirtualNodeReference) DeepCopy() *AppMeshVirtualNodeReference { + if in == nil { + return nil + } + out := new(AppMeshVirtualNodeReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMeshVirtualService) DeepCopyInto(out *AppMeshVirtualService) { + *out = *in + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMeshVirtualService. +func (in *AppMeshVirtualService) DeepCopy() *AppMeshVirtualService { + if in == nil { + return nil + } + out := new(AppMeshVirtualService) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Argument) DeepCopyInto(out *Argument) { *out = *in @@ -587,6 +708,16 @@ func (in *CanaryStep) DeepCopyInto(out *CanaryStep) { *out = new(SetCanaryScale) (*in).DeepCopyInto(*out) } + if in.SetHeaderRoute != nil { + in, out := &in.SetHeaderRoute, &out.SetHeaderRoute + *out = new(SetHeaderRoute) + (*in).DeepCopyInto(*out) + } + if in.SetMirrorRoute != nil { + in, out := &in.SetMirrorRoute, &out.SetMirrorRoute + *out = new(SetMirrorRoute) + (*in).DeepCopyInto(*out) + } return } @@ -660,6 +791,11 @@ func (in *CanaryStrategy) DeepCopyInto(out *CanaryStrategy) { *out = new(int32) **out = **in } + if in.PingPong != nil { + in, out := &in.PingPong, &out.PingPong + *out = new(PingPongSpec) + **out = **in + } return } @@ -873,6 +1009,22 @@ func (in *DatadogMetric) DeepCopy() *DatadogMetric { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DryRun) DeepCopyInto(out *DryRun) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DryRun. +func (in *DryRun) DeepCopy() *DryRun { + if in == nil { + return nil + } + out := new(DryRun) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Experiment) DeepCopyInto(out *Experiment) { *out = *in @@ -1018,6 +1170,16 @@ func (in *ExperimentSpec) DeepCopyInto(out *ExperimentSpec) { *out = new(int32) **out = **in } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]DryRun, len(*in)) + copy(*out, *in) + } + if in.MeasurementRetention != nil { + in, out := &in.MeasurementRetention, &out.MeasurementRetention + *out = make([]MeasurementRetention, len(*in)) + copy(*out, *in) + } return } @@ -1102,6 +1264,43 @@ func (in *GraphiteMetric) DeepCopy() *GraphiteMetric { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderRoutingMatch) DeepCopyInto(out *HeaderRoutingMatch) { + *out = *in + if in.HeaderValue != nil { + in, out := &in.HeaderValue, &out.HeaderValue + *out = new(StringMatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderRoutingMatch. 
+func (in *HeaderRoutingMatch) DeepCopy() *HeaderRoutingMatch { + if in == nil { + return nil + } + out := new(HeaderRoutingMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfluxdbMetric) DeepCopyInto(out *InfluxdbMetric) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfluxdbMetric. +func (in *InfluxdbMetric) DeepCopy() *InfluxdbMetric { + if in == nil { + return nil + } + out := new(InfluxdbMetric) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IstioDestinationRule) DeepCopyInto(out *IstioDestinationRule) { *out = *in @@ -1253,6 +1452,22 @@ func (in *KayentaThreshold) DeepCopy() *KayentaThreshold { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MangedRoutes) DeepCopyInto(out *MangedRoutes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MangedRoutes. +func (in *MangedRoutes) DeepCopy() *MangedRoutes { + if in == nil { + return nil + } + out := new(MangedRoutes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Measurement) DeepCopyInto(out *Measurement) { *out = *in @@ -1288,6 +1503,22 @@ func (in *Measurement) DeepCopy() *Measurement { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeasurementRetention) DeepCopyInto(out *MeasurementRetention) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeasurementRetention. +func (in *MeasurementRetention) DeepCopy() *MeasurementRetention { + if in == nil { + return nil + } + out := new(MeasurementRetention) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metric) DeepCopyInto(out *Metric) { *out = *in @@ -1373,6 +1604,11 @@ func (in *MetricProvider) DeepCopyInto(out *MetricProvider) { *out = new(GraphiteMetric) **out = **in } + if in.Influxdb != nil { + in, out := &in.Influxdb, &out.Influxdb + *out = new(InfluxdbMetric) + **out = **in + } return } @@ -1396,6 +1632,13 @@ func (in *MetricResult) DeepCopyInto(out *MetricResult) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -1435,6 +1678,11 @@ func (in *NginxTrafficRouting) DeepCopyInto(out *NginxTrafficRouting) { (*out)[key] = val } } + if in.AdditionalStableIngresses != nil { + in, out := &in.AdditionalStableIngresses, &out.AdditionalStableIngresses + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1481,6 +1729,22 @@ func (in *PauseCondition) DeepCopy() *PauseCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PingPongSpec) DeepCopyInto(out *PingPongSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingPongSpec. +func (in *PingPongSpec) DeepCopy() *PingPongSpec { + if in == nil { + return nil + } + out := new(PingPongSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodTemplateMetadata) DeepCopyInto(out *PodTemplateMetadata) { *out = *in @@ -1602,6 +1866,16 @@ func (in *RolloutAnalysis) DeepCopyInto(out *RolloutAnalysis) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]DryRun, len(*in)) + copy(*out, *in) + } + if in.MeasurementRetention != nil { + in, out := &in.MeasurementRetention, &out.MeasurementRetention + *out = make([]MeasurementRetention, len(*in)) + copy(*out, *in) + } return } @@ -1915,7 +2189,11 @@ func (in *RolloutStatus) DeepCopyInto(out *RolloutStatus) { in, out := &in.RestartedAt, &out.RestartedAt *out = (*in).DeepCopy() } - out.ALB = in.ALB + if in.ALB != nil { + in, out := &in.ALB, &out.ALB + *out = new(ALBStatus) + **out = **in + } return } @@ -1971,7 +2249,7 @@ func (in *RolloutTrafficRouting) DeepCopyInto(out *RolloutTrafficRouting) { if in.ALB != nil { in, out := &in.ALB, &out.ALB *out = new(ALBTrafficRouting) - **out = **in + (*in).DeepCopyInto(*out) } if in.SMI != nil { in, out := &in.SMI, &out.SMI @@ -1983,6 +2261,21 @@ func (in *RolloutTrafficRouting) DeepCopyInto(out *RolloutTrafficRouting) { *out = new(AmbassadorTrafficRouting) (*in).DeepCopyInto(*out) } + if in.AppMesh != nil { + in, out := &in.AppMesh, &out.AppMesh + *out = new(AppMeshTrafficRouting) + (*in).DeepCopyInto(*out) + } + if in.Traefik != nil { + in, out := &in.Traefik, &out.Traefik + *out = new(TraefikTrafficRouting) + **out = **in + } + if in.ManagedRoutes != nil { + in, out := &in.ManagedRoutes, &out.ManagedRoutes + *out = make([]MangedRoutes, len(*in)) + copy(*out, *in) + } return } @@ -1996,6 +2289,55 @@ func (in *RolloutTrafficRouting) DeepCopy() *RolloutTrafficRouting { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteMatch) DeepCopyInto(out *RouteMatch) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(StringMatch) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(StringMatch) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]StringMatch, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteMatch. +func (in *RouteMatch) DeepCopy() *RouteMatch { + if in == nil { + return nil + } + out := new(RouteMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunSummary) DeepCopyInto(out *RunSummary) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunSummary. +func (in *RunSummary) DeepCopy() *RunSummary { + if in == nil { + return nil + } + out := new(RunSummary) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *SMITrafficRouting) DeepCopyInto(out *SMITrafficRouting) { *out = *in @@ -2070,6 +2412,89 @@ func (in *SetCanaryScale) DeepCopy() *SetCanaryScale { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SetHeaderRoute) DeepCopyInto(out *SetHeaderRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]HeaderRoutingMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SetHeaderRoute. +func (in *SetHeaderRoute) DeepCopy() *SetHeaderRoute { + if in == nil { + return nil + } + out := new(SetHeaderRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SetMirrorRoute) DeepCopyInto(out *SetMirrorRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]RouteMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SetMirrorRoute. +func (in *SetMirrorRoute) DeepCopy() *SetMirrorRoute { + if in == nil { + return nil + } + out := new(SetMirrorRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StickinessConfig) DeepCopyInto(out *StickinessConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickinessConfig. +func (in *StickinessConfig) DeepCopy() *StickinessConfig { + if in == nil { + return nil + } + out := new(StickinessConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringMatch) DeepCopyInto(out *StringMatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringMatch. +func (in *StringMatch) DeepCopy() *StringMatch { + if in == nil { + return nil + } + out := new(StringMatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TLSRoute) DeepCopyInto(out *TLSRoute) { *out = *in @@ -2164,6 +2589,22 @@ func (in *TemplateStatus) DeepCopy() *TemplateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TraefikTrafficRouting) DeepCopyInto(out *TraefikTrafficRouting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraefikTrafficRouting. +func (in *TraefikTrafficRouting) DeepCopy() *TraefikTrafficRouting { + if in == nil { + return nil + } + out := new(TraefikTrafficRouting) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TrafficWeights) DeepCopyInto(out *TrafficWeights) { *out = *in diff --git a/pkg/apis/rollouts/validation/validation.go b/pkg/apis/rollouts/validation/validation.go index 5afb080ddc..37bf7eaa63 100644 --- a/pkg/apis/rollouts/validation/validation.go +++ b/pkg/apis/rollouts/validation/validation.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "strconv" + "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +33,16 @@ const ( InvalidCanaryExperimentTemplateWeightWithoutTrafficRouting = "Experiment template weight cannot be set unless TrafficRouting is enabled" // InvalidSetCanaryScaleTrafficPolicy indicates that TrafficRouting, required for SetCanaryScale, is missing InvalidSetCanaryScaleTrafficPolicy = "SetCanaryScale requires TrafficRouting to be set" + // InvalidSetHeaderRouteTrafficPolicy indicates that TrafficRouting, required for SetHeaderRoute, is missing + InvalidSetHeaderRouteTrafficPolicy = "SetHeaderRoute requires TrafficRouting, supports Istio and ALB" + // InvalidSetMirrorRouteTrafficPolicy indicates that TrafficRouting, required for SetMirrorRoute, is missing + InvalidSetMirrorRouteTrafficPolicy = "SetMirrorRoute requires TrafficRouting, supports Istio only" + // InvalidStringMatchMultipleValuePolicy indicates that a StringMatch has more than one of exact, regex, or prefix set + InvalidStringMatchMultipleValuePolicy = "StringMatch match value must have exactly one of the following: exact, regex, prefix" + // InvalidStringMatchMissedValuePolicy indicates that a StringMatch has none of exact, regex, or prefix set + InvalidStringMatchMissedValuePolicy = "StringMatch value missed, match value must have one of the following: exact, regex, prefix" + // InvalidSetHeaderRouteALBValuePolicy indicates that SetHeaderRoute used with ALB is missing the 'exact' match value + InvalidSetHeaderRouteALBValuePolicy = "SetHeaderRoute match value invalid. ALB supports 'exact' value only" // InvalidDurationMessage indicates the Duration value needs to be greater than 0 InvalidDurationMessage = "Duration needs to be greater than 0" // InvalidMaxSurgeMaxUnavailable indicates both maxSurge and MaxUnavailable can not be set to zero @@ -61,8 +72,32 @@ const ( InvalidCanaryDynamicStableScale = "Canary dynamicStableScale can only be used with traffic routing" // InvalidCanaryDynamicStableScaleWithScaleDownDelay indicates that canary.dynamicStableScale cannot be used with scaleDownDelaySeconds InvalidCanaryDynamicStableScaleWithScaleDownDelay = "Canary dynamicStableScale cannot be used with scaleDownDelaySeconds" + // InvalidPingPongProvidedMessage indicates that both the ping and pong service must be set to use the Ping-Pong feature + InvalidPingPongProvidedMessage = "Ping service and Pong service must be set to use the Ping-Pong feature" + // DuplicatedPingPongServicesMessage indicates that the rollout uses the same service for the ping and pong services + DuplicatedPingPongServicesMessage = "This rollout uses the same service for the ping and pong services, but two different services are required."
+ // MissedAlbRootServiceMessage indicates that a rollout with ALB TrafficRouting and the ping-pong feature enabled must have a root service provided + MissedAlbRootServiceMessage = "Root service field is required for the configuration with ALB and ping-pong feature enabled" + // PingPongWithAlbOnlyMessage indicates that the ping-pong feature currently works with ALB traffic routing only + PingPongWithAlbOnlyMessage = "Ping-pong feature works with the ALB traffic routing only" + // InvalideStepRouteNameNotFoundInManagedRoutes indicates that a step has been configured with a route name that + // is missing from managedRoutes + InvalideStepRouteNameNotFoundInManagedRoutes = "Steps define a route that does not exist in spec.strategy.canary.trafficRouting.managedRoutes" ) +// allowAllPodValidationOptions allows all pod options to be true for the purposes of rollout pod +// spec validation. We allow everything because we don't know what is truly allowed in the cluster +// and rely on ReplicaSet/Pod creation to enforce if these options are truly allowed. +// NOTE: this variable may need to be updated whenever we update our k8s libraries as new options +// are introduced or removed. +var allowAllPodValidationOptions = apivalidation.PodValidationOptions{ + AllowDownwardAPIHugePages: true, + AllowInvalidPodDeletionCost: true, + AllowIndivisibleHugePagesValues: true, + AllowWindowsHostProcessField: true, + AllowExpandedDNSConfig: true, +} + func ValidateRollout(rollout *v1alpha1.Rollout) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateRolloutSpec(rollout, field.NewPath("spec"))...) @@ -123,14 +158,10 @@ func ValidateRolloutSpec(rollout *v1alpha1.Rollout, fldPath *field.Path) field.E } template.ObjectMeta = spec.Template.ObjectMeta removeSecurityContextPrivileged(&template) - opts := apivalidation.PodValidationOptions{ - AllowMultipleHugePageResources: true, - AllowDownwardAPIHugePages: true, - } // Skip validating empty template for rollout resolved from ref if rollout.Spec.TemplateResolvedFromRef || spec.WorkloadRef == nil { - allErrs = append(allErrs, validation.ValidatePodTemplateSpecForReplicaSet(&template, selector, replicas, fldPath.Child("template"), opts)...) + allErrs = append(allErrs, validation.ValidatePodTemplateSpecForReplicaSet(&template, selector, replicas, fldPath.Child("template"), allowAllPodValidationOptions)...) } } allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) @@ -201,7 +232,7 @@ func ValidateRolloutStrategyBlueGreen(rollout *v1alpha1.Rollout, fldPath *field.
// canary.canaryService to be defined func requireCanaryStableServices(rollout *v1alpha1.Rollout) bool { canary := rollout.Spec.Strategy.Canary - if canary.TrafficRouting == nil || (canary.TrafficRouting.Istio != nil && canary.TrafficRouting.Istio.DestinationRule != nil) { + if canary.TrafficRouting == nil || (canary.TrafficRouting.Istio != nil && canary.TrafficRouting.Istio.DestinationRule != nil) || (canary.PingPong != nil) { return false } return true @@ -214,6 +245,23 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat if canary.CanaryService != "" && canary.StableService != "" && canary.CanaryService == canary.StableService { allErrs = append(allErrs, field.Invalid(fldPath.Child("stableService"), canary.StableService, DuplicatedServicesCanaryMessage)) } + if canary.PingPong != nil { + if canary.TrafficRouting != nil && canary.TrafficRouting.ALB == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("trafficRouting").Child("alb"), canary.TrafficRouting.ALB, PingPongWithAlbOnlyMessage)) + } + if canary.PingPong.PingService == "" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("pingPong").Child("pingService"), canary.PingPong.PingService, InvalidPingPongProvidedMessage)) + } + if canary.PingPong.PongService == "" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("pingPong").Child("pongService"), canary.PingPong.PongService, InvalidPingPongProvidedMessage)) + } + if canary.PingPong.PingService == canary.PingPong.PongService { + allErrs = append(allErrs, field.Invalid(fldPath.Child("pingPong").Child("pingService"), canary.PingPong.PingService, DuplicatedPingPongServicesMessage)) + } + if canary.TrafficRouting != nil && canary.TrafficRouting.ALB != nil && canary.TrafficRouting.ALB.RootService == "" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("trafficRouting").Child("alb").Child("rootService"), canary.TrafficRouting.ALB.RootService, MissedAlbRootServiceMessage)) + } + } if requireCanaryStableServices(rollout) { if canary.StableService == "" { allErrs = append(allErrs, field.Invalid(fldPath.Child("stableService"), canary.StableService, InvalidTrafficRoutingMessage)) @@ -239,9 +287,10 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat for i, step := range canary.Steps { stepFldPath := fldPath.Child("steps").Index(i) allErrs = append(allErrs, hasMultipleStepsType(step, stepFldPath)...) 
- if step.Experiment == nil && step.Pause == nil && step.SetWeight == nil && step.Analysis == nil && step.SetCanaryScale == nil { - errVal := fmt.Sprintf("step.Experiment: %t step.Pause: %t step.SetWeight: %t step.Analysis: %t step.SetCanaryScale %t", - step.Experiment == nil, step.Pause == nil, step.SetWeight == nil, step.Analysis == nil, step.SetCanaryScale == nil) + if step.Experiment == nil && step.Pause == nil && step.SetWeight == nil && step.Analysis == nil && step.SetCanaryScale == nil && + step.SetHeaderRoute == nil && step.SetMirrorRoute == nil { + errVal := fmt.Sprintf("step.Experiment: %t step.Pause: %t step.SetWeight: %t step.Analysis: %t step.SetCanaryScale: %t step.SetHeaderRoute: %t step.SetMirrorRoutes: %t", + step.Experiment == nil, step.Pause == nil, step.SetWeight == nil, step.Analysis == nil, step.SetCanaryScale == nil, step.SetHeaderRoute == nil, step.SetMirrorRoute == nil) allErrs = append(allErrs, field.Invalid(stepFldPath, errVal, InvalidStepMessage)) } if step.SetWeight != nil && (*step.SetWeight < 0 || *step.SetWeight > 100) { @@ -250,9 +299,65 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat if step.Pause != nil && step.Pause.DurationSeconds() < 0 { allErrs = append(allErrs, field.Invalid(stepFldPath.Child("pause").Child("duration"), step.Pause.DurationSeconds(), InvalidDurationMessage)) } - if rollout.Spec.Strategy.Canary != nil && rollout.Spec.Strategy.Canary.TrafficRouting == nil && step.SetCanaryScale != nil { - allErrs = append(allErrs, field.Invalid(stepFldPath.Child("setCanaryScale"), step.SetCanaryScale, InvalidSetCanaryScaleTrafficPolicy)) + if step.SetCanaryScale != nil && canary.TrafficRouting == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("trafficRouting"), InvalidSetCanaryScaleTrafficPolicy)) + } + + if step.SetHeaderRoute != nil { + trafficRouting := rollout.Spec.Strategy.Canary.TrafficRouting + if trafficRouting == nil || (trafficRouting.Istio == nil && trafficRouting.ALB == nil) { + allErrs = append(allErrs, field.Invalid(stepFldPath.Child("setHeaderRoute"), step.SetHeaderRoute, InvalidSetHeaderRouteTrafficPolicy)) + } else if step.SetHeaderRoute.Match != nil && len(step.SetHeaderRoute.Match) > 0 { + for j, match := range step.SetHeaderRoute.Match { + if trafficRouting.ALB != nil { + matchFld := stepFldPath.Child("setHeaderRoute").Child("match").Index(j) + allErrs = append(allErrs, hasALBInvalidValues(match.HeaderValue, matchFld)...) + } else { + matchFld := stepFldPath.Child("setHeaderRoute").Child("match").Index(j) + allErrs = append(allErrs, hasMultipleMatchValues(match.HeaderValue, matchFld)...) + } + } + } + } + + if step.SetMirrorRoute != nil { + trafficRouting := rollout.Spec.Strategy.Canary.TrafficRouting + if trafficRouting == nil || trafficRouting.Istio == nil { + allErrs = append(allErrs, field.Invalid(stepFldPath.Child("setMirrorRoute"), step.SetMirrorRoute, "SetMirrorRoute requires TrafficRouting, supports Istio only")) + } + if step.SetMirrorRoute.Match != nil && len(step.SetMirrorRoute.Match) > 0 { + for j, match := range step.SetMirrorRoute.Match { + matchFld := stepFldPath.Child("setMirrorRoute").Child("match").Index(j) + if match.Method != nil { + allErrs = append(allErrs, hasMultipleMatchValues(match.Method, matchFld)...) + } + if match.Path != nil { + allErrs = append(allErrs, hasMultipleMatchValues(match.Path, matchFld)...) + } + if match.Method != nil { + allErrs = append(allErrs, hasMultipleMatchValues(match.Method, matchFld)...) 
+ } + } + } + } + + if rollout.Spec.Strategy.Canary.TrafficRouting != nil { + if step.SetHeaderRoute != nil || step.SetMirrorRoute != nil { + if rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes == nil { + message := fmt.Sprintf(MissingFieldMessage, "spec.strategy.canary.trafficRouting.managedRoutes") + allErrs = append(allErrs, field.Required(fldPath.Child("trafficRouting", "managedRoutes"), message)) + } + } + } + if rollout.Spec.Strategy.Canary.TrafficRouting != nil && rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes != nil { + if step.SetHeaderRoute != nil { + allErrs = append(allErrs, ValidateStepRouteFoundInManagedRoute(stepFldPath.Child("setHeaderRoute"), step.SetHeaderRoute.Name, rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes)...) + } + if step.SetMirrorRoute != nil { + allErrs = append(allErrs, ValidateStepRouteFoundInManagedRoute(stepFldPath.Child("setMirrorRoute"), step.SetMirrorRoute.Name, rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes)...) + } } + analysisRunArgs := make([]v1alpha1.AnalysisRunArgument, 0) if step.Experiment != nil { for tmplIndex, template := range step.Experiment.Templates { @@ -278,7 +383,7 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat for _, arg := range analysisRunArgs { if arg.ValueFrom != nil { - if arg.ValueFrom.FieldRef != nil { + if arg.ValueFrom.FieldRef != nil && strings.HasPrefix(arg.ValueFrom.FieldRef.FieldPath, "metadata") { _, err := fieldpath.ExtractFieldPathAsString(rollout, arg.ValueFrom.FieldRef.FieldPath) if err != nil { allErrs = append(allErrs, field.Invalid(stepFldPath.Child("analyses"), analysisRunArgs, InvalidAnalysisArgsMessage)) @@ -292,6 +397,20 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat return allErrs } +func ValidateStepRouteFoundInManagedRoute(stepFldPath *field.Path, stepRoutName string, roManagedRoutes []v1alpha1.MangedRoutes) field.ErrorList { + allErrs := field.ErrorList{} + found := false + for _, managedRoute := range roManagedRoutes { + if stepRoutName == managedRoute.Name { + found = true + } + } + if !found { + allErrs = append(allErrs, field.Invalid(stepFldPath, stepRoutName, InvalideStepRouteNameNotFoundInManagedRoutes)) + } + return allErrs +} + func ValidateRolloutStrategyAntiAffinity(antiAffinity *v1alpha1.AntiAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if antiAffinity != nil { @@ -358,3 +477,47 @@ func hasMultipleStepsType(s v1alpha1.CanaryStep, fldPath *field.Path) field.Erro } return allErrs } + +func hasALBInvalidValues(match *v1alpha1.StringMatch, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if match == nil { + e := field.Invalid(fldPath, match, InvalidStringMatchMissedValuePolicy) + allErrs = append(allErrs, e) + return allErrs + } + if match.Exact == "" || match.Regex != "" || match.Prefix != "" { + return append(allErrs, field.Invalid(fldPath, match, InvalidSetHeaderRouteALBValuePolicy)) + } + return allErrs +} + +func hasMultipleMatchValues(match *v1alpha1.StringMatch, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if match == nil { + e := field.Invalid(fldPath, match, InvalidStringMatchMissedValuePolicy) + allErrs = append(allErrs, e) + return allErrs + } + + var oneOf []bool + oneOf = append(oneOf, match.Exact != "") + oneOf = append(oneOf, match.Regex != "") + oneOf = append(oneOf, match.Prefix != "") + hasValue := false + for i := range oneOf { + if oneOf[i] { + if hasValue { + e := 
field.Invalid(fldPath, match, InvalidStringMatchMultipleValuePolicy) + allErrs = append(allErrs, e) + break + } + hasValue = true + } + } + if !hasValue { + e := field.Invalid(fldPath, match, InvalidStringMatchMissedValuePolicy) + allErrs = append(allErrs, e) + } + return allErrs +} diff --git a/pkg/apis/rollouts/validation/validation_references.go b/pkg/apis/rollouts/validation/validation_references.go index b02add3d97..55128f993b 100644 --- a/pkg/apis/rollouts/validation/validation_references.go +++ b/pkg/apis/rollouts/validation/validation_references.go @@ -3,21 +3,21 @@ package validation import ( "fmt" - analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" - "github.com/argoproj/argo-rollouts/utils/conditions" - istioutil "github.com/argoproj/argo-rollouts/utils/istio" - serviceutil "github.com/argoproj/argo-rollouts/utils/service" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/validation/field" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/ambassador" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/appmesh" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" + analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" + "github.com/argoproj/argo-rollouts/utils/conditions" + ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" + istioutil "github.com/argoproj/argo-rollouts/utils/istio" + serviceutil "github.com/argoproj/argo-rollouts/utils/service" ) // Controller will validate references in reconciliation @@ -46,6 +46,8 @@ type ServiceType string const ( StableService ServiceType = "StableService" CanaryService ServiceType = "CanaryService" + PingService ServiceType = "PingService" + PongService ServiceType = "PongService" ActiveService ServiceType = "ActiveService" PreviewService ServiceType = "PreviewService" ) @@ -61,6 +63,7 @@ type ReferencedResources struct { ServiceWithType []ServiceWithType VirtualServices []unstructured.Unstructured AmbassadorMappings []unstructured.Unstructured + AppMeshResources []unstructured.Unstructured } func ValidateRolloutReferencedResources(rollout *v1alpha1.Rollout, referencedResources ReferencedResources) field.ErrorList { @@ -80,6 +83,9 @@ func ValidateRolloutReferencedResources(rollout *v1alpha1.Rollout, referencedRes for _, mapping := range referencedResources.AmbassadorMappings { allErrs = append(allErrs, ValidateAmbassadorMapping(mapping)...) } + for _, appmeshRes := range referencedResources.AppMeshResources { + allErrs = append(allErrs, ValidateAppMeshResource(appmeshRes)...) 
+ } return allErrs } @@ -99,7 +105,6 @@ func ValidateService(svc ServiceWithType, rollout *v1alpha1.Rollout) field.Error } if v, ok := rollout.Spec.Template.Labels[svcLabelKey]; !ok || v != svcLabelValue { msg := fmt.Sprintf("Service %q has unmatch lable %q in rollout", service.Name, svcLabelKey) - fmt.Println(msg) allErrs = append(allErrs, field.Invalid(fldPath, service.Name, msg)) } } @@ -121,12 +126,24 @@ func ValidateAnalysisTemplatesWithType(rollout *v1alpha1.Rollout, templates Anal templateNames := GetAnalysisTemplateNames(templates) value := fmt.Sprintf("templateNames: %s", templateNames) - _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), "", "", "") + _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "", "", "") if err != nil { allErrs = append(allErrs, field.Invalid(fldPath, value, err.Error())) return allErrs } + if rollout.Spec.Strategy.Canary != nil { + for _, step := range rollout.Spec.Strategy.Canary.Steps { + if step.Analysis != nil { + _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), step.Analysis.DryRun, step.Analysis.MeasurementRetention, "", "", "") + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, value, err.Error())) + return allErrs + } + } + } + } + for _, template := range templates.AnalysisTemplates { allErrs = append(allErrs, ValidateAnalysisTemplateWithType(rollout, template, nil, templates.TemplateType, fldPath)...) } @@ -201,23 +218,39 @@ func setArgValuePlaceHolder(Args []v1alpha1.Argument) { func ValidateIngress(rollout *v1alpha1.Rollout, ingress *ingressutil.Ingress) field.ErrorList { allErrs := field.ErrorList{} fldPath := field.NewPath("spec", "strategy", "canary", "trafficRouting") + canary := rollout.Spec.Strategy.Canary var ingressName string var serviceName string - if rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil { + if canary.TrafficRouting.Nginx != nil { + // If there are additional stable ingresses + if len(canary.TrafficRouting.Nginx.AdditionalStableIngresses) > 0 { + // validate each ingress as valid + fldPath = fldPath.Child("nginx").Child("additionalStableIngresses") + serviceName = canary.StableService + for _, ing := range canary.TrafficRouting.Nginx.AdditionalStableIngresses { + ingressName = ing + allErrs = reportErrors(ingress, serviceName, ingressName, fldPath, allErrs) + } + } fldPath = fldPath.Child("nginx").Child("stableIngress") - serviceName = rollout.Spec.Strategy.Canary.StableService - ingressName = rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress - } else if rollout.Spec.Strategy.Canary.TrafficRouting.ALB != nil { + serviceName = canary.StableService + ingressName = canary.TrafficRouting.Nginx.StableIngress + + allErrs = reportErrors(ingress, serviceName, ingressName, fldPath, allErrs) + } else if canary.TrafficRouting.ALB != nil { fldPath = fldPath.Child("alb").Child("ingress") - ingressName = rollout.Spec.Strategy.Canary.TrafficRouting.ALB.Ingress - serviceName = rollout.Spec.Strategy.Canary.StableService - if rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService != "" { - serviceName = rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService + ingressName = canary.TrafficRouting.ALB.Ingress + 
serviceName = canary.StableService + if canary.TrafficRouting.ALB.RootService != "" { + serviceName = canary.TrafficRouting.ALB.RootService } - - } else { - return allErrs + allErrs = reportErrors(ingress, serviceName, ingressName, fldPath, allErrs) } + + return allErrs +} + +func reportErrors(ingress *ingressutil.Ingress, serviceName, ingressName string, fldPath *field.Path, allErrs field.ErrorList) field.ErrorList { if !ingressutil.HasRuleWithService(ingress, serviceName) { msg := fmt.Sprintf("ingress `%s` has no rules using service %s backend", ingress.GetName(), serviceName) allErrs = append(allErrs, field.Invalid(fldPath, ingressName, msg)) @@ -279,7 +312,7 @@ func ValidateVirtualService(rollout *v1alpha1.Rollout, obj unstructured.Unstruct } // Validate HTTP Routes if errHttp == nil { - httpRoutes, err := istio.GetHttpRoutes(newObj, httpRoutesI) + httpRoutes, err := istio.GetHttpRoutes(httpRoutesI) if err != nil { msg := fmt.Sprintf("Unable to get HTTP routes for Istio VirtualService") allErrs = append(allErrs, field.Invalid(fldPath, vsvcName, msg)) @@ -321,6 +354,57 @@ func ValidateAmbassadorMapping(obj unstructured.Unstructured) field.ErrorList { return allErrs } +func ValidateAppMeshResource(obj unstructured.Unstructured) field.ErrorList { + if obj.GetKind() != "VirtualRouter" { + fldPath := field.NewPath("kind") + msg := fmt.Sprintf("Expected object kind to be VirtualRouter but is %s", obj.GetKind()) + return field.ErrorList{field.Invalid(fldPath, obj.GetKind(), msg)} + } + + err := ValidateAppMeshVirtualRouter(&obj) + if err != nil { + return field.ErrorList{err} + } + return field.ErrorList{} +} + +func ValidateAppMeshVirtualRouter(vrouter *unstructured.Unstructured) *field.Error { + routesFldPath := field.NewPath("spec", "routes") + allRoutesI, found, err := unstructured.NestedSlice(vrouter.Object, "spec", "routes") + if !found || err != nil || len(allRoutesI) == 0 { + msg := fmt.Sprintf("No routes defined for AppMesh virtual-router %s", vrouter.GetName()) + return field.Invalid(routesFldPath, vrouter.GetName(), msg) + } + for idx, routeI := range allRoutesI { + routeFldPath := routesFldPath.Index(idx) + route, ok := routeI.(map[string]interface{}) + if !ok { + msg := fmt.Sprintf("Invalid route was found for AppMesh virtual-router %s at index %d", vrouter.GetName(), idx) + return field.Invalid(routeFldPath, vrouter.GetName(), msg) + } + + routeName := route["name"] + routeRule, routeType, err := appmesh.GetRouteRule(route) + if err != nil { + msg := fmt.Sprintf("Error getting route details for AppMesh virtual-router %s and route %s. 
Error: %s", vrouter.GetName(), routeName, err.Error()) + return field.Invalid(routeFldPath, vrouter.GetName(), msg) + } + + weightedTargetsFldPath := routeFldPath.Child(routeType).Child("action").Child("weightedTargets") + weightedTargets, found, err := unstructured.NestedSlice(routeRule, "action", "weightedTargets") + if !found || err != nil { + msg := fmt.Sprintf("Invalid route action found for AppMesh virtual-router %s and route %s", vrouter.GetName(), routeName) + return field.Invalid(weightedTargetsFldPath, vrouter.GetName(), msg) + } + + if len(weightedTargets) != 2 { + msg := fmt.Sprintf("Invalid number of weightedTargets (%d) for AppMesh virtual-router %s and route %s, expected 2", len(weightedTargets), vrouter.GetName(), routeName) + return field.Invalid(weightedTargetsFldPath, vrouter.GetName(), msg) + } + } + return nil +} + func GetServiceWithTypeFieldPath(serviceType ServiceType) *field.Path { fldPath := field.NewPath("spec", "strategy") switch serviceType { @@ -332,6 +416,10 @@ func GetServiceWithTypeFieldPath(serviceType ServiceType) *field.Path { fldPath = fldPath.Child("canary", "canaryService") case StableService: fldPath = fldPath.Child("canary", "stableService") + case PingService: + fldPath = fldPath.Child("canary", "pingPong", "pingService") + case PongService: + fldPath = fldPath.Child("canary", "pingPong", "pongService") default: return nil } @@ -382,7 +470,8 @@ func buildAnalysisArgs(args []v1alpha1.AnalysisRunArgument, r *v1alpha1.Rollout) }, }, } - return analysisutil.BuildArgumentsForRolloutAnalysisRun(args, &stableRSDummy, &newRSDummy, r) + res, _ := analysisutil.BuildArgumentsForRolloutAnalysisRun(args, &stableRSDummy, &newRSDummy, r) + return res } // validateAnalysisMetrics validates the metrics of an Analysis object diff --git a/pkg/apis/rollouts/validation/validation_references_test.go b/pkg/apis/rollouts/validation/validation_references_test.go index fae183544c..7b14dd7483 100644 --- a/pkg/apis/rollouts/validation/validation_references_test.go +++ b/pkg/apis/rollouts/validation/validation_references_test.go @@ -4,11 +4,6 @@ import ( "fmt" "testing" - "k8s.io/utils/pointer" - - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" - "github.com/argoproj/argo-rollouts/utils/unstructured" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/api/extensions/v1beta1" @@ -17,6 +12,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/pointer" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" + "github.com/argoproj/argo-rollouts/utils/unstructured" ) const successCaseVsvc = `apiVersion: networking.istio.io/v1alpha3 @@ -256,6 +256,39 @@ func TestValidateAnalysisTemplatesWithType(t *testing.T) { assert.Empty(t, allErrs) }) + t.Run("failure - duplicate MeasurementRetention", func(t *testing.T) { + rollout := getRollout() + rollout.Spec.Strategy.Canary.Steps = append(rollout.Spec.Strategy.Canary.Steps, v1alpha1.CanaryStep{ + Analysis: &v1alpha1.RolloutAnalysis{ + Templates: []v1alpha1.RolloutAnalysisTemplate{ + { + TemplateName: "analysis-template-name", + }, + }, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "example", + Limit: 2, + }, + }, + }, + }) + templates := getAnalysisTemplatesWithType() + templates.AnalysisTemplates[0].Spec.Args = 
append(templates.AnalysisTemplates[0].Spec.Args, v1alpha1.Argument{Name: "valid"}) + templates.AnalysisTemplates[0].Spec.MeasurementRetention = []v1alpha1.MeasurementRetention{ + { + MetricName: "example", + Limit: 5, + }, + } + templates.Args = []v1alpha1.AnalysisRunArgument{{Name: "valid", Value: "true"}} + + allErrs := ValidateAnalysisTemplatesWithType(rollout, templates) + assert.Len(t, allErrs, 1) + msg := fmt.Sprintf("spec.strategy.canary.steps[0].analysis.templates: Invalid value: \"templateNames: [analysis-template-name cluster-analysis-template-name]\": two Measurement Retention metric rules have the same name 'example'") + assert.Equal(t, msg, allErrs[0].Error()) + }) + } func TestValidateAnalysisTemplateWithType(t *testing.T) { @@ -618,6 +651,18 @@ func TestGetServiceWithTypeFieldPath(t *testing.T) { assert.Equal(t, expectedFldPath.String(), fldPath.String()) }) + t.Run("get pingService fieldPath", func(t *testing.T) { + fldPath := GetServiceWithTypeFieldPath(PingService) + expectedFldPath := field.NewPath("spec", "strategy", "canary", "pingPong", "pingService") + assert.Equal(t, expectedFldPath.String(), fldPath.String()) + }) + + t.Run("get pongService fieldPath", func(t *testing.T) { + fldPath := GetServiceWithTypeFieldPath(PongService) + expectedFldPath := field.NewPath("spec", "strategy", "canary", "pingPong", "pongService") + assert.Equal(t, expectedFldPath.String(), fldPath.String()) + }) + t.Run("get fieldPath for serviceType that does not exist", func(t *testing.T) { fldPath := GetServiceWithTypeFieldPath("DoesNotExist") assert.Nil(t, fldPath) @@ -672,6 +717,203 @@ spec: }) } +func TestValidateAppMeshResource(t *testing.T) { + t.Run("will return error with appmesh virtual-service", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + namespace: myns + name: mysvc +spec: + awsName: mysvc.myns.svc.cluster.local + provider: + virtualRouter: + virtualRouterRef: + name: mysvc-vrouter +` + obj := toUnstructured(t, manifest) + refResources := ReferencedResources{ + AppMeshResources: []k8sunstructured.Unstructured{*obj}, + } + errList := ValidateRolloutReferencedResources(getRollout(), refResources) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, errList[0].Detail, "Expected object kind to be VirtualRouter but is VirtualService") + }) + + t.Run("will return error when appmesh virtual-router has no routes", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: +` + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, errList[0].Field, field.NewPath("spec", "routes").String()) + }) + + routeTypes := []string{"httpRoute", "tcpRoute", "grpcRoute", "http2Route"} + for _, routeType := range routeTypes { + t.Run(fmt.Sprintf("will succeed with valid appmesh virtual-router with %s", routeType), func(t *testing.T) { + t.Parallel() + manifest := fmt.Sprintf(` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: + - name: primary + %s: + action: + weightedTargets: + - virtualNodeRef: + name: mysvc-canary-vn + weight: 0 + - virtualNodeRef: + name: mysvc-stable-vn + weight: 100 +`, routeType) + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + 
assert.Len(t, errList, 0) + }) + } + + t.Run("will return error with appmesh virtual-router with unsupported route type", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: + - name: primary + badRouteType: +` + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, field.NewPath("spec", "routes").Index(0).String(), errList[0].Field) + }) + + t.Run("will return error when appmesh virtual-router has route that is not a struct", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: + - invalid-spec +` + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, field.NewPath("spec", "routes").Index(0).String(), errList[0].Field) + }) + + t.Run("will return error when appmesh virtual-router has routes with no targets", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: + - name: primary + httpRoute: + match: + prefix: / + action: +` + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, field.NewPath("spec", "routes").Index(0).Child("httpRoute").Child("action").Child("weightedTargets").String(), errList[0].Field) + }) + + t.Run("will return error when appmesh virtual-router has routes with 1 target", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: + - name: primary + httpRoute: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: only-target + weight: 100 +` + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, field.NewPath("spec", "routes").Index(0).Child("httpRoute").Child("action").Child("weightedTargets").String(), errList[0].Field) + }) + + t.Run("will return error when appmesh virtual-router has routes with 3 targets", func(t *testing.T) { + t.Parallel() + manifest := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + routes: + - name: primary + httpRoute: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: target-1 + weight: 10 + - virtualNodeRef: + name: target-2 + weight: 10 + - virtualNodeRef: + name: target-3 + weight: 80 +` + obj := toUnstructured(t, manifest) + errList := ValidateAppMeshResource(*obj) + assert.NotNil(t, errList) + assert.Len(t, errList, 1) + assert.Equal(t, field.NewPath("spec", "routes").Index(0).Child("httpRoute").Child("action").Child("weightedTargets").String(), errList[0].Field) + }) +} + func toUnstructured(t *testing.T, manifest string) *k8sunstructured.Unstructured { t.Helper() obj := &k8sunstructured.Unstructured{} diff --git a/pkg/apis/rollouts/validation/validation_test.go b/pkg/apis/rollouts/validation/validation_test.go index 1c9ffb551d..9722d0d093 100644 --- a/pkg/apis/rollouts/validation/validation_test.go +++ 
b/pkg/apis/rollouts/validation/validation_test.go @@ -15,6 +15,10 @@ import ( "github.com/argoproj/argo-rollouts/utils/defaults" ) +const ( + errTrafficRoutingWithExperimentSupport = "Experiment template weight is only available for TrafficRouting with SMI, ALB, and Istio at this time" +) + func TestValidateRollout(t *testing.T) { selector := &metav1.LabelSelector{ MatchLabels: map[string]string{"key": "value"}, @@ -129,7 +133,7 @@ func TestValidateRolloutStrategyCanary(t *testing.T) { CanaryService: "canary", StableService: "stable", TrafficRouting: &v1alpha1.RolloutTrafficRouting{ - SMI: &v1alpha1.SMITrafficRouting{}, + ALB: &v1alpha1.ALBTrafficRouting{RootService: "root-service"}, }, Steps: []v1alpha1.CanaryStep{{}}, } @@ -167,6 +171,47 @@ func TestValidateRolloutStrategyCanary(t *testing.T) { assert.Equal(t, DuplicatedServicesCanaryMessage, allErrs[0].Detail) }) + t.Run("duplicate ping pong services", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping", PongService: "ping"} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, DuplicatedPingPongServicesMessage, allErrs[0].Detail) + }) + + t.Run("ping services using only", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping", PongService: ""} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidPingPongProvidedMessage, allErrs[0].Detail) + }) + + t.Run("pong service using only", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "", PongService: "pong"} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidPingPongProvidedMessage, allErrs[0].Detail) + }) + + t.Run("missed ALB root service for the ping-pong feature", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping", PongService: "pong"} + invalidRo.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{RootService: ""}, + } + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, MissedAlbRootServiceMessage, allErrs[0].Detail) + }) + + t.Run("ping-pong feature without the ALB traffic routing", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping", PongService: "pong"} + invalidRo.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{StableIngress: "stable-ingress"}, + } + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, PingPongWithAlbOnlyMessage, allErrs[0].Detail) + }) + t.Run("invalid traffic routing", func(t *testing.T) { invalidRo := ro.DeepCopy() invalidRo.Spec.Strategy.Canary.CanaryService = "" @@ -236,6 +281,288 @@ func TestValidateRolloutStrategyAntiAffinity(t *testing.T) { assert.Equal(t, InvalidAntiAffinityWeightMessage, allErrs[0].Detail) } +func TestValidateRolloutStrategyCanarySetHeaderRoute(t *testing.T) { + ro := &v1alpha1.Rollout{} + ro.Spec.Strategy.Canary = &v1alpha1.CanaryStrategy{ + CanaryService: "canary", + StableService: "stable", + } + + t.Run("using SetHeaderRoute step without the traffic routing", func(t *testing.T) { + invalidRo := ro.DeepCopy() + 
invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{Exact: "chrome"}, + }, + }, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidSetHeaderRouteTrafficPolicy, allErrs[0].Detail) + }) +} + +func TestValidateRolloutStrategyCanarySetHeaderRouteIstio(t *testing.T) { + ro := &v1alpha1.Rollout{} + ro.Spec.Strategy.Canary = &v1alpha1.CanaryStrategy{ + CanaryService: "canary", + StableService: "stable", + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Istio: &v1alpha1.IstioTrafficRouting{ + VirtualService: &v1alpha1.IstioVirtualService{Name: "virtual-service"}, + }, + }, + } + + t.Run("using SetHeaderRoute step with multiple values", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "chrome", + Regex: "chrome(.*)", + }, + }, + }, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidStringMatchMultipleValuePolicy, allErrs[0].Detail) + }) + + t.Run("using SetHeaderRoute step with missed values", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + }, + }, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidStringMatchMissedValuePolicy, allErrs[0].Detail) + }) + + t.Run("using SetHeaderRoute step without managedRoutes defined but missing route", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{Exact: "exact"}, + }, + }, + }, + }} + invalidRo.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(invalidRo.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, v1alpha1.MangedRoutes{ + Name: "not-in-steps", + }) + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalideStepRouteNameNotFoundInManagedRoutes, allErrs[0].Detail) + }) +} + +func TestValidateRolloutStrategyCanarySetHeaderRoutingALB(t *testing.T) { + ro := &v1alpha1.Rollout{} + ro.Spec.Strategy.Canary = &v1alpha1.CanaryStrategy{ + CanaryService: "canary", + StableService: "stable", + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{ + RootService: "action_name", + }, + }, + } + + t.Run("using SetHeaderRouting step with multiple values", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "chrome", + Regex: "chrome(.*)", + }, + }, + }, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidSetHeaderRouteALBValuePolicy, allErrs[0].Detail) + }) + + t.Run("using SetHeaderRouting step with missed values", func(t *testing.T) { + invalidRo := ro.DeepCopy() + 
invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + }, + }, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidStringMatchMissedValuePolicy, allErrs[0].Detail) + }) + + t.Run("using SetHeaderRouting step with invalid ALB match value", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetHeaderRoute: &v1alpha1.SetHeaderRoute{ + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{ + Prefix: "chrome", + }, + }, + }, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidSetHeaderRouteALBValuePolicy, allErrs[0].Detail) + }) +} + +func TestValidateRolloutStrategyCanarySetMirrorRouteIstio(t *testing.T) { + ro := &v1alpha1.Rollout{} + ro.Spec.Strategy.Canary = &v1alpha1.CanaryStrategy{ + CanaryService: "canary", + StableService: "stable", + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Istio: &v1alpha1.IstioTrafficRouting{ + VirtualService: &v1alpha1.IstioVirtualService{Name: "virtual-service"}, + }, + }, + } + + t.Run("using SetMirrorRoute step without the traffic routing", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.TrafficRouting = nil + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetMirrorRoute: &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: nil, + Percentage: nil, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidSetMirrorRouteTrafficPolicy, allErrs[0].Detail) + }) + + t.Run("using SetMirrorRoute step with multiple values", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetMirrorRoute: &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "test", + Prefix: "test", + }, + Path: nil, + Headers: nil, + }}, + Percentage: nil, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidStringMatchMultipleValuePolicy, allErrs[0].Detail) + }) + + t.Run("using SetMirrorRoute step with missed match and no kind", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetMirrorRoute: &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{}, + Path: nil, + Headers: nil, + }}, + Percentage: nil, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalidStringMatchMissedValuePolicy, allErrs[0].Detail) + }) + + t.Run("using SetMirrorRoute step without managedRoutes not defined", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetMirrorRoute: &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "exact", + }, + }}, + Percentage: nil, + }, + }} + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, fmt.Sprintf(MissingFieldMessage, "spec.strategy.canary.trafficRouting.managedRoutes"), allErrs[0].Detail) + }) + + t.Run("using SetMirrorRoute step without managedRoutes defined but missing route", func(t 
*testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetMirrorRoute: &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + Path: &v1alpha1.StringMatch{ + Prefix: "/", + }, + }}, + Percentage: nil, + }, + }} + invalidRo.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(invalidRo.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, v1alpha1.MangedRoutes{ + Name: "not-in-steps", + }) + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Equal(t, InvalideStepRouteNameNotFoundInManagedRoutes, allErrs[0].Detail) + }) + + t.Run("using SetMirrorRoute step with managedRoutes defined", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetMirrorRoute: &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + Path: &v1alpha1.StringMatch{ + Prefix: "/", + }, + }}, + Percentage: nil, + }, + }} + invalidRo.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(invalidRo.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, v1alpha1.MangedRoutes{ + Name: "test-mirror-1", + }) + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + assert.Len(t, allErrs, 0) + }) +} + func TestInvalidMaxSurgeMaxUnavailable(t *testing.T) { r := func(maxSurge, maxUnavailable intstr.IntOrString) *v1alpha1.Rollout { return &v1alpha1.Rollout{ @@ -438,7 +765,7 @@ func TestCanaryExperimentStepWithWeight(t *testing.T) { invalidRo.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{} allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) assert.Equal(t, 1, len(allErrs)) - assert.Equal(t, "Experiment template weight is only available for TrafficRouting with SMI, ALB, and Istio at this time", allErrs[0].Detail) + assert.Equal(t, errTrafficRoutingWithExperimentSupport, allErrs[0].Detail) }) t.Run("unsupported - Nginx TrafficRouting", func(t *testing.T) { @@ -450,7 +777,7 @@ func TestCanaryExperimentStepWithWeight(t *testing.T) { } allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) assert.Equal(t, 1, len(allErrs)) - assert.Equal(t, "Experiment template weight is only available for TrafficRouting with SMI, ALB, and Istio at this time", allErrs[0].Detail) + assert.Equal(t, errTrafficRoutingWithExperimentSupport, allErrs[0].Detail) }) t.Run("unsupported - Ambassador TrafficRouting", func(t *testing.T) { @@ -462,7 +789,7 @@ func TestCanaryExperimentStepWithWeight(t *testing.T) { } allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) assert.Equal(t, 1, len(allErrs)) - assert.Equal(t, "Experiment template weight is only available for TrafficRouting with SMI, ALB, and Istio at this time", allErrs[0].Detail) + assert.Equal(t, errTrafficRoutingWithExperimentSupport, allErrs[0].Detail) }) t.Run("unsupported - Istio TrafficRouting", func(t *testing.T) { @@ -495,4 +822,15 @@ func TestCanaryExperimentStepWithWeight(t *testing.T) { allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) assert.Equal(t, 0, len(allErrs)) }) + + t.Run("unsupported - AppMesh TrafficRouting", func(t *testing.T) { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{}, + } + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + 
assert.Equal(t, 1, len(allErrs)) + assert.Equal(t, errTrafficRoutingWithExperimentSupport, allErrs[0].Detail) + }) + } diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 7b022d43a0..fef1aaafe4 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -20,6 +20,7 @@ package versioned import ( "fmt" + "net/http" argoprojv1alpha1 "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/typed/rollouts/v1alpha1" discovery "k8s.io/client-go/discovery" @@ -55,22 +56,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy) + cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -80,11 +104,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. 
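Before moving on to the generated client changes, a small illustrative sketch (not part of this change set) may help tie the new canary step types to the validator exercised in the tests above. It mirrors the test fixtures: the virtual service name, the managed route name "set-header-1", and the header values are made-up placeholders, and the expectation of zero errors assumes the rules introduced in validation.go behave as the tests describe.

```go
// Sketch only: wire a SetHeaderRoute step to managedRoutes and run the canary validator.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"

	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/validation"
)

func main() {
	ro := &v1alpha1.Rollout{}
	ro.Spec.Strategy.Canary = &v1alpha1.CanaryStrategy{
		CanaryService: "canary",
		StableService: "stable",
		TrafficRouting: &v1alpha1.RolloutTrafficRouting{
			Istio: &v1alpha1.IstioTrafficRouting{
				VirtualService: &v1alpha1.IstioVirtualService{Name: "virtual-service"},
			},
			// Every SetHeaderRoute/SetMirrorRoute step must name a route listed here,
			// otherwise validation reports InvalideStepRouteNameNotFoundInManagedRoutes.
			ManagedRoutes: []v1alpha1.MangedRoutes{{Name: "set-header-1"}},
		},
		Steps: []v1alpha1.CanaryStep{{
			SetHeaderRoute: &v1alpha1.SetHeaderRoute{
				Name: "set-header-1",
				Match: []v1alpha1.HeaderRoutingMatch{{
					HeaderName: "agent",
					// Exactly one of Exact/Regex/Prefix may be set; ALB additionally
					// accepts only Exact.
					HeaderValue: &v1alpha1.StringMatch{Exact: "chrome"},
				}},
			},
		}},
	}

	errs := validation.ValidateRolloutStrategyCanary(ro, field.NewPath("spec", "strategy", "canary"))
	fmt.Printf("validation errors: %d\n", len(errs)) // expected: 0 for this configuration
}
```

Dropping the ManagedRoutes entry, or setting both Exact and Regex on the StringMatch, should surface the corresponding messages asserted in the tests above.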
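The clientset changes immediately above route construction through NewForConfigAndClient so that a single HTTP client, and therefore a single transport, can be shared by every client built from the same rest.Config. A minimal usage sketch follows; it assumes a kubeconfig at the default location and is illustrative only, not code from this diff.

```go
// Sketch only: build the Argo Rollouts clientset with an explicitly shared *http.Client,
// mirroring what NewForConfig now does internally via rest.HTTPClientFor.
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

	versioned "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
)

func main() {
	// Assumes a reachable cluster described by the default kubeconfig (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cfg.UserAgent = rest.DefaultKubernetesUserAgent()

	// One HTTP client (and transport) shared by every client built from this config.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		panic(err)
	}

	cs, err := versioned.NewForConfigAndClient(cfg, httpClient)
	if err != nil {
		panic(err)
	}
	fmt.Printf("rollouts client ready: %T\n", cs.ArgoprojV1alpha1())
}
```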
diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 0ce58818ea..1a5e6bb353 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -74,7 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } -var _ clientset.Interface = &Clientset{} +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) // ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go index d425d94dcd..89de9c005c 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go @@ -117,7 +117,7 @@ func (c *FakeAnalysisRuns) UpdateStatus(ctx context.Context, analysisRun *v1alph // Delete takes name of the analysisRun and deletes it. Returns an error if one occurs. func (c *FakeAnalysisRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(analysisrunsResource, c.ns, name), &v1alpha1.AnalysisRun{}) + Invokes(testing.NewDeleteActionWithOptions(analysisrunsResource, c.ns, name, opts), &v1alpha1.AnalysisRun{}) return err } diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go index ffd134dde4..2cbb02d515 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go @@ -105,7 +105,7 @@ func (c *FakeAnalysisTemplates) Update(ctx context.Context, analysisTemplate *v1 // Delete takes name of the analysisTemplate and deletes it. Returns an error if one occurs. func (c *FakeAnalysisTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(analysistemplatesResource, c.ns, name), &v1alpha1.AnalysisTemplate{}) + Invokes(testing.NewDeleteActionWithOptions(analysistemplatesResource, c.ns, name, opts), &v1alpha1.AnalysisTemplate{}) return err } diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go index 36823a912f..36389661de 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go @@ -99,7 +99,7 @@ func (c *FakeClusterAnalysisTemplates) Update(ctx context.Context, clusterAnalys // Delete takes name of the clusterAnalysisTemplate and deletes it. Returns an error if one occurs. func (c *FakeClusterAnalysisTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(clusteranalysistemplatesResource, name), &v1alpha1.ClusterAnalysisTemplate{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusteranalysistemplatesResource, name, opts), &v1alpha1.ClusterAnalysisTemplate{}) return err } diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go index eb2b7fb5d9..f237ce32d1 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go @@ -117,7 +117,7 @@ func (c *FakeExperiments) UpdateStatus(ctx context.Context, experiment *v1alpha1 // Delete takes name of the experiment and deletes it. Returns an error if one occurs. func (c *FakeExperiments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(experimentsResource, c.ns, name), &v1alpha1.Experiment{}) + Invokes(testing.NewDeleteActionWithOptions(experimentsResource, c.ns, name, opts), &v1alpha1.Experiment{}) return err } diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go index fca9011ae1..fce5e8d66e 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go @@ -117,7 +117,7 @@ func (c *FakeRollouts) UpdateStatus(ctx context.Context, rollout *v1alpha1.Rollo // Delete takes name of the rollout and deletes it. Returns an error if one occurs. func (c *FakeRollouts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolloutsResource, c.ns, name), &v1alpha1.Rollout{}) + Invokes(testing.NewDeleteActionWithOptions(rolloutsResource, c.ns, name, opts), &v1alpha1.Rollout{}) return err } diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/rollouts_client.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/rollouts_client.go index 91c3ecbd8d..61e7ef4170 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/rollouts_client.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/rollouts_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *ArgoprojV1alpha1Client) Rollouts(namespace string) RolloutInterface { } // NewForConfig creates a new ArgoprojV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ArgoprojV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ArgoprojV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/pkg/kubectl-argo-rollouts/cmd/dashboard/dashboard.go b/pkg/kubectl-argo-rollouts/cmd/dashboard/dashboard.go index ce081a764c..3f3cd1687a 100644 --- a/pkg/kubectl-argo-rollouts/cmd/dashboard/dashboard.go +++ b/pkg/kubectl-argo-rollouts/cmd/dashboard/dashboard.go @@ -9,6 +9,7 @@ import ( ) func NewCmdDashboard(o *options.ArgoRolloutsOptions) *cobra.Command { + var rootPath string var cmd = &cobra.Command{ Use: "dashboard", Short: "Start UI dashboard", @@ -22,6 +23,7 @@ func NewCmdDashboard(o *options.ArgoRolloutsOptions) *cobra.Command { KubeClientset: kubeclientset, RolloutsClientset: rolloutclientset, DynamicClientset: o.DynamicClientset(), + RootPath: rootPath, } for { @@ -33,6 +35,7 @@ func NewCmdDashboard(o *options.ArgoRolloutsOptions) *cobra.Command { } }, } + cmd.Flags().StringVar(&rootPath, "root-path", "rollouts", "changes the root path of the dashboard") return cmd } diff --git a/pkg/kubectl-argo-rollouts/cmd/get/get.go b/pkg/kubectl-argo-rollouts/cmd/get/get.go index bb29d692e9..3576484518 100644 --- a/pkg/kubectl-argo-rollouts/cmd/get/get.go +++ b/pkg/kubectl-argo-rollouts/cmd/get/get.go @@ -31,6 +31,8 @@ var ( info.InfoTagStable: FgGreen, info.InfoTagActive: FgGreen, info.InfoTagPreview: FgHiBlue, + info.InfoTagPing: FgHiBlue, + info.InfoTagPong: FgHiBlue, // Colors for highlighting experiment/analysisruns string(v1alpha1.AnalysisPhasePending): FgHiBlue, diff --git a/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go b/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go index 92eab92477..03a7465751 100644 --- a/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go +++ b/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go @@ -212,6 +212,14 @@ func (o *GetOptions) PrintReplicaSetInfo(w io.Writer, rsInfo rollout.ReplicaSetI infoCols = append(infoCols, o.colorize(info.InfoTagPreview)) name = o.colorizeStatus(name, info.InfoTagPreview) } + if rsInfo.Ping { + infoCols = append(infoCols, o.colorize(info.InfoTagPing)) + name = o.colorizeStatus(name, info.InfoTagPing) + } + if rsInfo.Pong { + infoCols = append(infoCols, o.colorize(info.InfoTagPong)) + name = o.colorizeStatus(name, info.InfoTagPong) + } if rsInfo.ScaleDownDeadline != "" { infoCols = append(infoCols, fmt.Sprintf("delay:%s", info.ScaleDownDelay(rsInfo))) } diff --git a/pkg/kubectl-argo-rollouts/cmd/get/get_test.go b/pkg/kubectl-argo-rollouts/cmd/get/get_test.go index 3ca3872be1..3f2f56468a 100644 --- a/pkg/kubectl-argo-rollouts/cmd/get/get_test.go +++ b/pkg/kubectl-argo-rollouts/cmd/get/get_test.go @@ -7,14 +7,13 @@ import ( "testing" "time" - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/stretchr/testify/assert" "k8s.io/cli-runtime/pkg/genericclioptions" + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/info/testdata" options "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options/fake" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func assertStdout(t *testing.T, expectedOut string, o genericclioptions.IOStreams) { @@ -163,7 +162,7 @@ NAME KIND STATUS AGE INFO func TestGetBlueGreenRolloutScaleDownDelay(t *testing.T) { rolloutObjs := testdata.NewBlueGreenRollout() - 
inFourHours := metav1.Now().Add(4 * time.Hour).Truncate(time.Second).UTC().Format(time.RFC3339) + inFourHours := timeutil.Now().Add(4 * time.Hour).Truncate(time.Second).UTC().Format(time.RFC3339) rolloutObjs.ReplicaSets[2].Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inFourHours delete(rolloutObjs.ReplicaSets[2].Labels, v1alpha1.DefaultRolloutUniqueLabelKey) @@ -211,7 +210,7 @@ NAME KIND STATUS AGE INFO func TestGetBlueGreenRolloutScaleDownDelayPassed(t *testing.T) { rolloutObjs := testdata.NewBlueGreenRollout() - anHourAgo := metav1.Now().Add(-1 * time.Hour).Truncate(time.Second).UTC().Format(time.RFC3339) + anHourAgo := timeutil.Now().Add(-1 * time.Hour).Truncate(time.Second).UTC().Format(time.RFC3339) rolloutObjs.ReplicaSets[2].Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = anHourAgo delete(rolloutObjs.ReplicaSets[2].Labels, v1alpha1.DefaultRolloutUniqueLabelKey) @@ -305,6 +304,54 @@ NAME KIND STATUS AGE IN assertStdout(t, expectedOut, o.IOStreams) } +func TestGetCanaryPingPongRollout(t *testing.T) { + rolloutObjs := testdata.NewCanaryRollout() + + tf, o := options.NewFakeArgoRolloutsOptions(rolloutObjs.AllObjects()...) + o.RESTClientGetter = tf.WithNamespace(rolloutObjs.Rollouts[3].Namespace) + defer tf.Cleanup() + cmd := NewCmdGetRollout(o) + cmd.PersistentPreRunE = o.PersistentPreRunE + cmd.SetArgs([]string{rolloutObjs.Rollouts[3].Name, "--no-color"}) + err := cmd.Execute() + assert.NoError(t, err) + + expectedOut := strings.TrimPrefix(` +Name: canary-demo-pingpong +Namespace: jesse-test +Status: ✖ Degraded +Message: ProgressDeadlineExceeded: ReplicaSet "canary-demo-65fb5ffc84" has timed out progressing. +Strategy: Canary + Step: 0/8 + SetWeight: 20 + ActualWeight: 0 +Images: argoproj/rollouts-demo:does-not-exist (canary, ping) + argoproj/rollouts-demo:green (stable, pong) +Replicas: + Desired: 5 + Current: 6 + Updated: 1 + Ready: 5 + Available: 5 + +NAME KIND STATUS AGE INFO +⟳ canary-demo-pingpong Rollout ✖ Degraded 7d +├──# revision:31 +│ └──⧉ canary-demo-65fb5ffc84 ReplicaSet ◌ Progressing 7d canary,ping +│ └──□ canary-demo-65fb5ffc84-9wf5r Pod ⚠ ImagePullBackOff 7d ready:0/1 +├──# revision:30 +│ └──⧉ canary-demo-877894d5b ReplicaSet ✔ Healthy 7d stable,pong +│ ├──□ canary-demo-877894d5b-6jfpt Pod ✔ Running 7d ready:1/1 +│ ├──□ canary-demo-877894d5b-7jmqw Pod ✔ Running 7d ready:1/1 +│ ├──□ canary-demo-877894d5b-j8g2b Pod ✔ Running 7d ready:1/1 +│ ├──□ canary-demo-877894d5b-jw5qm Pod ✔ Running 7d ready:1/1 +│ └──□ canary-demo-877894d5b-kh7x4 Pod ✔ Running 7d ready:1/1 +└──# revision:29 + └──⧉ canary-demo-859c99b45c ReplicaSet • ScaledDown 7d +`, "\n") + assertStdout(t, expectedOut, o.IOStreams) +} + func TestExperimentRollout(t *testing.T) { rolloutObjs := testdata.NewExperimentAnalysisRollout() @@ -346,6 +393,13 @@ NAME K │ │ └──⧉ rollout-experiment-analysis-6f646bf7b7-1-vcv27-canary-7699dcf5d ReplicaSet ✔ Healthy 7d │ │ └──□ rollout-experiment-analysis-6f646bf7b7-1-vcv27-canary-7699vgr24 Pod ✔ Running 7d ready:1/1 │ └──α rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr AnalysisRun ? Inconclusive 7d ✔ 4,✖ 4,? 
1,⚠ 1 +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rzl6lt Job ✖ Failed 7d +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-r8lqpd Job ✔ Successful 7d +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rjjsgg Job ✔ Successful 7d +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rrnfj5 Job ✖ Failed 7d +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rx5kqk Job ✖ Failed 7d +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rp894b Job ✔ Successful 7d +│ ├──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rmngtj Job ✖ Failed 7d │ └──⊞ rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rsxm69 Job ✔ Successful 7d └──# revision:1 └──⧉ rollout-experiment-analysis-f6db98dff ReplicaSet ✔ Healthy 7d stable diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/lint.go b/pkg/kubectl-argo-rollouts/cmd/lint/lint.go index 1c50a9650d..66e28318eb 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/lint.go +++ b/pkg/kubectl-argo-rollouts/cmd/lint/lint.go @@ -2,19 +2,23 @@ package lint import ( "bytes" - "encoding/json" + "fmt" "io" "io/ioutil" - "unicode" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/validation" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options" + ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" "github.com/ghodss/yaml" "github.com/spf13/cobra" goyaml "gopkg.in/yaml.v2" + v1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation/field" ) type LintOptions struct { @@ -22,6 +26,11 @@ type LintOptions struct { File string } +type roAndReferences struct { + Rollout v1alpha1.Rollout + References validation.ReferencedResources +} + const ( lintExample = ` # Lint a rollout @@ -56,42 +65,10 @@ func NewCmdLint(o *options.ArgoRolloutsOptions) *cobra.Command { return cmd } -// isJSON detects if the byte array looks like json, based on the first non-whitespace character -func isJSON(fileBytes []byte) bool { - for _, b := range fileBytes { - if !unicode.IsSpace(rune(b)) { - return b == '{' - } - } - return false -} - func unmarshal(fileBytes []byte, obj interface{}) error { - if isJSON(fileBytes) { - decoder := json.NewDecoder(bytes.NewReader(fileBytes)) - decoder.DisallowUnknownFields() - return decoder.Decode(&obj) - } return yaml.UnmarshalStrict(fileBytes, &obj, yaml.DisallowUnknownFields) } -func validate(fileBytes []byte, un *unstructured.Unstructured) error { - gvk := un.GroupVersionKind() - switch { - case gvk.Group == rollouts.Group && gvk.Kind == rollouts.RolloutKind: - var ro v1alpha1.Rollout - err := unmarshal(fileBytes, &ro) - if err != nil { - return err - } - errs := validation.ValidateRollout(&ro) - if 0 < len(errs) { - return errs[0] - } - } - return nil -} - func (l *LintOptions) lintResource(path string) error { fileBytes, err := ioutil.ReadFile(path) if err != nil { @@ -99,13 +76,8 @@ func (l *LintOptions) lintResource(path string) error { } var un unstructured.Unstructured - - if isJSON(fileBytes) { - if err = unmarshal(fileBytes, un); err != nil { - return err - } - return validate(fileBytes, &un) - } + var refResource validation.ReferencedResources + var fileRollouts []v1alpha1.Rollout decoder := goyaml.NewDecoder(bytes.NewReader(fileBytes)) for { @@ -128,10 +100,218 @@ func (l 
*LintOptions) lintResource(path string) error { return err } - if err = validate(valueBytes, &un); err != nil { + gvk := un.GroupVersionKind() + if gvk.Group == rollouts.Group && gvk.Kind == rollouts.RolloutKind { + var ro v1alpha1.Rollout + err := unmarshal(valueBytes, &ro) + if err != nil { + return err + } + fileRollouts = append(fileRollouts, ro) + } + err = buildAllReferencedResources(un, &refResource) + if err != nil { return err } } + setServiceTypeAndManagedAnnotation(fileRollouts, refResource) + setIngressManagedAnnotation(fileRollouts, refResource) + setVirtualServiceManagedAnnotation(fileRollouts, refResource) + + var errList field.ErrorList + for _, rollout := range fileRollouts { + roRef := matchRolloutToReferences(rollout, refResource) + + errList = append(errList, validation.ValidateRollout(&roRef.Rollout)...) + errList = append(errList, validation.ValidateRolloutReferencedResources(&roRef.Rollout, roRef.References)...) + } + + for _, e := range errList { + fmt.Println(e.ErrorBody()) + } + if len(errList) > 0 { + return errList[0] + } else { + return nil + } +} + +// buildAllReferencedResources This builds a ReferencedResources object that has all the external resources for every +// rollout resource in the manifest. We will need to later match each referenced resource to its own rollout resource +// before passing the rollout object and its managed reference on to validation. +func buildAllReferencedResources(un unstructured.Unstructured, refResource *validation.ReferencedResources) error { + + valueBytes, err := un.MarshalJSON() + if err != nil { + return err + } + + gvk := un.GroupVersionKind() + switch { + case gvk.Group == v1.GroupName && gvk.Kind == "Service": + var svc v1.Service + err := unmarshal(valueBytes, &svc) + if err != nil { + return err + } + refResource.ServiceWithType = append(refResource.ServiceWithType, validation.ServiceWithType{ + Service: &svc, + }) + + case gvk.Group == "networking.istio.io" && gvk.Kind == "VirtualService": + refResource.VirtualServices = append(refResource.VirtualServices, un) + + case (gvk.Group == networkingv1.GroupName || gvk.Group == extensionsv1beta1.GroupName) && gvk.Kind == "Ingress": + var ing networkingv1.Ingress + var ingv1beta1 extensionsv1beta1.Ingress + if gvk.Version == "v1" { + err := unmarshal(valueBytes, &ing) + if err != nil { + return err + } + refResource.Ingresses = append(refResource.Ingresses, *ingressutil.NewIngress(&ing)) + } else if gvk.Version == "v1beta1" { + err := unmarshal(valueBytes, &ingv1beta1) + if err != nil { + return err + } + refResource.Ingresses = append(refResource.Ingresses, *ingressutil.NewLegacyIngress(&ingv1beta1)) + } + + } return nil } + +// matchRolloutToReferences This function goes through the global list of all ReferencedResources in the manifest and matches +// them up with their respective rollout object so that we can latter have a mapping of a single rollout object and its +// referenced resources. 
+func matchRolloutToReferences(rollout v1alpha1.Rollout, refResource validation.ReferencedResources) roAndReferences { + matchedReferenceResources := roAndReferences{Rollout: rollout, References: validation.ReferencedResources{}} + + for _, service := range refResource.ServiceWithType { + if service.Service.Annotations[v1alpha1.ManagedByRolloutsKey] == rollout.Name { + matchedReferenceResources.References.ServiceWithType = append(matchedReferenceResources.References.ServiceWithType, service) + } + } + for _, ingress := range refResource.Ingresses { + if ingress.GetAnnotations()[v1alpha1.ManagedByRolloutsKey] == rollout.Name { + matchedReferenceResources.References.Ingresses = append(matchedReferenceResources.References.Ingresses, ingress) + } + } + for _, virtualService := range refResource.VirtualServices { + if virtualService.GetAnnotations()[v1alpha1.ManagedByRolloutsKey] == rollout.Name { + matchedReferenceResources.References.VirtualServices = append(matchedReferenceResources.References.VirtualServices, virtualService) + } + } + + return matchedReferenceResources +} + +// setServiceTypeAndManagedAnnotation This sets the managed annotation on each service as well as figures out what +// type of service its is by looking at the rollout and set's its service type accordingly. +func setServiceTypeAndManagedAnnotation(rollouts []v1alpha1.Rollout, refResource validation.ReferencedResources) { + for _, rollout := range rollouts { + for i := range refResource.ServiceWithType { + + if refResource.ServiceWithType[i].Service.Annotations == nil { + refResource.ServiceWithType[i].Service.Annotations = make(map[string]string) + } + + if rollout.Spec.Strategy.Canary != nil { + if rollout.Spec.Strategy.Canary.CanaryService == refResource.ServiceWithType[i].Service.Name { + refResource.ServiceWithType[i].Type = validation.CanaryService + refResource.ServiceWithType[i].Service.Annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + } + if rollout.Spec.Strategy.Canary.StableService == refResource.ServiceWithType[i].Service.Name { + refResource.ServiceWithType[i].Type = validation.StableService + refResource.ServiceWithType[i].Service.Annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + } + if rollout.Spec.Strategy.Canary.PingPong != nil { + if rollout.Spec.Strategy.Canary.PingPong.PingService == refResource.ServiceWithType[i].Service.Name { + refResource.ServiceWithType[i].Type = validation.PingService + refResource.ServiceWithType[i].Service.Annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + } + if rollout.Spec.Strategy.Canary.PingPong.PongService == refResource.ServiceWithType[i].Service.Name { + refResource.ServiceWithType[i].Type = validation.PongService + refResource.ServiceWithType[i].Service.Annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + } + } + } + + if rollout.Spec.Strategy.BlueGreen != nil { + if rollout.Spec.Strategy.BlueGreen.ActiveService == refResource.ServiceWithType[i].Service.Name { + refResource.ServiceWithType[i].Type = validation.ActiveService + refResource.ServiceWithType[i].Service.Annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + } + if rollout.Spec.Strategy.BlueGreen.PreviewService == refResource.ServiceWithType[i].Service.Name { + refResource.ServiceWithType[i].Type = validation.PreviewService + refResource.ServiceWithType[i].Service.Annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + } + } + + } + } +} + +// setIngressManagedAnnotation This tries to find ingresses that have matching services in the rollout resource and if so 
+// it will add the managed by annotations just for linting so that we can later match up resources to a rollout resources +// for the case when we have multiple rollout resources in a single manifest. +func setIngressManagedAnnotation(rollouts []v1alpha1.Rollout, refResource validation.ReferencedResources) { + for _, rollout := range rollouts { + for i := range refResource.Ingresses { + var serviceName string + + // Basic Canary so ingress is only pointing a single service and so no linting is needed for this case. + if rollout.Spec.Strategy.Canary == nil || rollout.Spec.Strategy.Canary.TrafficRouting == nil { + return + } + if rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil { + serviceName = rollout.Spec.Strategy.Canary.StableService + } else if rollout.Spec.Strategy.Canary.TrafficRouting.ALB != nil { + serviceName = rollout.Spec.Strategy.Canary.StableService + if rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService != "" { + serviceName = rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService + } + } else if rollout.Spec.Strategy.Canary.TrafficRouting.SMI != nil { + serviceName = rollout.Spec.Strategy.Canary.TrafficRouting.SMI.RootService + } + + if ingressutil.HasRuleWithService(&refResource.Ingresses[i], serviceName) { + annotations := refResource.Ingresses[i].GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + refResource.Ingresses[i].SetAnnotations(annotations) + } + } + } +} + +// setVirtualServiceManagedAnnotation This function finds virtual services that are listed in the rollout resources and +// adds the ManagedByRolloutsKey to the annotations of the virtual services. +func setVirtualServiceManagedAnnotation(ro []v1alpha1.Rollout, refResource validation.ReferencedResources) { + for _, rollout := range ro { + for i := range refResource.VirtualServices { + if rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService != nil && rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name == refResource.VirtualServices[i].GetName() { + annotations := refResource.VirtualServices[i].GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + refResource.VirtualServices[i].SetAnnotations(annotations) + } + for _, virtualService := range rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualServices { + if virtualService.Name == refResource.VirtualServices[i].GetName() { + annotations := refResource.VirtualServices[i].GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name + refResource.VirtualServices[i].SetAnnotations(annotations) + } + } + } + } +} diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/lint_test.go b/pkg/kubectl-argo-rollouts/cmd/lint/lint_test.go index 1780dbcc8b..29fce3afe3 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/lint_test.go +++ b/pkg/kubectl-argo-rollouts/cmd/lint/lint_test.go @@ -15,13 +15,31 @@ func TestLintValidRollout(t *testing.T) { cmd := NewCmdLint(o) cmd.PersistentPreRunE = o.PersistentPreRunE - for _, filename := range []string{"testdata/valid.yml", "testdata/valid-workload-ref.yaml", "testdata/valid-with-another-empty-object.yml"} { - cmd.SetArgs([]string{"-f", filename}) - err := cmd.Execute() - assert.NoError(t, err) + tests := []string{ + "testdata/valid.yml", + "testdata/valid.json", + "testdata/valid-workload-ref.yaml", + 
"testdata/valid-with-another-empty-object.yml", + "testdata/valid-istio-v1alpha3.yml", + "testdata/valid-istio-v1beta1.yml", + "testdata/valid-blue-green.yml", + "testdata/valid-ingress-smi.yml", + "testdata/valid-ingress-smi-multi.yml", + "testdata/valid-alb-canary.yml", + "testdata/valid-nginx-canary.yml", + "testdata/valid-nginx-basic-canary.yml", + "testdata/valid-istio-v1beta1-mulitiple-virtualsvcs.yml", + } + + for _, filename := range tests { + t.Run(filename, func(t *testing.T) { + cmd.SetArgs([]string{"-f", filename}) + err := cmd.Execute() + assert.NoError(t, err) - stdout := o.Out.(*bytes.Buffer).String() - assert.Empty(t, stdout) + stdout := o.Out.(*bytes.Buffer).String() + assert.Empty(t, stdout) + }) } } @@ -32,36 +50,56 @@ func TestLintInvalidRollout(t *testing.T) { filename string errmsg string }{ - { "testdata/invalid.yml", "Error: spec.strategy.maxSurge: Invalid value: intstr.IntOrString{Type:0, IntVal:0, StrVal:\"\"}: MaxSurge and MaxUnavailable both can not be zero\n", }, + { + "testdata/invalid.json", + "Error: spec.strategy.maxSurge: Invalid value: intstr.IntOrString{Type:0, IntVal:0, StrVal:\"\"}: MaxSurge and MaxUnavailable both can not be zero\n", + }, { "testdata/invalid-multiple-docs.yml", "Error: spec.strategy.maxSurge: Invalid value: intstr.IntOrString{Type:0, IntVal:0, StrVal:\"\"}: MaxSurge and MaxUnavailable both can not be zero\n", }, - { "testdata/invalid-unknown-field.yml", "Error: error unmarshaling JSON: while decoding JSON: json: unknown field \"unknown-strategy\"\n", }, + { + "testdata/invalid-service-labels.yml", + "Error: spec.strategy.canary.canaryService: Invalid value: \"istio-host-split-canary\": Service \"istio-host-split-canary\" has unmatch lable \"app\" in rollout\n", + }, + { + "testdata/invalid-ping-pong.yml", + "Error: spec.strategy.canary.pingPong.pingService: Invalid value: \"ping-service\": Service \"ping-service\" has unmatch lable \"app\" in rollout\n", + }, + { + "testdata/invalid-ingress-smi-multi.yml", + "Error: spec.strategy.canary.canaryService: Invalid value: \"rollout-smi-experiment-canary\": Service \"rollout-smi-experiment-canary\" has unmatch lable \"app\" in rollout\n", + }, + { + filename: "testdata/invalid-nginx-canary.yml", + errmsg: "Error: spec.strategy.steps[1].experiment.templates[0].weight: Invalid value: 20: Experiment template weight is only available for TrafficRouting with SMI, ALB, and Istio at this time\n", + }, } runCmd = func(filename string, errmsg string) { - tf, o := options.NewFakeArgoRolloutsOptions() - defer tf.Cleanup() + t.Run(filename, func(t *testing.T) { + tf, o := options.NewFakeArgoRolloutsOptions() + defer tf.Cleanup() - cmd := NewCmdLint(o) - cmd.PersistentPreRunE = o.PersistentPreRunE - cmd.SetArgs([]string{"-f", filename}) - err := cmd.Execute() - assert.Error(t, err) + cmd := NewCmdLint(o) + cmd.PersistentPreRunE = o.PersistentPreRunE + cmd.SetArgs([]string{"-f", filename}) + err := cmd.Execute() + assert.Error(t, err) - stdout := o.Out.(*bytes.Buffer).String() - stderr := o.ErrOut.(*bytes.Buffer).String() - assert.Empty(t, stdout) - assert.Equal(t, errmsg, stderr) + stdout := o.Out.(*bytes.Buffer).String() + stderr := o.ErrOut.(*bytes.Buffer).String() + assert.Empty(t, stdout) + assert.Equal(t, errmsg, stderr) + }) } for _, t := range tests { diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml new file mode 100644 index 0000000000..fc04b8deb7 --- /dev/null +++ 
b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml @@ -0,0 +1,244 @@ +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-miss +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-miss +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-root +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-miss +--- +apiVersion: split.smi-spec.io/v1alpha1 +kind: TrafficSplit +metadata: + name: rollout-smi-experiment-split +spec: + service: rollout-smi-experiment-root + backends: + - service: rollout-smi-experiment-stable + weight: 95 + - service: rollout-smi-experiment-canary + weight: 5 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: rollout-smi-experiment-stable + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: rollout-smi-experiment.local + http: + paths: + - path: / + backend: + serviceName: rollout-smi-experiment-stable + servicePort: 80 + - host: rollout-smi-experiment-root.local + http: + paths: + - path: / + backend: + serviceName: rollout-smi-experiment-root + servicePort: 80 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-smi-experiment +spec: + replicas: 1 + strategy: + canary: + canaryService: rollout-smi-experiment-canary + stableService: rollout-smi-experiment-stable + trafficRouting: + smi: + trafficSplitName: rollout-smi-experiment-split + rootService: rollout-smi-experiment-root + steps: + - setWeight: 5 + - experiment: + templates: + - name: experiment-smi + specRef: canary + weight: 5 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-smi-experiment + template: + metadata: + labels: + app: rollout-smi-experiment + spec: + containers: + - name: rollout-smi-experiment + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + + + + + +--- + + + + +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-canary-1 +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-stable-1 +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-root-1 +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-1 +--- +apiVersion: split.smi-spec.io/v1alpha1 +kind: TrafficSplit +metadata: + name: rollout-smi-experiment-split-1 +spec: + service: rollout-smi-experiment-root-1 + backends: + - service: rollout-smi-experiment-stable-1 + weight: 95 + - service: rollout-smi-experiment-canary-1 + weight: 5 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: rollout-smi-experiment-stable-1 + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: rollout-smi-experiment.local + http: + paths: + - path: / + backend: + service: + name: rollout-smi-experiment-stable-1 + port: + number: 80 + - host: 
rollout-smi-experiment-root.local + http: + paths: + - path: / + backend: + service: + name: rollout-smi-experiment-root-1 + port: + number: 80 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-smi-experiment-1 +spec: + replicas: 1 + strategy: + canary: + canaryService: rollout-smi-experiment-canary-1 + stableService: rollout-smi-experiment-stable-1 + trafficRouting: + smi: + trafficSplitName: rollout-smi-experiment-split-1 + rootService: rollout-smi-experiment-root-1 + steps: + - setWeight: 5 + - experiment: + templates: + - name: experiment-smi + specRef: canary + weight: 5 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-smi-experiment-1 + template: + metadata: + labels: + app: rollout-smi-experiment-1 + spec: + containers: + - name: rollout-smi-experiment + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m \ No newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml new file mode 100644 index 0000000000..ec90d8ad2d --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-rollout-root +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-rollout-canary +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-rollout-stable +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: nginx-rollout-ingress + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - http: + paths: + - path: /* + backend: + serviceName: nginx-rollout-root + servicePort: use-annotation +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: nginx-rollout +spec: + selector: + matchLabels: + app: nginx-rollout + template: + metadata: + labels: + app: nginx-rollout + spec: + containers: + - name: nginx-rollout + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + strategy: + canary: + canaryService: nginx-rollout-canary + stableService: nginx-rollout-stable + trafficRouting: + nginx: + stableIngress: nginx-rollout-ingress + steps: + - setWeight: 10 + - experiment: + templates: + - name: experiment-nginx + specRef: canary + weight: 20 diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ping-pong.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ping-pong.yml new file mode 100644 index 0000000000..a0c908dce8 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ping-pong.yml @@ -0,0 +1,69 @@ +# Miss matched labels on service +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: invalid-rollout +spec: + revisionHistoryLimit: 1 + replicas: 10 + strategy: + canary: + pingPong: #Indicates that the ping-pong services enabled + pingService: ping-service + pongService: pong-service + maxUnavailable: 0 + maxSurge: 1 + analysis: + templates: + - templateName: integrationtests + steps: 
+ - setWeight: 10 + - setWeight: 20 + - setWeight: 40 + - setWeight: 80 + selector: + matchLabels: + app: invalid-rollout + template: + metadata: + labels: + app: invalid-rollout + spec: + containers: + - name: invalid-rollout + image: invalid-rollout:0.0.0 + ports: + - name: http + containerPort: 8080 + protocol: TCP + readinessProbe: + httpGet: + path: /ping + port: 8080 + periodSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: ping-service +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: invalid-rollout-miss-match +--- +apiVersion: v1 +kind: Service +metadata: + name: pong-service +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: invalid-rollout-miss-match \ No newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-service-labels.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-service-labels.yml new file mode 100644 index 0000000000..705597f927 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-service-labels.yml @@ -0,0 +1,69 @@ +# Miss matched labels on service +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: invalid-rollout +spec: + revisionHistoryLimit: 1 + replicas: 10 + strategy: + canary: + canaryService: istio-host-split-canary + stableService: istio-host-split-stable + maxUnavailable: 0 + maxSurge: 1 + analysis: + templates: + - templateName: integrationtests + steps: + - setWeight: 10 + - setWeight: 20 + - setWeight: 40 + - setWeight: 80 + selector: + matchLabels: + app: invalid-rollout + template: + metadata: + labels: + app: invalid-rollout + spec: + containers: + - name: invalid-rollout + image: invalid-rollout:0.0.0 + ports: + - name: http + containerPort: 8080 + protocol: TCP + readinessProbe: + httpGet: + path: /ping + port: 8080 + periodSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split \ No newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid.json b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid.json new file mode 100644 index 0000000000..09fbae6451 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid.json @@ -0,0 +1,72 @@ +{ + "apiVersion": "argoproj.io/v1alpha1", + "kind": "Rollout", + "metadata": { + "name": "invalid-rollout" + }, + "spec": { + "revisionHistoryLimit": 1, + "replicas": 1, + "strategy": { + "canary": { + "maxUnavailable": 0, + "maxSurge": 0, + "analysis": { + "templates": [ + { + "templateName": "integrationtests" + } + ] + }, + "steps": [ + { + "setWeight": 10 + }, + { + "setWeight": 20 + }, + { + "setWeight": 40 + }, + { + "setWeight": 80 + } + ] + } + }, + "selector": { + "matchLabels": { + "app": "invalid-rollout" + } + }, + "template": { + "metadata": { + "labels": { + "app": "invalid-rollout" + } + }, + "spec": { + "containers": [ + { + "name": "invalid-rollout", + "image": "invalid-rollout:0.0.0", + "ports": [ + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + } + ], + "readinessProbe": { + "httpGet": { + "path": "/ping", + "port": 8080 + }, + "periodSeconds": 5 + } + } + ] + } + } + } +} diff --git 
a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml new file mode 100644 index 0000000000..c0b2131c74 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: alb-rollout-root +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-rollout +--- +apiVersion: v1 +kind: Service +metadata: + name: alb-rollout-canary +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-rollout +--- +apiVersion: v1 +kind: Service +metadata: + name: alb-rollout-stable +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-rollout +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: alb-rollout-ingress +spec: + rules: + - http: + paths: + - path: /* + backend: + serviceName: alb-rollout-root + servicePort: use-annotation +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: alb-rollout +spec: + selector: + matchLabels: + app: alb-rollout + template: + metadata: + labels: + app: alb-rollout + spec: + containers: + - name: alb-rollout + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + strategy: + canary: + canaryService: alb-rollout-canary + stableService: alb-rollout-stable + trafficRouting: + alb: + ingress: alb-rollout-ingress + rootService: alb-rollout-root + servicePort: 80 + steps: + - setWeight: 10 + - experiment: + templates: + - name: experiment-alb + specRef: canary + weight: 20 diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-blue-green.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-blue-green.yml new file mode 100644 index 0000000000..ad93ae1c5e --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-blue-green.yml @@ -0,0 +1,51 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-bluegreen +spec: + replicas: 2 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-bluegreen + template: + metadata: + labels: + app: rollout-bluegreen + spec: + containers: + - name: rollouts-demo + image: argoproj/rollouts-demo:blue + imagePullPolicy: Always + ports: + - containerPort: 8080 + strategy: + blueGreen: + activeService: rollout-bluegreen-active + previewService: rollout-bluegreen-preview + autoPromotionEnabled: false +--- +kind: Service +apiVersion: v1 +metadata: + name: rollout-bluegreen-active +spec: + selector: + app: rollout-bluegreen + ports: + - protocol: TCP + port: 80 + targetPort: 8080 + +--- +kind: Service +apiVersion: v1 +metadata: + name: rollout-bluegreen-preview +spec: + selector: + app: rollout-bluegreen + ports: + - protocol: TCP + port: 80 + targetPort: 8080 diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml new file mode 100644 index 0000000000..884eebf406 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml @@ -0,0 +1,244 @@ +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment +--- +apiVersion: v1 +kind: Service +metadata: + name: 
rollout-smi-experiment-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-root +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment +--- +apiVersion: split.smi-spec.io/v1alpha1 +kind: TrafficSplit +metadata: + name: rollout-smi-experiment-split +spec: + service: rollout-smi-experiment-root + backends: + - service: rollout-smi-experiment-stable + weight: 95 + - service: rollout-smi-experiment-canary + weight: 5 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: rollout-smi-experiment-stable + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: rollout-smi-experiment.local + http: + paths: + - path: / + backend: + serviceName: rollout-smi-experiment-stable + servicePort: 80 + - host: rollout-smi-experiment-root.local + http: + paths: + - path: / + backend: + serviceName: rollout-smi-experiment-root + servicePort: 80 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-smi-experiment +spec: + replicas: 1 + strategy: + canary: + canaryService: rollout-smi-experiment-canary + stableService: rollout-smi-experiment-stable + trafficRouting: + smi: + trafficSplitName: rollout-smi-experiment-split + rootService: rollout-smi-experiment-root + steps: + - setWeight: 5 + - experiment: + templates: + - name: experiment-smi + specRef: canary + weight: 5 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-smi-experiment + template: + metadata: + labels: + app: rollout-smi-experiment + spec: + containers: + - name: rollout-smi-experiment + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + + + + + +--- + + + + +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-canary-1 +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-stable-1 +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-root-1 +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment-1 +--- +apiVersion: split.smi-spec.io/v1alpha1 +kind: TrafficSplit +metadata: + name: rollout-smi-experiment-split-1 +spec: + service: rollout-smi-experiment-root-1 + backends: + - service: rollout-smi-experiment-stable-1 + weight: 95 + - service: rollout-smi-experiment-canary-1 + weight: 5 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: rollout-smi-experiment-stable-1 + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: rollout-smi-experiment.local + http: + paths: + - path: / + backend: + service: + name: rollout-smi-experiment-stable-1 + port: + number: 80 + - host: rollout-smi-experiment-root.local + http: + paths: + - path: / + backend: + service: + name: rollout-smi-experiment-root-1 + port: + number: 80 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-smi-experiment-1 +spec: + replicas: 1 + strategy: + canary: + canaryService: rollout-smi-experiment-canary-1 + stableService: rollout-smi-experiment-stable-1 + 
trafficRouting: + smi: + trafficSplitName: rollout-smi-experiment-split-1 + rootService: rollout-smi-experiment-root-1 + steps: + - setWeight: 5 + - experiment: + templates: + - name: experiment-smi + specRef: canary + weight: 5 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-smi-experiment-1 + template: + metadata: + labels: + app: rollout-smi-experiment-1 + spec: + containers: + - name: rollout-smi-experiment + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m \ No newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml new file mode 100644 index 0000000000..63f9e6cddf --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml @@ -0,0 +1,115 @@ +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-smi-experiment-root +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-smi-experiment +--- +apiVersion: split.smi-spec.io/v1alpha1 +kind: TrafficSplit +metadata: + name: rollout-smi-experiment-split +spec: + service: rollout-smi-experiment-root + backends: + - service: rollout-smi-experiment-stable + weight: 95 + - service: rollout-smi-experiment-canary + weight: 5 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: rollout-smi-experiment-stable + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: rollout-smi-experiment.local + http: + paths: + - path: / + backend: + serviceName: rollout-smi-experiment-stable + servicePort: 80 + - host: rollout-smi-experiment-root.local + http: + paths: + - path: / + backend: + serviceName: rollout-smi-experiment-root + servicePort: 80 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-smi-experiment +spec: + replicas: 1 + strategy: + canary: + canaryService: rollout-smi-experiment-canary + stableService: rollout-smi-experiment-stable + trafficRouting: + smi: + trafficSplitName: rollout-smi-experiment-split + rootService: rollout-smi-experiment-root + steps: + - setWeight: 5 + - experiment: + templates: + - name: experiment-smi + specRef: canary + weight: 5 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-smi-experiment + template: + metadata: + labels: + app: rollout-smi-experiment + spec: + containers: + - name: rollout-smi-experiment + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1alpha3.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1alpha3.yml new file mode 100644 index 0000000000..1de296d451 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1alpha3.yml @@ -0,0 +1,88 @@ +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: 
istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split.com + gateways: + - istio-host-split-gateway + http: + - name: primary + route: + - destination: + host: istio-host-split-stable + weight: 100 + - destination: + host: istio-host-split-canary + weight: 0 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-host-split +spec: + strategy: + canary: + canaryService: istio-host-split-canary + stableService: istio-host-split-stable + trafficRouting: + istio: + virtualService: + name: istio-host-split-vsvc + routes: + - primary + steps: + - setWeight: 10 + - experiment: + templates: + - name: experiment-istio + specRef: canary + weight: 20 + selector: + matchLabels: + app: istio-host-split + template: + metadata: + labels: + app: istio-host-split + spec: + containers: + - name: istio-host-split + image: argoproj/rollouts-demo:red + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m \ No newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1beta1-mulitiple-virtualsvcs.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1beta1-mulitiple-virtualsvcs.yml new file mode 100644 index 0000000000..346de2e287 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1beta1-mulitiple-virtualsvcs.yml @@ -0,0 +1,110 @@ +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split.com + gateways: + - istio-host-split-gateway + http: + - name: primary + route: + - destination: + host: istio-host-split-stable + weight: 100 + - destination: + host: istio-host-split-canary + weight: 0 +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: istio-host-split-vsvc-1 +spec: + hosts: + - istio-host-split.com + gateways: + - istio-host-split-gateway + http: + - name: primary + route: + - destination: + host: istio-host-split-stable + weight: 100 + - destination: + host: istio-host-split-canary + weight: 0 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-host-split +spec: + strategy: + canary: + canaryService: istio-host-split-canary + stableService: istio-host-split-stable + trafficRouting: + istio: + virtualServices: + - name: istio-host-split-vsvc + routes: + - primary + - name: istio-host-split-vsvc-1 + routes: + - primary + steps: + - setWeight: 10 + - experiment: + templates: + - name: experiment-istio + specRef: canary + weight: 20 + selector: + matchLabels: + app: istio-host-split + template: + metadata: + labels: + app: istio-host-split + spec: + containers: + - name: istio-host-split + image: argoproj/rollouts-demo:red + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m \ No 
newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1beta1.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1beta1.yml new file mode 100644 index 0000000000..d793401a9f --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-istio-v1beta1.yml @@ -0,0 +1,88 @@ +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split.com + gateways: + - istio-host-split-gateway + http: + - name: primary + route: + - destination: + host: istio-host-split-stable + weight: 100 + - destination: + host: istio-host-split-canary + weight: 0 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-host-split +spec: + strategy: + canary: + canaryService: istio-host-split-canary + stableService: istio-host-split-stable + trafficRouting: + istio: + virtualService: + name: istio-host-split-vsvc + routes: + - primary + steps: + - setWeight: 10 + - experiment: + templates: + - name: experiment-istio + specRef: canary + weight: 20 + selector: + matchLabels: + app: istio-host-split + template: + metadata: + labels: + app: istio-host-split + spec: + containers: + - name: istio-host-split + image: argoproj/rollouts-demo:red + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m \ No newline at end of file diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml new file mode 100644 index 0000000000..4d295c6c86 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-rollout-stable +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: nginx-rollout-ingress +spec: + rules: + - http: + paths: + - path: /* + backend: + serviceName: nginx-rollout-root + servicePort: use-annotation +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: nginx-rollout +spec: + selector: + matchLabels: + app: nginx-rollout + template: + metadata: + labels: + app: nginx-rollout + spec: + containers: + - name: nginx-rollout + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + strategy: + canary: + steps: + - setWeight: 10 + - pause: {} + - setWeight: 50 + - pause: {} diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml new file mode 100644 index 0000000000..30fe00ca12 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml @@ -0,0 +1,88 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-rollout-root +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: v1 
+kind: Service +metadata: + name: nginx-rollout-canary +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-rollout-stable +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: nginx-rollout +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: nginx-rollout-ingress +spec: + rules: + - http: + paths: + - path: /* + backend: + serviceName: nginx-rollout-root + servicePort: use-annotation +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: nginx-rollout +spec: + selector: + matchLabels: + app: nginx-rollout + template: + metadata: + labels: + app: nginx-rollout + spec: + containers: + - name: nginx-rollout + image: argoproj/rollouts-demo:blue + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + strategy: + canary: + canaryService: nginx-rollout-canary + stableService: nginx-rollout-stable + trafficRouting: + nginx: + stableIngress: nginx-rollout-ingress + steps: + - setWeight: 10 diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid.json b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid.json new file mode 100644 index 0000000000..df85628af7 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid.json @@ -0,0 +1,72 @@ +{ + "apiVersion": "argoproj.io/v1alpha1", + "kind": "Rollout", + "metadata": { + "name": "valid-rollout" + }, + "spec": { + "revisionHistoryLimit": 1, + "replicas": 10, + "strategy": { + "canary": { + "maxUnavailable": 0, + "maxSurge": 1, + "analysis": { + "templates": [ + { + "templateName": "integrationtests" + } + ] + }, + "steps": [ + { + "setWeight": 10 + }, + { + "setWeight": 20 + }, + { + "setWeight": 40 + }, + { + "setWeight": 80 + } + ] + } + }, + "selector": { + "matchLabels": { + "app": "valid-rollout" + } + }, + "template": { + "metadata": { + "labels": { + "app": "valid-rollout" + } + }, + "spec": { + "containers": [ + { + "name": "valid-rollout", + "image": "valid-rollout:0.0.0", + "ports": [ + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + } + ], + "readinessProbe": { + "httpGet": { + "path": "/ping", + "port": 8080 + }, + "periodSeconds": 5 + } + } + ] + } + } + } +} diff --git a/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go b/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go index 336e043183..493a8aaa18 100644 --- a/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go +++ b/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go @@ -11,6 +11,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options" experimentutil "github.com/argoproj/argo-rollouts/utils/experiment" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -85,7 +86,7 @@ func (o *ListOptions) PrintExperimentTable(expList *v1alpha1.ExperimentList) err } fmt.Fprintf(w, headerStr) for _, exp := range expList.Items { - age := duration.HumanDuration(metav1.Now().Sub(exp.CreationTimestamp.Time)) + age := duration.HumanDuration(timeutil.MetaNow().Sub(exp.CreationTimestamp.Time)) dur := "-" remaining := "-" if exp.Spec.Duration != "" { diff --git a/pkg/kubectl-argo-rollouts/cmd/list/list_test.go b/pkg/kubectl-argo-rollouts/cmd/list/list_test.go index b67f482b31..705363b394 100644 --- a/pkg/kubectl-argo-rollouts/cmd/list/list_test.go +++ 
b/pkg/kubectl-argo-rollouts/cmd/list/list_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/undefinedlabs/go-mpatch" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" kubetesting "k8s.io/client-go/testing" @@ -176,22 +175,14 @@ func TestListNamespaceAndTimestamp(t *testing.T) { cmd.PersistentPreRunE = o.PersistentPreRunE cmd.SetArgs([]string{"--all-namespaces", "--timestamps"}) - patch, err := mpatch.PatchMethod(time.Now, func() time.Time { - return time.Time{} - }) - assert.NoError(t, err) - err = cmd.Execute() - patch.Unpatch() + err := cmd.Execute() assert.NoError(t, err) stdout := o.Out.(*bytes.Buffer).String() stderr := o.ErrOut.(*bytes.Buffer).String() assert.Empty(t, stderr) - expectedOut := strings.TrimPrefix(` -TIMESTAMP NAMESPACE NAME STRATEGY STATUS STEP SET-WEIGHT READY DESIRED UP-TO-DATE AVAILABLE -0001-01-01T00:00:00Z test can-guestbook Canary Progressing 1/3 10 1/4 5 3 2 -`, "\n") - assert.Equal(t, expectedOut, stdout) + assert.Contains(t, stdout, "TIMESTAMP NAMESPACE NAME STRATEGY STATUS STEP SET-WEIGHT READY DESIRED UP-TO-DATE AVAILABLE") + assert.Contains(t, stdout, "test can-guestbook Canary Progressing 1/3 10 1/4 5 3 2") } func TestListWithWatch(t *testing.T) { diff --git a/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go b/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go index 05a37a02e8..d015939f69 100644 --- a/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go +++ b/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go @@ -8,6 +8,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -92,7 +93,7 @@ func (ri *rolloutInfo) String(timestamp, namespace bool) string { } if timestamp { fmtString = "%-20s\t" + fmtString - timestampStr := time.Now().UTC().Truncate(time.Second).Format("2006-01-02T15:04:05Z") + timestampStr := timeutil.Now().UTC().Truncate(time.Second).Format("2006-01-02T15:04:05Z") args = append([]interface{}{timestampStr}, args...) } return fmt.Sprintf(fmtString, args...) 
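Note on the time-handling changes in the hunks above: list_experiments.go and rollloutinfo.go (and, further down, restart.go, info.go and the tests) switch from time.Now()/metav1.Now() to a timeutil package, and list_test.go drops the go-mpatch monkey-patching of time.Now in favor of assert.Contains checks. The utils/time package itself is not included in this patch, so the following is only a sketch of the indirection such a package typically provides — an overridable clock that tests can pin — with the internal variable name being an assumption; only the Now()/MetaNow() shapes are evidenced by the call sites in the diff.

```go
// Sketch of utils/time (imported above under the timeutil alias). Only the
// Now()/MetaNow() call sites appear in this diff; the overridable clock
// variable below is an assumption about how such a helper is usually shaped.
package timeutil

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nowFn is the clock source. Production code leaves it as time.Now; a test in
// this package could swap it for a function returning a fixed time.
var nowFn = time.Now

// Now returns the current time from the overridable clock source, matching
// the timeutil.Now() call sites above.
func Now() time.Time {
	return nowFn()
}

// MetaNow wraps Now in a metav1.Time, matching the timeutil.MetaNow() call
// sites that replace metav1.Now().
func MetaNow() metav1.Time {
	return metav1.Time{Time: Now()}
}
```

With an indirection like this, a test pins the clock by reassigning the variable rather than patching time.Now at runtime, which is what allows the go-mpatch dependency to be dropped from list_test.go.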
diff --git a/pkg/kubectl-argo-rollouts/cmd/restart/restart.go b/pkg/kubectl-argo-rollouts/cmd/restart/restart.go index 1438360da1..c99ac12009 100644 --- a/pkg/kubectl-argo-rollouts/cmd/restart/restart.go +++ b/pkg/kubectl-argo-rollouts/cmd/restart/restart.go @@ -12,6 +12,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" clientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/typed/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -70,7 +71,7 @@ func NewCmdRestart(o *options.ArgoRolloutsOptions) *cobra.Command { func RestartRollout(rolloutIf clientset.RolloutInterface, name string, restartAt *time.Time) (*v1alpha1.Rollout, error) { ctx := context.TODO() if restartAt == nil { - t := time.Now().UTC() + t := timeutil.Now().UTC() restartAt = &t } patch := fmt.Sprintf(restartPatch, restartAt.Format(time.RFC3339)) diff --git a/pkg/kubectl-argo-rollouts/cmd/set/set_image.go b/pkg/kubectl-argo-rollouts/cmd/set/set_image.go index 4be9989c90..c1a66c935c 100644 --- a/pkg/kubectl-argo-rollouts/cmd/set/set_image.go +++ b/pkg/kubectl-argo-rollouts/cmd/set/set_image.go @@ -9,6 +9,7 @@ import ( k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -44,8 +45,10 @@ func NewCmdSetImage(o *options.ArgoRolloutsOptions) *cobra.Command { container := imageSplit[0] image := imageSplit[1] + var un *unstructured.Unstructured + var err error for attempt := 0; attempt < maxAttempts; attempt++ { - err := SetImage(o.DynamicClientset(), o.Namespace(), rollout, container, image) + un, err = SetImage(o.DynamicClientset(), o.Namespace(), rollout, container, image) if err != nil { if k8serr.IsConflict(err) && attempt < maxAttempts { continue @@ -54,33 +57,56 @@ func NewCmdSetImage(o *options.ArgoRolloutsOptions) *cobra.Command { } break } - fmt.Fprintf(o.Out, "rollout \"%s\" image updated\n", rollout) + fmt.Fprintf(o.Out, "%s \"%s\" image updated\n", strings.ToLower(un.GetKind()), un.GetName()) return nil }, } return cmd } +var deploymentGVR = schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", +} + // SetImage updates a rollout's container image // We use a dynamic clientset instead of a rollout clientset in order to allow an older plugin // to still work with a newer version of Rollouts (without dropping newly introduced fields during // the marshalling) -func SetImage(dynamicClient dynamic.Interface, namespace, rollout, container, image string) error { +func SetImage(dynamicClient dynamic.Interface, namespace, rollout, container, image string) (*unstructured.Unstructured, error) { ctx := context.TODO() rolloutIf := dynamicClient.Resource(v1alpha1.RolloutGVR).Namespace(namespace) ro, err := rolloutIf.Get(ctx, rollout, metav1.GetOptions{}) if err != nil { - return err + return nil, err } - newRo, err := newRolloutSetImage(ro, container, image) + workloadRef, ok, err := unstructured.NestedMap(ro.Object, "spec", "workloadRef") if err != nil { - return err + return nil, err } - _, err = rolloutIf.Update(ctx, newRo, metav1.UpdateOptions{}) - if err != nil { - return err + if ok { + deployIf := dynamicClient.Resource(deploymentGVR).Namespace(namespace) + deployName, ok := workloadRef["name"].(string) + if !ok { + 
return nil, fmt.Errorf("spec.workloadRef.name is not a string: %v", workloadRef["name"]) + } + deployUn, err := deployIf.Get(ctx, deployName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + newDeploy, err := newRolloutSetImage(deployUn, container, image) + if err != nil { + return nil, err + } + return deployIf.Update(ctx, newDeploy, metav1.UpdateOptions{}) + } else { + newRo, err := newRolloutSetImage(ro, container, image) + if err != nil { + return nil, err + } + return rolloutIf.Update(ctx, newRo, metav1.UpdateOptions{}) } - return nil } func newRolloutSetImage(orig *unstructured.Unstructured, container string, image string) (*unstructured.Unstructured, error) { diff --git a/pkg/kubectl-argo-rollouts/cmd/set/set_test.go b/pkg/kubectl-argo-rollouts/cmd/set/set_test.go index 7b116ac66e..3e2baeceed 100644 --- a/pkg/kubectl-argo-rollouts/cmd/set/set_test.go +++ b/pkg/kubectl-argo-rollouts/cmd/set/set_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -299,3 +300,84 @@ func TestSetImageConflict(t *testing.T) { assert.Empty(t, stderr) assert.True(t, updateCalls > 0) } + +func TestSetImageWorkloadRef(t *testing.T) { + ro := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.RolloutSpec{ + WorkloadRef: &v1alpha1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "guestbook", + }, + }, + } + deploy := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: metav1.NamespaceDefault, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "guestbook", + Image: "argoproj/rollouts-demo:blue", + }, + }, + Containers: []corev1.Container{ + { + Name: "foo", + Image: "alpine:3.8", + }, + { + Name: "guestbook", + Image: "argoproj/rollouts-demo:blue", + }, + { + Name: "bar", + Image: "alpine:3.8", + }, + }, + EphemeralContainers: []corev1.EphemeralContainer{ + { + EphemeralContainerCommon: corev1.EphemeralContainerCommon{ + Name: "guestbook", + Image: "argoproj/rollouts-demo:blue", + }, + }, + }, + }, + }, + }, + } + + tf, o := options.NewFakeArgoRolloutsOptions(&ro, &deploy) + defer tf.Cleanup() + + cmd := NewCmdSetImage(o) + cmd.PersistentPreRunE = o.PersistentPreRunE + cmd.SetArgs([]string{"guestbook", "guestbook=argoproj/rollouts-demo:NEWIMAGE"}) + err := cmd.Execute() + assert.NoError(t, err) + + newDeployUn, err := o.DynamicClientset().Resource(deploymentGVR).Namespace(ro.Namespace).Get(context.Background(), "guestbook", metav1.GetOptions{}) + assert.NoError(t, err) + var newDeploy appsv1.Deployment + err = runtime.DefaultUnstructuredConverter.FromUnstructured(newDeployUn.Object, &newDeploy) + assert.NoError(t, err) + assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", newDeploy.Spec.Template.Spec.Containers[1].Image) + assert.Equal(t, "alpine:3.8", newDeploy.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, "alpine:3.8", newDeploy.Spec.Template.Spec.Containers[2].Image) + assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", newDeploy.Spec.Template.Spec.InitContainers[0].Image) + assert.Equal(t, "argoproj/rollouts-demo:NEWIMAGE", newDeploy.Spec.Template.Spec.EphemeralContainers[0].Image) + + stdout := o.Out.(*bytes.Buffer).String() + stderr := o.ErrOut.(*bytes.Buffer).String() + 
assert.Equal(t, stdout, "deployment \"guestbook\" image updated\n") + assert.Empty(t, stderr) +} diff --git a/pkg/kubectl-argo-rollouts/cmd/status/status.go b/pkg/kubectl-argo-rollouts/cmd/status/status.go index fe7058f1d5..826debb207 100644 --- a/pkg/kubectl-argo-rollouts/cmd/status/status.go +++ b/pkg/kubectl-argo-rollouts/cmd/status/status.go @@ -72,7 +72,7 @@ func NewCmdStatus(o *options.ArgoRolloutsOptions) *cobra.Command { }) go controller.Run(ctx) statusOptions.WatchStatus(ctx.Done(), rolloutUpdates) - close(rolloutUpdates) + defer close(rolloutUpdates) // the final rollout info after timeout or reach Healthy or Degraded status ri, err = controller.GetRolloutInfo() @@ -98,7 +98,7 @@ func NewCmdStatus(o *options.ArgoRolloutsOptions) *cobra.Command { func (o *StatusOptions) WatchStatus(stopCh <-chan struct{}, rolloutUpdates <-chan *rollout.RolloutInfo) string { timeout := make(chan bool) var roInfo *rollout.RolloutInfo - var preventFlicker time.Time + var prevMessage string if o.Timeout != 0 { go func() { @@ -107,16 +107,25 @@ func (o *StatusOptions) WatchStatus(stopCh <-chan struct{}, rolloutUpdates <-cha }() } + printStatus := func(roInfo rollout.RolloutInfo) { + message := roInfo.Status + if roInfo.Message != "" { + message = fmt.Sprintf("%s - %s", roInfo.Status, roInfo.Message) + } + if message != prevMessage { + fmt.Fprintln(o.Out, message) + prevMessage = message + } + } + for { select { case roInfo = <-rolloutUpdates: - if roInfo != nil && roInfo.Status == "Healthy" || roInfo.Status == "Degraded" { - fmt.Fprintln(o.Out, roInfo.Status) - return roInfo.Status - } - if roInfo != nil && time.Now().After(preventFlicker.Add(200*time.Millisecond)) { - fmt.Fprintf(o.Out, "%s - %s\n", roInfo.Status, roInfo.Message) - preventFlicker = time.Now() + if roInfo != nil { + printStatus(*roInfo) + if roInfo.Status == "Healthy" || roInfo.Status == "Degraded" { + return roInfo.Status + } } case <-stopCh: return "" diff --git a/pkg/kubectl-argo-rollouts/cmd/status/status_test.go b/pkg/kubectl-argo-rollouts/cmd/status/status_test.go index 24c103d584..ddbee9b24e 100644 --- a/pkg/kubectl-argo-rollouts/cmd/status/status_test.go +++ b/pkg/kubectl-argo-rollouts/cmd/status/status_test.go @@ -120,7 +120,7 @@ func TestWatchAbortedRollout(t *testing.T) { assert.Error(t, err) stdout := o.Out.(*bytes.Buffer).String() stderr := o.ErrOut.(*bytes.Buffer).String() - assert.Equal(t, "Degraded\n", stdout) + assert.Equal(t, "Degraded - RolloutAborted: metric \"web\" assessed Failed due to failed (1) > failureLimit (0)\n", stdout) assert.Equal(t, "Error: The rollout is in a degraded state with message: RolloutAborted: metric \"web\" assessed Failed due to failed (1) > failureLimit (0)\n", stderr) } diff --git a/pkg/kubectl-argo-rollouts/info/analysisrun_info.go b/pkg/kubectl-argo-rollouts/info/analysisrun_info.go index cecc3f2cd9..9c60ccad87 100644 --- a/pkg/kubectl-argo-rollouts/info/analysisrun_info.go +++ b/pkg/kubectl-argo-rollouts/info/analysisrun_info.go @@ -26,32 +26,72 @@ func getAnalysisRunInfo(ownerUID types.UID, allAnalysisRuns []*v1alpha1.Analysis UID: run.UID, }, } + if run.Spec.Metrics != nil { + for _, metric := range run.Spec.Metrics { + + metrics := rollout.Metrics{ + Name: metric.Name, + SuccessCondition: metric.SuccessCondition, + } + + if metric.InconclusiveLimit != nil { + metrics.InconclusiveLimit = metric.InconclusiveLimit.IntVal + } else { + metrics.InconclusiveLimit = 0 + } + + if metric.Count != nil { + metrics.Count = metric.Count.IntVal + } else { + metrics.Count = 0 + } + + if 
metric.FailureLimit != nil { + metrics.FailureLimit = metric.FailureLimit.IntVal + } else { + metrics.FailureLimit = 0 + } + + arInfo.Metrics = append(arInfo.Metrics, &metrics) + } + } arInfo.Status = string(run.Status.Phase) for _, mr := range run.Status.MetricResults { arInfo.Successful += mr.Successful arInfo.Failed += mr.Failed arInfo.Inconclusive += mr.Inconclusive arInfo.Error += mr.Error - lastMeasurement := analysisutil.LastMeasurement(run, mr.Name) - if lastMeasurement != nil && lastMeasurement.Metadata != nil { - if jobName, ok := lastMeasurement.Metadata[job.JobNameKey]; ok { - jobInfo := rollout.JobInfo{ - ObjectMeta: &v1.ObjectMeta{ - Name: jobName, - }, - Icon: analysisIcon(lastMeasurement.Phase), - Status: string(lastMeasurement.Phase), + for _, measurement := range analysisutil.ArrayMeasurement(run, mr.Name) { + if measurement.Metadata != nil { + if jobName, ok := measurement.Metadata[job.JobNameKey]; ok { + jobInfo := rollout.JobInfo{ + ObjectMeta: &v1.ObjectMeta{ + Name: jobName, + }, + Icon: analysisIcon(measurement.Phase), + Status: string(measurement.Phase), + StartedAt: measurement.StartedAt, + MetricName: mr.Name, + } + if measurement.StartedAt != nil { + jobInfo.ObjectMeta.CreationTimestamp = *measurement.StartedAt + } + arInfo.Jobs = append(arInfo.Jobs, &jobInfo) } - if lastMeasurement.StartedAt != nil { - jobInfo.ObjectMeta.CreationTimestamp = *lastMeasurement.StartedAt + } else { + nonJobInfo := rollout.NonJobInfo{ + Value: measurement.Value, + Status: string(measurement.Phase), + StartedAt: measurement.StartedAt, + MetricName: mr.Name, } - arInfo.Jobs = append(arInfo.Jobs, &jobInfo) + arInfo.NonJobInfo = append(arInfo.NonJobInfo, &nonJobInfo) } + } } arInfo.Icon = analysisIcon(run.Status.Phase) - arInfo.Revision = int32(parseRevision(run.ObjectMeta.Annotations)) - + arInfo.Revision = int64(parseRevision(run.ObjectMeta.Annotations)) arInfos = append(arInfos, &arInfo) } sort.Slice(arInfos[:], func(i, j int) bool { diff --git a/pkg/kubectl-argo-rollouts/info/experiment_info.go b/pkg/kubectl-argo-rollouts/info/experiment_info.go index 1799577cc1..e9f6ffb272 100644 --- a/pkg/kubectl-argo-rollouts/info/experiment_info.go +++ b/pkg/kubectl-argo-rollouts/info/experiment_info.go @@ -31,7 +31,7 @@ func NewExperimentInfo( Message: exp.Status.Message, } expInfo.Icon = analysisIcon(exp.Status.Phase) - expInfo.Revision = int32(parseRevision(exp.ObjectMeta.Annotations)) + expInfo.Revision = int64(parseRevision(exp.ObjectMeta.Annotations)) expInfo.ReplicaSets = GetReplicaSetInfo(exp.UID, nil, allReplicaSets, allPods) expInfo.AnalysisRuns = getAnalysisRunInfo(exp.UID, allAnalysisRuns) return &expInfo diff --git a/pkg/kubectl-argo-rollouts/info/info.go b/pkg/kubectl-argo-rollouts/info/info.go index 01fd1ebb5c..92e2cef729 100644 --- a/pkg/kubectl-argo-rollouts/info/info.go +++ b/pkg/kubectl-argo-rollouts/info/info.go @@ -10,6 +10,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/annotations" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -28,6 +29,8 @@ const ( InfoTagStable = "stable" InfoTagActive = "active" InfoTagPreview = "preview" + InfoTagPing = "ping" + InfoTagPong = "pong" ) type Metadata v1.ObjectMeta @@ -38,7 +41,7 @@ type ImageInfo struct { } func Age(m v1.ObjectMeta) string { - return duration.HumanDuration(metav1.Now().Sub(m.CreationTimestamp.Time)) + return duration.HumanDuration(timeutil.MetaNow().Sub(m.CreationTimestamp.Time)) } func ownerRef(ownerRefs 
[]metav1.OwnerReference, uids []types.UID) *metav1.OwnerReference { diff --git a/pkg/kubectl-argo-rollouts/info/info_test.go b/pkg/kubectl-argo-rollouts/info/info_test.go index 6e61680565..d7bbe161fd 100644 --- a/pkg/kubectl-argo-rollouts/info/info_test.go +++ b/pkg/kubectl-argo-rollouts/info/info_test.go @@ -1,6 +1,7 @@ package info import ( + "strconv" "testing" "time" @@ -9,6 +10,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/info/testdata" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func TestAge(t *testing.T) { @@ -20,7 +22,7 @@ func TestAge(t *testing.T) { func TestCanaryRolloutInfo(t *testing.T) { rolloutObjs := testdata.NewCanaryRollout() - roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns) + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) assert.Equal(t, roInfo.ObjectMeta.Name, rolloutObjs.Rollouts[0].Name) assert.Len(t, Revisions(roInfo), 3) @@ -36,10 +38,52 @@ func TestCanaryRolloutInfo(t *testing.T) { }) } +func TestCanaryRolloutInfoWeights(t *testing.T) { + rolloutObjs := testdata.NewCanaryRollout() + + t.Run("TestActualWeightWithExistingWeight", func(t *testing.T) { + t.Run("will test that actual weight for info object is set from rollout status", func(t *testing.T) { + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[4], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) + actualWeightString := roInfo.ActualWeight + actualWeightStringInt32, err := strconv.ParseInt(actualWeightString, 10, 32) + if err != nil { + t.Error(err) + } + assert.Equal(t, rolloutObjs.Rollouts[4].Status.Canary.Weights.Canary.Weight, int32(actualWeightStringInt32)) + }) + }) + + t.Run("TestActualWeightWithoutExistingWeight", func(t *testing.T) { + t.Run("will test that actual weight is set to SetWeight when status field does not exist", func(t *testing.T) { + //This test has a no canary weight object in the status field so we fall back to using SetWeight value + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[5], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) + assert.Equal(t, roInfo.SetWeight, roInfo.ActualWeight) + }) + }) +} + +func TestPingPongCanaryRolloutInfo(t *testing.T) { + rolloutObjs := testdata.NewCanaryRollout() + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[3], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) + assert.Equal(t, roInfo.ObjectMeta.Name, rolloutObjs.Rollouts[3].Name) + assert.Len(t, Revisions(roInfo), 3) + + assert.Equal(t, Images(roInfo), []ImageInfo{ + { + Image: "argoproj/rollouts-demo:does-not-exist", + Tags: []string{InfoTagCanary, InfoTagPing}, + }, + { + Image: "argoproj/rollouts-demo:green", + Tags: []string{InfoTagStable, InfoTagPong}, + }, + }) +} + func TestBlueGreenRolloutInfo(t *testing.T) { { rolloutObjs := testdata.NewBlueGreenRollout() - roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns) + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) assert.Equal(t, roInfo.ObjectMeta.Name, rolloutObjs.Rollouts[0].Name) assert.Len(t, 
Revisions(roInfo), 3) @@ -63,10 +107,10 @@ func TestBlueGreenRolloutInfo(t *testing.T) { } { rolloutObjs := testdata.NewBlueGreenRollout() - inFourHours := metav1.Now().Add(4 * time.Hour).Truncate(time.Second).UTC().Format(time.RFC3339) + inFourHours := timeutil.Now().Add(4 * time.Hour).Truncate(time.Second).UTC().Format(time.RFC3339) rolloutObjs.ReplicaSets[0].Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inFourHours delayedRs := rolloutObjs.ReplicaSets[0].ObjectMeta.UID - roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns) + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) assert.Equal(t, roInfo.ReplicaSets[1].ObjectMeta.UID, delayedRs) assert.Equal(t, roInfo.ReplicaSets[1].ScaleDownDeadline, inFourHours) @@ -76,7 +120,7 @@ func TestBlueGreenRolloutInfo(t *testing.T) { func TestExperimentAnalysisRolloutInfo(t *testing.T) { rolloutObjs := testdata.NewExperimentAnalysisRollout() - roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns) + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) assert.Equal(t, roInfo.ObjectMeta.Name, rolloutObjs.Rollouts[0].Name) assert.Len(t, Revisions(roInfo), 2) @@ -114,14 +158,14 @@ func TestExperimentInfo(t *testing.T) { func TestRolloutStatusInvalidSpec(t *testing.T) { rolloutObjs := testdata.NewInvalidRollout() - roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns) + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) assert.Equal(t, "Degraded", roInfo.Status) assert.Equal(t, "InvalidSpec: The Rollout \"rollout-invalid\" is invalid: spec.template.metadata.labels: Invalid value: map[string]string{\"app\":\"doesnt-match\"}: `selector` does not match template `labels`", roInfo.Message) } func TestRolloutAborted(t *testing.T) { rolloutObjs := testdata.NewAbortedRollout() - roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns) + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) assert.Equal(t, "Degraded", roInfo.Status) assert.Equal(t, `RolloutAborted: metric "web" assessed Failed due to failed (1) > failureLimit (0)`, roInfo.Message) } diff --git a/pkg/kubectl-argo-rollouts/info/replicaset_info.go b/pkg/kubectl-argo-rollouts/info/replicaset_info.go index 44f148f0b5..362261daef 100644 --- a/pkg/kubectl-argo-rollouts/info/replicaset_info.go +++ b/pkg/kubectl-argo-rollouts/info/replicaset_info.go @@ -4,17 +4,17 @@ import ( "sort" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/duration" "github.com/argoproj/argo-rollouts/pkg/apiclient/rollout" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting" replicasetutil 
"github.com/argoproj/argo-rollouts/utils/replicaset" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func GetReplicaSetInfo(ownerUID types.UID, ro *v1alpha1.Rollout, allReplicaSets []*appsv1.ReplicaSet, allPods []*corev1.Pod) []*rollout.ReplicaSetInfo { @@ -36,17 +36,26 @@ func GetReplicaSetInfo(ownerUID types.UID, ro *v1alpha1.Rollout, allReplicaSets Available: rs.Status.AvailableReplicas, } rsInfo.Icon = replicaSetIcon(rsInfo.Status) - rsInfo.Revision = int32(parseRevision(rs.ObjectMeta.Annotations)) + rsInfo.Revision = int64(parseRevision(rs.ObjectMeta.Annotations)) rsInfo.Template = parseExperimentTemplateName(rs.ObjectMeta.Annotations) rsInfo.ScaleDownDeadline = parseScaleDownDeadline(rs.ObjectMeta.Annotations) if ro != nil { podTemplateHash := replicasetutil.GetPodTemplateHash(rs) if ro.Spec.Strategy.Canary != nil { + stableRsIsPing := trafficrouting.IsStablePing(ro) if ro.Status.StableRS == podTemplateHash { rsInfo.Stable = true + if trafficrouting.IsPingPongEnabled(ro) { + rsInfo.Ping = stableRsIsPing + rsInfo.Pong = !stableRsIsPing + } } else if ro.Status.CurrentPodHash == podTemplateHash { rsInfo.Canary = true + if trafficrouting.IsPingPongEnabled(ro) { + rsInfo.Ping = !stableRsIsPing + rsInfo.Pong = stableRsIsPing + } } } if ro.Spec.Strategy.BlueGreen != nil { @@ -121,7 +130,7 @@ func getReplicaSetCondition(status appsv1.ReplicaSetStatus, condType appsv1.Repl func ScaleDownDelay(rs rollout.ReplicaSetInfo) string { if deadline, err := time.Parse(time.RFC3339, rs.ScaleDownDeadline); err == nil { - now := metav1.Now().Time + now := timeutil.MetaNow().Time if deadline.Before(now) { return "passed" } diff --git a/pkg/kubectl-argo-rollouts/info/rollout_info.go b/pkg/kubectl-argo-rollouts/info/rollout_info.go index 08b2194e08..59ee3f076a 100644 --- a/pkg/kubectl-argo-rollouts/info/rollout_info.go +++ b/pkg/kubectl-argo-rollouts/info/rollout_info.go @@ -22,6 +22,7 @@ func NewRolloutInfo( allPods []*corev1.Pod, allExperiments []*v1alpha1.Experiment, allARs []*v1alpha1.AnalysisRun, + workloadRef *appsv1.Deployment, ) *rollout.RolloutInfo { roInfo := rollout.RolloutInfo{ @@ -64,7 +65,11 @@ func NewRolloutInfo( } } } else { - roInfo.ActualWeight = roInfo.SetWeight + if ro.Status.Canary.Weights != nil { + roInfo.ActualWeight = fmt.Sprintf("%d", ro.Status.Canary.Weights.Canary.Weight) + } else { + roInfo.ActualWeight = roInfo.SetWeight + } } } } else if ro.Spec.Strategy.BlueGreen != nil { @@ -75,9 +80,16 @@ func NewRolloutInfo( roInfo.Message = message roInfo.Icon = rolloutIcon(roInfo.Status) roInfo.Containers = []*rollout.ContainerInfo{} - for c := range ro.Spec.Template.Spec.Containers { - curContainer := ro.Spec.Template.Spec.Containers[c] - roInfo.Containers = append(roInfo.Containers, &rollout.ContainerInfo{Name: curContainer.Name, Image: curContainer.Image}) + + var containerList []corev1.Container + if workloadRef != nil { + containerList = workloadRef.Spec.Template.Spec.Containers + } else { + containerList = ro.Spec.Template.Spec.Containers + } + + for _, c := range containerList { + roInfo.Containers = append(roInfo.Containers, &rollout.ContainerInfo{Name: c.Name, Image: c.Image}) } if ro.Status.RestartedAt != nil { @@ -139,6 +151,12 @@ func Images(r *rollout.RolloutInfo) []ImageInfo { if rsInfo.Preview { newImage.Tags = append(newImage.Tags, InfoTagPreview) } + if rsInfo.Ping { + newImage.Tags = append(newImage.Tags, InfoTagPing) + } + if rsInfo.Pong { + newImage.Tags = append(newImage.Tags, InfoTagPong) + } images = mergeImageAndTags(newImage, images) } } @@ -211,7 
+229,7 @@ func Revisions(r *rollout.RolloutInfo) []int { func ReplicaSetsByRevision(r *rollout.RolloutInfo, rev int) []*rollout.ReplicaSetInfo { var replicaSets []*rollout.ReplicaSetInfo for _, rs := range r.ReplicaSets { - if rs.Revision == int32(rev) { + if rs.Revision == int64(rev) { replicaSets = append(replicaSets, rs) } } diff --git a/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout4.yaml b/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout4.yaml new file mode 100644 index 0000000000..6ec2f0795a --- /dev/null +++ b/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout4.yaml @@ -0,0 +1,86 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "31" + creationTimestamp: "2019-10-25T06:07:18Z" + generation: 429 + labels: + app: canary-demo + app.kubernetes.io/instance: jesse-test + name: canary-demo-pingpong + namespace: jesse-test + resourceVersion: "28253567" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/jesse-test/rollouts/canary-demo + uid: b350ba76-f6ed-11e9-a15b-42010aa80033 +spec: + progressDeadlineSeconds: 30 + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + pingPong: + pingService: ping-service + pongService: pong-service + steps: + - setWeight: 20 + - pause: {} + - setWeight: 40 + - pause: + duration: 10s + - setWeight: 60 + - pause: + duration: 10s + - setWeight: 80 + - pause: + duration: 10s + template: + metadata: + creationTimestamp: null + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:does-not-exist + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 6 + availableReplicas: 5 + blueGreen: {} + canary: { + stablePingPong: pong + } + stableRS: 877894d5b + conditions: + - lastTransitionTime: "2019-10-25T06:07:29Z" + lastUpdateTime: "2019-10-25T06:07:29Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2019-10-28T04:52:55Z" + lastUpdateTime: "2019-10-28T04:52:55Z" + message: ReplicaSet "canary-demo-65fb5ffc84" has timed out progressing. 
+ reason: ProgressDeadlineExceeded + status: "False" + type: Progressing + currentPodHash: 65fb5ffc84 + currentStepHash: f64cdc9d + currentStepIndex: 0 + observedGeneration: "429" + readyReplicas: 5 + replicas: 6 + selector: app=canary-demo + updatedReplicas: 1 diff --git a/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout5.yaml b/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout5.yaml new file mode 100644 index 0000000000..2ea2c4436a --- /dev/null +++ b/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout5.yaml @@ -0,0 +1,96 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "31" + creationTimestamp: "2019-10-25T06:07:18Z" + generation: 429 + labels: + app: canary-demo-weights + app.kubernetes.io/instance: jesse-test + name: canary-demo-weights + namespace: jesse-test + resourceVersion: "28253567" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/jesse-test/rollouts/canary-demo-weights + uid: b350ba76-f6ed-11e9-a15b-42010aa80033 +spec: + progressDeadlineSeconds: 30 + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo-weights + strategy: + canary: + canaryService: canary-demo-preview + stableService: canary-demo-stable + trafficRouting: + smi: + rootService: root-svc # optional + trafficSplitName: rollout-example-traffic-split # optional + steps: + - setWeight: 20 + - pause: {} + - setWeight: 40 + - pause: + duration: 10s + - setWeight: 60 + - pause: + duration: 10s + - setWeight: 80 + - pause: + duration: 10s + template: + metadata: + creationTimestamp: null + labels: + app: canary-demo-weights + spec: + containers: + - image: argoproj/rollouts-demo:does-not-exist + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 6 + availableReplicas: 5 + blueGreen: {} + canary: + weights: + canary: + podTemplateHash: 868d98998a + serviceName: canary-demo + weight: 20 + stable: + podTemplateHash: 877894d5b + serviceName: canary-demo + weight: 60 + stableRS: 877894d5b + conditions: + - lastTransitionTime: "2019-10-25T06:07:29Z" + lastUpdateTime: "2019-10-25T06:07:29Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2019-10-28T04:52:55Z" + lastUpdateTime: "2019-10-28T04:52:55Z" + message: ReplicaSet "canary-demo-65fb5ffc84" has timed out progressing. 
+ reason: ProgressDeadlineExceeded + status: "False" + type: Progressing + currentPodHash: 65fb5ffc84 + currentStepHash: f64cdc9d + currentStepIndex: 0 + observedGeneration: "429" + readyReplicas: 5 + replicas: 6 + selector: app=canary-demo-weights + updatedReplicas: 1 diff --git a/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout6.yaml b/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout6.yaml new file mode 100644 index 0000000000..54edf27937 --- /dev/null +++ b/pkg/kubectl-argo-rollouts/info/testdata/canary/canary-rollout6.yaml @@ -0,0 +1,87 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "31" + creationTimestamp: "2019-10-25T06:07:18Z" + generation: 429 + labels: + app: canary-demo-weights-na + app.kubernetes.io/instance: jesse-test + name: canary-demo-weights-na + namespace: jesse-test + resourceVersion: "28253567" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/jesse-test/rollouts/canary-demo-weights-na + uid: b350ba76-f6ed-11e9-a15b-42010aa80033 +spec: + progressDeadlineSeconds: 30 + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo-weights-na + strategy: + canary: + canaryService: canary-demo-preview + stableService: canary-demo-stable + trafficRouting: + smi: + rootService: root-svc # optional + trafficSplitName: rollout-example-traffic-split # optional + steps: + - setWeight: 20 + - pause: {} + - setWeight: 40 + - pause: + duration: 10s + - setWeight: 60 + - pause: + duration: 10s + - setWeight: 80 + - pause: + duration: 10s + template: + metadata: + creationTimestamp: null + labels: + app: canary-demo-weights-na + spec: + containers: + - image: argoproj/rollouts-demo:does-not-exist + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 6 + availableReplicas: 5 + blueGreen: {} + canary: {} + stableRS: 877894d5b + conditions: + - lastTransitionTime: "2019-10-25T06:07:29Z" + lastUpdateTime: "2019-10-25T06:07:29Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2019-10-28T04:52:55Z" + lastUpdateTime: "2019-10-28T04:52:55Z" + message: ReplicaSet "canary-demo-65fb5ffc84" has timed out progressing. 
+ reason: ProgressDeadlineExceeded + status: "False" + type: Progressing + currentPodHash: 65fb5ffc84 + currentStepHash: f64cdc9d + currentStepIndex: 0 + observedGeneration: "429" + readyReplicas: 5 + replicas: 6 + selector: app=canary-demo-weights-na + updatedReplicas: 1 diff --git a/pkg/kubectl-argo-rollouts/info/testdata/experiment-analysis/canary-analysis.yaml b/pkg/kubectl-argo-rollouts/info/testdata/experiment-analysis/canary-analysis.yaml index f27fa93e14..c792eca60d 100644 --- a/pkg/kubectl-argo-rollouts/info/testdata/experiment-analysis/canary-analysis.yaml +++ b/pkg/kubectl-argo-rollouts/info/testdata/experiment-analysis/canary-analysis.yaml @@ -12,88 +12,90 @@ metadata: name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr namespace: jesse-test ownerReferences: - - apiVersion: argoproj.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: Rollout - name: rollout-experiment-analysis - uid: a17d1089-fae6-11e9-a15b-42010aa80033 + - apiVersion: argoproj.io/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Rollout + name: rollout-experiment-analysis + uid: a17d1089-fae6-11e9-a15b-42010aa80033 resourceVersion: "29424124" selfLink: /apis/argoproj.io/v1alpha1/namespaces/jesse-test/analysisruns/rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr uid: e50bceec-fae6-11e9-a15b-42010aa80033 spec: metrics: - - interval: 10s - failureLimit: 5 - name: random-fail - provider: - job: - metadata: - creationTimestamp: null - spec: - backoffLimit: 0 - template: - metadata: - creationTimestamp: null - spec: - containers: - - args: - - FLIP=$(($(($RANDOM%10))%2)) && exit $FLIP - command: - - sh - - -c - image: alpine:3.8 - name: sleep - resources: {} - restartPolicy: Never + - interval: 10s + count: 10 + failureLimit: 5 + inconclusiveLimit: 1 + name: random-fail + provider: + job: + metadata: + creationTimestamp: null + spec: + backoffLimit: 0 + template: + metadata: + creationTimestamp: null + spec: + containers: + - args: + - FLIP=$(($(($RANDOM%10))%2)) && exit $FLIP + command: + - sh + - -c + image: alpine:3.8 + name: sleep + resources: {} + restartPolicy: Never status: metricResults: - - count: 8 - error: 1 - failed: 4 - inconclusive: 1 - measurements: - - finishedAt: "2019-10-30T07:28:40Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rzl6lt - startedAt: "2019-10-30T07:28:38Z" - phase: Failed - - finishedAt: "2019-10-30T07:28:53Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-r8lqpd - startedAt: "2019-10-30T07:28:50Z" - phase: Successful - - finishedAt: "2019-10-30T07:29:05Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rjjsgg - startedAt: "2019-10-30T07:29:03Z" - phase: Successful - - finishedAt: "2019-10-30T07:29:17Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rrnfj5 - startedAt: "2019-10-30T07:29:15Z" - phase: Failed - - finishedAt: "2019-10-30T07:29:29Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rx5kqk - startedAt: "2019-10-30T07:29:27Z" - phase: Failed - - finishedAt: "2019-10-30T07:29:41Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rp894b - startedAt: "2019-10-30T07:29:39Z" - phase: Successful - - finishedAt: "2019-10-30T07:29:53Z" - metadata: - job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rmngtj - startedAt: "2019-10-30T07:29:51Z" - phase: Failed - - finishedAt: "2019-10-30T16:13:40Z" - metadata: - job-name: 
rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rsxm69 - startedAt: "2019-10-30T16:13:38Z" - phase: Successful - name: random-fail - phase: Inconclusive - successful: 4 + - count: 8 + error: 1 + failed: 4 + inconclusive: 1 + measurements: + - finishedAt: "2019-10-30T07:28:40Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rzl6lt + startedAt: "2019-10-30T07:28:38Z" + phase: Failed + - finishedAt: "2019-10-30T07:28:53Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-r8lqpd + startedAt: "2019-10-30T07:28:50Z" + phase: Successful + - finishedAt: "2019-10-30T07:29:05Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rjjsgg + startedAt: "2019-10-30T07:29:03Z" + phase: Successful + - finishedAt: "2019-10-30T07:29:17Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rrnfj5 + startedAt: "2019-10-30T07:29:15Z" + phase: Failed + - finishedAt: "2019-10-30T07:29:29Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rx5kqk + startedAt: "2019-10-30T07:29:27Z" + phase: Failed + - finishedAt: "2019-10-30T07:29:41Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rp894b + startedAt: "2019-10-30T07:29:39Z" + phase: Successful + - finishedAt: "2019-10-30T07:29:53Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rmngtj + startedAt: "2019-10-30T07:29:51Z" + phase: Failed + - finishedAt: "2019-10-30T16:13:40Z" + metadata: + job-name: rollout-experiment-analysis-random-fail-6f646bf7b7-skqcr-rsxm69 + startedAt: "2019-10-30T16:13:38Z" + phase: Successful + name: random-fail + phase: Inconclusive + successful: 4 phase: Inconclusive diff --git a/pkg/kubectl-argo-rollouts/options/fake/fakeoptions.go b/pkg/kubectl-argo-rollouts/options/fake/fakeoptions.go index 2379897d33..17c02fffd9 100644 --- a/pkg/kubectl-argo-rollouts/options/fake/fakeoptions.go +++ b/pkg/kubectl-argo-rollouts/options/fake/fakeoptions.go @@ -3,15 +3,17 @@ package options import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" dynamicfake "k8s.io/client-go/dynamic/fake" k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" fakeroclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options" - "k8s.io/client-go/kubernetes/scheme" ) // NewFakeArgoRolloutsOptions returns a options.ArgoRolloutsOptions suitable for testing @@ -71,6 +73,14 @@ func NewFakeArgoRolloutsOptions(obj ...runtime.Object) (*cmdtesting.TestFactory, if err != nil { panic(err) } - o.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, allObjs...) 
+ listMapping := map[schema.GroupVersionResource]string{ + v1alpha1.RolloutGVR: rollouts.RolloutKind + "List", + v1alpha1.AnalysisTemplateGVR: rollouts.AnalysisTemplateKind + "List", + v1alpha1.AnalysisRunGVR: rollouts.AnalysisRunKind + "List", + v1alpha1.ExperimentGVR: rollouts.ExperimentKind + "List", + v1alpha1.ClusterAnalysisTemplateGVR: rollouts.ClusterAnalysisTemplateKind + "List", + } + + o.DynamicClient = dynamicfake.NewSimpleDynamicClientWithCustomListKinds(scheme.Scheme, listMapping, allObjs...) return tf, o } diff --git a/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go b/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go index 247ca64b39..157f148d5f 100644 --- a/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go +++ b/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go @@ -8,6 +8,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/queue" log "github.com/sirupsen/logrus" + v1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" @@ -39,6 +40,7 @@ type viewController struct { rolloutLister rolloutlisters.RolloutNamespaceLister experimentLister rolloutlisters.ExperimentNamespaceLister analysisRunLister rolloutlisters.AnalysisRunNamespaceLister + deploymentLister appslisters.DeploymentNamespaceLister cacheSyncs []cache.InformerSynced @@ -100,6 +102,7 @@ func newViewController(namespace string, name string, kubeClient kubernetes.Inte rolloutLister: rolloutsInformerFactory.Argoproj().V1alpha1().Rollouts().Lister().Rollouts(namespace), experimentLister: rolloutsInformerFactory.Argoproj().V1alpha1().Experiments().Lister().Experiments(namespace), analysisRunLister: rolloutsInformerFactory.Argoproj().V1alpha1().AnalysisRuns().Lister().AnalysisRuns(namespace), + deploymentLister: kubeInformerFactory.Apps().V1().Deployments().Lister().Deployments(namespace), workqueue: workqueue.NewRateLimitingQueue(queue.DefaultArgoRolloutsRateLimiter()), } @@ -144,6 +147,7 @@ func (c *viewController) Run(ctx context.Context) error { } }, time.Second, ctx.Done()) <-ctx.Done() + c.DeregisterCallbacks() return nil } @@ -168,6 +172,10 @@ func (c *viewController) processNextWorkItem() bool { return true } +func (c *viewController) DeregisterCallbacks() { + c.callbacks = nil +} + func (c *RolloutViewController) GetRolloutInfo() (*rollout.RolloutInfo, error) { ro, err := c.rolloutLister.Get(c.name) if err != nil { @@ -194,7 +202,15 @@ func (c *RolloutViewController) GetRolloutInfo() (*rollout.RolloutInfo, error) { return nil, err } - roInfo := info.NewRolloutInfo(ro, allReplicaSets, allPods, allExps, allAnalysisRuns) + var workloadRef *v1.Deployment + if ro.Spec.WorkloadRef != nil { + workloadRef, err = c.deploymentLister.Get(ro.Spec.WorkloadRef.Name) + if err != nil { + return nil, err + } + } + + roInfo := info.NewRolloutInfo(ro, allReplicaSets, allPods, allExps, allAnalysisRuns, workloadRef) return roInfo, nil } diff --git a/pkg/signals/signal_posix.go b/pkg/signals/signal_posix.go index 808c4489ee..56184b9a3e 100644 --- a/pkg/signals/signal_posix.go +++ b/pkg/signals/signal_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package signals diff --git a/rollout/analysis.go b/rollout/analysis.go index e495b07656..339aa3fec6 100644 --- a/rollout/analysis.go +++ b/rollout/analysis.go @@ -347,7 +347,11 @@ func (c *rolloutContext) reconcileBackgroundAnalysisRun() (*v1alpha1.AnalysisRun } func (c *rolloutContext) createAnalysisRun(rolloutAnalysis *v1alpha1.RolloutAnalysis, infix string, labels 
map[string]string) (*v1alpha1.AnalysisRun, error) { - args := analysisutil.BuildArgumentsForRolloutAnalysisRun(rolloutAnalysis.Args, c.stableRS, c.newRS, c.rollout) + args, err := analysisutil.BuildArgumentsForRolloutAnalysisRun(rolloutAnalysis.Args, c.stableRS, c.newRS, c.rollout) + if err != nil { + return nil, err + } + podHash := replicasetutil.GetPodTemplateHash(c.newRS) if podHash == "" { return nil, fmt.Errorf("Latest ReplicaSet '%s' has no pod hash in the labels", c.newRS.Name) @@ -448,7 +452,7 @@ func (c *rolloutContext) newAnalysisRunFromRollout(rolloutAnalysis *v1alpha1.Rol } } - run, err = analysisutil.NewAnalysisRunFromTemplates(templates, clusterTemplates, args, name, "", c.rollout.Namespace) + run, err = analysisutil.NewAnalysisRunFromTemplates(templates, clusterTemplates, args, rolloutAnalysis.DryRun, rolloutAnalysis.MeasurementRetention, name, "", c.rollout.Namespace) if err != nil { return nil, err } diff --git a/rollout/analysis_test.go b/rollout/analysis_test.go index 2678c26b1a..46fe961fec 100644 --- a/rollout/analysis_test.go +++ b/rollout/analysis_test.go @@ -8,11 +8,13 @@ import ( "testing" "time" + "github.com/argoproj/argo-rollouts/utils/hash" + timeutil "github.com/argoproj/argo-rollouts/utils/time" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/controller" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -31,6 +33,12 @@ func analysisTemplate(name string) *v1alpha1.AnalysisTemplate { Metrics: []v1alpha1.Metric{{ Name: "example", }}, + DryRun: []v1alpha1.DryRun{{ + MetricName: "example", + }}, + MeasurementRetention: []v1alpha1.MeasurementRetention{{ + MetricName: "example", + }}, }, } } @@ -50,7 +58,7 @@ func clusterAnalysisTemplate(name string) *v1alpha1.ClusterAnalysisTemplate { func clusterAnalysisRun(cat *v1alpha1.ClusterAnalysisTemplate, analysisRunType string, r *v1alpha1.Rollout) *v1alpha1.AnalysisRun { labels := map[string]string{} - podHash := controller.ComputeHash(&r.Spec.Template, r.Status.CollisionCount) + podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) var name string if analysisRunType == v1alpha1.RolloutTypeStepLabel { labels = analysisutil.StepLabels(*r.Status.CurrentStepIndex, podHash, "") @@ -81,7 +89,7 @@ func clusterAnalysisRun(cat *v1alpha1.ClusterAnalysisTemplate, analysisRunType s func analysisRun(at *v1alpha1.AnalysisTemplate, analysisRunType string, r *v1alpha1.Rollout) *v1alpha1.AnalysisRun { labels := map[string]string{} - podHash := controller.ComputeHash(&r.Spec.Template, r.Status.CollisionCount) + podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) var name string if analysisRunType == v1alpha1.RolloutTypeStepLabel { labels = analysisutil.StepLabels(*r.Status.CurrentStepIndex, podHash, "") @@ -104,8 +112,10 @@ func analysisRun(at *v1alpha1.AnalysisTemplate, analysisRunType string, r *v1alp OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(r, controllerKind)}, }, Spec: v1alpha1.AnalysisRunSpec{ - Metrics: at.Spec.Metrics, - Args: at.Spec.Args, + Metrics: at.Spec.Metrics, + DryRun: at.Spec.DryRun, + MeasurementRetention: at.Spec.MeasurementRetention, + Args: at.Spec.Args, }, } } @@ -143,6 +153,8 @@ func TestCreateBackgroundAnalysisRun(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) 
conditions.SetRolloutCondition(&r2.Status, availableCondition) + completeCond, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completeCond) f.rolloutLister = append(f.rolloutLister, r2) f.analysisTemplateLister = append(f.analysisTemplateLister, at) @@ -202,6 +214,8 @@ func TestCreateBackgroundAnalysisRunWithTemplates(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completeCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completeCondition) f.rolloutLister = append(f.rolloutLister, r2) f.analysisTemplateLister = append(f.analysisTemplateLister, at) @@ -262,6 +276,8 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplates(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) f.rolloutLister = append(f.rolloutLister, r2) f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, cat) @@ -371,6 +387,8 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplate(t *testing.T conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) f.rolloutLister = append(f.rolloutLister, r2) f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, cat) @@ -436,6 +454,8 @@ func TestCreateAnalysisRunWithCollision(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) ar.Status.Phase = v1alpha1.AnalysisPhaseFailed @@ -504,6 +524,8 @@ func TestCreateAnalysisRunWithCollisionAndSemanticEquality(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) f.rolloutLister = append(f.rolloutLister, r2) f.analysisRunLister = append(f.analysisRunLister, ar) @@ -563,6 +585,8 @@ func TestCreateAnalysisRunOnAnalysisStep(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) f.rolloutLister = append(f.rolloutLister, r2) f.analysisTemplateLister = append(f.analysisTemplateLister, at) @@ -733,7 +757,7 @@ func TestDoNothingWithAnalysisRunsWhileBackgroundAnalysisRunRunning(t *testing.T SetWeight: pointer.Int32Ptr(10), }} - r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), 
intstr.FromInt(1), intstr.FromInt(1)) r2 := bumpVersion(r1) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ @@ -758,6 +782,8 @@ func TestDoNothingWithAnalysisRunsWhileBackgroundAnalysisRunRunning(t *testing.T conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Canary.CurrentBackgroundAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ Name: ar.Name, Status: v1alpha1.AnalysisPhaseRunning, @@ -790,7 +816,7 @@ func TestDoNothingWhileStepBasedAnalysisRunRunning(t *testing.T) { }, }} - r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(1), intstr.FromInt(1)) r2 := bumpVersion(r1) ar := analysisRun(at, v1alpha1.RolloutTypeStepLabel, r2) ar.Status.Phase = v1alpha1.AnalysisPhaseRunning @@ -806,6 +832,8 @@ func TestDoNothingWhileStepBasedAnalysisRunRunning(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Canary.CurrentStepAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ Name: ar.Name, Status: v1alpha1.AnalysisPhaseRunning, @@ -856,6 +884,8 @@ func TestCancelOlderAnalysisRuns(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Canary.CurrentStepAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ Name: ar.Name, Status: "", @@ -923,6 +953,8 @@ func TestDeleteAnalysisRunsWithNoMatchingRS(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Canary.CurrentStepAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ Name: ar.Name, } @@ -977,7 +1009,7 @@ func TestDeleteAnalysisRunsAfterRSDelete(t *testing.T) { arToDelete.Spec.Terminate = true arAlreadyDeleted := arToDelete.DeepCopy() arAlreadyDeleted.Name = "already-deleted-analysis-run" - now := metav1.Now() + now := timeutil.MetaNow() arAlreadyDeleted.DeletionTimestamp = &now r3 = updateCanaryRolloutStatus(r3, rs2PodHash, 1, 0, 1, false) @@ -1049,7 +1081,7 @@ func TestIncrementStepAfterSuccessfulAnalysisRun(t *testing.T) { "conditions": %s } }` - condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "") + condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition)), patch) } @@ -1065,7 +1097,7 @@ func TestPausedOnInconclusiveBackgroundAnalysisRun(t *testing.T) { {SetWeight: pointer.Int32Ptr(30)}, } - r1 := 
newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(1), intstr.FromInt(1)) r2 := bumpVersion(r1) ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ @@ -1100,7 +1132,7 @@ func TestPausedOnInconclusiveBackgroundAnalysisRun(t *testing.T) { patchIndex := f.expectPatchRolloutAction(r2) f.run(getKey(r2, t)) patch := f.getPatchedRollout(patchIndex) - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) expectedPatch := `{ "status": { "conditions": %s, @@ -1118,7 +1150,7 @@ func TestPausedOnInconclusiveBackgroundAnalysisRun(t *testing.T) { "message": "%s" } }` - condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, v1alpha1.PauseReasonInconclusiveAnalysis, now, v1alpha1.PauseReasonInconclusiveAnalysis)), patch) } @@ -1164,7 +1196,7 @@ func TestPausedStepAfterInconclusiveAnalysisRun(t *testing.T) { patchIndex := f.expectPatchRolloutAction(r2) f.run(getKey(r2, t)) patch := f.getPatchedRollout(patchIndex) - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) expectedPatch := `{ "status": { "conditions": %s, @@ -1182,7 +1214,7 @@ func TestPausedStepAfterInconclusiveAnalysisRun(t *testing.T) { "message": "%s" } }` - condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, v1alpha1.PauseReasonInconclusiveAnalysis, now, v1alpha1.PauseReasonInconclusiveAnalysis)), patch) } @@ -1246,9 +1278,9 @@ func TestErrorConditionAfterErrorAnalysisRunStep(t *testing.T) { "message": "RolloutAborted: %s" } }` - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2) + ": " + ar.Status.Message - condition := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, errmsg) + condition := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, errmsg, false) expectedPatch = fmt.Sprintf(expectedPatch, condition, now, errmsg) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) } @@ -1323,9 +1355,9 @@ func TestErrorConditionAfterErrorAnalysisRunBackground(t *testing.T) { } }` errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2) - condition := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "") + condition := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false) - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.Now().UTC().Format(time.RFC3339) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, now, errmsg)), patch) } @@ -1376,7 +1408,7 @@ func TestCancelAnalysisRunsWhenAborted(t *testing.T) { assert.True(t, f.verifyPatchedAnalysisRun(cancelOldAr, olderAr)) assert.True(t, f.verifyPatchedAnalysisRun(cancelCurrentAr, ar)) patch := f.getPatchedRollout(patchIndex) - newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "") 
+ newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false) expectedPatch := `{ "status": { "conditions": %s, @@ -1386,7 +1418,7 @@ func TestCancelAnalysisRunsWhenAborted(t *testing.T) { } }` errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2) - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.Now().UTC().Format(time.RFC3339) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, newConditions, now, errmsg)), patch) } @@ -1445,7 +1477,7 @@ func TestDoNotCreateBackgroundAnalysisRunAfterInconclusiveRun(t *testing.T) { {SetWeight: pointer.Int32Ptr(10)}, } - r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(1), intstr.FromInt(1)) r2 := bumpVersion(r1) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ @@ -1465,7 +1497,7 @@ func TestDoNotCreateBackgroundAnalysisRunAfterInconclusiveRun(t *testing.T) { r2.Status.PauseConditions = []v1alpha1.PauseCondition{{ Reason: v1alpha1.PauseReasonInconclusiveAnalysis, - StartTime: metav1.Now(), + StartTime: timeutil.MetaNow(), }} r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 1, 0, 1, false) @@ -1478,6 +1510,9 @@ func TestDoNotCreateBackgroundAnalysisRunAfterInconclusiveRun(t *testing.T) { availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + f.rolloutLister = append(f.rolloutLister, r2) f.analysisTemplateLister = append(f.analysisTemplateLister, at) f.objects = append(f.objects, r2, at) @@ -1576,13 +1611,16 @@ func TestCreatePrePromotionAnalysisRun(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + previewSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} previewSvc := newService("preview", 80, previewSelector, r2) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} @@ -1640,7 +1678,7 @@ func TestDoNotCreatePrePromotionAnalysisAfterPromotionRollout(t *testing.T) { f.analysisTemplateLister = append(f.analysisTemplateLister, at) f.objects = append(f.objects, at) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 1, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 1, 1, false, true, true) r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) f.rolloutLister = append(f.rolloutLister, r2) @@ -1651,7 +1689,7 @@ func TestDoNotCreatePrePromotionAnalysisAfterPromotionRollout(t *testing.T) { f.run(getKey(r2, t)) - newConditions := generateConditionsPatchWithComplete(true, conditions.NewRSAvailableReason, 
rs2, true, "", true) + newConditions := generateConditionsPatchWithHealthy(true, conditions.NewRSAvailableReason, rs2, true, "", true, true) expectedPatch := fmt.Sprintf(`{ "status":{ "conditions":%s @@ -1713,7 +1751,7 @@ func TestDoNotCreatePrePromotionAnalysisRunOnNotReadyReplicaSet(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 2, 2, 4, 2, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 2, 2, 4, 2, false, true, false) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -1756,7 +1794,7 @@ func TestRolloutPrePromotionAnalysisBecomesInconclusive(t *testing.T) { rs2 := newReplicaSetWithStatus(r2, 1, 1) rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) r2.Status.BlueGreen.PrePromotionAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ Name: ar.Name, Status: v1alpha1.AnalysisPhaseRunning, @@ -1767,6 +1805,9 @@ func TestRolloutPrePromotionAnalysisBecomesInconclusive(t *testing.T) { pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -1781,7 +1822,7 @@ func TestRolloutPrePromotionAnalysisBecomesInconclusive(t *testing.T) { patchIndex := f.expectPatchRolloutActionWithPatch(r2, OnlyObservedGenerationPatch) f.run(getKey(r2, t)) patch := f.getPatchedRollout(patchIndex) - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) expectedPatch := fmt.Sprintf(`{ "status": { "pauseConditions":[ @@ -1824,7 +1865,7 @@ func TestRolloutPrePromotionAnalysisSwitchServiceAfterSuccess(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) r2.Status.BlueGreen.PrePromotionAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ Name: ar.Name, Status: v1alpha1.AnalysisPhaseRunning, @@ -1893,8 +1934,8 @@ func TestRolloutPrePromotionAnalysisHonorAutoPromotionSeconds(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) - now := metav1.NewTime(metav1.Now().Add(-10 * time.Second)) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) + now := metav1.NewTime(timeutil.MetaNow().Add(-10 * time.Second)) r2.Status.PauseConditions[0].StartTime = now progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) @@ -1956,10 +1997,10 @@ func 
TestRolloutPrePromotionAnalysisDoNothingOnInconclusiveAnalysis(t *testing.T rs2 := newReplicaSetWithStatus(r2, 1, 1) rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) inconclusivePauseCondition := v1alpha1.PauseCondition{ Reason: v1alpha1.PauseReasonInconclusiveAnalysis, - StartTime: metav1.Now(), + StartTime: timeutil.MetaNow(), } r2.Status.PauseConditions = append(r2.Status.PauseConditions, inconclusivePauseCondition) r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) @@ -2011,12 +2052,15 @@ func TestAbortRolloutOnErrorPrePromotionAnalysis(t *testing.T) { rs2 := newReplicaSetWithStatus(r2, 1, 1) rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Phase, r2.Status.Message = rolloututil.CalculateRolloutPhase(r2.Spec, r2.Status) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} @@ -2049,7 +2093,7 @@ func TestAbortRolloutOnErrorPrePromotionAnalysis(t *testing.T) { "message": "%s: %s" } }` - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) progressingFalseAborted, _ := newProgressingCondition(conditions.RolloutAbortedReason, r2, "") newConditions := updateConditionsPatch(*r2, progressingFalseAborted) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, newConditions, conditions.RolloutAbortedReason, progressingFalseAborted.Message)), patch) @@ -2073,7 +2117,7 @@ func TestCreatePostPromotionAnalysisRun(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -2126,11 +2170,14 @@ func TestRolloutPostPromotionAnalysisSuccess(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 1, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 1, 1, false, true, false) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} activeSvc := newService("active", 80, activeSelector, r2) + cond, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r2.Status, cond) + f.objects = append(f.objects, r2, at, ar) f.kubeobjects = append(f.kubeobjects, activeSvc, rs1, rs2) f.rolloutLister = append(f.rolloutLister, r2) @@ -2180,10 +2227,10 @@ func 
TestPostPromotionAnalysisRunHandleInconclusive(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) r2.Status.PauseConditions = []v1alpha1.PauseCondition{{ Reason: v1alpha1.PauseReasonInconclusiveAnalysis, - StartTime: metav1.Now(), + StartTime: timeutil.MetaNow(), }} progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) @@ -2191,6 +2238,9 @@ func TestPostPromotionAnalysisRunHandleInconclusive(t *testing.T) { pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -2241,13 +2291,16 @@ func TestAbortRolloutOnErrorPostPromotionAnalysis(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -2278,7 +2331,7 @@ func TestAbortRolloutOnErrorPostPromotionAnalysis(t *testing.T) { "message": "%s: %s" } }` - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) progressingFalseAborted, _ := newProgressingCondition(conditions.RolloutAbortedReason, r2, "") newConditions := updateConditionsPatch(*r2, progressingFalseAborted) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, newConditions, conditions.RolloutAbortedReason, progressingFalseAborted.Message)), patch) diff --git a/rollout/bluegreen.go b/rollout/bluegreen.go index ef6df170ee..afcad812eb 100644 --- a/rollout/bluegreen.go +++ b/rollout/bluegreen.go @@ -127,6 +127,8 @@ func (c *rolloutContext) isBlueGreenFastTracked(activeSvc *corev1.Service) bool return false } +// reconcileBlueGreenPause will automatically pause or resume the blue-green rollout +// depending on whether auto-promotion is enabled and autoPromotionSeconds has elapsed func (c *rolloutContext) reconcileBlueGreenPause(activeSvc, previewSvc *corev1.Service) { if c.rollout.Status.Abort { return @@ -136,8 +138,8 @@ func (c *rolloutContext) reconcileBlueGreenPause(activeSvc, previewSvc *corev1.S c.log.Infof("New RS '%s' is not ready to pause", c.newRS.Name) return } - if c.rollout.Spec.Paused { - c.log.Info("rollout has been paused by user") + if reason := c.haltProgress(); reason != "" { + c.log.Infof("skipping pause reconciliation: 
%s", reason) return } if c.isBlueGreenFastTracked(activeSvc) { diff --git a/rollout/bluegreen_test.go b/rollout/bluegreen_test.go index a83b9071c2..8ad0b1f838 100644 --- a/rollout/bluegreen_test.go +++ b/rollout/bluegreen_test.go @@ -11,13 +11,14 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" core "k8s.io/client-go/testing" - "k8s.io/kubernetes/pkg/controller" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" + "github.com/argoproj/argo-rollouts/utils/hash" rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) var ( @@ -33,10 +34,66 @@ func newBlueGreenRollout(name string, replicas int, revisionHistoryLimit *int32, AbortScaleDownDelaySeconds: &abortScaleDownDelaySeconds, } rollout.Status.CurrentStepHash = conditions.ComputeStepHash(rollout) - rollout.Status.CurrentPodHash = controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + rollout.Status.CurrentPodHash = hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount) return rollout } +func TestBlueGreenCompletedRolloutRestart(t *testing.T) { + f := newFixture(t) + defer f.Close() + + r := newBlueGreenRollout("foo", 1, nil, "active", "preview") + r.Status.Conditions = []v1alpha1.RolloutCondition{} + + completedHealthyCond := conditions.NewRolloutCondition(v1alpha1.RolloutHealthy, corev1.ConditionFalse, conditions.RolloutHealthyReason, conditions.RolloutNotHealthyMessage) + conditions.SetRolloutCondition(&r.Status, *completedHealthyCond) + completedCond, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r.Status, completedCond) + + f.rolloutLister = append(f.rolloutLister, r) + f.objects = append(f.objects, r) + previewSvc := newService("preview", 80, nil, r) + activeSvc := newService("active", 80, nil, r) + f.kubeobjects = append(f.kubeobjects, previewSvc, activeSvc) + f.serviceLister = append(f.serviceLister, activeSvc, previewSvc) + + rs := newReplicaSet(r, 1) + rsPodHash := rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + generatedConditions := generateConditionsPatchWithCompletedHealthy(false, conditions.ReplicaSetUpdatedReason, rs, false, "", false, true) + + f.expectCreateReplicaSetAction(rs) + servicePatchIndex := f.expectPatchServiceAction(previewSvc, rsPodHash) + f.expectUpdateReplicaSetAction(rs) // scale up RS + updatedRolloutIndex := f.expectUpdateRolloutStatusAction(r) + expectedPatchWithoutSubs := `{ + "status":{ + "blueGreen" : { + "previewSelector": "%s" + }, + "conditions": %s, + "selector": "foo=bar", + "stableRS": "%s", + "phase": "Progressing", + "message": "more replicas need to be updated" + } + }` + expectedPatch := calculatePatch(r, fmt.Sprintf(expectedPatchWithoutSubs, rsPodHash, generatedConditions, rsPodHash)) + patchRolloutIndex := f.expectPatchRolloutActionWithPatch(r, expectedPatch) + f.run(getKey(r, t)) + + f.verifyPatchedService(servicePatchIndex, rsPodHash, "") + + updatedRollout := f.getUpdatedRollout(updatedRolloutIndex) + updatedProgressingCondition := conditions.GetRolloutCondition(updatedRollout.Status, v1alpha1.RolloutProgressing) + assert.NotNil(t, updatedProgressingCondition) + assert.Equal(t, conditions.NewReplicaSetReason, updatedProgressingCondition.Reason) + assert.Equal(t, corev1.ConditionTrue, updatedProgressingCondition.Status) + assert.Equal(t, 
fmt.Sprintf(conditions.NewReplicaSetMessage, rs.Name), updatedProgressingCondition.Message) + + patch := f.getPatchedRollout(patchRolloutIndex) + assert.Equal(t, expectedPatch, patch) +} + func TestBlueGreenCreatesReplicaSet(t *testing.T) { f := newFixture(t) defer f.Close() @@ -52,7 +109,7 @@ func TestBlueGreenCreatesReplicaSet(t *testing.T) { rs := newReplicaSet(r, 1) rsPodHash := rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - generatedConditions := generateConditionsPatch(false, conditions.ReplicaSetUpdatedReason, rs, false, "") + generatedConditions := generateConditionsPatchWithCompleted(false, conditions.ReplicaSetUpdatedReason, rs, false, "", true) f.expectCreateReplicaSetAction(rs) servicePatchIndex := f.expectPatchServiceAction(previewSvc, rsPodHash) @@ -216,7 +273,7 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 2, 2, 4, 2, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 2, 2, 4, 2, false, true, false) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -247,7 +304,7 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) previewSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} previewSvc := newService("preview", 80, previewSelector, r2) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} @@ -276,7 +333,7 @@ func TestBlueGreenHandlePause(t *testing.T) { "message": "BlueGreenPause" } }` - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.Now().UTC().Format(time.RFC3339) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, v1alpha1.PauseReasonBlueGreenPause, now)), patch) }) @@ -295,7 +352,7 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) previewSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} previewSvc := newService("preview", 80, previewSelector, r2) @@ -318,7 +375,7 @@ func TestBlueGreenHandlePause(t *testing.T) { "conditions": %s } }` - addedConditions := generateConditionsPatchWithPause(true, conditions.RolloutPausedReason, rs2, true, "", true) + addedConditions := generateConditionsPatchWithPause(true, conditions.RolloutPausedReason, rs2, true, "", true, false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, addedConditions)), patch) }) @@ -335,12 +392,15 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = 
updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Phase, r2.Status.Message = rolloututil.CalculateRolloutPhase(r2.Spec, r2.Status) previewSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} @@ -378,8 +438,8 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) - now := metav1.Now() + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) + now := timeutil.MetaNow() r2.Status.PauseConditions = append(r2.Status.PauseConditions, v1alpha1.PauseCondition{ Reason: v1alpha1.PauseReasonInconclusiveAnalysis, StartTime: now, @@ -424,12 +484,15 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, r2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) pausedCondition, _ := newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) + + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Phase, r2.Status.Message = rolloututil.CalculateRolloutPhase(r2.Spec, r2.Status) previewSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} @@ -462,8 +525,8 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) - now := metav1.Now() + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) + now := timeutil.MetaNow() before := metav1.NewTime(now.Add(-1 * time.Minute)) r2.Status.PauseConditions[0].StartTime = before r2.Status.ControllerPause = true @@ -520,8 +583,8 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) - now := metav1.Now() + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, false) + now := timeutil.MetaNow() before := metav1.NewTime(now.Add(-1 * time.Minute)) r2.Status.PauseConditions[0].StartTime = before r2.Status.ControllerPause = true @@ -531,6 +594,9 @@ func TestBlueGreenHandlePause(t *testing.T) { pausedCondition, _ := newPausedCondition(true) 
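The reason each of these setups now also seeds a Completed=false condition is that the expected patches are compared against the full condition list, and `conditions.SetRolloutCondition` keeps at most one entry per condition type; seeding it up front presumably keeps the new condition out of the computed patch in tests that are not about it. Below is a minimal, simplified sketch of that replace-or-append behaviour, not the repository's actual `utils/conditions` code.

```go
// Simplified stand-in illustrating "one condition per type, replace or append".
package main

import "fmt"

type rolloutCondition struct {
	Type   string
	Status string
}

type rolloutStatus struct {
	Conditions []rolloutCondition
}

// setRolloutCondition replaces an existing condition of the same type,
// otherwise appends the new condition.
func setRolloutCondition(status *rolloutStatus, cond rolloutCondition) {
	for i := range status.Conditions {
		if status.Conditions[i].Type == cond.Type {
			status.Conditions[i] = cond
			return
		}
	}
	status.Conditions = append(status.Conditions, cond)
}

func main() {
	var st rolloutStatus
	setRolloutCondition(&st, rolloutCondition{Type: "Paused", Status: "True"})
	setRolloutCondition(&st, rolloutCondition{Type: "Completed", Status: "False"})
	setRolloutCondition(&st, rolloutCondition{Type: "Completed", Status: "True"})
	fmt.Printf("%+v\n", st.Conditions) // [{Paused True} {Completed True}]
}
```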
conditions.SetRolloutCondition(&r2.Status, pausedCondition) + + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Phase, r2.Status.Message = rolloututil.CalculateRolloutPhase(r2.Spec, r2.Status) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} @@ -564,7 +630,7 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) r2.Spec.Strategy.BlueGreen.ScaleDownDelaySeconds = pointer.Int32Ptr(10) progressingCondition, _ := newProgressingCondition(conditions.NewReplicaSetReason, rs2, "") @@ -580,7 +646,7 @@ func TestBlueGreenHandlePause(t *testing.T) { servicePatchIndex := f.expectPatchServiceAction(activeSvc, rs2PodHash) - generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, true, "") + generatedConditions := generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs2, true, "", true) newSelector := metav1.FormatLabelSelector(rs2.Spec.Selector) expectedPatchWithoutSubs := `{ "status": { @@ -615,10 +681,13 @@ func TestBlueGreenHandlePause(t *testing.T) { rs2 := newReplicaSetWithStatus(r2, 1, 1) rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) progressingCondition, _ := newProgressingCondition(conditions.NewReplicaSetReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) + + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} activeSvc := newService("active", 80, activeSelector, r2) @@ -628,7 +697,7 @@ func TestBlueGreenHandlePause(t *testing.T) { f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) f.serviceLister = append(f.serviceLister, activeSvc) - now := metav1.Now().UTC().Format(time.RFC3339) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) expectedPatchWithoutSubs := `{ "status": { "pauseConditions": [{ @@ -658,7 +727,7 @@ func TestBlueGreenHandlePause(t *testing.T) { rs1 := newReplicaSetWithStatus(r1, 1, 1) rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r1 = updateBlueGreenRolloutStatus(r1, "", "", "", 1, 1, 1, 1, false, false) + r1 = updateBlueGreenRolloutStatus(r1, "", "", "", 1, 1, 1, 1, false, false, false) activeSelector := map[string]string{"foo": "bar"} @@ -684,7 +753,7 @@ func TestBlueGreenHandlePause(t *testing.T) { } }` - generateConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "") + generateConditions := generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs1, false, "", true) newSelector := metav1.FormatLabelSelector(rs1.Spec.Selector) expectedPatch := calculatePatch(r1, fmt.Sprintf(expectedPatchWithoutSubs, rs1PodHash, rs1PodHash, generateConditions, newSelector)) patchRolloutIndex := f.expectPatchRolloutActionWithPatch(r1, expectedPatch) @@ -710,8 +779,10 @@ func TestBlueGreenHandlePause(t *testing.T) { rs2PodHash 
:= rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] r2.Spec.Strategy.BlueGreen.ScaleDownDelaySeconds = pointer.Int32Ptr(10) - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) r2.Status.ControllerPause = true + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) pausedCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, pausedCondition) @@ -733,7 +804,10 @@ func TestBlueGreenHandlePause(t *testing.T) { f.verifyPatchedService(servicePatchIndex, rs2PodHash, "") unpausePatch := f.getPatchedRollout(unpausePatchIndex) - unpauseConditions := generateConditionsPatch(true, conditions.RolloutResumedReason, rs2, true, "") + _, availableCondition := newAvailableCondition(true) + _, progressingCondition := newProgressingCondition(conditions.RolloutResumedReason, rs2, "") + _, compCondition := newCompletedCondition(false) + unpauseConditions := fmt.Sprintf("[%s, %s, %s]", availableCondition, compCondition, progressingCondition) expectedUnpausePatch := `{ "status": { "conditions": %s @@ -741,7 +815,7 @@ func TestBlueGreenHandlePause(t *testing.T) { }` assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedUnpausePatch, unpauseConditions)), unpausePatch) - generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, true, "") + generatedConditions := generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs2, true, "", true) expected2ndPatchWithoutSubs := `{ "status": { "blueGreen": { @@ -780,7 +854,7 @@ func TestBlueGreenAddScaleDownDelayToPreviousActiveReplicaSet(t *testing.T) { f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) r2.Spec.Strategy.BlueGreen.ScaleDownDelaySeconds = pointer.Int32Ptr(10) - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, false) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.serviceLister = append(f.serviceLister, s) @@ -803,7 +877,7 @@ func TestBlueGreenAddScaleDownDelayToPreviousActiveReplicaSet(t *testing.T) { } }` newSelector := metav1.FormatLabelSelector(rs2.Spec.Selector) - expectedCondition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, true, "") + expectedCondition := generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs2, true, "", true) expectedPatch := calculatePatch(r2, fmt.Sprintf(expectedPatchWithoutSubs, rs2PodHash, rs2PodHash, expectedCondition, newSelector)) assert.Equal(t, expectedPatch, patch) } @@ -824,7 +898,7 @@ func TestBlueGreenRolloutStatusHPAStatusFieldsActiveSelectorSet(t *testing.T) { previewSvc := newService("preview", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}, r2) activeSvc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash}, r2) - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 0, 0, 0, 0, true, false) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 0, 0, 0, 0, true, false, false) r2.Status.Selector = "" progressingCondition, _ := newProgressingCondition(conditions.RolloutPausedReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, 
progressingCondition) @@ -885,6 +959,7 @@ func TestBlueGreenRolloutStatusHPAStatusFieldsNoActiveSelector(t *testing.T) { assert.Len(t, f.client.Actions(), 1) result := f.client.Actions()[0].(core.PatchAction).GetPatch() _, availableStr := newAvailableCondition(false) + _, compCond := newCompletedCondition(true) expectedPatchWithoutSub := `{ "status":{ "HPAReplicas":1, @@ -892,11 +967,11 @@ func TestBlueGreenRolloutStatusHPAStatusFieldsNoActiveSelector(t *testing.T) { "availableReplicas": 1, "updatedReplicas":1, "replicas":1, - "conditions":[%s, %s], + "conditions":[%s, %s, %s], "selector":"foo=bar" } }` - expectedPatch := calculatePatch(ro, fmt.Sprintf(expectedPatchWithoutSub, progressingConditionStr, availableStr)) + expectedPatch := calculatePatch(ro, fmt.Sprintf(expectedPatchWithoutSub, progressingConditionStr, availableStr, compCond)) assert.Equal(t, expectedPatch, string(result)) } @@ -918,7 +993,7 @@ func TestBlueGreenRolloutScaleUpdateActiveRS(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 1, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 1, 1, false, true, false) f.objects = append(f.objects, r2) previewSvc := newService("preview", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}, r2) activeSvc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash}, r2) @@ -951,7 +1026,7 @@ func TestPreviewReplicaCountHandleScaleUpPreviewCheckPoint(t *testing.T) { activeSvc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash}, r2) - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 3, 3, 8, 5, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 3, 3, 8, 5, false, true, false) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.kubeobjects = append(f.kubeobjects, activeSvc) @@ -983,7 +1058,7 @@ func TestPreviewReplicaCountHandleScaleUpPreviewCheckPoint(t *testing.T) { activeSvc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}, r2) - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 5, 5, 8, 5, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 5, 5, 8, 5, false, true, false) r2.Status.BlueGreen.ScaleUpPreviewCheckPoint = true f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) @@ -1014,7 +1089,7 @@ func TestPreviewReplicaCountHandleScaleUpPreviewCheckPoint(t *testing.T) { activeSvc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash}, r2) - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 5, 5, 8, 5, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 5, 5, 8, 5, false, true, false) r2.Status.BlueGreen.ScaleUpPreviewCheckPoint = true f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) @@ -1048,7 +1123,7 @@ func TestBlueGreenRolloutIgnoringScalingUsePreviewRSCount(t *testing.T) { previewSvc := newService("preview", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}, r2) activeSvc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash}, r2) - r2 = 
updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 2, 1, 1, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 2, 1, 1, 1, false, true, true) // Scaling up the rollout r2.Spec.Replicas = pointer.Int32Ptr(2) f.rolloutLister = append(f.rolloutLister, r2) @@ -1081,7 +1156,7 @@ func TestBlueGreenRolloutCompleted(t *testing.T) { s := newService("bar", 80, serviceSelector, r2) f.kubeobjects = append(f.kubeobjects, s) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 1, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 1, 1, false, true, true) r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) f.rolloutLister = append(f.rolloutLister, r2) @@ -1092,7 +1167,7 @@ func TestBlueGreenRolloutCompleted(t *testing.T) { f.run(getKey(r2, t)) - newConditions := generateConditionsPatchWithComplete(true, conditions.NewRSAvailableReason, rs2, true, "", true) + newConditions := generateConditionsPatchWithHealthy(true, conditions.NewRSAvailableReason, rs2, true, "", true, true) expectedPatch := fmt.Sprintf(`{ "status":{ "conditions":%s @@ -1107,7 +1182,7 @@ func TestBlueGreenRolloutCompletedFalse(t *testing.T) { defer f.Close() r1 := newBlueGreenRollout("foo", 1, nil, "bar", "") - completedCondition, _ := newCompletedCondition(true) + completedCondition, _ := newHealthyCondition(true) conditions.SetRolloutCondition(&r1.Status, completedCondition) r2 := bumpVersion(r1) @@ -1127,7 +1202,7 @@ func TestBlueGreenRolloutCompletedFalse(t *testing.T) { s := newService("bar", 80, serviceSelector, r2) f.kubeobjects = append(f.kubeobjects, s) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 1, 1, true, false) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 1, 1, true, false, false) r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) f.rolloutLister = append(f.rolloutLister, r2) @@ -1142,8 +1217,8 @@ func TestBlueGreenRolloutCompletedFalse(t *testing.T) { err := json.Unmarshal([]byte(patch), &rolloutPatch) assert.NoError(t, err) - index := len(rolloutPatch.Status.Conditions) - 2 - assert.Equal(t, v1alpha1.RolloutCompleted, rolloutPatch.Status.Conditions[index].Type) + index := len(rolloutPatch.Status.Conditions) - 3 + assert.Equal(t, v1alpha1.RolloutHealthy, rolloutPatch.Status.Conditions[index].Type) assert.Equal(t, corev1.ConditionFalse, rolloutPatch.Status.Conditions[index].Status) } @@ -1165,7 +1240,7 @@ func TestBlueGreenUnableToReadScaleDownAt(t *testing.T) { f.kubeobjects = append(f.kubeobjects, s, rs1, rs2) f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true, true) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.serviceLister = append(f.serviceLister, s) @@ -1193,7 +1268,7 @@ func TestBlueGreenNotReadyToScaleDownOldReplica(t *testing.T) { rs2 := newReplicaSetWithStatus(r2, 1, 1) rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - inTheFuture := metav1.Now().Add(10 * time.Second).UTC().Format(time.RFC3339) + inTheFuture := timeutil.Now().Add(10 * time.Second).UTC().Format(time.RFC3339) rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inTheFuture @@ -1202,7 +1277,7 @@ func TestBlueGreenNotReadyToScaleDownOldReplica(t *testing.T) { f.kubeobjects = 
append(f.kubeobjects, s, rs1, rs2) f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true, true) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.serviceLister = append(f.serviceLister, s) @@ -1226,7 +1301,7 @@ func TestBlueGreenReadyToScaleDownOldReplica(t *testing.T) { rs2 := newReplicaSetWithStatus(r2, 1, 1) rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - inThePast := metav1.Now().Add(-10 * time.Second).UTC().Format(time.RFC3339) + inThePast := timeutil.Now().Add(-10 * time.Second).UTC().Format(time.RFC3339) rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inThePast @@ -1235,7 +1310,7 @@ func TestBlueGreenReadyToScaleDownOldReplica(t *testing.T) { f.kubeobjects = append(f.kubeobjects, s, rs1, rs2) f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true, true) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.serviceLister = append(f.serviceLister, s) @@ -1265,7 +1340,7 @@ func TestFastRollback(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] //Setting the scaleDownAt time - inTheFuture := metav1.Now().Add(10 * time.Second).UTC().Format(time.RFC3339) + inTheFuture := timeutil.Now().Add(10 * time.Second).UTC().Format(time.RFC3339) rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inTheFuture rs2.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inTheFuture @@ -1280,7 +1355,7 @@ func TestFastRollback(t *testing.T) { r2.Status.CurrentPodHash = rs1PodHash rs1.Annotations[annotations.RevisionAnnotation] = "3" - r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs1PodHash, rs1PodHash, 1, 1, 2, 1, false, true, true) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.serviceLister = append(f.serviceLister, s) @@ -1309,7 +1384,7 @@ func TestBlueGreenScaleDownLimit(t *testing.T) { rs3PodHash := rs3.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] //Setting the scaleDownAt time - inTheFuture := metav1.Now().Add(10 * time.Second).UTC().Format(time.RFC3339) + inTheFuture := timeutil.MetaNow().Add(10 * time.Second).UTC().Format(time.RFC3339) rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inTheFuture rs2.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inTheFuture @@ -1318,7 +1393,7 @@ func TestBlueGreenScaleDownLimit(t *testing.T) { f.kubeobjects = append(f.kubeobjects, s, rs1, rs2, rs3) f.replicaSetLister = append(f.replicaSetLister, rs1, rs2, rs3) - r3 = updateBlueGreenRolloutStatus(r3, "", rs3PodHash, rs3PodHash, 1, 1, 3, 1, false, true) + r3 = updateBlueGreenRolloutStatus(r3, "", rs3PodHash, rs3PodHash, 1, 1, 3, 1, false, true, true) f.rolloutLister = append(f.rolloutLister, r3) f.objects = append(f.objects, r3) f.serviceLister = append(f.serviceLister, s) @@ -1344,7 +1419,7 @@ func TestBlueGreenAbort(t *testing.T) { r1 := newBlueGreenRollout("foo", 1, nil, "bar", "") r2 := bumpVersion(r1) r2.Status.Abort = true - now := metav1.Now() + now := timeutil.MetaNow() r2.Status.AbortedAt = 
&now rs1 := newReplicaSetWithStatus(r1, 1, 1) @@ -1357,7 +1432,7 @@ func TestBlueGreenAbort(t *testing.T) { f.kubeobjects = append(f.kubeobjects, s, rs1, rs2) f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 1, 1, 2, 1, false, true, true) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) f.serviceLister = append(f.serviceLister, s) @@ -1365,7 +1440,7 @@ func TestBlueGreenAbort(t *testing.T) { f.expectPatchServiceAction(s, rs1PodHash) patchIndex := f.expectPatchRolloutAction(r2) f.run(getKey(r2, t)) - expectedConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, true, "") + expectedConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, true, "", false) expectedPatch := fmt.Sprintf(`{ "status": { "blueGreen": { @@ -1394,8 +1469,8 @@ func TestBlueGreenHandlePauseAutoPromoteWithConditions(t *testing.T) { rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true) - now := metav1.Now() + r2 = updateBlueGreenRolloutStatus(r2, rs2PodHash, rs1PodHash, rs1PodHash, 1, 1, 2, 1, true, true, true) + now := timeutil.MetaNow() before := metav1.NewTime(now.Add(-1 * time.Minute)) r2.Status.PauseConditions[0].StartTime = before r2.Status.ControllerPause = true @@ -1408,6 +1483,8 @@ func TestBlueGreenHandlePauseAutoPromoteWithConditions(t *testing.T) { availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + //completedCondition, _ := newCompletedCondition(true) + //conditions.SetRolloutCondition(&r2.Status, completedCondition) r2.Status.Phase, r2.Status.Message = rolloututil.CalculateRolloutPhase(r2.Spec, r2.Status) activeSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} @@ -1426,7 +1503,7 @@ func TestBlueGreenHandlePauseAutoPromoteWithConditions(t *testing.T) { "blueGreen": { "activeSelector": "%s" }, - "conditions": [%s, %s, %s], + "conditions": [%s, %s, %s, %s], "stableRS": "%s", "pauseConditions": null, "controllerPause": null, @@ -1440,9 +1517,12 @@ func TestBlueGreenHandlePauseAutoPromoteWithConditions(t *testing.T) { updatedProgressingCond, _ := newProgressingCondition(conditions.ReplicaSetUpdatedReason, rs2, fmt.Sprintf("ReplicaSet \"%s\" is progressing.", rs2.Name)) progressingCondBytes, err := json.Marshal(updatedProgressingCond) assert.Nil(t, err) - pausedCondBytes, err := json.Marshal(r2.Status.Conditions[2]) + pausedCondBytes, err := json.Marshal(r2.Status.Conditions[3]) + assert.Nil(t, err) + completeCond, _ := newCompletedCondition(true) + completeCondBytes, err := json.Marshal(completeCond) assert.Nil(t, err) - expectedPatch := calculatePatch(r2, fmt.Sprintf(expectedPatchWithoutSubs, rs2PodHash, string(availableCondBytes), string(pausedCondBytes), string(progressingCondBytes), rs2PodHash, rs2PodHash)) + expectedPatch := calculatePatch(r2, fmt.Sprintf(expectedPatchWithoutSubs, rs2PodHash, string(availableCondBytes), string(completeCondBytes), string(pausedCondBytes), string(progressingCondBytes), rs2PodHash, rs2PodHash)) f.expectPatchServiceAction(activeSvc, rs2PodHash) patchRolloutIndex := f.expectPatchRolloutActionWithPatch(r2, expectedPatch) f.run(getKey(r2, t)) @@ -1466,8 +1546,8 @@ 
func TestBlueGreenAddScaleDownDelay(t *testing.T) { rs2 := newReplicaSetWithStatus(r2, 1, 1) rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true) - completedCondition, _ := newCompletedCondition(true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs2PodHash, 1, 1, 2, 1, false, true, true) + completedCondition, _ := newHealthyCondition(true) conditions.SetRolloutCondition(&r2.Status, completedCondition) progressingCondition, _ := newProgressingCondition(conditions.NewRSAvailableReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) diff --git a/rollout/canary.go b/rollout/canary.go index 3d04707cb8..87228499ab 100644 --- a/rollout/canary.go +++ b/rollout/canary.go @@ -8,6 +8,7 @@ import ( "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/record" @@ -44,6 +45,10 @@ func (c *rolloutContext) rolloutCanary() error { return err } + if err := c.reconcilePingAndPongService(); err != nil { + return err + } + if err := c.reconcileStableAndCanaryService(); err != nil { return err } @@ -174,7 +179,6 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli } annotationedRSs := int32(0) - rolloutReplicas := defaults.GetReplicasOrDefault(c.rollout.Spec.Replicas) for _, targetRS := range oldRSs { if replicasetutil.IsStillReferenced(c.rollout.Status, targetRS) { // We should technically never get here because we shouldn't be passing a replicaset list @@ -204,8 +208,8 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli // 1. if we are using dynamic scaling, then this should be scaled down to 0 now desiredReplicaCount = 0 } else { - // 2. otherwise, honor scaledown delay second - annotationedRSs, desiredReplicaCount, err = c.scaleDownDelayHelper(targetRS, annotationedRSs, rolloutReplicas) + // 2. otherwise, honor the scale-down delay seconds and keep the replica count of the current step + annotationedRSs, desiredReplicaCount, err = c.scaleDownDelayHelper(targetRS, annotationedRSs, *targetRS.Spec.Replicas) if err != nil { return totalScaledDown, err } @@ -216,18 +220,13 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli // and doesn't yet have scale down deadline. This happens when a user changes their // mind in the middle of an V1 -> V2 update, and then applies a V3. We are deciding // what to do with the defunct, intermediate V2 ReplicaSet right now. - if replicasetutil.IsReplicaSetReady(c.newRS) && replicasetutil.IsReplicaSetReady(c.stableRS) { - // If the both new and old RS are available, we can infer that it is safe to - // scale down this ReplicaSet, since traffic should have shifted away from this RS. - // TODO: instead of checking availability of canary/stable, a better way to determine - // if it is safe to scale this down, is to check if traffic is directed to the RS. - // In other words, we can check c.rollout.Status.Canary.Weights to see if this - // ReplicaSet hash is still referenced, and scale it down otherwise. + if !c.replicaSetReferencedByCanaryTraffic(targetRS) { + // It is safe to scale down the intermediate RS if no traffic is directed to it. 
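This change replaces the old readiness heuristic with an explicit traffic check: an intermediate ReplicaSet is kept while `c.rollout.Status.Canary.Weights` still references its pod-template hash (see `replicaSetReferencedByCanaryTraffic`, added further down in this hunk). A self-contained sketch of that decision follows, using simplified stand-in types rather than the real `v1alpha1.TrafficWeights` / `WeightDestination` structs.

```go
// Standalone illustration: an intermediate ReplicaSet is only scaled down when
// neither the canary nor the stable traffic weight still points at its hash.
package main

import "fmt"

type weightDestination struct {
	PodTemplateHash string
}

type trafficWeights struct {
	Canary weightDestination
	Stable weightDestination
}

// referencedByTraffic mirrors the reference check: nil weights means nothing
// is recorded as routing traffic to any ReplicaSet hash.
func referencedByTraffic(w *trafficWeights, rsHash string) bool {
	if w == nil {
		return false
	}
	return w.Canary.PodTemplateHash == rsHash || w.Stable.PodTemplateHash == rsHash
}

func main() {
	weights := &trafficWeights{
		Canary: weightDestination{PodTemplateHash: "6f8b7c"},
		Stable: weightDestination{PodTemplateHash: "abc123"},
	}
	fmt.Println(referencedByTraffic(weights, "6f8b7c")) // true  -> keep the RS scaled up
	fmt.Println(referencedByTraffic(weights, "def456")) // false -> safe to scale it down
}
```

Checking the recorded traffic weights rather than ReplicaSet readiness follows the TODO removed above: it avoids scaling down a ReplicaSet that the traffic router may still be sending requests to.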
c.log.Infof("scaling down intermediate RS '%s'", targetRS.Name) } else { - // The current and stable ReplicaSets have not reached readiness. This implies - // we might not have shifted traffic away from this ReplicaSet so we need to - // keep this scaled up. + c.log.Infof("Skip scaling down intermediate RS '%s': still referenced by service", targetRS.Name) + // This ReplicaSet is still referenced by the service. It is not safe to scale + // this down. continue } } @@ -249,6 +248,21 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli return totalScaledDown, nil } +func (c *rolloutContext) replicaSetReferencedByCanaryTraffic(rs *appsv1.ReplicaSet) bool { + rsPodHash := replicasetutil.GetPodTemplateHash(rs) + ro := c.rollout + + if ro.Status.Canary.Weights == nil { + return false + } + + if ro.Status.Canary.Weights.Canary.PodTemplateHash == rsPodHash || ro.Status.Canary.Weights.Stable.PodTemplateHash == rsPodHash { + return true + } + + return false +} + // canProceedWithScaleDownAnnotation returns whether or not it is safe to proceed with annotating // old replicasets with the scale-down-deadline in the traffic-routed canary strategy. // This method only matters with ALB canary + the target group verification feature. @@ -279,7 +293,8 @@ func (c *rolloutContext) canProceedWithScaleDownAnnotation(oldRSs []*appsv1.Repl // AWS API calls. return true, nil } - stableSvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(c.rollout.Spec.Strategy.Canary.StableService) + stableSvcName, _ := trafficrouting.GetStableAndCanaryServices(c.rollout) + stableSvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(stableSvcName) if err != nil { return false, err } @@ -322,6 +337,10 @@ func (c *rolloutContext) completedCurrentCanaryStep() bool { currentStepAr := c.currentArs.CanaryStep analysisExistsAndCompleted := currentStepAr != nil && currentStepAr.Status.Phase.Completed() return analysisExistsAndCompleted && currentStepAr.Status.Phase == v1alpha1.AnalysisPhaseSuccessful + case currentStep.SetHeaderRoute != nil: + return true + case currentStep.SetMirrorRoute != nil: + return true } return false } @@ -331,6 +350,7 @@ func (c *rolloutContext) syncRolloutStatusCanary() error { newStatus.AvailableReplicas = replicasetutil.GetAvailableReplicaCountForReplicaSets(c.allRSs) newStatus.HPAReplicas = replicasetutil.GetActualReplicaCountForReplicaSets(c.allRSs) newStatus.Selector = metav1.FormatLabelSelector(c.rollout.Spec.Selector) + newStatus.Canary.StablePingPong = c.rollout.Status.Canary.StablePingPong currentStep, currentStepIndex := replicasetutil.GetCurrentCanaryStep(c.rollout) newStatus.StableRS = c.rollout.Status.StableRS @@ -394,6 +414,10 @@ func (c *rolloutContext) syncRolloutStatusCanary() error { } func (c *rolloutContext) reconcileCanaryReplicaSets() (bool, error) { + if haltReason := c.haltProgress(); haltReason != "" { + c.log.Infof("Skipping canary/stable ReplicaSet reconciliation: %s", haltReason) + return false, nil + } err := c.removeScaleDownDeadlines() if err != nil { return false, err diff --git a/rollout/canary_test.go b/rollout/canary_test.go index 60cf79f49e..56445f3978 100644 --- a/rollout/canary_test.go +++ b/rollout/canary_test.go @@ -15,16 +15,18 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" k8sinformers "k8s.io/client-go/informers" k8sfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/kubernetes/pkg/controller" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" 
"github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" + "github.com/argoproj/argo-rollouts/utils/hash" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" + replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func newCanaryRollout(name string, replicas int, revisionHistoryLimit *int32, steps []v1alpha1.CanaryStep, stepIndex *int32, maxSurge, maxUnavailable intstr.IntOrString) *v1alpha1.Rollout { @@ -37,7 +39,7 @@ func newCanaryRollout(name string, replicas int, revisionHistoryLimit *int32, st } rollout.Status.CurrentStepIndex = stepIndex rollout.Status.CurrentStepHash = conditions.ComputeStepHash(rollout) - rollout.Status.CurrentPodHash = controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + rollout.Status.CurrentPodHash = hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount) rollout.Status.Selector = metav1.FormatLabelSelector(rollout.Spec.Selector) rollout.Status.Phase, rollout.Status.Message = rolloututil.CalculateRolloutPhase(rollout.Spec, rollout.Status) return rollout @@ -52,7 +54,7 @@ func bumpVersion(rollout *v1alpha1.Rollout) *v1alpha1.Rollout { newRevisionStr := strconv.FormatInt(int64(newRevision), 10) annotations.SetRolloutRevision(newRollout, newRevisionStr) newRollout.Spec.Template.Spec.Containers[0].Image = "foo/bar" + newRevisionStr - newRollout.Status.CurrentPodHash = controller.ComputeHash(&newRollout.Spec.Template, newRollout.Status.CollisionCount) + newRollout.Status.CurrentPodHash = hash.ComputePodTemplateHash(&newRollout.Spec.Template, newRollout.Status.CollisionCount) newRollout.Status.CurrentStepHash = conditions.ComputeStepHash(newRollout) newRollout.Status.Phase, newRollout.Status.Message = rolloututil.CalculateRolloutPhase(newRollout.Spec, newRollout.Status) return newRollout @@ -176,8 +178,8 @@ func TestCanaryRolloutEnterPauseState(t *testing.T) { } }` - conditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") - now := metav1.Now().UTC().Format(time.RFC3339) + conditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) + now := timeutil.MetaNow().UTC().Format(time.RFC3339) expectedPatchWithoutObservedGen := fmt.Sprintf(expectedPatchTemplate, v1alpha1.PauseReasonCanaryPauseStep, now, conditions, v1alpha1.PauseReasonCanaryPauseStep) expectedPatch := calculatePatch(r2, expectedPatchWithoutObservedGen) assert.Equal(t, expectedPatch, patch) @@ -237,6 +239,9 @@ func TestCanaryRolloutUpdatePauseConditionWhilePaused(t *testing.T) { availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + rs1 := newReplicaSetWithStatus(r1, 10, 10) rs2 := newReplicaSetWithStatus(r2, 0, 0) @@ -335,7 +340,7 @@ func TestCanaryRolloutIncrementStepAfterUnPaused(t *testing.T) { "currentStepIndex": 1 } }` - generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "") + generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", false) expectedPatch := calculatePatch(r2, 
fmt.Sprintf(expectedPatchTemplate, generatedConditions)) assert.Equal(t, expectedPatch, patch) } @@ -377,7 +382,7 @@ func TestCanaryRolloutUpdateStatusWhenAtEndOfSteps(t *testing.T) { } }` - expectedPatch := fmt.Sprintf(expectedPatchWithoutStableRS, expectedStableRS, generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "")) + expectedPatch := fmt.Sprintf(expectedPatchWithoutStableRS, expectedStableRS, generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", true)) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) } @@ -419,7 +424,7 @@ func TestResetCurrentStepIndexOnStepChange(t *testing.T) { "conditions": %s } }` - newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, expectedCurrentStepHash, newConditions) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) @@ -460,7 +465,7 @@ func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) { "conditions": %s } }` - newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, newConditions) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) @@ -500,7 +505,7 @@ func TestCanaryRolloutCreateFirstReplicasetNoSteps(t *testing.T) { } }` - newConditions := generateConditionsPatch(false, conditions.ReplicaSetUpdatedReason, rs, false, "") + newConditions := generateConditionsPatchWithCompleted(false, conditions.ReplicaSetUpdatedReason, rs, false, "", true) assert.Equal(t, calculatePatch(r, fmt.Sprintf(expectedPatch, newConditions)), patch) } @@ -540,7 +545,7 @@ func TestCanaryRolloutCreateFirstReplicasetWithSteps(t *testing.T) { "conditions": %s } }` - expectedPatch := fmt.Sprintf(expectedPatchWithSub, generateConditionsPatch(false, conditions.ReplicaSetUpdatedReason, rs, false, "")) + expectedPatch := fmt.Sprintf(expectedPatchWithSub, generateConditionsPatchWithCompleted(false, conditions.ReplicaSetUpdatedReason, rs, false, "", true)) assert.Equal(t, calculatePatch(r, expectedPatch), patch) } @@ -737,6 +742,11 @@ func TestCanaryDontScaleDownOldRsDuringInterruptedUpdate(t *testing.T) { rs1 := newReplicaSetWithStatus(r1, 5, 5) rs2 := newReplicaSetWithStatus(r2, 5, 5) rs3 := newReplicaSetWithStatus(r3, 5, 0) + r3.Status.Canary.Weights = &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + PodTemplateHash: replicasetutil.GetPodTemplateHash(rs2), + }, + } f.objects = append(f.objects, r3) f.kubeobjects = append(f.kubeobjects, rs1, rs2, rs3, canarySvc, stableSvc) @@ -750,7 +760,7 @@ func TestCanaryDontScaleDownOldRsDuringInterruptedUpdate(t *testing.T) { // TestCanaryScaleDownOldRsDuringInterruptedUpdate tests that we proceed with scale down of an // intermediate V2 ReplicaSet when applying a V3 spec in the middle of updating a traffic routed // canary going from V1 -> V2 (i.e. after we have shifted traffic away from V2). 
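For readers skimming these two interrupted-update tests: they exercise the `replicaSetReferencedByCanaryTraffic` guard added in the canary.go hunk above, by seeding `status.canary.weights` so that an intermediate ReplicaSet's pod-template hash is (or is not) still referenced by traffic. The snippet below is a standalone illustration of the expected behavior, not code from this change; the type names come from the hunks above, while the hash strings are made up for the example.

```go
package rollout

import (
	"fmt"

	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
)

// exampleTrafficWeightGuard restates the scale-down guard from the hunk above:
// an old ReplicaSet must stay scaled up while either reported traffic
// destination still points at its pod-template hash.
func exampleTrafficWeightGuard() {
	ro := &v1alpha1.Rollout{}
	ro.Status.Canary.Weights = &v1alpha1.TrafficWeights{
		Canary: v1alpha1.WeightDestination{PodTemplateHash: "hash-rs2"},
		Stable: v1alpha1.WeightDestination{PodTemplateHash: "hash-rs1"},
	}
	referenced := func(rsPodHash string) bool {
		w := ro.Status.Canary.Weights
		return w != nil && (w.Canary.PodTemplateHash == rsPodHash || w.Stable.PodTemplateHash == rsPodHash)
	}
	fmt.Println(referenced("hash-rs2")) // true: traffic may still reach rs2, so keep it scaled up
	fmt.Println(referenced("hash-rs3")) // false: nothing references rs3, so it is safe to scale down
}
```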
This test is the -// same as TestCanaryDontScaleDownOldRsDuringUpdate but rs3 is fully available +// same as TestCanaryDontScaleDownOldRsDuringInterruptedUpdate but rs3 is fully available func TestCanaryScaleDownOldRsDuringInterruptedUpdate(t *testing.T) { f := newFixture(t) defer f.Close() @@ -833,8 +843,8 @@ func TestRollBackToStable(t *testing.T) { "conditions": %s } }` - newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "") - expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, controller.ComputeHash(&r2.Spec.Template, r2.Status.CollisionCount), newConditions) + newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "", true) + expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, hash.ComputePodTemplateHash(&r2.Spec.Template, r2.Status.CollisionCount), newConditions) patch := f.getPatchedRollout(patchIndex) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) } @@ -876,7 +886,7 @@ func TestGradualShiftToNewStable(t *testing.T) { "conditions": %s } }` - newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, newConditions) patch := f.getPatchedRollout(patchIndex) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) @@ -922,9 +932,9 @@ func TestRollBackToStableAndStepChange(t *testing.T) { "conditions": %s } }` - newPodHash := controller.ComputeHash(&r2.Spec.Template, r2.Status.CollisionCount) + newPodHash := hash.ComputePodTemplateHash(&r2.Spec.Template, r2.Status.CollisionCount) newStepHash := conditions.ComputeStepHash(r2) - newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "") + newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "", true) expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, newPodHash, newStepHash, newConditions) patch := f.getPatchedRollout(patchIndex) assert.Equal(t, calculatePatch(r2, expectedPatch), patch) @@ -962,7 +972,7 @@ func TestCanaryRolloutIncrementStepIfSetWeightsAreCorrect(t *testing.T) { "conditions": %s } }` - newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs3, false, "") + newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs3, false, "", false) assert.Equal(t, calculatePatch(r3, fmt.Sprintf(expectedPatch, newConditions)), patch) } @@ -998,6 +1008,9 @@ func TestSyncRolloutWaitAddToQueue(t *testing.T) { availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) @@ -1046,6 +1059,9 @@ func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) { availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) @@ -1089,7 +1105,7 @@ func TestSyncRolloutWaitIncrementStepIndex(t *testing.T) { pausedCondition, _ := 
newPausedCondition(true) conditions.SetRolloutCondition(&r2.Status, pausedCondition) - earlier := metav1.Now() + earlier := timeutil.MetaNow() earlier.Time = earlier.Add(-10 * time.Second) r2.Status.ControllerPause = true r2.Status.PauseConditions = []v1alpha1.PauseCondition{{ @@ -1179,7 +1195,7 @@ func TestCanaryRolloutWithCanaryService(t *testing.T) { func TestCanarySVCSelectors(t *testing.T) { for _, tc := range []struct { canaryReplicas int32 - canaryReadyReplicas int32 + canaryAvailReplicas int32 shouldTargetNewRS bool }{ @@ -1240,7 +1256,7 @@ func TestCanarySVCSelectors(t *testing.T) { Replicas: pointer.Int32Ptr(tc.canaryReplicas), }, Status: v1.ReplicaSetStatus{ - ReadyReplicas: tc.canaryReadyReplicas, + AvailableReplicas: tc.canaryAvailReplicas, }, }, stableRS: &v1.ReplicaSet{ @@ -1260,12 +1276,12 @@ func TestCanarySVCSelectors(t *testing.T) { assert.NoError(t, err, "unable to get updated canary service") if tc.shouldTargetNewRS { assert.Equal(t, selectorNewRSVal, updatedCanarySVC.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey], - "canary SVC should have newRS selector label when newRS has %d replicas and %d ReadyReplicas", - tc.canaryReplicas, tc.canaryReadyReplicas) + "canary SVC should have newRS selector label when newRS has %d replicas and %d AvailableReplicas", + tc.canaryReplicas, tc.canaryAvailReplicas) } else { assert.Empty(t, updatedCanarySVC.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey], - "canary SVC should not have newRS selector label when newRS has %d replicas and %d ReadyReplicas", - tc.canaryReplicas, tc.canaryReadyReplicas) + "canary SVC should not have newRS selector label when newRS has %d replicas and %d AvailableReplicas", + tc.canaryReplicas, tc.canaryAvailReplicas) } } } @@ -1354,6 +1370,89 @@ func TestCanaryRolloutWithInvalidStableServiceName(t *testing.T) { assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.stableService: Invalid value: \"invalid-stable\": service \"invalid-stable\" not found", condition["message"]) } +func TestCanaryRolloutWithPingPongServices(t *testing.T) { + f := newFixture(t) + defer f.Close() + + r := newCanaryRollout("foo", 1, nil, nil, nil, intstr.FromInt(1), intstr.FromInt(0)) + pingSvc := newService("ping-service", 80, nil, r) + pongSvc := newService("pong-service", 80, nil, r) + rs1 := newReplicaSetWithStatus(r, 1, 1) + r.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: pingSvc.Name, PongService: pongSvc.Name} + + f.rolloutLister = append(f.rolloutLister, r) + f.objects = append(f.objects, r) + f.kubeobjects = append(f.kubeobjects, pingSvc, pongSvc, rs1) + f.serviceLister = append(f.serviceLister, pingSvc, pongSvc) + + _ = f.expectPatchServiceAction(pingSvc, r.Status.CurrentPodHash) + _ = f.expectPatchRolloutAction(r) + f.run(getKey(r, t)) +} + +func TestCanaryRolloutWithInvalidPingServiceName(t *testing.T) { + f := newFixture(t) + defer f.Close() + + r := newCanaryRollout("foo", 0, nil, nil, nil, intstr.FromInt(1), intstr.FromInt(0)) + r.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping-service", PongService: "pong-service"} + + f.rolloutLister = append(f.rolloutLister, r) + f.objects = append(f.objects, r) + f.kubeobjects = append(f.kubeobjects) + f.serviceLister = append(f.serviceLister) + + patchIndex := f.expectPatchRolloutAction(r) + f.run(getKey(r, t)) + + patch := make(map[string]interface{}) + patchData := f.getPatchedRollout(patchIndex) + err := json.Unmarshal([]byte(patchData), &patch) + assert.NoError(t, err) + + c, ok, err := 
unstructured.NestedSlice(patch, "status", "conditions") + assert.NoError(t, err) + assert.True(t, ok) + assert.Len(t, c, 2) + + condition, ok := c[1].(map[string]interface{}) + assert.True(t, ok) + assert.Equal(t, conditions.InvalidSpecReason, condition["reason"]) + assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.pingPong.pingService: Invalid value: \"ping-service\": service \"ping-service\" not found", condition["message"]) +} + +func TestCanaryRolloutWithInvalidPongServiceName(t *testing.T) { + f := newFixture(t) + defer f.Close() + + r := newCanaryRollout("foo", 0, nil, nil, nil, intstr.FromInt(1), intstr.FromInt(0)) + pingSvc := newService("ping-service", 80, nil, r) + r.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: pingSvc.Name, PongService: "pong-service"} + + f.rolloutLister = append(f.rolloutLister, r) + f.objects = append(f.objects, r) + f.kubeobjects = append(f.kubeobjects, pingSvc) + f.serviceLister = append(f.serviceLister, pingSvc) + + patchIndex := f.expectPatchRolloutAction(r) + f.run(getKey(r, t)) + + patch := make(map[string]interface{}) + patchData := f.getPatchedRollout(patchIndex) + err := json.Unmarshal([]byte(patchData), &patch) + assert.NoError(t, err) + + c, ok, err := unstructured.NestedSlice(patch, "status", "conditions") + assert.NoError(t, err) + assert.True(t, ok) + assert.Len(t, c, 2) + + condition, ok := c[1].(map[string]interface{}) + assert.True(t, ok) + assert.Equal(t, conditions.InvalidSpecReason, condition["reason"]) + assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.pingPong.pongService: Invalid value: \"pong-service\": service \"pong-service\" not found", condition["message"]) +} + func TestCanaryRolloutScaleWhilePaused(t *testing.T) { f := newFixture(t) defer f.Close() @@ -1527,6 +1626,9 @@ func TestHandleNilNewRSOnScaleAndImageChange(t *testing.T) { availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + f.kubeobjects = append(f.kubeobjects, rs1) f.replicaSetLister = append(f.replicaSetLister, rs1) f.rolloutLister = append(f.rolloutLister, r2) @@ -1560,7 +1662,7 @@ func TestHandleCanaryAbort(t *testing.T) { r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 10, 1, 10, false) r2.Status.Abort = true - now := metav1.Now() + now := timeutil.MetaNow() r2.Status.AbortedAt = &now f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) @@ -1582,7 +1684,7 @@ func TestHandleCanaryAbort(t *testing.T) { } }` errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2) - newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "") + newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, newConditions, conditions.RolloutAbortedReason, errmsg)), patch) }) @@ -1597,7 +1699,7 @@ func TestHandleCanaryAbort(t *testing.T) { } r1 := newCanaryRollout("foo", 2, nil, steps, int32Ptr(3), intstr.FromInt(1), intstr.FromInt(0)) r1.Status.Abort = true - now := metav1.Now() + now := timeutil.MetaNow() r1.Status.AbortedAt = &now rs1 := newReplicaSetWithStatus(r1, 2, 2) rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] @@ -1620,7 +1722,7 @@ func TestHandleCanaryAbort(t *testing.T) { } }` errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 1) - newConditions := 
generateConditionsPatch(true, conditions.RolloutAbortedReason, r1, false, "") + newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r1, false, "", true) assert.Equal(t, calculatePatch(r1, fmt.Sprintf(expectedPatch, newConditions, conditions.RolloutAbortedReason, errmsg)), patch) }) } diff --git a/rollout/context.go b/rollout/context.go index 5ebd566409..f8fa5b5f03 100644 --- a/rollout/context.go +++ b/rollout/context.go @@ -74,8 +74,8 @@ func (c *rolloutContext) reconcile() error { return err } - if getPauseCondition(c.rollout, v1alpha1.PauseReasonInconclusiveAnalysis) != nil || c.rollout.Spec.Paused || isScalingEvent { - return c.syncReplicasOnly(isScalingEvent) + if isScalingEvent { + return c.syncReplicasOnly() } if c.rollout.Spec.Strategy.BlueGreen != nil { @@ -141,3 +141,16 @@ func (c *rolloutContext) SetCurrentAnalysisRuns(currARs analysisutil.CurrentAnal } } } + +// haltProgress returns a reason on whether or not we should halt all progress with an update +// to ReplicaSet counts (e.g. due to canary steps or blue-green promotion). This is either because +// user explicitly paused the rollout by setting `spec.paused`, or the analysis was inconclusive +func (c *rolloutContext) haltProgress() string { + if c.rollout.Spec.Paused { + return "user paused" + } + if getPauseCondition(c.rollout, v1alpha1.PauseReasonInconclusiveAnalysis) != nil { + return "inconclusive analysis" + } + return "" +} diff --git a/rollout/controller.go b/rollout/controller.go index 3e9c5d88ca..79c7f9facd 100644 --- a/rollout/controller.go +++ b/rollout/controller.go @@ -8,6 +8,9 @@ import ( "strconv" "time" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts" smiclientset "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/split/clientset/versioned" log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" @@ -40,6 +43,7 @@ import ( listers "github.com/argoproj/argo-rollouts/pkg/client/listers/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/rollout/trafficrouting" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/ambassador" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/appmesh" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" "github.com/argoproj/argo-rollouts/utils/conditions" @@ -52,6 +56,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" serviceutil "github.com/argoproj/argo-rollouts/utils/service" + timeutil "github.com/argoproj/argo-rollouts/utils/time" unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" ) @@ -243,6 +248,7 @@ func NewController(cfg ControllerConfig) *Controller { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil { logCtx := logutil.WithRollout(ro) logCtx.Info("rollout deleted") + controller.metricsServer.Remove(ro.Namespace, ro.Name, logutil.RolloutKey) // Rollout is deleted, queue up the referenced Service and/or DestinationRules so // that the rollouts-pod-template-hash can be cleared from each for _, s := range serviceutil.GetRolloutServiceKeys(ro) { @@ -251,6 +257,7 @@ func NewController(cfg ControllerConfig) *Controller { for _, key := range istioutil.GetRolloutDesinationRuleKeys(ro) { controller.IstioController.EnqueueDestinationRule(key) } + controller.recorder.Eventf(ro, record.EventOptions{EventReason: conditions.RolloutDeletedReason}, conditions.RolloutDeletedMessage, ro.Name, 
ro.Namespace) } }, }) @@ -341,7 +348,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { // with the current status of the resource. func (c *Controller) syncHandler(key string) error { ctx := context.TODO() - startTime := time.Now() + startTime := timeutil.Now() namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } @@ -389,7 +396,10 @@ func (c *Controller) syncHandler(key string) error { if rollout.Spec.Replicas == nil { logCtx.Info("Defaulting .spec.replica to 1") r.Spec.Replicas = pointer.Int32Ptr(defaults.DefaultReplicas) - _, err := c.argoprojclientset.ArgoprojV1alpha1().Rollouts(r.Namespace).Update(ctx, r, metav1.UpdateOptions{}) + newRollout, err := c.argoprojclientset.ArgoprojV1alpha1().Rollouts(r.Namespace).Update(ctx, r, metav1.UpdateOptions{}) + if err == nil { + c.writeBackToInformer(newRollout) + } return err } @@ -414,6 +424,19 @@ func (c *Controller) writeBackToInformer(ro *v1alpha1.Rollout) { return } un := unstructured.Unstructured{Object: obj} + // The code-generated argo clientset's Update method strips the TypeMeta fields, which the + // notification controller expects when it converts the rollout object to unstructured; when they + // are missing it fails with "Failed to process: Object 'Kind' is missing in ..." + // Since the informer is shared with the notification controller, fix this here by restoring the TypeMeta fields. + // TODO: Need to revisit this in the future; maybe we should have a dedicated informer for notifications + gvk := un.GetObjectKind().GroupVersionKind() + if len(gvk.Version) == 0 || len(gvk.Group) == 0 || len(gvk.Kind) == 0 { + un.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{ + Group: v1alpha1.SchemeGroupVersion.Group, + Kind: rollouts.RolloutKind, + Version: v1alpha1.SchemeGroupVersion.Version, + }) + } err = c.rolloutsInformer.GetStore().Update(&un) if err != nil { logCtx.Errorf("failed to update informer store: %v", err) @@ -429,7 +452,7 @@ func (c *Controller) newRolloutContext(rollout *v1alpha1.Rollout) (*rolloutConte } newRS := replicasetutil.FindNewReplicaSet(rollout, rsList) - olderRSs := replicasetutil.FindOldReplicaSets(rollout, rsList) + olderRSs := replicasetutil.FindOldReplicaSets(rollout, rsList, newRS) stableRS := replicasetutil.GetStableRS(rollout, newRS, olderRSs) otherRSs := replicasetutil.GetOtherRSs(rollout, newRS, stableRS, rsList) @@ -553,9 +576,49 @@ func (c *rolloutContext) getRolloutReferencedResources() (*validation.Referenced } refResources.AmbassadorMappings = ambassadorMappings + appmeshResources, err := c.getReferencedAppMeshResources() + if err != nil { + return nil, err + } + refResources.AppMeshResources = appmeshResources + return &refResources, nil } +func (c *rolloutContext) getReferencedAppMeshResources() ([]unstructured.Unstructured, error) { + ctx := context.TODO() + appmeshClient := appmesh.NewResourceClient(c.dynamicclientset) + rollout := c.rollout + refResources := []unstructured.Unstructured{} + if rollout.Spec.Strategy.Canary != nil { + canary := rollout.Spec.Strategy.Canary + if canary.TrafficRouting != nil && canary.TrafficRouting.AppMesh != nil { + fldPath := field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService") + tr := canary.TrafficRouting.AppMesh + if tr.VirtualService == nil { + return nil, field.Invalid(fldPath, nil, "must provide virtual-service") + } + + vsvc, err := appmeshClient.GetVirtualServiceCR(ctx, c.rollout.Namespace, tr.VirtualService.Name) + if err != nil
{ + if k8serrors.IsNotFound(err) { + return nil, field.Invalid(fldPath, fmt.Sprintf("%s.%s", tr.VirtualService.Name, c.rollout.Namespace), err.Error()) + } + return nil, err + } + vr, err := appmeshClient.GetVirtualRouterCRForVirtualService(ctx, vsvc) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, field.Invalid(fldPath, fmt.Sprintf("%s.%s", tr.VirtualService.Name, c.rollout.Namespace), err.Error()) + } + return nil, err + } + refResources = append(refResources, *vr) + } + } + return refResources, nil +} + func (c *rolloutContext) getAmbassadorMappings() ([]unstructured.Unstructured, error) { mappings := []unstructured.Unstructured{} if c.rollout.Spec.Strategy.Canary != nil { @@ -584,69 +647,60 @@ func (c *rolloutContext) getAmbassadorMappings() ([]unstructured.Unstructured, e } func (c *rolloutContext) getReferencedServices() (*[]validation.ServiceWithType, error) { - services := []validation.ServiceWithType{} - if c.rollout.Spec.Strategy.BlueGreen != nil { - if c.rollout.Spec.Strategy.BlueGreen.ActiveService != "" { - activeSvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(c.rollout.Spec.Strategy.BlueGreen.ActiveService) - if k8serrors.IsNotFound(err) { - fldPath := validation.GetServiceWithTypeFieldPath(validation.ActiveService) - return nil, field.Invalid(fldPath, c.rollout.Spec.Strategy.BlueGreen.ActiveService, err.Error()) - } - if err != nil { - return nil, err - } - services = append(services, validation.ServiceWithType{ - Service: activeSvc, - Type: validation.ActiveService, - }) + var services []validation.ServiceWithType + if bluegreenSpec := c.rollout.Spec.Strategy.BlueGreen; bluegreenSpec != nil { + if service, err := c.getReferencedService(bluegreenSpec.ActiveService, validation.ActiveService); service != nil { + services = append(services, *service) + } else if err != nil { + return nil, err } - if c.rollout.Spec.Strategy.BlueGreen.PreviewService != "" { - previewSvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(c.rollout.Spec.Strategy.BlueGreen.PreviewService) - if k8serrors.IsNotFound(err) { - fldPath := validation.GetServiceWithTypeFieldPath(validation.PreviewService) - return nil, field.Invalid(fldPath, c.rollout.Spec.Strategy.BlueGreen.PreviewService, err.Error()) - } - if err != nil { - return nil, err - } - services = append(services, validation.ServiceWithType{ - Service: previewSvc, - Type: validation.PreviewService, - }) + if service, err := c.getReferencedService(bluegreenSpec.PreviewService, validation.PreviewService); service != nil { + services = append(services, *service) + } else if err != nil { + return nil, err } - } else if c.rollout.Spec.Strategy.Canary != nil { - if c.rollout.Spec.Strategy.Canary.StableService != "" { - stableSvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(c.rollout.Spec.Strategy.Canary.StableService) - if k8serrors.IsNotFound(err) { - fldPath := validation.GetServiceWithTypeFieldPath(validation.StableService) - return nil, field.Invalid(fldPath, c.rollout.Spec.Strategy.Canary.StableService, err.Error()) - } - if err != nil { - return nil, err - } - services = append(services, validation.ServiceWithType{ - Service: stableSvc, - Type: validation.StableService, - }) + } else if canarySpec := c.rollout.Spec.Strategy.Canary; canarySpec != nil { + if service, err := c.getReferencedService(canarySpec.StableService, validation.StableService); service != nil { + services = append(services, *service) + } else if err != nil { + return nil, err } - if 
c.rollout.Spec.Strategy.Canary.CanaryService != "" { - canarySvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(c.rollout.Spec.Strategy.Canary.CanaryService) - if k8serrors.IsNotFound(err) { - fldPath := validation.GetServiceWithTypeFieldPath(validation.CanaryService) - return nil, field.Invalid(fldPath, c.rollout.Spec.Strategy.Canary.CanaryService, err.Error()) + if service, err := c.getReferencedService(canarySpec.CanaryService, validation.CanaryService); service != nil { + services = append(services, *service) + } else if err != nil { + return nil, err + } + if canarySpec.PingPong != nil { + if service, err := c.getReferencedService(canarySpec.PingPong.PingService, validation.PingService); service != nil { + services = append(services, *service) + } else if err != nil { + return nil, err } - if err != nil { + if service, err := c.getReferencedService(canarySpec.PingPong.PongService, validation.PongService); service != nil { + services = append(services, *service) + } else if err != nil { return nil, err } - services = append(services, validation.ServiceWithType{ - Service: canarySvc, - Type: validation.CanaryService, - }) } } return &services, nil } +func (c *rolloutContext) getReferencedService(serviceName string, serviceType validation.ServiceType) (*validation.ServiceWithType, error) { + if serviceName != "" { + svc, err := c.servicesLister.Services(c.rollout.Namespace).Get(serviceName) + if k8serrors.IsNotFound(err) { + fldPath := validation.GetServiceWithTypeFieldPath(serviceType) + return nil, field.Invalid(fldPath, serviceName, err.Error()) + } + if err != nil { + return nil, err + } + return &validation.ServiceWithType{Service: svc, Type: serviceType}, nil + } + return nil, nil +} + func (c *rolloutContext) getReferencedRolloutAnalyses() (*[]validation.AnalysisTemplatesWithType, error) { analysisTemplates := make([]validation.AnalysisTemplatesWithType, 0) if c.rollout.Spec.Strategy.BlueGreen != nil { @@ -744,6 +798,19 @@ func (c *rolloutContext) getReferencedIngresses() (*[]ingressutil.Ingress, error } ingresses = append(ingresses, *ingress) } else if canary.TrafficRouting.Nginx != nil { + // If the rollout resource manages more than 1 ingress + if len(canary.TrafficRouting.Nginx.AdditionalStableIngresses) > 0 { + for _, ing := range canary.TrafficRouting.Nginx.AdditionalStableIngresses { + ingress, err := c.ingressWrapper.GetCached(c.rollout.Namespace, ing) + if k8serrors.IsNotFound(err) { + return nil, field.Invalid(fldPath.Child("nginx", "AdditionalStableIngresses"), canary.TrafficRouting.Nginx.StableIngress, err.Error()) + } + if err != nil { + return nil, err + } + ingresses = append(ingresses, *ingress) + } + } ingress, err := c.ingressWrapper.GetCached(c.rollout.Namespace, canary.TrafficRouting.Nginx.StableIngress) if k8serrors.IsNotFound(err) { return nil, field.Invalid(fldPath.Child("nginx", "stableIngress"), canary.TrafficRouting.Nginx.StableIngress, err.Error()) diff --git a/rollout/controller_test.go b/rollout/controller_test.go index d58904e779..ec7ac4e355 100644 --- a/rollout/controller_test.go +++ b/rollout/controller_test.go @@ -13,7 +13,6 @@ import ( "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/undefinedlabs/go-mpatch" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -35,7 +34,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" corev1defaults "k8s.io/kubernetes/pkg/apis/core/v1" - 
"k8s.io/kubernetes/pkg/controller" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/controller/metrics" @@ -48,12 +46,15 @@ import ( "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/hash" ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" istioutil "github.com/argoproj/argo-rollouts/utils/istio" "github.com/argoproj/argo-rollouts/utils/queue" "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + timeutil "github.com/argoproj/argo-rollouts/utils/time" + unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" ) var ( @@ -103,9 +104,8 @@ type fixture struct { unfreezeTime func() error // events holds all the K8s Event Reasons emitted during the run - events []string - fakeTrafficRouting []*mocks.TrafficRoutingReconciler - fakeSingleTrafficRouting *mocks.TrafficRoutingReconciler + events []string + fakeTrafficRouting *mocks.TrafficRoutingReconciler } func newFixture(t *testing.T) *fixture { @@ -115,12 +115,13 @@ func newFixture(t *testing.T) *fixture { f.kubeobjects = []runtime.Object{} f.enqueuedObjects = make(map[string]int) now := time.Now() - patch, err := mpatch.PatchMethod(time.Now, func() time.Time { return now }) - assert.NoError(t, err) - f.unfreezeTime = patch.Unpatch + timeutil.Now = func() time.Time { return now } + f.unfreezeTime = func() error { + timeutil.Now = time.Now + return nil + } - f.fakeTrafficRouting = newFakeTrafficRoutingReconciler() - f.fakeSingleTrafficRouting = newFakeSingleTrafficRoutingReconciler() + f.fakeTrafficRouting = newFakeSingleTrafficRoutingReconciler() return f } @@ -178,8 +179,8 @@ func newPausedCondition(isPaused bool) (v1alpha1.RolloutCondition, string) { status = corev1.ConditionFalse } condition := v1alpha1.RolloutCondition{ - LastTransitionTime: metav1.Now(), - LastUpdateTime: metav1.Now(), + LastTransitionTime: timeutil.MetaNow(), + LastUpdateTime: timeutil.MetaNow(), Message: conditions.RolloutPausedMessage, Reason: conditions.RolloutPausedReason, Status: status, @@ -192,14 +193,36 @@ func newPausedCondition(isPaused bool) (v1alpha1.RolloutCondition, string) { return condition, string(conditionBytes) } +func newHealthyCondition(isHealthy bool) (v1alpha1.RolloutCondition, string) { + status := corev1.ConditionTrue + msg := conditions.RolloutHealthyMessage + if !isHealthy { + status = corev1.ConditionFalse + msg = conditions.RolloutNotHealthyMessage + } + condition := v1alpha1.RolloutCondition{ + LastTransitionTime: timeutil.MetaNow(), + LastUpdateTime: timeutil.MetaNow(), + Message: msg, + Reason: conditions.RolloutHealthyReason, + Status: status, + Type: v1alpha1.RolloutHealthy, + } + conditionBytes, err := json.Marshal(condition) + if err != nil { + panic(err) + } + return condition, string(conditionBytes) +} + func newCompletedCondition(isCompleted bool) (v1alpha1.RolloutCondition, string) { status := corev1.ConditionTrue if !isCompleted { status = corev1.ConditionFalse } condition := v1alpha1.RolloutCondition{ - LastTransitionTime: metav1.Now(), - LastUpdateTime: metav1.Now(), + LastTransitionTime: timeutil.MetaNow(), + LastUpdateTime: timeutil.MetaNow(), Message: conditions.RolloutCompletedReason, Reason: conditions.RolloutCompletedReason, Status: status, @@ -229,6 +252,9 @@ func newProgressingCondition(reason string, resourceObj 
runtime.Object, optional if reason == conditions.NewRSAvailableReason { msg = fmt.Sprintf(conditions.ReplicaSetCompletedMessage, resource.Name) } + if reason == conditions.ReplicaSetNotAvailableReason { + msg = conditions.NotAvailableMessage + } case *v1alpha1.Rollout: if reason == conditions.ReplicaSetUpdatedReason { msg = fmt.Sprintf(conditions.RolloutProgressingMessage, resource.Name) @@ -276,8 +302,8 @@ func newProgressingCondition(reason string, resourceObj runtime.Object, optional } condition := v1alpha1.RolloutCondition{ - LastTransitionTime: metav1.Now(), - LastUpdateTime: metav1.Now(), + LastTransitionTime: timeutil.MetaNow(), + LastUpdateTime: timeutil.MetaNow(), Message: msg, Reason: reason, Status: status, @@ -300,8 +326,8 @@ func newAvailableCondition(available bool) (v1alpha1.RolloutCondition, string) { } condition := v1alpha1.RolloutCondition{ - LastTransitionTime: metav1.Now(), - LastUpdateTime: metav1.Now(), + LastTransitionTime: timeutil.MetaNow(), + LastUpdateTime: timeutil.MetaNow(), Message: message, Reason: conditions.AvailableReason, Status: status, @@ -311,33 +337,57 @@ func newAvailableCondition(available bool) (v1alpha1.RolloutCondition, string) { return condition, string(conditionBytes) } -func generateConditionsPatch(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string) string { +func generateConditionsPatch(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, isCompleted bool) string { _, availableCondition := newAvailableCondition(available) _, progressingCondition := newProgressingCondition(progressingReason, progressingResource, progressingMessage) + _, completedCondition := newCompletedCondition(isCompleted) if availableConditionFirst { - return fmt.Sprintf("[%s, %s]", availableCondition, progressingCondition) + return fmt.Sprintf("[%s, %s, %s]", availableCondition, progressingCondition, completedCondition) } - return fmt.Sprintf("[%s, %s]", progressingCondition, availableCondition) + return fmt.Sprintf("[%s, %s, %s]", progressingCondition, availableCondition, completedCondition) } -func generateConditionsPatchWithPause(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, isPaused bool) string { +func generateConditionsPatchWithPause(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, isPaused bool, isCompleted bool) string { _, availableCondition := newAvailableCondition(available) _, progressingCondition := newProgressingCondition(progressingReason, progressingResource, progressingMessage) _, pauseCondition := newPausedCondition(isPaused) + _, completedCondition := newCompletedCondition(isCompleted) if availableConditionFirst { - return fmt.Sprintf("[%s, %s, %s]", availableCondition, progressingCondition, pauseCondition) + return fmt.Sprintf("[%s, %s, %s, %s]", availableCondition, completedCondition, progressingCondition, pauseCondition) } - return fmt.Sprintf("[%s, %s, %s]", progressingCondition, pauseCondition, availableCondition) + return fmt.Sprintf("[%s, %s, %s, %s]", progressingCondition, pauseCondition, availableCondition, completedCondition) } -func generateConditionsPatchWithComplete(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, 
isCompleted bool) string { +func generateConditionsPatchWithHealthy(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, isHealthy bool, isCompleted bool) string { + _, availableCondition := newAvailableCondition(available) + _, progressingCondition := newProgressingCondition(progressingReason, progressingResource, progressingMessage) + _, healthyCondition := newHealthyCondition(isHealthy) + _, completedCondition := newCompletedCondition(isCompleted) + if availableConditionFirst { + return fmt.Sprintf("[%s, %s, %s, %s]", availableCondition, completedCondition, healthyCondition, progressingCondition) + } + return fmt.Sprintf("[%s, %s, %s, %s]", completedCondition, healthyCondition, progressingCondition, availableCondition) +} + +func generateConditionsPatchWithCompleted(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, isCompleted bool) string { _, availableCondition := newAvailableCondition(available) _, progressingCondition := newProgressingCondition(progressingReason, progressingResource, progressingMessage) _, completeCondition := newCompletedCondition(isCompleted) if availableConditionFirst { - return fmt.Sprintf("[%s, %s, %s]", availableCondition, completeCondition, progressingCondition) + return fmt.Sprintf("[%s, %s, %s]", availableCondition, progressingCondition, completeCondition) } - return fmt.Sprintf("[%s, %s, %s]", completeCondition, progressingCondition, availableCondition) + return fmt.Sprintf("[%s, %s, %s]", progressingCondition, availableCondition, completeCondition) +} + +func generateConditionsPatchWithCompletedHealthy(available bool, progressingReason string, progressingResource runtime.Object, availableConditionFirst bool, progressingMessage string, isHealthy bool, isCompleted bool) string { + _, completedCondition := newCompletedCondition(isCompleted) + _, availableCondition := newAvailableCondition(available) + _, progressingCondition := newProgressingCondition(progressingReason, progressingResource, progressingMessage) + _, healthyCondition := newHealthyCondition(isHealthy) + if availableConditionFirst { + return fmt.Sprintf("[%s, %s, %s, %s]", availableCondition, healthyCondition, completedCondition, progressingCondition) + } + return fmt.Sprintf("[%s, %s, %s, %s]", healthyCondition, completedCondition, progressingCondition, availableCondition) } func updateConditionsPatch(r v1alpha1.Rollout, newCondition v1alpha1.RolloutCondition) string { @@ -347,7 +397,7 @@ func updateConditionsPatch(r v1alpha1.Rollout, newCondition v1alpha1.RolloutCond } // func updateBlueGreenRolloutStatus(r *v1alpha1.Rollout, preview, active string, availableReplicas, updatedReplicas, hpaReplicas int32, pause bool, available bool, progressingStatus string) *v1alpha1.Rollout { -func updateBlueGreenRolloutStatus(r *v1alpha1.Rollout, preview, active, stable string, availableReplicas, updatedReplicas, totalReplicas, hpaReplicas int32, pause bool, available bool) *v1alpha1.Rollout { +func updateBlueGreenRolloutStatus(r *v1alpha1.Rollout, preview, active, stable string, availableReplicas, updatedReplicas, totalReplicas, hpaReplicas int32, pause bool, available bool, isCompleted bool) *v1alpha1.Rollout { newRollout := updateBaseRolloutStatus(r, availableReplicas, updatedReplicas, totalReplicas, hpaReplicas) selector := newRollout.Spec.Selector.DeepCopy() if active != "" { @@ -359,8 +409,10 @@ func updateBlueGreenRolloutStatus(r 
*v1alpha1.Rollout, preview, active, stable s newRollout.Status.StableRS = stable cond, _ := newAvailableCondition(available) newRollout.Status.Conditions = append(newRollout.Status.Conditions, cond) + completeCond, _ := newCompletedCondition(isCompleted) + newRollout.Status.Conditions = append(newRollout.Status.Conditions, completeCond) if pause { - now := metav1.Now() + now := timeutil.MetaNow() cond := v1alpha1.PauseCondition{ Reason: v1alpha1.PauseReasonBlueGreenPause, StartTime: now, @@ -398,7 +450,7 @@ func updateBaseRolloutStatus(r *v1alpha1.Rollout, availableReplicas, updatedRepl } func newReplicaSet(r *v1alpha1.Rollout, replicas int) *appsv1.ReplicaSet { - podHash := controller.ComputeHash(&r.Spec.Template, r.Status.CollisionCount) + podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) rsLabels := map[string]string{ v1alpha1.DefaultRolloutUniqueLabelKey: podHash, } @@ -565,7 +617,7 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share return nil, nil } var reconcilers = []trafficrouting.TrafficRoutingReconciler{} - reconcilers = append(reconcilers, f.fakeSingleTrafficRouting) + reconcilers = append(reconcilers, f.fakeTrafficRouting) return reconcilers, nil } @@ -892,7 +944,7 @@ func (f *fixture) verifyPatchedReplicaSet(index int, scaleDownDelaySeconds int32 if !ok { assert.Fail(f.t, "Expected Patch action, not %s", action.GetVerb()) } - now := metav1.Now().Add(time.Duration(scaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) + now := timeutil.Now().Add(time.Duration(scaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, now) assert.Equal(f.t, string(patchAction.GetPatch()), patch) } @@ -1174,7 +1226,7 @@ func TestRequeueStuckRollout(t *testing.T) { if progressingConditionReason != "" { lastUpdated := metav1.Time{ - Time: metav1.Now().Add(-10 * time.Second), + Time: timeutil.MetaNow().Add(-10 * time.Second), } r.Status.Conditions = []v1alpha1.RolloutCondition{{ Type: v1alpha1.RolloutProgressing, @@ -1297,7 +1349,7 @@ func TestPodTemplateHashEquivalence(t *testing.T) { var err error // NOTE: This test will fail on every k8s library upgrade. // To fix it, update expectedReplicaSetName to match the new hash. 
- expectedReplicaSetName := "guestbook-586d86c77b" + expectedReplicaSetName := "guestbook-6c5667f666" r1 := newBlueGreenRollout("guestbook", 1, nil, "active", "") r1Resources := ` @@ -1386,6 +1438,8 @@ func TestComputeHashChangeTolerationBlueGreen(t *testing.T) { conditions.SetRolloutCondition(&r.Status, availableCondition) progressingCondition, _ := newProgressingCondition(conditions.ReplicaSetUpdatedReason, rs, "") conditions.SetRolloutCondition(&r.Status, progressingCondition) + completedCondition, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r.Status, completedCondition) r.Status.Phase, r.Status.Message = rolloututil.CalculateRolloutPhase(r.Spec, r.Status) podTemplate := corev1.PodTemplate{ @@ -1430,6 +1484,8 @@ func TestComputeHashChangeTolerationCanary(t *testing.T) { conditions.SetRolloutCondition(&r.Status, availableCondition) progressingCondition, _ := newProgressingCondition(conditions.ReplicaSetUpdatedReason, rs, "") conditions.SetRolloutCondition(&r.Status, progressingCondition) + completedCondition, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r.Status, completedCondition) podTemplate := corev1.PodTemplate{ Template: rs.Spec.Template, @@ -1457,7 +1513,7 @@ func TestSwitchBlueGreenToCanary(t *testing.T) { activeSvc := newService("active", 80, nil, r) rs := newReplicaSetWithStatus(r, 1, 1) rsPodHash := rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] - r = updateBlueGreenRolloutStatus(r, "", rsPodHash, rsPodHash, 1, 1, 1, 1, false, true) + r = updateBlueGreenRolloutStatus(r, "", rsPodHash, rsPodHash, 1, 1, 1, 1, false, true, false) // StableRS is set to avoid running the migration code. When .status.canary.stableRS is removed, the line below can be deleted //r.Status.Canary.StableRS = rsPodHash r.Spec.Strategy.BlueGreen = nil @@ -1475,7 +1531,7 @@ func TestSwitchBlueGreenToCanary(t *testing.T) { f.run(getKey(r, t)) patch := f.getPatchedRollout(i) - addedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs, true, "") + addedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs, true, "", true) expectedPatch := fmt.Sprintf(`{ "status": { "blueGreen": { @@ -1498,8 +1554,8 @@ func newInvalidSpecCondition(reason string, resourceObj runtime.Object, optional } condition := v1alpha1.RolloutCondition{ - LastTransitionTime: metav1.Now(), - LastUpdateTime: metav1.Now(), + LastTransitionTime: timeutil.MetaNow(), + LastUpdateTime: timeutil.MetaNow(), Message: msg, Reason: reason, Status: status, @@ -1656,6 +1712,67 @@ func TestGetReferencedIngressesNginx(t *testing.T) { defer f.Close() t.Run("get referenced Nginx ingress - fail", func(t *testing.T) { + // clear fixture + f.ingressLister = []*ingressutil.Ingress{} + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedIngresses() + expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "nginx", "stableIngress"), "nginx-ingress-name", "ingress.extensions \"nginx-ingress-name\" not found") + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("get referenced Nginx ingress - success", func(t *testing.T) { + // clear fixture + f.ingressLister = []*ingressutil.Ingress{} + ingress := &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-name", + Namespace: metav1.NamespaceDefault, + }, + } + f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress)) + c, _, _ := 
f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedIngresses() + assert.NoError(t, err) + }) +} +func TestGetReferencedIngressesNginxMultiIngress(t *testing.T) { + f := newFixture(t) + defer f.Close() + r := newCanaryRollout("rollout", 1, nil, nil, nil, intstr.FromInt(0), intstr.FromInt(1)) + r.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{ + StableIngress: "nginx-ingress-name", + AdditionalStableIngresses: []string{"nginx-ingress-additional"}, + }, + } + r.Namespace = metav1.NamespaceDefault + defer f.Close() + + t.Run("get referenced Nginx ingress - fail on secondary when both missing", func(t *testing.T) { + // clear fixture + f.ingressLister = []*ingressutil.Ingress{} + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedIngresses() + expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "nginx", "AdditionalStableIngresses"), "nginx-ingress-name", "ingress.extensions \"nginx-ingress-additional\" not found") + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("get referenced Nginx ingress - fail on primary when additional present", func(t *testing.T) { + // clear fixture + f.ingressLister = []*ingressutil.Ingress{} + ingressAdditional := &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-additional", + Namespace: metav1.NamespaceDefault, + }, + } + f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingressAdditional)) c, _, _ := f.newController(noResyncPeriodFunc) roCtx, err := c.newRolloutContext(r) assert.NoError(t, err) @@ -1664,14 +1781,44 @@ func TestGetReferencedIngressesNginx(t *testing.T) { assert.Equal(t, expectedErr.Error(), err.Error()) }) + t.Run("get referenced Nginx ingress - fail on secondary when only secondary missing", func(t *testing.T) { + // clear fixture + f.ingressLister = []*ingressutil.Ingress{} + ingress := &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-name", + Namespace: metav1.NamespaceDefault, + }, + } + f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress)) + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedIngresses() + if err == nil { + fmt.Println("ERROR IS NIL") + } + expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "nginx", "AdditionalStableIngresses"), "nginx-ingress-name", "ingress.extensions \"nginx-ingress-additional\" not found") + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + t.Run("get referenced Nginx ingress - success", func(t *testing.T) { + // clear fixture + f.ingressLister = []*ingressutil.Ingress{} ingress := &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "nginx-ingress-name", Namespace: metav1.NamespaceDefault, }, } + ingressAdditional := &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-ingress-additional", + Namespace: metav1.NamespaceDefault, + }, + } f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress)) + f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingressAdditional)) c, _, _ := f.newController(noResyncPeriodFunc) roCtx, err := c.newRolloutContext(r) assert.NoError(t, err) @@ -1680,6 +1827,135 @@ func 
TestGetReferencedIngressesNginx(t *testing.T) { }) } +func TestGetReferencedAppMeshResources(t *testing.T) { + r := newCanaryRollout("rollout", 1, nil, nil, nil, intstr.FromInt(0), intstr.FromInt(1)) + r.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{ + VirtualService: &v1alpha1.AppMeshVirtualService{ + Name: "mysvc", + Routes: []string{"primary"}, + }, + VirtualNodeGroup: &v1alpha1.AppMeshVirtualNodeGroup{ + CanaryVirtualNodeRef: &v1alpha1.AppMeshVirtualNodeReference{ + Name: "mysvc-canary-vn", + }, + StableVirtualNodeRef: &v1alpha1.AppMeshVirtualNodeReference{ + Name: "mysvc-stable-vn", + }, + }, + }, + } + r.Namespace = "default" + + t.Run("should return error when virtual-service is not defined on rollout", func(t *testing.T) { + f := newFixture(t) + defer f.Close() + + c, _, _ := f.newController(noResyncPeriodFunc) + rCopy := r.DeepCopy() + rCopy.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualService = nil + roCtx, err := c.newRolloutContext(rCopy) + assert.NoError(t, err) + _, err = roCtx.getRolloutReferencedResources() + expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService"), "null", "must provide virtual-service") + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("should return error when virtual-service is not-found", func(t *testing.T) { + f := newFixture(t) + defer f.Close() + + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getRolloutReferencedResources() + expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService"), "mysvc.default", "virtualservices.appmesh.k8s.aws \"mysvc\" not found") + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("should return error when virtual-router is not-found", func(t *testing.T) { + f := newFixture(t) + defer f.Close() + + vsvc := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + name: mysvc + namespace: default +spec: + provider: + virtualRouter: + virtualRouterRef: + name: mysvc-vrouter +` + uVsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvc) + f.objects = append(f.objects, uVsvc) + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getRolloutReferencedResources() + expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService"), "mysvc.default", "virtualrouters.appmesh.k8s.aws \"mysvc-vrouter\" not found") + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("get referenced App Mesh - success", func(t *testing.T) { + f := newFixture(t) + defer f.Close() + + vsvc := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + name: mysvc + namespace: default +spec: + provider: + virtualRouter: + virtualRouterRef: + name: mysvc-vrouter +` + + vrouter := ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + name: mysvc-vrouter + namespace: default +spec: + listeners: + - portMapping: + port: 8080 + protocol: http + routes: + - name: primary + httpRoute: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: mysvc-canary-vn + weight: 0 + - virtualNodeRef: + name: mysvc-stable-vn + weight: 100 +` + + uVsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvc) + uVrouter := unstructuredutil.StrToUnstructuredUnsafe(vrouter) 
+ f.objects = append(f.objects, uVsvc, uVrouter) + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + refRsources, err := roCtx.getRolloutReferencedResources() + assert.NoError(t, err) + assert.Len(t, refRsources.AppMeshResources, 1) + assert.Equal(t, refRsources.AppMeshResources[0].GetKind(), "VirtualRouter") + }) + +} + func TestGetAmbassadorMappings(t *testing.T) { f := newFixture(t) defer f.Close() diff --git a/rollout/experiment.go b/rollout/experiment.go index 807d44ddb7..aeac6e8049 100644 --- a/rollout/experiment.go +++ b/rollout/experiment.go @@ -4,19 +4,20 @@ import ( "context" "fmt" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/defaults" experimentutil "github.com/argoproj/argo-rollouts/utils/experiment" + "github.com/argoproj/argo-rollouts/utils/hash" "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/controller" ) // GetExperimentFromTemplate takes the canary experiment step and converts it to an experiment @@ -25,7 +26,7 @@ func GetExperimentFromTemplate(r *v1alpha1.Rollout, stableRS, newRS *appsv1.Repl if step == nil { return nil, nil } - podHash := controller.ComputeHash(&r.Spec.Template, r.Status.CollisionCount) + podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) currentStep := int32(0) if r.Status.CurrentStepIndex != nil { currentStep = *r.Status.CurrentStepIndex @@ -106,7 +107,11 @@ func GetExperimentFromTemplate(r *v1alpha1.Rollout, stableRS, newRS *appsv1.Repl } for i := range step.Analyses { analysis := step.Analyses[i] - args := analysisutil.BuildArgumentsForRolloutAnalysisRun(analysis.Args, stableRS, newRS, r) + args, err := analysisutil.BuildArgumentsForRolloutAnalysisRun(analysis.Args, stableRS, newRS, r) + if err != nil { + return nil, err + } + analysisTemplate := v1alpha1.ExperimentAnalysisTemplateRef{ Name: analysis.Name, TemplateName: analysis.TemplateName, @@ -230,7 +235,7 @@ func (c *rolloutContext) createExperimentWithCollisionHandling(newEx *v1alpha1.E // we likely reconciled the rollout with a stale cache (quite common). 
return existingEx, nil } - newEx.Name = fmt.Sprintf("%s.%d", baseName, collisionCount) + newEx.Name = fmt.Sprintf("%s-%d", baseName, collisionCount) collisionCount++ } } diff --git a/rollout/experiment_test.go b/rollout/experiment_test.go index 48c05e4233..e2e50c1f28 100644 --- a/rollout/experiment_test.go +++ b/rollout/experiment_test.go @@ -1,18 +1,20 @@ package rollout import ( + "encoding/json" "fmt" "testing" "time" - "k8s.io/apimachinery/pkg/util/uuid" - - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/utils/pointer" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/utils/conditions" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func TestRolloutCreateExperiment(t *testing.T) { @@ -66,7 +68,7 @@ func TestRolloutCreateExperiment(t *testing.T) { "conditions": %s } }` - conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch) } @@ -123,7 +125,7 @@ func TestRolloutCreateClusterTemplateExperiment(t *testing.T) { "conditions": %s } }` - conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch) } @@ -165,7 +167,7 @@ func TestCreateExperimentWithCollision(t *testing.T) { f.run(getKey(r2, t)) createdEx := f.getCreatedExperiment(createExIndex) - assert.Equal(t, ex.Name+".1", createdEx.Name) + assert.Equal(t, ex.Name+"-1", createdEx.Name) patch := f.getPatchedRollout(patchIndex) expectedPatch := `{ "status": { @@ -175,7 +177,7 @@ func TestCreateExperimentWithCollision(t *testing.T) { "conditions": %s } }` - conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, createdEx.Name, conds)), patch) } @@ -226,7 +228,7 @@ func TestCreateExperimentWithCollisionAndSemanticEquality(t *testing.T) { "conditions": %s } }` - conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") + conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch) } @@ -254,6 +256,8 @@ func TestRolloutExperimentProcessingDoNothing(t *testing.T) { conditions.SetRolloutCondition(&r2.Status, progressingCondition) availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) f.rolloutLister = append(f.rolloutLister, r2) f.experimentLister = append(f.experimentLister, ex) @@ -308,8 +312,8 @@ func TestAbortRolloutAfterFailedExperiment(t *testing.T) { "message": "%s: %s" } }` - now := metav1.Now().UTC().Format(time.RFC3339) - generatedConditions := generateConditionsPatch(true, 
conditions.RolloutAbortedReason, r2, false, "") + now := timeutil.Now().UTC().Format(time.RFC3339) + generatedConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, generatedConditions, conditions.RolloutAbortedReason, fmt.Sprintf(conditions.RolloutAbortedMessage, 2))), patch) } @@ -321,7 +325,7 @@ func TestPauseRolloutAfterInconclusiveExperiment(t *testing.T) { Experiment: &v1alpha1.RolloutExperimentStep{}, }} - r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(1), intstr.FromInt(1)) r2 := bumpVersion(r1) rs1 := newReplicaSetWithStatus(r1, 1, 1) @@ -342,25 +346,13 @@ func TestPauseRolloutAfterInconclusiveExperiment(t *testing.T) { patchIndex := f.expectPatchRolloutAction(r1) f.run(getKey(r2, t)) patch := f.getPatchedRollout(patchIndex) - expectedPatchFmt := `{ - "status": { - "canary": { - "currentExperiment": null - }, - "pauseConditions": [{ - "reason": "%s", - "startTime": "%s" - }], - "conditions": %s, - "controllerPause": true, - "phase": "Paused", - "message": "%s" - } - }` - now := metav1.Now().UTC().Format(time.RFC3339) - conditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "") - expectedPatch := calculatePatch(r2, fmt.Sprintf(expectedPatchFmt, v1alpha1.PauseReasonInconclusiveExperiment, now, conditions, v1alpha1.PauseReasonInconclusiveExperiment)) - assert.Equal(t, expectedPatch, patch) + ro := v1alpha1.Rollout{} + err := json.Unmarshal([]byte(patch), &ro) + if err != nil { + panic(err) + } + assert.Equal(t, ro.Status.PauseConditions[0].Reason, v1alpha1.PauseReason("InconclusiveExperiment")) + assert.Equal(t, ro.Status.Message, "InconclusiveExperiment") } func TestRolloutExperimentScaleDownExperimentFromPreviousStep(t *testing.T) { @@ -372,7 +364,7 @@ func TestRolloutExperimentScaleDownExperimentFromPreviousStep(t *testing.T) { {SetWeight: pointer.Int32Ptr(1)}, } - r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(0), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(1)) r2 := bumpVersion(r1) rs1 := newReplicaSetWithStatus(r1, 1, 1) @@ -487,7 +479,7 @@ func TestRolloutExperimentFinishedIncrementStep(t *testing.T) { "conditions": %s } }` - generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "") + generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", false) assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, generatedConditions)), patch) } diff --git a/rollout/mocks/TrafficRoutingReconciler.go b/rollout/mocks/TrafficRoutingReconciler.go index bf68c301e1..3a5b1fa67f 100644 --- a/rollout/mocks/TrafficRoutingReconciler.go +++ b/rollout/mocks/TrafficRoutingReconciler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. 
package mocks @@ -13,6 +13,48 @@ type TrafficRoutingReconciler struct { mock.Mock } +// RemoveManagedRoutes provides a mock function with given fields: +func (_m *TrafficRoutingReconciler) RemoveManagedRoutes() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetHeaderRoute provides a mock function with given fields: setHeaderRoute +func (_m *TrafficRoutingReconciler) SetHeaderRoute(setHeaderRoute *v1alpha1.SetHeaderRoute) error { + ret := _m.Called(setHeaderRoute) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1alpha1.SetHeaderRoute) error); ok { + r0 = rf(setHeaderRoute) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetMirrorRoute provides a mock function with given fields: setMirrorRoute +func (_m *TrafficRoutingReconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + ret := _m.Called(setMirrorRoute) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1alpha1.SetMirrorRoute) error); ok { + r0 = rf(setMirrorRoute) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // SetWeight provides a mock function with given fields: desiredWeight, additionalDestinations func (_m *TrafficRoutingReconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { _va := make([]interface{}, len(additionalDestinations)) @@ -98,3 +140,18 @@ func (_m *TrafficRoutingReconciler) VerifyWeight(desiredWeight int32, additional return r0, r1 } + +type mockConstructorTestingTNewTrafficRoutingReconciler interface { + mock.TestingT + Cleanup(func()) +} + +// NewTrafficRoutingReconciler creates a new instance of TrafficRoutingReconciler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewTrafficRoutingReconciler(t mockConstructorTestingTNewTrafficRoutingReconciler) *TrafficRoutingReconciler { + mock := &TrafficRoutingReconciler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rollout/pause.go b/rollout/pause.go index c3ee87aa3a..25ae24c3ba 100644 --- a/rollout/pause.go +++ b/rollout/pause.go @@ -7,6 +7,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) type pauseContext struct { @@ -56,7 +57,7 @@ func (pCtx *pauseContext) ClearPauseConditions() { } func (pCtx *pauseContext) CalculatePauseStatus(newStatus *v1alpha1.RolloutStatus) { - now := metav1.Now() + now := timeutil.MetaNow() // if we are already aborted, preserve the original timestamp, otherwise we'll cause a // reconciliation hot-loop. 
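The metav1.Now() to timeutil.MetaNow() switch just above (and repeated in replicaset.go, restart.go, and the tests further down) reads like an injectable clock for deterministic tests. The utils/time package itself is not part of this diff, so the following is only a minimal sketch of what such a helper might look like; the swappable Now variable is an assumption, not the project's actual code:

```go
package timeutil

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Now is a variable rather than a direct call to time.Now so tests can
// substitute a fixed clock (assumed design, not copied from the repo).
var Now = time.Now

// MetaNow wraps the current time in the metav1.Time type used by Kubernetes
// API objects, mirroring how metav1.Now() was used before this change.
func MetaNow() metav1.Time {
	return metav1.Time{Time: Now()}
}
```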
newAbortedAt := pCtx.rollout.Status.AbortedAt @@ -163,7 +164,7 @@ func (pCtx *pauseContext) CompletedBlueGreenPause() bool { } if pauseCond != nil { switchDeadline := pauseCond.StartTime.Add(time.Duration(autoPromotionSeconds) * time.Second) - now := metav1.Now() + now := timeutil.MetaNow() if now.After(switchDeadline) { return true } @@ -189,7 +190,7 @@ func (pCtx *pauseContext) CompletedCanaryPauseStep(pause v1alpha1.RolloutPause) pCtx.log.Info("Rollout has been unpaused") return true } else if pause.Duration != nil { - now := metav1.Now() + now := timeutil.MetaNow() if pauseCondition != nil { expiredTime := pauseCondition.StartTime.Add(time.Duration(pause.DurationSeconds()) * time.Second) if now.After(expiredTime) { @@ -202,7 +203,7 @@ func (pCtx *pauseContext) CompletedCanaryPauseStep(pause v1alpha1.RolloutPause) } func (c *rolloutContext) checkEnqueueRolloutDuringWait(startTime metav1.Time, durationInSeconds int32) { - now := metav1.Now() + now := timeutil.MetaNow() expiredTime := startTime.Add(time.Duration(durationInSeconds) * time.Second) nextResync := now.Add(c.resyncPeriod) if nextResync.After(expiredTime) && expiredTime.After(now.Time) { diff --git a/rollout/replicaset.go b/rollout/replicaset.go index 71b6682f9c..dceff65aa0 100644 --- a/rollout/replicaset.go +++ b/rollout/replicaset.go @@ -15,6 +15,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) var controllerKind = v1alpha1.SchemeGroupVersion.WithKind("Rollout") @@ -53,7 +54,7 @@ func (c *rolloutContext) addScaleDownDelay(rs *appsv1.ReplicaSet, scaleDownDelay } return nil } - deadline := metav1.Now().Add(scaleDownDelaySeconds).UTC().Format(time.RFC3339) + deadline := timeutil.MetaNow().Add(scaleDownDelaySeconds).UTC().Format(time.RFC3339) patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, deadline) _, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{}) if err == nil { @@ -76,7 +77,7 @@ func (c *Controller) getReplicaSetsForRollouts(r *v1alpha1.Rollout) ([]*appsv1.R } // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing ReplicaSets (see #42639). 
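On the scale-down-deadline annotation written by addScaleDownDelay earlier in this file and parsed again in reconcileNewReplicaSet below: the writer and reader only share an RFC3339 timestamp on the ReplicaSet, so the contract is easy to exercise standalone. A small illustrative round trip with made-up values (not project code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Writer side: record "now + delay" on the ReplicaSet annotation (illustrative 30s delay).
	deadline := time.Now().Add(30 * time.Second).UTC().Format(time.RFC3339)
	fmt.Println("scale-down-deadline:", deadline)

	// Reader side: parse the annotation back and decide whether the delay has elapsed.
	scaleDownAt, err := time.Parse(time.RFC3339, deadline)
	if err != nil {
		panic(err)
	}
	if time.Now().After(scaleDownAt) {
		fmt.Println("deadline passed: ReplicaSet may be scaled down")
	} else {
		fmt.Println("deadline not reached: keep the ReplicaSet for now")
	}
}
```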
- canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { + canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) { fresh, err := c.argoprojclientset.ArgoprojV1alpha1().Rollouts(r.Namespace).Get(ctx, r.Name, metav1.GetOptions{}) if err != nil { return nil, err @@ -87,7 +88,7 @@ func (c *Controller) getReplicaSetsForRollouts(r *v1alpha1.Rollout) ([]*appsv1.R return fresh, nil }) cm := controller.NewReplicaSetControllerRefManager(c.replicaSetControl, r, replicaSetSelector, controllerKind, canAdoptFunc) - return cm.ClaimReplicaSets(rsList) + return cm.ClaimReplicaSets(ctx, rsList) } // removeScaleDownDeadlines removes the scale-down-deadline annotation from the new/stable ReplicaSets, @@ -131,7 +132,7 @@ func (c *rolloutContext) reconcileNewReplicaSet() (bool, error) { if err != nil { c.log.Warnf("Unable to read scaleDownAt label on rs '%s'", c.newRS.Name) } else { - now := metav1.Now() + now := timeutil.MetaNow() scaleDownAt := metav1.NewTime(scaleDownAtTime) if scaleDownAt.After(now.Time) { c.log.Infof("RS '%s' has not reached the scaleDownTime", c.newRS.Name) diff --git a/rollout/replicaset_test.go b/rollout/replicaset_test.go index 89bf020f32..b1588ff4ce 100644 --- a/rollout/replicaset_test.go +++ b/rollout/replicaset_test.go @@ -17,6 +17,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/annotations" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func newRolloutControllerRef(r *v1alpha1.Rollout) *metav1.OwnerReference { @@ -221,9 +222,9 @@ func TestReconcileNewReplicaSet(t *testing.T) { if test.abortScaleDownAnnotated { var deadline string if test.abortScaleDownDelayPassed { - deadline = metav1.Now().Add(-time.Duration(test.abortScaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) + deadline = timeutil.Now().Add(-time.Duration(test.abortScaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) } else { - deadline = metav1.Now().Add(time.Duration(test.abortScaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) + deadline = timeutil.Now().Add(time.Duration(test.abortScaleDownDelaySeconds) * time.Second).UTC().Format(time.RFC3339) } roCtx.newRS.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = deadline } diff --git a/rollout/restart.go b/rollout/restart.go index 51ebd13840..a8361bf24f 100644 --- a/rollout/restart.go +++ b/rollout/restart.go @@ -18,6 +18,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/replicaset" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -142,6 +143,13 @@ func maxInt(left, right int32) int32 { return right } +func minInt(left, right int32) int32 { + if left < right { + return left + } + return right +} + // getRolloutPods returns all pods associated with a rollout func (p *RolloutPodRestarter) getRolloutPods(ctx context.Context, ro *v1alpha1.Rollout, allRSs []*appsv1.ReplicaSet) ([]*corev1.Pod, error) { pods, err := p.client.CoreV1().Pods(ro.Namespace).List(ctx, metav1.ListOptions{ @@ -167,7 +175,7 @@ func (p *RolloutPodRestarter) getRolloutPods(ctx context.Context, ro *v1alpha1.R func getAvailablePodCount(pods []*corev1.Pod, minReadySeconds int32) int32 { var available int32 - now := metav1.Now() + now := timeutil.MetaNow() for _, pod := range pods { if podutil.IsPodAvailable(pod, 
minReadySeconds, now) && pod.DeletionTimestamp == nil { available += 1 diff --git a/rollout/service.go b/rollout/service.go index 7a8e77ab05..a2861be0c2 100644 --- a/rollout/service.go +++ b/rollout/service.go @@ -11,6 +11,7 @@ import ( "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/aws" "github.com/argoproj/argo-rollouts/utils/conditions" @@ -18,6 +19,7 @@ import ( logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" + rolloututils "github.com/argoproj/argo-rollouts/utils/rollout" serviceutil "github.com/argoproj/argo-rollouts/utils/service" ) @@ -76,6 +78,10 @@ func (c *rolloutContext) reconcilePreviewService(previewSvc *corev1.Service) err if previewSvc == nil { return nil } + if haltReason := c.haltProgress(); haltReason != "" { + c.log.Infof("Skipping preview service reconciliation: %s", haltReason) + return nil + } newPodHash := c.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] err := c.switchServiceSelector(previewSvc, newPodHash, c.rollout) if err != nil { @@ -86,6 +92,10 @@ func (c *rolloutContext) reconcilePreviewService(previewSvc *corev1.Service) err } func (c *rolloutContext) reconcileActiveService(activeSvc *corev1.Service) error { + if haltReason := c.haltProgress(); haltReason != "" { + c.log.Infof("Skipping active service reconciliation: %s", haltReason) + return nil + } if !replicasetutil.ReadyForPause(c.rollout, c.newRS, c.allRSs) || !annotations.IsSaturated(c.rollout, c.newRS) { c.log.Infof("skipping active service switch: New RS '%s' is not fully saturated", c.newRS.Name) return nil @@ -232,25 +242,32 @@ func (c *rolloutContext) getPreviewAndActiveServices() (*corev1.Service, *corev1 return previewSvc, activeSvc, nil } +func (c *rolloutContext) reconcilePingAndPongService() error { + if trafficrouting.IsPingPongEnabled(c.rollout) && !rolloututils.IsFullyPromoted(c.rollout) { + _, canaryService := trafficrouting.GetStableAndCanaryServices(c.rollout) + return c.ensureSVCTargets(canaryService, c.newRS, false) + } + return nil +} + func (c *rolloutContext) reconcileStableAndCanaryService() error { if c.rollout.Spec.Strategy.Canary == nil { return nil } - err := c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.StableService, c.stableRS) + err := c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.StableService, c.stableRS, true) if err != nil { return err } - - if replicasetutil.IsReplicaSetReady(c.newRS) { - err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.newRS) - if err != nil { - return err - } + err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.newRS, true) + if err != nil { + return err } return nil } -func (c *rolloutContext) ensureSVCTargets(svcName string, rs *appsv1.ReplicaSet) error { +// ensureSVCTargets updates the service with the given name to point to the given ReplicaSet, +// but only if that ReplicaSet has full availability. 
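The checkRsAvailability flag introduced for ensureSVCTargets (described in the comment above and implemented in the function that follows) defers the selector switch until the target ReplicaSet can actually serve traffic. The real gate is replicasetutil.IsReplicaSetAvailable, whose definition is not shown in this diff; the function below is a hypothetical stand-in for illustration only:

```go
package sketch

import appsv1 "k8s.io/api/apps/v1"

// replicaSetFullyAvailable is a hypothetical stand-in for replicasetutil.IsReplicaSetAvailable:
// only treat the ReplicaSet as switchable once every desired replica reports available, so the
// service selector never points at an empty or under-provisioned ReplicaSet.
func replicaSetFullyAvailable(rs *appsv1.ReplicaSet) bool {
	if rs == nil || rs.Spec.Replicas == nil || *rs.Spec.Replicas == 0 {
		return false
	}
	return rs.Status.AvailableReplicas >= *rs.Spec.Replicas
}
```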
+func (c *rolloutContext) ensureSVCTargets(svcName string, rs *appsv1.ReplicaSet, checkRsAvailability bool) error { if rs == nil || svcName == "" { return nil } @@ -258,8 +275,16 @@ func (c *rolloutContext) ensureSVCTargets(svcName string, rs *appsv1.ReplicaSet) if err != nil { return err } - if svc.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey] != rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] { - err = c.switchServiceSelector(svc, rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey], c.rollout) + currSelector := svc.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey] + desiredSelector := rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + if currSelector != desiredSelector { + // ensure ReplicaSet is fully available, otherwise we will point the service to nothing or an underprovisioned ReplicaSet + if checkRsAvailability && !replicasetutil.IsReplicaSetAvailable(rs) { + logCtx := c.log.WithField(logutil.ServiceKey, svc.Name) + logCtx.Infof("delaying service switch from %s to %s: ReplicaSet not fully available", currSelector, desiredSelector) + return nil + } + err = c.switchServiceSelector(svc, desiredSelector, c.rollout) if err != nil { return err } diff --git a/rollout/service_test.go b/rollout/service_test.go index 7dda838271..8a402d001c 100644 --- a/rollout/service_test.go +++ b/rollout/service_test.go @@ -25,6 +25,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" + timeutil "github.com/argoproj/argo-rollouts/utils/time" unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" ) @@ -301,13 +302,15 @@ func TestBlueGreenAWSVerifyTargetGroupsNotYetReady(t *testing.T) { rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] svc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}, r2) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 3, 3, 6, 3, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 3, 3, 6, 3, false, true, false) r2.Status.Message = "" r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) - completedCondition, _ := newCompletedCondition(true) - conditions.SetRolloutCondition(&r2.Status, completedCondition) + completedHealthyCondition, _ := newHealthyCondition(true) + conditions.SetRolloutCondition(&r2.Status, completedHealthyCondition) progressingCondition, _ := newProgressingCondition(conditions.NewRSAvailableReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2, tgb) @@ -384,13 +387,15 @@ func TestBlueGreenAWSVerifyTargetGroupsReady(t *testing.T) { rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] svc := newService("active", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}, r2) - r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 3, 3, 6, 3, false, true) + r2 = updateBlueGreenRolloutStatus(r2, "", rs2PodHash, rs1PodHash, 3, 3, 6, 3, false, true, false) r2.Status.Message = "waiting for post-promotion verification to complete" r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) - completedCondition, _ := newCompletedCondition(true) + completedCondition, _ := newHealthyCondition(true) conditions.SetRolloutCondition(&r2.Status, 
completedCondition) progressingCondition, _ := newProgressingCondition(conditions.NewRSAvailableReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) + completedCond := conditions.NewRolloutCondition(v1alpha1.RolloutCompleted, corev1.ConditionTrue, conditions.RolloutCompletedReason, conditions.RolloutCompletedReason) + conditions.SetRolloutCondition(&r2.Status, *completedCond) f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2, tgb) @@ -458,7 +463,10 @@ func TestCanaryAWSVerifyTargetGroupsNotYetReady(t *testing.T) { } fakeELB.On("DescribeTargetHealth", mock.Anything, mock.Anything).Return(&thOut, nil) - r1 := newCanaryRollout("foo", 3, nil, nil, nil, intstr.FromString("25%"), intstr.FromString("25%")) + r1 := newCanaryRollout("foo", 3, nil, []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }}, pointer.Int32Ptr(0), intstr.FromString("25%"), intstr.FromString("25%")) + r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ ALB: &v1alpha1.ALBTrafficRouting{ Ingress: "ingress", @@ -485,10 +493,12 @@ func TestCanaryAWSVerifyTargetGroupsNotYetReady(t *testing.T) { r2.Status.StableRS = rs2PodHash availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) - completedCondition, _ := newCompletedCondition(false) - conditions.SetRolloutCondition(&r2.Status, completedCondition) + healthyCondition, _ := newHealthyCondition(false) + conditions.SetRolloutCondition(&r2.Status, healthyCondition) progressingCondition, _ := newProgressingCondition(conditions.NewRSAvailableReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) + completedCondition, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r2.Status, completedCondition) _, r2.Status.Canary.Weights = calculateWeightStatus(r2, rs2PodHash, rs2PodHash, 0) f.rolloutLister = append(f.rolloutLister, r2) @@ -552,7 +562,9 @@ func TestCanaryAWSVerifyTargetGroupsReady(t *testing.T) { } fakeELB.On("DescribeTargetHealth", mock.Anything, mock.Anything).Return(&thOut, nil) - r1 := newCanaryRollout("foo", 3, nil, nil, nil, intstr.FromString("25%"), intstr.FromString("25%")) + r1 := newCanaryRollout("foo", 3, nil, []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }}, pointer.Int32Ptr(0), intstr.FromString("25%"), intstr.FromString("25%")) r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ ALB: &v1alpha1.ALBTrafficRouting{ Ingress: "ingress", @@ -579,10 +591,12 @@ func TestCanaryAWSVerifyTargetGroupsReady(t *testing.T) { r2.Status.StableRS = rs2PodHash availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) - completedCondition, _ := newCompletedCondition(false) - conditions.SetRolloutCondition(&r2.Status, completedCondition) + healthyCondition, _ := newHealthyCondition(false) + conditions.SetRolloutCondition(&r2.Status, healthyCondition) progressingCondition, _ := newProgressingCondition(conditions.NewRSAvailableReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) + completedCondition, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r2.Status, completedCondition) _, r2.Status.Canary.Weights = calculateWeightStatus(r2, rs2PodHash, rs2PodHash, 0) f.rolloutLister = append(f.rolloutLister, r2) @@ -609,7 +623,9 @@ func TestCanaryAWSVerifyTargetGroupsSkip(t *testing.T) { f := newFixture(t) defer f.Close() - r1 := newCanaryRollout("foo", 3, nil, nil, 
nil, intstr.FromString("25%"), intstr.FromString("25%")) + r1 := newCanaryRollout("foo", 3, nil, []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }}, pointer.Int32Ptr(0), intstr.FromString("25%"), intstr.FromString("25%")) r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ ALB: &v1alpha1.ALBTrafficRouting{ Ingress: "ingress", @@ -622,7 +638,7 @@ func TestCanaryAWSVerifyTargetGroupsSkip(t *testing.T) { rs1 := newReplicaSetWithStatus(r1, 3, 3) // set an annotation on old RS to cause verification to be skipped - rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = metav1.Now().Add(600 * time.Second).UTC().Format(time.RFC3339) + rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = timeutil.Now().Add(600 * time.Second).UTC().Format(time.RFC3339) rs2 := newReplicaSetWithStatus(r2, 3, 3) rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] @@ -638,10 +654,12 @@ func TestCanaryAWSVerifyTargetGroupsSkip(t *testing.T) { r2.Status.StableRS = rs2PodHash availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) - completedCondition, _ := newCompletedCondition(false) - conditions.SetRolloutCondition(&r2.Status, completedCondition) + healthyCondition, _ := newHealthyCondition(false) + conditions.SetRolloutCondition(&r2.Status, healthyCondition) progressingCondition, _ := newProgressingCondition(conditions.NewRSAvailableReason, rs2, "") conditions.SetRolloutCondition(&r2.Status, progressingCondition) + completedCondition, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r2.Status, completedCondition) _, r2.Status.Canary.Weights = calculateWeightStatus(r2, rs2PodHash, rs2PodHash, 0) f.rolloutLister = append(f.rolloutLister, r2) @@ -737,3 +755,53 @@ func TestShouldVerifyTargetGroups(t *testing.T) { assert.True(t, roCtx.shouldVerifyTargetGroup(activeSvc)) }) } + +// TestDelayCanaryStableServiceLabelInjection verifies we don't inject pod hash labels to the canary +// or stable service before the pods for them are ready. 
+func TestDelayCanaryStableServiceLabelInjection(t *testing.T) { + ro1 := newCanaryRollout("foo", 3, nil, nil, nil, intstr.FromInt(1), intstr.FromInt(1)) + ro1.Spec.Strategy.Canary.CanaryService = "canary" + ro1.Spec.Strategy.Canary.StableService = "stable" + canarySvc := newService("canary", 80, ro1.Spec.Selector.MatchLabels, nil) + stableSvc := newService("stable", 80, ro1.Spec.Selector.MatchLabels, nil) + ro2 := bumpVersion(ro1) + + f := newFixture(t) + defer f.Close() + f.kubeobjects = append(f.kubeobjects, canarySvc, stableSvc) + f.serviceLister = append(f.serviceLister, canarySvc, stableSvc) + + { + // first ensure we don't update service because new/stable are both not available + ctrl, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := ctrl.newRolloutContext(ro1) + assert.NoError(t, err) + + roCtx.newRS = newReplicaSetWithStatus(ro1, 3, 0) + roCtx.stableRS = newReplicaSetWithStatus(ro2, 3, 0) + + err = roCtx.reconcileStableAndCanaryService() + assert.NoError(t, err) + _, canaryInjected := canarySvc.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey] + assert.False(t, canaryInjected) + _, stableInjected := stableSvc.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey] + assert.False(t, stableInjected) + } + { + // next ensure we do update service because new/stable are now available + ctrl, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := ctrl.newRolloutContext(ro1) + assert.NoError(t, err) + + roCtx.newRS = newReplicaSetWithStatus(ro1, 3, 3) + roCtx.stableRS = newReplicaSetWithStatus(ro2, 3, 3) + + err = roCtx.reconcileStableAndCanaryService() + assert.NoError(t, err) + _, canaryInjected := canarySvc.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey] + assert.True(t, canaryInjected) + _, stableInjected := stableSvc.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey] + assert.True(t, stableInjected) + } + +} diff --git a/rollout/sync.go b/rollout/sync.go index e54f3375e6..b9bc616552 100644 --- a/rollout/sync.go +++ b/rollout/sync.go @@ -17,12 +17,14 @@ import ( "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting" analysisutil "github.com/argoproj/argo-rollouts/utils/analysis" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/diff" experimentutil "github.com/argoproj/argo-rollouts/utils/experiment" + "github.com/argoproj/argo-rollouts/utils/hash" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" @@ -139,7 +141,7 @@ func (c *rolloutContext) createDesiredReplicaSet() (*appsv1.ReplicaSet, error) { newRSTemplate := *c.rollout.Spec.Template.DeepCopy() // Add default anti-affinity rule if antiAffinity bool set and RSTemplate meets requirements newRSTemplate.Spec.Affinity = replicasetutil.GenerateReplicaSetAffinity(*c.rollout) - podTemplateSpecHash := controller.ComputeHash(&c.rollout.Spec.Template, c.rollout.Status.CollisionCount) + podTemplateSpecHash := hash.ComputePodTemplateHash(&c.rollout.Spec.Template, c.rollout.Status.CollisionCount) newRSTemplate.Labels = labelsutil.CloneAndAddLabel(c.rollout.Spec.Template.Labels, v1alpha1.DefaultRolloutUniqueLabelKey, podTemplateSpecHash) // Add podTemplateHash label to selector. 
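controller.ComputeHash from k8s.io/kubernetes is replaced throughout this diff by the in-repo hash.ComputePodTemplateHash, presumably to drop the heavyweight kubernetes dependency. The new helper's body is not shown here; the sketch below only illustrates the usual pod-template-hash recipe (FNV-1a over the template plus the collision count, safe-encoded for use as a label value) and may differ from the actual implementation:

```go
package sketch

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/rand"
)

// computePodTemplateHash mimics the upstream Deployment controller's hashing scheme:
// hash a deterministic dump of the pod template, mix in the collision count, then
// encode the sum into the alphanumeric form usable as a label value.
func computePodTemplateHash(template *corev1.PodTemplateSpec, collisionCount *int32) string {
	hasher := fnv.New32a()
	fmt.Fprintf(hasher, "%#v", *template)
	if collisionCount != nil {
		// hash.Hash writers never return an error, so it is safe to ignore here
		_ = binary.Write(hasher, binary.LittleEndian, *collisionCount)
	}
	return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}
```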
newRSSelector := labelsutil.CloneSelectorAndAddLabel(c.rollout.Spec.Selector, v1alpha1.DefaultRolloutUniqueLabelKey, podTemplateSpecHash) @@ -266,8 +268,8 @@ func (c *rolloutContext) createDesiredReplicaSet() (*appsv1.ReplicaSet, error) { } // syncReplicasOnly is responsible for reconciling rollouts on scaling events. -func (c *rolloutContext) syncReplicasOnly(isScaling bool) error { - c.log.Infof("Syncing replicas only (userPaused %v, isScaling: %v)", c.rollout.Spec.Paused, isScaling) +func (c *rolloutContext) syncReplicasOnly() error { + c.log.Infof("Syncing replicas only due to scaling event") _, err := c.getAllReplicaSetsAndSyncRevision(false) if err != nil { return err @@ -276,15 +278,9 @@ func (c *rolloutContext) syncReplicasOnly(isScaling bool) error { // NOTE: it is possible for newRS to be nil (e.g. when template and replicas changed at same time) if c.rollout.Spec.Strategy.BlueGreen != nil { previewSvc, activeSvc, err := c.getPreviewAndActiveServices() - // Keep existing analysis runs if the rollout is paused - c.SetCurrentAnalysisRuns(c.currentArs) if err != nil { return nil } - err = c.podRestarter.Reconcile(c) - if err != nil { - return err - } if err := c.reconcileBlueGreenReplicaSets(activeSvc); err != nil { // If we get an error while trying to scale, the rollout will be requeued // so we can abort this resync @@ -295,37 +291,11 @@ func (c *rolloutContext) syncReplicasOnly(isScaling bool) error { // The controller wants to use the rolloutCanary method to reconcile the rollout if the rollout is not paused. // If there are no scaling events, the rollout should only sync its status if c.rollout.Spec.Strategy.Canary != nil { - err = c.podRestarter.Reconcile(c) - if err != nil { - return err - } - - if isScaling { - if _, err := c.reconcileCanaryReplicaSets(); err != nil { - // If we get an error while trying to scale, the rollout will be requeued - // so we can abort this resync - return err - } - } - // Reconciling AnalysisRuns to manage Background AnalysisRun if necessary - err = c.reconcileAnalysisRuns() - if err != nil { - return err - } - - // reconcileCanaryPause will ensure we will requeue this rollout at the appropriate time - // if we are at a pause step with a duration. - c.reconcileCanaryPause() - err = c.reconcileStableAndCanaryService() - if err != nil { - return err - } - - err = c.reconcileTrafficRouting() - if err != nil { + if _, err := c.reconcileCanaryReplicaSets(); err != nil { + // If we get an error while trying to scale, the rollout will be requeued + // so we can abort this resync return err } - return c.syncRolloutStatusCanary() } return fmt.Errorf("no rollout strategy provided") @@ -410,7 +380,7 @@ func (c *rolloutContext) calculateBaseStatus() v1alpha1.RolloutStatus { // newRS potentially might be nil when called by syncReplicasOnly(). For this // to happen, the user would have had to simultaneously change the number of replicas, and // the pod template spec at the same time. 
- currentPodHash = controller.ComputeHash(&c.rollout.Spec.Template, c.rollout.Status.CollisionCount) + currentPodHash = hash.ComputePodTemplateHash(&c.rollout.Spec.Template, c.rollout.Status.CollisionCount) c.log.Infof("Assuming %s for new replicaset pod hash", currentPodHash) } else { currentPodHash = c.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] @@ -577,14 +547,19 @@ func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutSt isPaused := len(c.rollout.Status.PauseConditions) > 0 || c.rollout.Spec.Paused isAborted := c.pauseContext.IsAborted() - completeCond := conditions.GetRolloutCondition(c.rollout.Status, v1alpha1.RolloutCompleted) - if !isPaused && conditions.RolloutComplete(c.rollout, &newStatus) { - updateCompletedCond := conditions.NewRolloutCondition(v1alpha1.RolloutCompleted, corev1.ConditionTrue, conditions.RolloutCompletedReason, conditions.RolloutCompletedReason) - conditions.SetRolloutCondition(&newStatus, *updateCompletedCond) + var becameUnhealthy bool // remember if we transitioned from healthy to unhealthy + completeCond := conditions.GetRolloutCondition(c.rollout.Status, v1alpha1.RolloutHealthy) + if !isPaused && conditions.RolloutHealthy(c.rollout, &newStatus) { + updateHealthyCond := conditions.NewRolloutCondition(v1alpha1.RolloutHealthy, corev1.ConditionTrue, conditions.RolloutHealthyReason, conditions.RolloutHealthyMessage) + conditions.SetRolloutCondition(&newStatus, *updateHealthyCond) + // If we ever wanted to emit a healthy event here it would be noisy and somewhat unpredictable for tests and so should probably be skipped + // when checking in e2e and unit tests. + //c.recorder.Warnf(c.rollout, record.EventOptions{EventReason: conditions.RolloutHealthyReason}, conditions.RolloutHealthyMessage) } else { if completeCond != nil { - updateCompletedCond := conditions.NewRolloutCondition(v1alpha1.RolloutCompleted, corev1.ConditionFalse, conditions.RolloutCompletedReason, conditions.RolloutCompletedReason) - conditions.SetRolloutCondition(&newStatus, *updateCompletedCond) + updateHealthyCond := conditions.NewRolloutCondition(v1alpha1.RolloutHealthy, corev1.ConditionFalse, conditions.RolloutHealthyReason, conditions.RolloutNotHealthyMessage) + becameUnhealthy = conditions.SetRolloutCondition(&newStatus, *updateHealthyCond) + //c.recorder.Warnf(c.rollout, record.EventOptions{EventReason: conditions.RolloutHealthyReason}, conditions.RolloutNotHealthyMessage) } } @@ -605,11 +580,11 @@ func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutSt // In such a case, we should simply not estimate any progress for this rollout. currentCond := conditions.GetRolloutCondition(c.rollout.Status, v1alpha1.RolloutProgressing) - isCompleteRollout := newStatus.Replicas == newStatus.AvailableReplicas && currentCond != nil && currentCond.Reason == conditions.NewRSAvailableReason && currentCond.Type != v1alpha1.RolloutProgressing + isHealthyRollout := newStatus.Replicas == newStatus.AvailableReplicas && currentCond != nil && currentCond.Reason == conditions.NewRSAvailableReason && currentCond.Type != v1alpha1.RolloutProgressing // Check for progress. Only do this if the latest rollout hasn't completed yet and it is not aborted - if !isCompleteRollout && !isAborted { + if !isHealthyRollout && !isAborted { switch { - case conditions.RolloutComplete(c.rollout, &newStatus): + case conditions.RolloutHealthy(c.rollout, &newStatus): // Update the rollout conditions with a message for the new replica set that // was successfully deployed. 
If the condition already exists, we ignore this update. rsName := "" @@ -619,14 +594,26 @@ func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutSt msg := fmt.Sprintf(conditions.ReplicaSetCompletedMessage, rsName) progressingCondition := conditions.NewRolloutCondition(v1alpha1.RolloutProgressing, corev1.ConditionTrue, conditions.NewRSAvailableReason, msg) conditions.SetRolloutCondition(&newStatus, *progressingCondition) - case conditions.RolloutProgressing(c.rollout, &newStatus): + case conditions.RolloutProgressing(c.rollout, &newStatus) || becameUnhealthy: // If there is any progress made, continue by not checking if the rollout failed. This // behavior emulates the rolling updater progressDeadline check. msg := fmt.Sprintf(conditions.RolloutProgressingMessage, c.rollout.Name) if c.newRS != nil { msg = fmt.Sprintf(conditions.ReplicaSetProgressingMessage, c.newRS.Name) } - condition := conditions.NewRolloutCondition(v1alpha1.RolloutProgressing, corev1.ConditionTrue, conditions.ReplicaSetUpdatedReason, msg) + + var reason string + if newStatus.StableRS == newStatus.CurrentPodHash && becameUnhealthy { + // When a fully promoted rollout becomes Incomplete, e.g., due to the ReplicaSet status changes like + // pod restarts, evicted -> recreated, we'll need to reset the rollout's condition to `PROGRESSING` to + // avoid any timeouts. + reason = conditions.ReplicaSetNotAvailableReason + msg = conditions.NotAvailableMessage + } else { + reason = conditions.ReplicaSetUpdatedReason + } + condition := conditions.NewRolloutCondition(v1alpha1.RolloutProgressing, corev1.ConditionTrue, reason, msg) + // Update the current Progressing condition or add a new one if it doesn't exist. // If a Progressing condition with status=true already exists, we should update // everything but lastTransitionTime. SetRolloutCondition already does that but @@ -689,6 +676,22 @@ func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutSt } else { conditions.RemoveRolloutCondition(&newStatus, v1alpha1.RolloutReplicaFailure) } + + if conditions.RolloutCompleted(c.rollout, &newStatus) { + // The event gets triggered in function promoteStable + updateCompletedCond := conditions.NewRolloutCondition(v1alpha1.RolloutCompleted, corev1.ConditionTrue, + conditions.RolloutCompletedReason, conditions.RolloutCompletedReason) + conditions.SetRolloutCondition(&newStatus, *updateCompletedCond) + } else { + updateCompletedCond := conditions.NewRolloutCondition(v1alpha1.RolloutCompleted, corev1.ConditionFalse, + conditions.RolloutCompletedReason, conditions.RolloutCompletedReason) + if conditions.SetRolloutCondition(&newStatus, *updateCompletedCond) { + revision, _ := replicasetutil.Revision(c.rollout) + c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: conditions.RolloutNotCompletedReason}, + conditions.RolloutNotCompletedMessage, revision+1, newStatus.CurrentPodHash) + } + } + return newStatus } @@ -771,7 +774,7 @@ func (c *rolloutContext) requeueStuckRollout(newStatus v1alpha1.RolloutStatus) t } // No need to estimate progress if the rollout is complete or already timed out. 
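A note on the condition split running through this hunk: the old RolloutCompleted condition is renamed to RolloutHealthy (all desired pods updated and available), while the new RolloutCompleted condition appears to track full promotion of the current pod hash to stable. The real predicates live in utils/conditions and are not part of this diff, so the sketch below captures assumed semantics only:

```go
package sketch

import "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"

// rolloutHealthy approximates the renamed "Healthy" predicate: every desired replica
// is updated and available, with no leftover old replicas.
func rolloutHealthy(status v1alpha1.RolloutStatus, desiredReplicas int32) bool {
	return status.UpdatedReplicas == desiredReplicas &&
		status.AvailableReplicas == desiredReplicas &&
		status.Replicas == desiredReplicas
}

// rolloutCompleted approximates the new "Completed" predicate: the current pod
// template hash has been fully promoted to stable.
func rolloutCompleted(status v1alpha1.RolloutStatus) bool {
	return status.StableRS != "" && status.StableRS == status.CurrentPodHash
}
```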
isPaused := len(c.rollout.Status.PauseConditions) > 0 || c.rollout.Spec.Paused - if conditions.RolloutComplete(c.rollout, &newStatus) || currentCond.Reason == conditions.TimedOutReason || isPaused || c.rollout.Status.Abort || isIndefiniteStep(c.rollout) { + if conditions.RolloutHealthy(c.rollout, &newStatus) || currentCond.Reason == conditions.TimedOutReason || isPaused || c.rollout.Status.Abort || isIndefiniteStep(c.rollout) { return time.Duration(-1) } // If there is no sign of progress at this point then there is a high chance that the @@ -932,7 +935,15 @@ func (c *rolloutContext) promoteStable(newStatus *v1alpha1.RolloutStatus, reason previousStableHash := newStatus.StableRS if previousStableHash != newStatus.CurrentPodHash { // only emit this event when we switched stable + if trafficrouting.IsPingPongEnabled(c.rollout) { + if trafficrouting.IsStablePing(c.rollout) { + newStatus.Canary.StablePingPong = v1alpha1.PPPong + } else { + newStatus.Canary.StablePingPong = v1alpha1.PPPing + } + } newStatus.StableRS = newStatus.CurrentPodHash + revision, _ := replicasetutil.Revision(c.rollout) c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: conditions.RolloutCompletedReason}, conditions.RolloutCompletedMessage, revision, newStatus.CurrentPodHash, reason) diff --git a/rollout/sync_test.go b/rollout/sync_test.go index 2687d55fce..77205db2c6 100644 --- a/rollout/sync_test.go +++ b/rollout/sync_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -19,7 +20,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/conditions" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" - "github.com/stretchr/testify/assert" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time, ownerRef *metav1.OwnerReference) *appsv1.ReplicaSet { @@ -46,7 +47,7 @@ func rs(name string, replicas int, selector map[string]string, timestamp metav1. 
} func TestReconcileRevisionHistoryLimit(t *testing.T) { - now := metav1.Now() + now := timeutil.MetaNow() before := metav1.Time{Time: now.Add(-time.Minute)} newRS := func(name string) *appsv1.ReplicaSet { @@ -263,6 +264,29 @@ func TestPersistWorkloadRefGeneration(t *testing.T) { } } +func TestPingPongCanaryPromoteStable(t *testing.T) { + ro := &v1alpha1.Rollout{} + ro.Spec.Strategy.Canary = &v1alpha1.CanaryStrategy{PingPong: &v1alpha1.PingPongSpec{}} + ro.Status.Canary.StablePingPong = v1alpha1.PPPing + roCtx := &rolloutContext{ + pauseContext: &pauseContext{}, + rollout: ro, + reconcilerBase: reconcilerBase{ + recorder: record.NewFakeEventRecorder(), + }, + } + newStatus := &v1alpha1.RolloutStatus{ + CurrentPodHash: "2f646bf702", + StableRS: "15fb5ffc01", + } + + // test call + err := roCtx.promoteStable(newStatus, "reason") + + assert.Nil(t, err) + assert.Equal(t, v1alpha1.PPPong, newStatus.Canary.StablePingPong) +} + // TestCanaryPromoteFull verifies skip pause, analysis, steps when promote full is set for a canary rollout func TestCanaryPromoteFull(t *testing.T) { f := newFixture(t) @@ -376,7 +400,7 @@ func TestBlueGreenPromoteFull(t *testing.T) { // TestSendStateChangeEvents verifies we emit appropriate events on rollout state changes func TestSendStateChangeEvents(t *testing.T) { - now := metav1.Now() + now := timeutil.MetaNow() tests := []struct { prevStatus v1alpha1.RolloutStatus newStatus v1alpha1.RolloutStatus diff --git a/rollout/temlateref.go b/rollout/temlateref.go index 58cdf5d778..a8faf92b8b 100644 --- a/rollout/temlateref.go +++ b/rollout/temlateref.go @@ -20,6 +20,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" @@ -245,12 +246,22 @@ func (r *informerBasedTemplateResolver) updateRolloutsReferenceAnnotation(obj in updateAnnotation = func(ro *v1alpha1.Rollout) { updated := annotations.SetRolloutWorkloadRefGeneration(ro, generation) if updated { - // update the annotation causes the rollout to be requeued and the template will be resolved to the referred - // workload during next reconciliation - ro.Spec.Template.Spec.Containers = []corev1.Container{} - _, err := r.argoprojclientset.ArgoprojV1alpha1().Rollouts(ro.Namespace).Update(context.TODO(), ro, v1.UpdateOptions{}) + + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + annotations.WorkloadGenerationAnnotation: ro.Annotations[annotations.WorkloadGenerationAnnotation], + }, + }, + } + patchData, err := json.Marshal(patch) + if err == nil { + _, err = r.argoprojclientset.ArgoprojV1alpha1().Rollouts(ro.Namespace).Patch( + context.TODO(), ro.GetName(), types.MergePatchType, patchData, v1.PatchOptions{}) + } + if err != nil { - log.Errorf("Cannot update the workload-ref/annotation for %s/%s", ro.GetName(), ro.GetNamespace()) + log.Errorf("Cannot update the workload-ref/annotation for %s/%s: %v", ro.GetName(), ro.GetNamespace(), err) } } } diff --git a/rollout/temlateref_test.go b/rollout/temlateref_test.go index d859996aba..cd81b2d1a6 100644 --- a/rollout/temlateref_test.go +++ b/rollout/temlateref_test.go @@ -159,7 +159,7 @@ func TestResolve_UnknownAPIResource(t *testing.T) { err := resolver.Resolve(&rollout) assert.Error(t, err) - assert.Equal(t, `GroupVersion "apps/v1" not found`, err.Error()) + assert.Equal(t, `the server could not 
find the requested resource, GroupVersion "apps/v1" not found`, err.Error()) } func TestResolve_RefDoesNotExists(t *testing.T) { diff --git a/rollout/trafficrouting.go b/rollout/trafficrouting.go index db6e53a432..ebc22b9704 100644 --- a/rollout/trafficrouting.go +++ b/rollout/trafficrouting.go @@ -1,15 +1,20 @@ package rollout import ( + "fmt" "reflect" + "strconv" + "strings" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/rollout/trafficrouting" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/alb" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/ambassador" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/appmesh" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/nginx" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/smi" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/traefik" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/record" @@ -73,6 +78,21 @@ func (c *Controller) NewTrafficRoutingReconciler(roCtx *rolloutContext) ([]traff ac := ambassador.NewDynamicClient(c.dynamicclientset, rollout.GetNamespace()) trafficReconcilers = append(trafficReconcilers, ambassador.NewReconciler(rollout, ac, c.recorder)) } + if rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh != nil { + trafficReconcilers = append(trafficReconcilers, appmesh.NewReconciler(appmesh.ReconcilerConfig{ + Rollout: rollout, + Client: c.dynamicclientset, + Recorder: c.recorder, + })) + } + if rollout.Spec.Strategy.Canary.TrafficRouting.Traefik != nil { + dynamicClient := traefik.NewDynamicClient(c.dynamicclientset, rollout.GetNamespace()) + trafficReconcilers = append(trafficReconcilers, traefik.NewReconciler(&traefik.ReconcilerConfig{ + Rollout: rollout, + Client: dynamicClient, + Recorder: c.recorder, + })) + } // ensure that the trafficReconcilers is a healthy list and its not empty if len(trafficReconcilers) > 0 { @@ -119,19 +139,47 @@ func (c *rolloutContext) reconcileTrafficRouting() error { if rolloututil.IsFullyPromoted(c.rollout) { // when we are fully promoted. desired canary weight should be 0 + err := reconciler.RemoveManagedRoutes() + if err != nil { + return err + } } else if c.pauseContext.IsAborted() { - // when aborted, desired canary weight should be 0 (100% to stable), *unless* we - // are using dynamic stable scaling. In that case, we can only decrease canary weight - // according to available replica counts of the stable. + // when aborted, desired canary weight should immediately be 0 (100% to stable), *unless* + // we are using dynamic stable scaling. In that case, we are dynamically decreasing the + // weight to the canary according to the availability of the stable (whatever it can support). if c.rollout.Spec.Strategy.Canary.DynamicStableScale { desiredWeight = 100 - ((100 * c.stableRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas) + if c.rollout.Status.Canary.Weights != nil { + // This ensures that if we are already at a lower weight, then we will not + // increase the weight because stable availability is flapping (e.g. pod restarts) + desiredWeight = minInt(desiredWeight, c.rollout.Status.Canary.Weights.Canary.Weight) + } + } + err := reconciler.RemoveManagedRoutes() + if err != nil { + return err } + } else if c.newRS == nil || c.newRS.Status.AvailableReplicas == 0 { // when newRS is not available or replicas num is 0. 
never weight to canary weightDestinations = append(weightDestinations, c.calculateWeightDestinationsFromExperiment()...) + } else if c.rollout.Status.PromoteFull { + // on a promote full, desired stable weight should be 0 (100% to canary), + // But we can only increase canary weight according to available replica counts of the canary. + // we will need to set the desiredWeight to 0 when the newRS is not available. + if c.rollout.Spec.Strategy.Canary.DynamicStableScale { + desiredWeight = (100 * c.newRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas + } else if c.rollout.Status.Canary.Weights != nil { + desiredWeight = c.rollout.Status.Canary.Weights.Canary.Weight + } + + err := reconciler.RemoveManagedRoutes() + if err != nil { + return err + } } else if index != nil { atDesiredReplicaCount := replicasetutil.AtDesiredReplicaCountsForCanary(c.rollout, c.newRS, c.stableRS, c.otherRSs, nil) - if !atDesiredReplicaCount { + if !atDesiredReplicaCount && !c.rollout.Status.PromoteFull { // Use the previous weight since the new RS is not ready for a new weight for i := *index - 1; i >= 0; i-- { step := c.rollout.Spec.Strategy.Canary.Steps[i] @@ -140,16 +188,31 @@ func (c *rolloutContext) reconcileTrafficRouting() error { break } } + weightDestinations = append(weightDestinations, c.calculateWeightDestinationsFromExperiment()...) } else if *index != int32(len(c.rollout.Spec.Strategy.Canary.Steps)) { - // This if statement prevents the desiredWeight from being set to 100 - // when the rollout has progressed through all the steps. The rollout - // should send all traffic to the stable service by using a weight of - // 0. If the rollout is progressing through the steps, the desired + // If the rollout is progressing through the steps, the desired // weight of the traffic routing service should be at the value of the // last setWeight step, which is set by GetCurrentSetWeight. desiredWeight = replicasetutil.GetCurrentSetWeight(c.rollout) + weightDestinations = append(weightDestinations, c.calculateWeightDestinationsFromExperiment()...) + } else { + desiredWeight = 100 + } + } + + // We need to check for Generation > 1 because when we first install the rollout we run step 0 this prevents that. + // We could also probably use c.newRS == nil || c.newRS.Status.AvailableReplicas == 0 + if currentStep != nil && c.rollout.ObjectMeta.Generation > 1 { + if currentStep.SetHeaderRoute != nil { + if err = reconciler.SetHeaderRoute(currentStep.SetHeaderRoute); err != nil { + return err + } + } + if currentStep.SetMirrorRoute != nil { + if err = reconciler.SetMirrorRoute(currentStep.SetMirrorRoute); err != nil { + return err + } } - weightDestinations = append(weightDestinations, c.calculateWeightDestinationsFromExperiment()...) } err = reconciler.UpdateHash(canaryHash, stableHash, weightDestinations...) 
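The abort-with-dynamicStableScale branch above is the subtlest part of this hunk: the canary weight tracks whatever the stable ReplicaSet cannot yet absorb, and the minInt clamp keeps it from climbing back up when stable availability flaps. A self-contained worked example with hypothetical numbers:

```go
package main

import "fmt"

// abortCanaryWeight mirrors the abort + dynamicStableScale branch above: the canary
// weight follows whatever fraction the stable ReplicaSet cannot yet serve, but never
// rises above the previously reported canary weight.
func abortCanaryWeight(specReplicas, stableAvailable, previousCanaryWeight int32) int32 {
	desired := 100 - ((100 * stableAvailable) / specReplicas)
	if previousCanaryWeight < desired {
		desired = previousCanaryWeight
	}
	return desired
}

func main() {
	fmt.Println(abortCanaryWeight(10, 6, 100)) // 40: stable only has 6/10 pods available
	fmt.Println(abortCanaryWeight(10, 6, 30))  // 30: keep the already-lower canary weight
	fmt.Println(abortCanaryWeight(10, 10, 30)) // 0: stable fully available, canary drained
}
```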
@@ -162,39 +225,54 @@ func (c *rolloutContext) reconcileTrafficRouting() error { c.recorder.Warnf(c.rollout, record.EventOptions{EventReason: "TrafficRoutingError"}, err.Error()) return err } + if modified, newWeights := calculateWeightStatus(c.rollout, canaryHash, stableHash, desiredWeight, weightDestinations...); modified { c.log.Infof("Previous weights: %v", c.rollout.Status.Canary.Weights) c.log.Infof("New weights: %v", newWeights) + c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: conditions.TrafficWeightUpdatedReason}, trafficWeightUpdatedMessage(c.rollout.Status.Canary.Weights, newWeights)) c.newStatus.Canary.Weights = newWeights } - // If we are in the middle of an update at a setWeight step, also perform weight verification. - // Note that we don't do this every reconciliation because weight verification typically involves - // API calls to the cloud provider which could incur rate limiting - shouldVerifyWeight := c.rollout.Status.StableRS != "" && - !rolloututil.IsFullyPromoted(c.rollout) && - currentStep != nil && currentStep.SetWeight != nil + weightVerified, err := reconciler.VerifyWeight(desiredWeight, weightDestinations...) + c.newStatus.Canary.Weights.Verified = weightVerified + if err != nil { + c.recorder.Warnf(c.rollout, record.EventOptions{EventReason: conditions.WeightVerifyErrorReason}, conditions.WeightVerifyErrorMessage, err) + return nil // return nil instead of error since we want to continue with normal reconciliation + } - if shouldVerifyWeight { - weightVerified, err := reconciler.VerifyWeight(desiredWeight, weightDestinations...) - c.newStatus.Canary.Weights.Verified = weightVerified - if err != nil { - c.recorder.Warnf(c.rollout, record.EventOptions{EventReason: conditions.WeightVerifyErrorReason}, conditions.WeightVerifyErrorMessage, err) - return nil // return nil instead of error since we want to continue with normal reconciliation - } - if weightVerified != nil { - if *weightVerified { - c.log.Infof("Desired weight (stepIdx: %d) %d verified", *index, desiredWeight) - } else { - c.log.Infof("Desired weight (stepIdx: %d) %d not yet verified", *index, desiredWeight) - c.enqueueRolloutAfter(c.rollout, defaults.GetRolloutVerifyRetryInterval()) - } + var indexString string + if index != nil { + indexString = strconv.FormatInt(int64(*index), 10) + } else { + indexString = "n/a" + } + + if weightVerified != nil { + if *weightVerified { + c.log.Infof("Desired weight (stepIdx: %s) %d verified", indexString, desiredWeight) + } else { + c.log.Infof("Desired weight (stepIdx: %s) %d not yet verified", indexString, desiredWeight) + c.enqueueRolloutAfter(c.rollout, defaults.GetRolloutVerifyRetryInterval()) } } } return nil } +// trafficWeightUpdatedMessage returns a message we emit for the kubernetes event whenever we adjust traffic weights +func trafficWeightUpdatedMessage(prev, new *v1alpha1.TrafficWeights) string { + var details []string + if prev == nil { + details = append(details, fmt.Sprintf("to %d", new.Canary.Weight)) + } else if prev.Canary.Weight != new.Canary.Weight { + details = append(details, fmt.Sprintf("from %d to %d", prev.Canary.Weight, new.Canary.Weight)) + } + if prev != nil && new != nil && !reflect.DeepEqual(prev.Additional, new.Additional) { + details = append(details, fmt.Sprintf("additional: %v", new.Additional)) + } + return fmt.Sprintf(conditions.TrafficWeightUpdatedMessage, strings.Join(details, ", ")) +} + // calculateWeightStatus calculates the Rollout's `status.canary.weights` values. 
Returns true if // it has changed from previous values (which indicates we should reset status.canary.weights.verified) func calculateWeightStatus(ro *v1alpha1.Rollout, canaryHash, stableHash string, desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) (bool, *v1alpha1.TrafficWeights) { diff --git a/rollout/trafficrouting/alb/alb.go b/rollout/trafficrouting/alb/alb.go index f264d72df0..103e3f5e9a 100644 --- a/rollout/trafficrouting/alb/alb.go +++ b/rollout/trafficrouting/alb/alb.go @@ -5,6 +5,8 @@ import ( "fmt" "strconv" + rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -13,6 +15,7 @@ import ( "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting" "github.com/argoproj/argo-rollouts/utils/aws" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" @@ -112,7 +115,55 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 return nil } -func (r *Reconciler) shouldVerifyWeight() bool { +func (r *Reconciler) SetHeaderRoute(headerRoute *v1alpha1.SetHeaderRoute) error { + if headerRoute == nil { + return nil + } + ctx := context.TODO() + rollout := r.cfg.Rollout + ingressName := rollout.Spec.Strategy.Canary.TrafficRouting.ALB.Ingress + action := headerRoute.Name + port := rollout.Spec.Strategy.Canary.TrafficRouting.ALB.ServicePort + + ingress, err := r.cfg.IngressWrapper.GetCached(rollout.Namespace, ingressName) + if err != nil { + return err + } + + desiredAnnotations, err := getDesiredHeaderAnnotations(ingress, rollout, port, headerRoute) + if err != nil { + return err + } + desiredIngress := ingressutil.NewIngressWithSpecAndAnnotations(ingress, desiredAnnotations) + hasRule := ingressutil.HasRuleWithService(ingress, action) + if hasRule && headerRoute.Match == nil { + desiredIngress.RemovePathByServiceName(action) + } + if !hasRule && headerRoute.Match != nil { + desiredIngress.CreateAnnotationBasedPath(action) + } + desiredIngress.SortHttpPaths(rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes) + patch, modified, err := ingressutil.BuildIngressPatch(ingress.Mode(), ingress, desiredIngress, ingressutil.WithAnnotations(), ingressutil.WithSpec()) + if err != nil { + return nil + } + if !modified { + r.log.Info("no changes to the ALB Ingress for header routing") + return nil + } + r.log.WithField("patch", string(patch)).Debug("applying ALB Ingress patch") + r.cfg.Recorder.Eventf(rollout, record.EventOptions{EventReason: "PatchingALBIngress"}, "Updating Ingress `%s` to headerRoute '%d'", ingressName, headerRoute) + + _, err = r.cfg.IngressWrapper.Patch(ctx, ingress.GetNamespace(), ingress.GetName(), types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + r.log.WithField("err", err.Error()).Error("error patching alb ingress") + return fmt.Errorf("error patching alb ingress `%s`: %v", ingressName, err) + } + return nil +} + +// Gets the controller configuration flag for verifying alb weights +func (r *Reconciler) getShouldVerifyWeightCfg() bool { if r.cfg.VerifyWeight != nil { return *r.cfg.VerifyWeight } @@ -120,9 +171,25 @@ func (r *Reconciler) shouldVerifyWeight() bool { } func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (*bool, error) { - if !r.shouldVerifyWeight() { + if 
!r.getShouldVerifyWeightCfg() { + r.cfg.Status.ALB = nil return nil, nil } + + if !rolloututil.ShouldVerifyWeight(r.cfg.Rollout) { + // If we should not verify weight but the ALB status has not been set yet due to a Rollout resource just being + // installed in the cluster we want to actually run the rest of the function, so we do not return if + // r.cfg.Rollout.Status.ALB is nil. However, if we should not verify, and we have already updated the status once + // we return early to avoid calling AWS apis. + if r.cfg.Rollout.Status.ALB != nil { + return nil, nil + } + } + + if r.cfg.Status.ALB == nil { + r.cfg.Status.ALB = &v1alpha1.ALBStatus{} + } + ctx := context.TODO() rollout := r.cfg.Rollout ingressName := rollout.Spec.Strategy.Canary.TrafficRouting.ALB.Ingress @@ -132,9 +199,8 @@ func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations .. } resourceIDToDest := map[string]v1alpha1.WeightDestination{} - canaryService := rollout.Spec.Strategy.Canary.CanaryService + stableService, canaryService := trafficrouting.GetStableAndCanaryServices(rollout) canaryResourceID := aws.BuildTargetGroupResourceID(rollout.Namespace, ingress.GetName(), canaryService, rollout.Spec.Strategy.Canary.TrafficRouting.ALB.ServicePort) - stableService := rollout.Spec.Strategy.Canary.StableService stableResourceID := aws.BuildTargetGroupResourceID(rollout.Namespace, ingress.GetName(), stableService, rollout.Spec.Strategy.Canary.TrafficRouting.ALB.ServicePort) for _, dest := range additionalDestinations { @@ -158,7 +224,7 @@ func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations .. return pointer.BoolPtr(false), err } if lb == nil || lb.LoadBalancerArn == nil { - r.log.Infof("LoadBalancer %s not found", lbIngress.Hostname) + r.cfg.Recorder.Warnf(rollout, record.EventOptions{EventReason: conditions.LoadBalancerNotFoundReason}, conditions.LoadBalancerNotFoundMessage, lbIngress.Hostname) return pointer.BoolPtr(false), nil } @@ -207,9 +273,8 @@ func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations .. 
return pointer.BoolPtr(numVerifiedWeights == 1+len(additionalDestinations)), nil } -func getForwardActionString(r *v1alpha1.Rollout, port int32, desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) string { - stableService := r.Spec.Strategy.Canary.StableService - canaryService := r.Spec.Strategy.Canary.CanaryService +func getForwardActionString(r *v1alpha1.Rollout, port int32, desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (string, error) { + stableService, canaryService := trafficrouting.GetStableAndCanaryServices(r) portStr := strconv.Itoa(int(port)) stableWeight := int32(100) targetGroups := make([]ingressutil.ALBTargetGroup, 0) @@ -245,24 +310,211 @@ func getForwardActionString(r *v1alpha1.Rollout, port int32, desiredWeight int32 TargetGroups: targetGroups, }, } + + var stickinessConfig = r.Spec.Strategy.Canary.TrafficRouting.ALB.StickinessConfig + if stickinessConfig != nil && stickinessConfig.Enabled { + // AWS API valid range + // https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_TargetGroupStickinessConfig.html + if stickinessConfig.DurationSeconds < 1 || stickinessConfig.DurationSeconds > 604800 { + return "", fmt.Errorf("TargetGroupStickinessConfig's duration must be between 1 and 604800 seconds (7 days)!") + } + newStickyConfig := ingressutil.ALBTargetGroupStickinessConfig{ + Enabled: true, + DurationSeconds: stickinessConfig.DurationSeconds, + } + action.ForwardConfig.TargetGroupStickinessConfig = &newStickyConfig + } + bytes := jsonutil.MustMarshal(action) - return string(bytes) + return string(bytes), nil } func getDesiredAnnotations(current *ingressutil.Ingress, r *v1alpha1.Rollout, port int32, desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (map[string]string, error) { desired := current.DeepCopy().GetAnnotations() key := ingressutil.ALBActionAnnotationKey(r) - desired[key] = getForwardActionString(r, port, desiredWeight, additionalDestinations...) - m, err := ingressutil.NewManagedALBActions(desired[ingressutil.ManagedActionsAnnotation]) + value, err := getForwardActionString(r, port, desiredWeight, additionalDestinations...) 
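Since the forward-action string produced by getForwardActionString is written verbatim into the `alb.ingress.kubernetes.io/actions.<service>` annotation, a standalone sketch of the JSON it yields can be handy when eyeballing a patched Ingress. This is a minimal illustration only; the struct names below are simplified stand-ins for the ingressutil.ALB* types used in this diff, not the real definitions.

```go
// Sketch of the forward-action JSON for a 10% canary step: canary target group
// first, stable second, weights summing to 100 (mirrors actionTemplate in alb_test.go).
package main

import (
	"encoding/json"
	"fmt"
)

type targetGroup struct {
	ServiceName string `json:"ServiceName"`
	ServicePort string `json:"ServicePort"`
	Weight      *int64 `json:"Weight,omitempty"`
}

type forwardConfig struct {
	TargetGroups []targetGroup `json:"TargetGroups"`
}

type action struct {
	Type          string        `json:"Type"`
	ForwardConfig forwardConfig `json:"ForwardConfig"`
}

func main() {
	desiredWeight := int64(10) // e.g. a setWeight: 10 canary step
	stableWeight := int64(100) - desiredWeight
	a := action{
		Type: "forward",
		ForwardConfig: forwardConfig{
			TargetGroups: []targetGroup{
				{ServiceName: "canary-svc", ServicePort: "443", Weight: &desiredWeight},
				{ServiceName: "stable-svc", ServicePort: "443", Weight: &stableWeight},
			},
		},
	}
	b, _ := json.Marshal(a)
	fmt.Println(string(b))
}
```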
+ if err != nil { + return nil, err + } + desired[key] = value + return modifyManagedAnnotation(desired, r.Name, true, key) +} + +func getDesiredHeaderAnnotations(current *ingressutil.Ingress, r *v1alpha1.Rollout, port int32, headerRoute *v1alpha1.SetHeaderRoute) (map[string]string, error) { + desired := current.DeepCopy().GetAnnotations() + actionKey := ingressutil.ALBHeaderBasedActionAnnotationKey(r, headerRoute.Name) + conditionKey := ingressutil.ALBHeaderBasedConditionAnnotationKey(r, headerRoute.Name) + add := headerRoute.Match != nil + if add { + actionValue, err := getTrafficForwardActionString(r, port) + if err != nil { + return nil, err + } + conditionValue, err := getTrafficForwardConditionString(headerRoute) + if err != nil { + return nil, err + } + desired[actionKey] = actionValue + desired[conditionKey] = conditionValue + } else { + delete(desired, actionKey) + delete(desired, conditionKey) + } + + return modifyManagedAnnotation(desired, r.Name, add, actionKey, conditionKey) +} + +func modifyManagedAnnotation(annotations map[string]string, rolloutName string, add bool, annotationKeys ...string) (map[string]string, error) { + m, err := ingressutil.NewManagedALBAnnotations(annotations[ingressutil.ManagedAnnotations]) if err != nil { return nil, err } - m[r.Name] = key - desired[ingressutil.ManagedActionsAnnotation] = m.String() - return desired, nil + managedAnnotation := m[rolloutName] + if managedAnnotation == nil { + managedAnnotation = ingressutil.ManagedALBAnnotation{} + } + for _, annotationKey := range annotationKeys { + if add { + if !hasValue(managedAnnotation, annotationKey) { + managedAnnotation = append(managedAnnotation, annotationKey) + } + } else { + managedAnnotation = removeValue(managedAnnotation, annotationKey) + } + } + m[rolloutName] = managedAnnotation + annotations[ingressutil.ManagedAnnotations] = m.String() + return annotations, nil +} + +func hasValue(array []string, key string) bool { + for _, item := range array { + if item == key { + return true + } + } + return false +} + +func removeValue(array []string, key string) []string { + for i, v := range array { + if v == key { + array = append(array[:i], array[i+1:]...) 
+ } + } + return array +} + +func getTrafficForwardActionString(r *v1alpha1.Rollout, port int32) (string, error) { + _, canaryService := trafficrouting.GetStableAndCanaryServices(r) + portStr := strconv.Itoa(int(port)) + weight := int64(100) + targetGroups := make([]ingressutil.ALBTargetGroup, 0) + // create target group for canary + targetGroups = append(targetGroups, ingressutil.ALBTargetGroup{ + ServiceName: canaryService, + ServicePort: portStr, + Weight: pointer.Int64Ptr(weight), + }) + + action := ingressutil.ALBAction{ + Type: "forward", + ForwardConfig: ingressutil.ALBForwardConfig{ + TargetGroups: targetGroups, + }, + } + + var stickinessConfig = r.Spec.Strategy.Canary.TrafficRouting.ALB.StickinessConfig + if stickinessConfig != nil && stickinessConfig.Enabled { + // AWS API valid range + // https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_TargetGroupStickinessConfig.html + if stickinessConfig.DurationSeconds < 1 || stickinessConfig.DurationSeconds > 604800 { + return "", fmt.Errorf("TargetGroupStickinessConfig's duration must be between 1 and 604800 seconds (7 days)!") + } + newStickyConfig := ingressutil.ALBTargetGroupStickinessConfig{ + Enabled: true, + DurationSeconds: stickinessConfig.DurationSeconds, + } + action.ForwardConfig.TargetGroupStickinessConfig = &newStickyConfig + } + + bytes := jsonutil.MustMarshal(action) + return string(bytes), nil +} + +func getTrafficForwardConditionString(headerRoute *v1alpha1.SetHeaderRoute) (string, error) { + var res []ingressutil.ALBCondition + for _, match := range headerRoute.Match { + condition := ingressutil.ALBCondition{ + Field: "http-header", + HttpHeaderConfig: ingressutil.HttpHeaderConfig{ + HttpHeaderName: match.HeaderName, + Values: []string{match.HeaderValue.Exact}, + }, + } + res = append(res, condition) + } + bytes := jsonutil.MustMarshal(res) + return string(bytes), nil } // UpdateHash informs a traffic routing reconciler about new canary/stable pod hashes func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { return nil } + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + return nil +} + +func (r *Reconciler) RemoveManagedRoutes() error { + if len(r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes) == 0 { + return nil + } + ctx := context.TODO() + rollout := r.cfg.Rollout + ingressName := rollout.Spec.Strategy.Canary.TrafficRouting.ALB.Ingress + + ingress, err := r.cfg.IngressWrapper.GetCached(rollout.Namespace, ingressName) + if err != nil { + return err + } + + desiredAnnotations := ingress.DeepCopy().GetAnnotations() + var actionKeys []string + for _, managedRoute := range rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes { + actionKey := ingressutil.ALBHeaderBasedActionAnnotationKey(rollout, managedRoute.Name) + conditionKey := ingressutil.ALBHeaderBasedConditionAnnotationKey(rollout, managedRoute.Name) + delete(desiredAnnotations, actionKey) + delete(desiredAnnotations, conditionKey) + actionKeys = append(actionKeys, actionKey, conditionKey) + } + desiredAnnotations, err = modifyManagedAnnotation(desiredAnnotations, rollout.Name, false, actionKeys...) 
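For reference, the conditions annotation assembled by getTrafficForwardConditionString above pairs with the header-route action annotation and carries the header match itself. The sketch below shows the shape of that value for a single exact-match header; the types are simplified stand-ins for ingressutil.ALBCondition and its HttpHeaderConfig.

```go
// Sketch of the alb.ingress.kubernetes.io/conditions.<route-name> value for a
// SetHeaderRoute matching an "Agent: Chrome" header.
package main

import (
	"encoding/json"
	"fmt"
)

type httpHeaderConfig struct {
	HttpHeaderName string   `json:"HttpHeaderName"`
	Values         []string `json:"Values"`
}

type condition struct {
	Field            string           `json:"Field"`
	HttpHeaderConfig httpHeaderConfig `json:"HttpHeaderConfig"`
}

func main() {
	conds := []condition{{
		Field: "http-header",
		HttpHeaderConfig: httpHeaderConfig{
			HttpHeaderName: "Agent",
			Values:         []string{"Chrome"},
		},
	}}
	b, _ := json.Marshal(conds)
	fmt.Println(string(b)) // lands on the Ingress next to the matching actions.<route-name> key
}
```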
+ if err != nil { + return err + } + + desiredIngress := ingressutil.NewIngressWithSpecAndAnnotations(ingress, desiredAnnotations) + + for _, managedRoute := range rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes { + desiredIngress.RemovePathByServiceName(managedRoute.Name) + } + + patch, modified, err := ingressutil.BuildIngressPatch(ingress.Mode(), ingress, desiredIngress, ingressutil.WithAnnotations(), ingressutil.WithSpec()) + if err != nil { + return nil + } + if !modified { + r.log.Info("no changes to the ALB Ingress for header routing") + return nil + } + r.log.WithField("patch", string(patch)).Debug("applying ALB Ingress patch") + r.cfg.Recorder.Eventf(rollout, record.EventOptions{EventReason: "PatchingALBIngress"}, "Updating Ingress `%s` removing managed routes", ingressName) + + _, err = r.cfg.IngressWrapper.Patch(ctx, ingress.GetNamespace(), ingress.GetName(), types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + r.log.WithField("err", err.Error()).Error("error patching alb ingress") + return fmt.Errorf("error patching alb ingress `%s`: %v", ingressName, err) + } + return nil +} diff --git a/rollout/trafficrouting/alb/alb_test.go b/rollout/trafficrouting/alb/alb_test.go index b3db5beba5..3a044c537f 100644 --- a/rollout/trafficrouting/alb/alb_test.go +++ b/rollout/trafficrouting/alb/alb_test.go @@ -27,7 +27,12 @@ import ( "github.com/argoproj/argo-rollouts/utils/record" ) -func fakeRollout(stableSvc, canarySvc, stableIng string, port int32) *v1alpha1.Rollout { +const STABLE_SVC = "stable-svc" +const CANARY_SVC = "canary-svc" +const PING_SVC = "ping-service" +const PONG_SVC = "pong-service" + +func fakeRollout(stableSvc, canarySvc string, pingPong *v1alpha1.PingPongSpec, stableIng string, port int32) *v1alpha1.Rollout { return &v1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{ Name: "rollout", @@ -38,6 +43,7 @@ func fakeRollout(stableSvc, canarySvc, stableIng string, port int32) *v1alpha1.R Canary: &v1alpha1.CanaryStrategy{ StableService: stableSvc, CanaryService: canarySvc, + PingPong: pingPong, TrafficRouting: &v1alpha1.RolloutTrafficRouting{ ALB: &v1alpha1.ALBTrafficRouting{ Ingress: stableIng, @@ -67,15 +73,41 @@ const actionTemplate = `{ } }` +const actionTemplateWithStickyConfig = `{ + "Type":"forward", + "ForwardConfig":{ + "TargetGroups":[ + { + "ServiceName":"%s", + "ServicePort":"%d", + "Weight":%d + },{ + "ServiceName":"%s", + "ServicePort":"%d", + "Weight":%d + } + ], + "TargetGroupStickinessConfig":{ + "DurationSeconds" : 300, + "Enabled" : true + } + } +}` + const actionTemplateWithExperiments = `{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d}]}}` func albActionAnnotation(stable string) string { return fmt.Sprintf("%s%s%s", ingressutil.ALBIngressAnnotation, ingressutil.ALBActionPrefix, stable) } -func ingress(name string, stableSvc, canarySvc string, port, weight int32, managedBy string) *extensionsv1beta1.Ingress { - managedByValue := fmt.Sprintf("%s:%s", managedBy, albActionAnnotation(stableSvc)) +func ingress(name, stableSvc, canarySvc, actionService string, port, weight int32, managedBy string, includeStickyConfig bool) *extensionsv1beta1.Ingress { + managedByValue := ingressutil.ManagedALBAnnotations{ + managedBy: ingressutil.ManagedALBAnnotation{albActionAnnotation(actionService)}, + } action := fmt.Sprintf(actionTemplate, 
canarySvc, port, weight, stableSvc, port, 100-weight) + if includeStickyConfig { + action = fmt.Sprintf(actionTemplateWithStickyConfig, canarySvc, port, weight, stableSvc, port, 100-weight) + } var a ingressutil.ALBAction err := json.Unmarshal([]byte(action), &a) if err != nil { @@ -87,8 +119,8 @@ func ingress(name string, stableSvc, canarySvc string, port, weight int32, manag Name: name, Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ - albActionAnnotation(stableSvc): string(jsonutil.MustMarshal(a)), - ingressutil.ManagedActionsAnnotation: managedByValue, + albActionAnnotation(actionService): string(jsonutil.MustMarshal(a)), + ingressutil.ManagedAnnotations: managedByValue.String(), }, }, Spec: extensionsv1beta1.IngressSpec{ @@ -99,7 +131,7 @@ func ingress(name string, stableSvc, canarySvc string, port, weight int32, manag Paths: []extensionsv1beta1.HTTPIngressPath{ { Backend: extensionsv1beta1.IngressBackend{ - ServiceName: stableSvc, + ServiceName: actionService, ServicePort: intstr.Parse("use-annotation"), }, }, @@ -115,7 +147,7 @@ func ingress(name string, stableSvc, canarySvc string, port, weight int32, manag func TestType(t *testing.T) { client := fake.NewSimpleClientset() - rollout := fakeRollout("stable-service", "canary-service", "stable-ingress", 443) + rollout := fakeRollout("stable-service", "canary-service", nil, "stable-ingress", 443) r, err := NewReconciler(ReconcilerConfig{ Rollout: rollout, Client: client, @@ -126,8 +158,15 @@ func TestType(t *testing.T) { assert.NoError(t, err) } +func TestAddManagedAnnotation(t *testing.T) { + annotations, _ := modifyManagedAnnotation(map[string]string{}, "argo-rollouts", true, "alb.ingress.kubernetes.io/actions.action1", "alb.ingress.kubernetes.io/conditions.action1") + assert.Equal(t, annotations[ingressutil.ManagedAnnotations], "{\"argo-rollouts\":[\"alb.ingress.kubernetes.io/actions.action1\",\"alb.ingress.kubernetes.io/conditions.action1\"]}") + _, err := modifyManagedAnnotation(map[string]string{ingressutil.ManagedAnnotations: "invalid, non-json value"}, "some-rollout", false) + assert.Error(t, err) +} + func TestIngressNotFound(t *testing.T) { - ro := fakeRollout("stable-service", "canary-service", "stable-ingress", 443) + ro := fakeRollout("stable-service", "canary-service", nil, "stable-ingress", 443) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) @@ -147,9 +186,9 @@ func TestIngressNotFound(t *testing.T) { } func TestServiceNotFoundInIngress(t *testing.T) { - ro := fakeRollout("stable-stable", "canary-service", "ingress", 443) + ro := fakeRollout("stable-stable", "canary-service", nil, "ingress", 443) ro.Spec.Strategy.Canary.TrafficRouting.ALB.RootService = "invalid-svc" - i := ingress("ingress", "stable-service", "canary-svc", 443, 50, ro.Name) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 50, ro.Name, false) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) @@ -170,8 +209,8 @@ func TestServiceNotFoundInIngress(t *testing.T) { } func TestNoChanges(t *testing.T) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 10, ro.Name) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 
10, ro.Name, false) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) @@ -193,9 +232,9 @@ func TestNoChanges(t *testing.T) { } func TestErrorOnInvalidManagedBy(t *testing.T) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 5, ro.Name) - i.Annotations[ingressutil.ManagedActionsAnnotation] = "test" + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) + i.Annotations[ingressutil.ManagedAnnotations] = "test" client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) @@ -216,8 +255,8 @@ func TestErrorOnInvalidManagedBy(t *testing.T) { } func TestSetInitialDesiredWeight(t *testing.T) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 5, ro.Name) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) i.Annotations = map[string]string{} client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) @@ -239,9 +278,59 @@ func TestSetInitialDesiredWeight(t *testing.T) { assert.Len(t, client.Actions(), 1) } +func TestSetWeightPingPong(t *testing.T) { + pp := &v1alpha1.PingPongSpec{PingService: PING_SVC, PongService: PONG_SVC} + ro := fakeRollout("", "", pp, "ingress", 443) + ro.Spec.Strategy.Canary.TrafficRouting.ALB.RootService = "root-service" + ro.Status.Canary.StablePingPong = PONG_SVC + i := ingress("ingress", PING_SVC, PONG_SVC, "root-service", 443, 10, ro.Name, false) + //i.Spec. 
+ i.Annotations = map[string]string{} + client := fake.NewSimpleClientset(i) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + err = r.SetWeight(10) + assert.Nil(t, err) + actions := client.Actions() + assert.Len(t, actions, 1) +} + +func TestUpdateDesiredWeightWithStickyConfig(t *testing.T) { + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, true) + client := fake.NewSimpleClientset(i) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + assert.Nil(t, err) + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + err = r.SetWeight(10) + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) +} + func TestUpdateDesiredWeight(t *testing.T) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 5, ro.Name) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) @@ -265,14 +354,59 @@ func TestUpdateDesiredWeight(t *testing.T) { // TestGetForwardActionStringMarshalsZeroCorrectly ensures that the annotation does not omit default value zero when marshalling // the forward action func TestGetForwardActionStringMarshalsZeroCorrectly(t *testing.T) { - r := fakeRollout("stable", "canary", "ingress", 443) - forwardAction := getForwardActionString(r, 443, 0) + r := fakeRollout("stable", "canary", nil, "ingress", 443) + forwardAction, err := getForwardActionString(r, 443, 0) + if err != nil { + t.Fatal(err) + } assert.Contains(t, forwardAction, `"Weight":0`) } +func TestGetForwardActionStringMarshalsDisabledStickyConfigCorrectly(t *testing.T) { + r := fakeRollout("stable", "canary", nil, "ingress", 443) + stickinessConfig := v1alpha1.StickinessConfig{ + Enabled: false, + DurationSeconds: 0, + } + r.Spec.Strategy.Canary.TrafficRouting.ALB.StickinessConfig = &stickinessConfig + forwardAction, err := getForwardActionString(r, 443, 0) + if err != nil { + t.Fatal(err) + } + assert.Contains(t, forwardAction, `"Weight":0`) +} + +func TestGetForwardActionStringDetectsNegativeStickyConfigDuration(t *testing.T) { + r := fakeRollout("stable", "canary", nil, "ingress", 443) + stickinessConfig := v1alpha1.StickinessConfig{ + Enabled: true, + DurationSeconds: 0, + } + r.Spec.Strategy.Canary.TrafficRouting.ALB.StickinessConfig = &stickinessConfig + forwardAction, err := getForwardActionString(r, 443, 0) + + 
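TestGetForwardActionStringMarshalsZeroCorrectly exists because a zero weight is a meaningful value for an ALB target group, yet `omitempty` on a plain numeric field would silently drop it. Using a pointer (as the diff does with pointer.Int64Ptr) keeps `"Weight":0` in the marshalled annotation. A minimal, self-contained illustration of that difference:

```go
// With `omitempty`, an int64 of 0 disappears from the JSON, while a *int64
// pointing at 0 is preserved — which is exactly what the test asserts on.
package main

import (
	"encoding/json"
	"fmt"
)

type plain struct {
	Weight int64 `json:"Weight,omitempty"`
}

type pointered struct {
	Weight *int64 `json:"Weight,omitempty"`
}

func main() {
	zero := int64(0)
	a, _ := json.Marshal(plain{Weight: 0})
	b, _ := json.Marshal(pointered{Weight: &zero})
	fmt.Println(string(a)) // {}            – zero silently omitted
	fmt.Println(string(b)) // {"Weight":0}  – zero preserved
}
```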
assert.NotNilf(t, forwardAction, "There should be no forwardAction being generated: %v", forwardAction) + expectedErrorMsg := "TargetGroupStickinessConfig's duration must be between 1 and 604800 seconds (7 days)!" + assert.EqualErrorf(t, err, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, err) +} + +func TestGetForwardActionStringDetectsTooLargeStickyConfigDuration(t *testing.T) { + r := fakeRollout("stable", "canary", nil, "ingress", 443) + stickinessConfig := v1alpha1.StickinessConfig{ + Enabled: true, + DurationSeconds: 604800 + 1, + } + r.Spec.Strategy.Canary.TrafficRouting.ALB.StickinessConfig = &stickinessConfig + forwardAction, err := getForwardActionString(r, 443, 0) + + assert.NotNilf(t, forwardAction, "There should be no forwardAction being generated: %v", forwardAction) + expectedErrorMsg := "TargetGroupStickinessConfig's duration must be between 1 and 604800 seconds (7 days)!" + assert.EqualErrorf(t, err, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, err) +} + func TestErrorPatching(t *testing.T) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 5, ro.Name) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) client := fake.NewSimpleClientset(i) client.ReactionChain = nil k8sI := kubeinformers.NewSharedInformerFactory(client, 0) @@ -337,8 +471,12 @@ func (f *fakeAWSClient) getAlbStatus() *v1alpha1.ALBStatus { func TestVerifyWeight(t *testing.T) { newFakeReconciler := func(status *v1alpha1.RolloutStatus) (*Reconciler, *fakeAWSClient) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 5, ro.Name) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + ro.Status.StableRS = "a45fe23" + ro.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }} + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) i.Status.LoadBalancer = corev1.LoadBalancerStatus{ Ingress: []corev1.LoadBalancerIngress{ { @@ -371,12 +509,36 @@ func TestVerifyWeight(t *testing.T) { // LoadBalancer not found { - r, _ := newFakeReconciler(nil) + var status v1alpha1.RolloutStatus + r, _ := newFakeReconciler(&status) + weightVerified, err := r.VerifyWeight(10) + assert.NoError(t, err) + assert.False(t, *weightVerified) + } + + // VeryifyWeight not needed + { + var status v1alpha1.RolloutStatus + r, _ := newFakeReconciler(&status) + status.StableRS = "" + r.cfg.Rollout.Status.StableRS = "" weightVerified, err := r.VerifyWeight(10) assert.NoError(t, err) assert.False(t, *weightVerified) } + // VeryifyWeight that we do not need to verify weight and status.ALB is already set + { + var status v1alpha1.RolloutStatus + r, _ := newFakeReconciler(&status) + r.cfg.Rollout.Status.ALB = &v1alpha1.ALBStatus{} + r.cfg.Rollout.Status.CurrentStepIndex = nil + r.cfg.Rollout.Spec.Strategy.Canary.Steps = nil + weightVerified, err := r.VerifyWeight(10) + assert.NoError(t, err) + assert.Nil(t, weightVerified) + } + // LoadBalancer found, not at weight { var status v1alpha1.RolloutStatus @@ -412,7 +574,7 @@ func TestVerifyWeight(t *testing.T) { weightVerified, err := r.VerifyWeight(10) assert.NoError(t, err) assert.False(t, *weightVerified) - assert.Equal(t, status.ALB, *fakeClient.getAlbStatus()) + assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus()) } // LoadBalancer found, 
at weight @@ -450,13 +612,13 @@ func TestVerifyWeight(t *testing.T) { weightVerified, err := r.VerifyWeight(10) assert.NoError(t, err) assert.True(t, *weightVerified) - assert.Equal(t, status.ALB, *fakeClient.getAlbStatus()) + assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus()) } } func TestSetWeightWithMultipleBackends(t *testing.T) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 0, ro.Name) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 0, ro.Name, false) client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) @@ -497,7 +659,7 @@ func TestSetWeightWithMultipleBackends(t *testing.T) { assert.Nil(t, err) servicePort := 443 - expectedAction := fmt.Sprintf(actionTemplateWithExperiments, "canary-svc", servicePort, 10, weightDestinations[0].ServiceName, servicePort, weightDestinations[0].Weight, weightDestinations[1].ServiceName, servicePort, weightDestinations[1].Weight, "stable-svc", servicePort, 85) + expectedAction := fmt.Sprintf(actionTemplateWithExperiments, CANARY_SVC, servicePort, 10, weightDestinations[0].ServiceName, servicePort, weightDestinations[0].Weight, weightDestinations[1].ServiceName, servicePort, weightDestinations[1].Weight, STABLE_SVC, servicePort, 85) assert.Equal(t, expectedAction, patchedI.Annotations["alb.ingress.kubernetes.io/actions.stable-svc"]) } @@ -515,9 +677,13 @@ func TestVerifyWeightWithAdditionalDestinations(t *testing.T) { }, } newFakeReconciler := func(status *v1alpha1.RolloutStatus) (*Reconciler, *fakeAWSClient) { - ro := fakeRollout("stable-svc", "canary-svc", "ingress", 443) - i := ingress("ingress", "stable-svc", "canary-svc", 443, 0, ro.Name) - i.Annotations["alb.ingress.kubernetes.io/actions.stable-svc"] = fmt.Sprintf(actionTemplateWithExperiments, "canary-svc", 443, 10, weightDestinations[0].ServiceName, 443, weightDestinations[0].Weight, weightDestinations[1].ServiceName, 443, weightDestinations[1].Weight, "stable-svc", 443, 85) + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + ro.Status.StableRS = "a45fe23" + ro.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }} + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 0, ro.Name, false) + i.Annotations["alb.ingress.kubernetes.io/actions.stable-svc"] = fmt.Sprintf(actionTemplateWithExperiments, CANARY_SVC, 443, 10, weightDestinations[0].ServiceName, 443, weightDestinations[0].Weight, weightDestinations[1].ServiceName, 443, weightDestinations[1].Weight, STABLE_SVC, 443, 85) i.Status.LoadBalancer = corev1.LoadBalancerStatus{ Ingress: []corev1.LoadBalancerIngress{ @@ -584,7 +750,7 @@ func TestVerifyWeightWithAdditionalDestinations(t *testing.T) { weightVerified, err := r.VerifyWeight(10, weightDestinations...) assert.NoError(t, err) assert.False(t, *weightVerified) - assert.Equal(t, status.ALB, *fakeClient.getAlbStatus()) + assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus()) } // LoadBalancer found, with incorrect weights @@ -642,7 +808,7 @@ func TestVerifyWeightWithAdditionalDestinations(t *testing.T) { weightVerified, err := r.VerifyWeight(10, weightDestinations...) 
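These tests exercise the three outcomes VerifyWeight can now produce: a nil *bool when verification is skipped (for example, the rollout is fully promoted and status.ALB is already populated), false when the ALB has not yet converged on the desired weights, and true when the target-group weights match. A hypothetical caller-side sketch of how those outcomes might be interpreted, based only on the behavior shown here:

```go
package main

import "fmt"

// describeVerification is illustrative only; it is not part of the controller.
func describeVerification(weightVerified *bool, err error) string {
	if err != nil {
		return "verification errored: " + err.Error()
	}
	switch {
	case weightVerified == nil:
		return "verification not applicable; no AWS calls needed"
	case *weightVerified:
		return "ALB target-group weights match the desired weight"
	default:
		return "ALB not yet at the desired weight; will re-check"
	}
}

func main() {
	verified := true
	fmt.Println(describeVerification(nil, nil))
	fmt.Println(describeVerification(&verified, nil))
}
```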
assert.NoError(t, err) assert.False(t, *weightVerified) - assert.Equal(t, status.ALB, *fakeClient.getAlbStatus()) + assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus()) } // LoadBalancer found, with all correct weights @@ -700,6 +866,150 @@ func TestVerifyWeightWithAdditionalDestinations(t *testing.T) { weightVerified, err := r.VerifyWeight(10, weightDestinations...) assert.NoError(t, err) assert.True(t, *weightVerified) - assert.Equal(t, status.ALB, *fakeClient.getAlbStatus()) + assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus()) + } +} + +func TestSetHeaderRoute(t *testing.T) { + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = []v1alpha1.MangedRoutes{ + {Name: "header-route"}, + } + i := ingress("ingress", STABLE_SVC, CANARY_SVC, "action1", 443, 10, ro.Name, false) + client := fake.NewSimpleClientset(i) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + err = r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "header-route", + Match: []v1alpha1.HeaderRoutingMatch{{ + HeaderName: "Agent", + HeaderValue: &v1alpha1.StringMatch{ + Prefix: "Chrome", + }, + }}, + }) + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) + + // no managed routes, no changes expected + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) +} + +func TestRemoveManagedRoutes(t *testing.T) { + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = []v1alpha1.MangedRoutes{ + {Name: "header-route"}, + } + i := ingress("ingress", STABLE_SVC, CANARY_SVC, "action1", 443, 10, ro.Name, false) + managedByValue := ingressutil.ManagedALBAnnotations{ + ro.Name: ingressutil.ManagedALBAnnotation{ + "alb.ingress.kubernetes.io/actions.action1", + "alb.ingress.kubernetes.io/actions.header-route", + "alb.ingress.kubernetes.io/conditions.header-route", + }, + } + i.Annotations["alb.ingress.kubernetes.io/actions.header-route"] = "{}" + i.Annotations["alb.ingress.kubernetes.io/conditions.header-route"] = "{}" + i.Annotations[ingressutil.ManagedAnnotations] = managedByValue.String() + i.Spec.Rules = []extensionsv1beta1.IngressRule{ + { + IngressRuleValue: extensionsv1beta1.IngressRuleValue{ + HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ + Paths: []extensionsv1beta1.HTTPIngressPath{ + { + Backend: extensionsv1beta1.IngressBackend{ + ServiceName: "action1", + ServicePort: intstr.Parse("use-annotation"), + }, + }, + }, + }, + }, + }, + { + IngressRuleValue: extensionsv1beta1.IngressRuleValue{ + HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ + Paths: []extensionsv1beta1.HTTPIngressPath{ + { + Backend: extensionsv1beta1.IngressBackend{ + ServiceName: "header-route", + ServicePort: intstr.Parse("use-annotation"), + }, + }, + }, + }, + }, + }, + } + + client := fake.NewSimpleClientset(i) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := 
ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) } + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + + err = r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "header-route", + }) + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + assert.Len(t, client.Actions(), 2) +} + +func TestSetMirrorRoute(t *testing.T) { + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 10, ro.Name, false) + client := fake.NewSimpleClientset() + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + err = r.SetMirrorRoute(&v1alpha1.SetMirrorRoute{ + Name: "mirror-route", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{Exact: "GET"}, + }}, + }) + assert.Nil(t, err) + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + + assert.Len(t, client.Actions(), 0) } diff --git a/rollout/trafficrouting/ambassador/ambassador.go b/rollout/trafficrouting/ambassador/ambassador.go index 97036449e8..5c50f38f26 100644 --- a/rollout/trafficrouting/ambassador/ambassador.go +++ b/rollout/trafficrouting/ambassador/ambassador.go @@ -115,6 +115,10 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 return formatErrors(errs) } +func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) error { + return nil +} + func formatErrors(errs []error) error { errorsCount := len(errs) if errorsCount == 0 { @@ -327,3 +331,11 @@ func (r *Reconciler) sendEvent(eventType, id, msg string) { func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { return nil } + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + return nil +} + +func (r *Reconciler) RemoveManagedRoutes() error { + return nil +} diff --git a/rollout/trafficrouting/ambassador/ambassador_test.go b/rollout/trafficrouting/ambassador/ambassador_test.go index 5a558ff25a..bd6e4613de 100644 --- a/rollout/trafficrouting/ambassador/ambassador_test.go +++ b/rollout/trafficrouting/ambassador/ambassador_test.go @@ -519,6 +519,105 @@ func TestReconciler_SetWeight(t *testing.T) { }) } +func TestReconcilerSetHeaderRoute(t *testing.T) { + type fixture struct { + rollout *v1alpha1.Rollout + fakeClient *fakeClient + recorder record.EventRecorder + reconciler *ambassador.Reconciler + } + + setup := func() *fixture { + r := rollout("main-service", "canary-service", []string{"myapp-mapping"}) + fakeClient := &fakeClient{} + rec := record.NewFakeEventRecorder() + l, _ := test.NewNullLogger() + return &fixture{ + rollout: r, + fakeClient: fakeClient, + recorder: rec, + reconciler: &ambassador.Reconciler{ + Rollout: r, + Client: fakeClient, + 
Recorder: rec, + Log: l.WithContext(context.TODO()), + }, + } + } + t.Run("SetHeaderRoute", func(t *testing.T) { + t.Run("will always return nil", func(t *testing.T) { + // given + t.Parallel() + f := setup() + + // when + err := f.reconciler.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "set-header", + Match: []v1alpha1.HeaderRoutingMatch{{ + HeaderName: "header-name", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "value", + }, + }}, + }) + + // then + assert.Nil(t, err) + + err = f.reconciler.RemoveManagedRoutes() + assert.Nil(t, err) + }) + }) +} + +func TestReconcilerSetMirrorRoute(t *testing.T) { + type fixture struct { + rollout *v1alpha1.Rollout + fakeClient *fakeClient + recorder record.EventRecorder + reconciler *ambassador.Reconciler + } + + setup := func() *fixture { + r := rollout("main-service", "canary-service", []string{"myapp-mapping"}) + fakeClient := &fakeClient{} + rec := record.NewFakeEventRecorder() + l, _ := test.NewNullLogger() + return &fixture{ + rollout: r, + fakeClient: fakeClient, + recorder: rec, + reconciler: &ambassador.Reconciler{ + Rollout: r, + Client: fakeClient, + Recorder: rec, + Log: l.WithContext(context.TODO()), + }, + } + } + t.Run("SetMirrorRoute", func(t *testing.T) { + t.Run("will always return nil", func(t *testing.T) { + // given + t.Parallel() + f := setup() + + // when + err := f.reconciler.SetMirrorRoute(&v1alpha1.SetMirrorRoute{ + Name: "mirror-route", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{Exact: "GET"}, + }}, + }) + + // then + assert.Nil(t, err) + + err = f.reconciler.RemoveManagedRoutes() + assert.Nil(t, err) + }) + }) +} + func TestGetMappingService(t *testing.T) { t.Run("will return empty string if service not found", func(t *testing.T) { // given diff --git a/rollout/trafficrouting/appmesh/appmesh.go b/rollout/trafficrouting/appmesh/appmesh.go new file mode 100644 index 0000000000..f0528f779a --- /dev/null +++ b/rollout/trafficrouting/appmesh/appmesh.go @@ -0,0 +1,401 @@ +package appmesh + +import ( + "context" + "errors" + "fmt" + + "github.com/sirupsen/logrus" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/dynamic" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + logutil "github.com/argoproj/argo-rollouts/utils/log" + "github.com/argoproj/argo-rollouts/utils/record" +) + +const ( + Type = "AppMesh" + ErrVirtualServiceNotUsingVirtualRouter = "Virtual-service is not associated with virtual-router" + ErrVirtualServiceMissing = "Virtual-service is missing" + ErrVirtualRouterMissing = "Virtual-router is missing" + ErrVirtualNodeMissing = "Virtual-node is missing" + ErrNotWellFormed = "not well-formed" + defaultCanaryHash = "canary-hash" + defaultStableHash = "stable-hash" +) + +var ( + // Only following route-types are supported when it comes to traffic splitting + supportedRouteTypes = []string{"httpRoute", "tcpRoute", "http2Route", "grpcRoute"} +) + +// ReconcilerConfig describes static configuration data for the AppMesh reconciler +type ReconcilerConfig struct { + Rollout *v1alpha1.Rollout + Client dynamic.Interface + Recorder record.EventRecorder +} + +// Reconciler holds required fields to reconcile AppMesh resources +type Reconciler struct { + rollout *v1alpha1.Rollout + client *ResourceClient + recorder record.EventRecorder + log *logrus.Entry +} + +// NewReconciler returns a trafficrouting reconciler to work with services using App Mesh custom resources 
such as +// virtual-services, virtual-routers and virtual-nodes to perform traffic-splitting functionality. This reconciler +// only works with appmesh.k8s.aws/v1beta2 custom resources. This reconciler uses dynamic client to avoid hard dependency +// on App Mesh controller. +func NewReconciler(cfg ReconcilerConfig) *Reconciler { + reconciler := Reconciler{ + rollout: cfg.Rollout, + client: NewResourceClient(cfg.Client), + recorder: cfg.Recorder, + log: logutil.WithRollout(cfg.Rollout), + } + return &reconciler +} + +// UpdateHash informs a traffic routing reconciler about new canary/stable pod hashes. UpdateHash initializes +// virtual-nodes with appropriate match-labels in pod-selector. It will mutate the pod-selector in two ways. +// Firstly it will update a label with name v1alpha1.DefaultRolloutUniqueLabelKey if one exists. Secondly it will add a +// new label with name v1alpha1.DefaultRolloutUniqueLabelKey if one does not exist. +func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { + ctx := context.TODO() + + r.log.Debugf("UpdateHash: canaryHash (%s), stableHash (%s)", canaryHash, stableHash) + + if stableHash == "" { + stableHash = defaultStableHash + } + rStableVnodeRef := r.rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualNodeGroup.StableVirtualNodeRef + err := r.updateVirtualNodeWithHash(ctx, rStableVnodeRef, stableHash) + if err != nil { + return err + } + + r.log.Debugf("UpdateHash: updated stable virtual-node (%s) pod-selector to (%s)", rStableVnodeRef.Name, stableHash) + + // If both hashes are same then virtual-nodes will end up with exact same podSelector. This is not allowed by the + // admission hook installed by appmesh controller. For now assuming that both hashes correspond to stable virtual-node + // when this happens and resetting canaryHash + if canaryHash == stableHash || canaryHash == "" { + canaryHash = defaultCanaryHash + } + rCanaryVnodeRef := r.rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualNodeGroup.CanaryVirtualNodeRef + err = r.updateVirtualNodeWithHash(ctx, rCanaryVnodeRef, canaryHash) + if err != nil { + return err + } + r.log.Debugf("UpdateHash: updated canary virtual-node (%s) pod-selector to (%s)", rCanaryVnodeRef.Name, canaryHash) + + return nil +} + +// SetWeight sets the canary weight to the desired weight. SetWeight relates to a step in rollout process where +// traffic-routing shifts weight to/from stable and canary (traffic-splitting) based on the configuration. In the +// context of App Mesh, traffic-splitting is performed by adding a virtual-router with route(s) for virtual-service. +// This route includes a match condition and an action. Action is defined as weighted-targets where each target is a +// virtual-node. SetWeight adjusts the weights on this route. 
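The weight arithmetic behind that route adjustment is simple: the stable virtual-node receives 100 minus the desired weight and the canary virtual-node receives the desired weight, both as int64 values as required by the AppMesh CRD. A minimal sketch of that split, with illustrative names only:

```go
package main

import "fmt"

type weightedTarget struct {
	VirtualNodeName string
	Weight          int64 // AppMesh WeightedTarget weights are int64
}

// splitWeight mirrors the stable/canary split applied to a route's weightedTargets.
func splitWeight(stable, canary string, desiredWeight int32) []weightedTarget {
	return []weightedTarget{
		{VirtualNodeName: stable, Weight: int64(100 - desiredWeight)},
		{VirtualNodeName: canary, Weight: int64(desiredWeight)},
	}
}

func main() {
	for _, wt := range splitWeight("mysvc-stable-vn", "mysvc-canary-vn", 55) {
		fmt.Printf("%s -> %d\n", wt.VirtualNodeName, wt.Weight)
	}
}
```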
+func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + ctx := context.TODO() + + r.log.Debugf("SetWeight: setting desired-weight to %d", desiredWeight) + + rVirtualService := r.rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualService + uVsvc, err := r.client.GetVirtualServiceCR(ctx, r.rollout.Namespace, rVirtualService.Name) + if err != nil { + if k8serrors.IsNotFound(err) { + r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "VirtualServiceNotFound"}, "VirtualService `%s` not found in namespace `%s`", rVirtualService.Name, r.rollout.Namespace) + return errors.New(ErrVirtualServiceMissing) + } + return err + } + + uVr, err := r.client.GetVirtualRouterCRForVirtualService(ctx, uVsvc) + if err != nil { + if k8serrors.IsNotFound(err) { + r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "VirtualRouterNotFound"}, "VirtualRouter for `%s` not found in namespace `%s`", rVirtualService.Name, r.rollout.Namespace) + return errors.New(ErrVirtualRouterMissing) + } + return err + } + + err = r.reconcileVirtualRouter(ctx, rVirtualService.Routes, uVr, desiredWeight) + if err != nil { + return err + } + + r.log.Debugf("SetWeight: updated virtual router (%s) with desiredWeight (%d)", uVr.GetName(), desiredWeight) + + return nil +} + +func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) error { + return nil +} + +type routeReconcileContext struct { + route map[string]interface{} + routeIndex int + routeFldPath *field.Path + rCanaryVnodeRef *v1alpha1.AppMeshVirtualNodeReference + rStableVnodeRef *v1alpha1.AppMeshVirtualNodeReference + routesFilterMap map[string]bool + desiredWeight int32 +} + +func (r *Reconciler) reconcileVirtualRouter(ctx context.Context, rRoutes []string, uVr *unstructured.Unstructured, desiredWeight int32) error { + uVrCopy := uVr.DeepCopy() + + rCanaryVnodeRef := r.rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualNodeGroup.CanaryVirtualNodeRef + rStableVnodeRef := r.rollout.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualNodeGroup.StableVirtualNodeRef + requiresUpdate := false + + routesFilterMap := make(map[string]bool) + for _, r := range rRoutes { + routesFilterMap[r] = true + } + + routesFldPath := field.NewPath("spec", "routes") + routesI, found, err := unstructured.NestedSlice(uVrCopy.Object, "spec", "routes") + if !found || err != nil { + return field.Invalid(routesFldPath, uVrCopy.GetName(), fmt.Sprintf("No routes found")) + } + + for idx, routeI := range routesI { + routeFldPath := routesFldPath.Index(idx) + route, ok := routeI.(map[string]interface{}) + if !ok { + return field.Invalid(routeFldPath, uVrCopy.GetName(), ErrNotWellFormed) + } + + reconCtx := &routeReconcileContext{ + route: route, + routeIndex: idx, + routeFldPath: routeFldPath, + rCanaryVnodeRef: rCanaryVnodeRef, + rStableVnodeRef: rStableVnodeRef, + routesFilterMap: routesFilterMap, + desiredWeight: desiredWeight, + } + ru, err := r.reconcileRoute(ctx, uVrCopy, reconCtx) + if err != nil { + return err + } + requiresUpdate = requiresUpdate || ru + } + + //update virtual-router with updated routes + err = unstructured.SetNestedSlice(uVrCopy.Object, routesI, "spec", "routes") + if err != nil { + return err + } + if requiresUpdate { + _, err = r.client.UpdateVirtualRouterCR(ctx, uVrCopy) + if err != nil { + return err + } + } + return nil +} + +func (r *Reconciler) reconcileRoute(ctx context.Context, uVr *unstructured.Unstructured, routeCtx *routeReconcileContext) (bool, error) { + 
routeName, ok := routeCtx.route["name"].(string) + if !ok { + return false, field.Invalid(routeCtx.routeFldPath.Child("name"), uVr.GetName(), ErrNotWellFormed) + } + + if len(routeCtx.routesFilterMap) > 0 { + // filter out the routes that are not specified in route filter + if _, ok := routeCtx.routesFilterMap[routeName]; !ok { + return false, nil + } + } + + routeRule, routeType, err := GetRouteRule(routeCtx.route) + if err != nil && routeRule == nil { + return false, field.Invalid(routeCtx.routeFldPath, uVr.GetName(), ErrNotWellFormed) + } + + weightedTargetsFldPath := routeCtx.routeFldPath.Child(routeType).Child("action").Child("weightedTargets") + weightedTargets, found, err := unstructured.NestedSlice(routeRule, "action", "weightedTargets") + if !found || err != nil { + return false, field.Invalid(weightedTargetsFldPath, uVr.GetName(), ErrNotWellFormed) + } + + requiresUpdate := false + for idx, wtI := range weightedTargets { + wtFldPath := weightedTargetsFldPath.Index(idx) + wt, ok := wtI.(map[string]interface{}) + if !ok { + return false, field.Invalid(wtFldPath, uVr.GetName(), ErrNotWellFormed) + } + wtVnRefFldPath := wtFldPath.Child("virtualNodeRef") + wtVnRef, ok := wt["virtualNodeRef"].(map[string]interface{}) + if !ok { + return false, field.Invalid(wtVnRefFldPath, uVr.GetName(), ErrNotWellFormed) + } + wtVnName, _ := wtVnRef["name"].(string) + wtVnNamespace := defaultIfEmpty(wtVnRef["namespace"], r.rollout.Namespace) + // weight in AppMesh CRD is int64 + //https://aws.github.io/aws-app-mesh-controller-for-k8s/reference/api_spec/#appmesh.k8s.aws/v1beta2.WeightedTarget + weight, err := toInt64(wt["weight"]) + if err != nil { + return false, field.Invalid(wtFldPath.Child("weight"), uVr.GetName(), ErrNotWellFormed) + } + if wtVnName == routeCtx.rStableVnodeRef.Name && wtVnNamespace == r.rollout.Namespace { + if weight != int64(100-routeCtx.desiredWeight) { + requiresUpdate = true + wt["weight"] = int64(100 - routeCtx.desiredWeight) + } + } else if wtVnName == routeCtx.rCanaryVnodeRef.Name && wtVnNamespace == r.rollout.Namespace { + if weight != int64(routeCtx.desiredWeight) { + requiresUpdate = true + wt["weight"] = int64(routeCtx.desiredWeight) + } + } + r.log.Debugf("SetWeight: updating weight of virtualNode (%s.%s) with existing weight of (%d) to (%d)", wtVnName, wtVnNamespace, weight, wt["weight"]) + } + + if requiresUpdate { + //update route with new weighted targets + err = unstructured.SetNestedSlice(routeCtx.route, weightedTargets, routeType, "action", "weightedTargets") + if err != nil { + return false, err + } + } + + return requiresUpdate, nil +} + +func (r *Reconciler) updateVirtualNodeWithHash(ctx context.Context, vnodeRef *v1alpha1.AppMeshVirtualNodeReference, hash string) error { + uVnode, err := r.client.GetVirtualNodeCR(ctx, r.rollout.Namespace, vnodeRef.Name) + if err != nil { + if k8serrors.IsNotFound(err) { + r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "VirtualNodeNotFound"}, "VirtualNode `%s` not found in namespace `%s`", vnodeRef.Name, r.rollout.Namespace) + return errors.New(ErrVirtualNodeMissing) + } + return err + } + + newVnode := uVnode.DeepCopy() + annotations := newVnode.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[v1alpha1.ManagedByRolloutsKey] = r.rollout.Name + ml, err := getPodSelectorMatchLabels(newVnode) + if err != nil { + return err + } + if hash != "" { + ml[v1alpha1.DefaultRolloutUniqueLabelKey] = hash + } else { + delete(ml, v1alpha1.DefaultRolloutUniqueLabelKey) + } + 
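The hash update itself is just a nested-map write on the unstructured VirtualNode: the pod-template-hash label is set under spec.podSelector.matchLabels so the virtual-node selects the right ReplicaSet's pods. A rough sketch using the same unstructured helpers, assuming the rollout hash label key is "rollouts-pod-template-hash" (the usual value of v1alpha1.DefaultRolloutUniqueLabelKey) and using a stripped-down stand-in object rather than a real appmesh.k8s.aws/v1beta2 VirtualNode:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	vnode := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "appmesh.k8s.aws/v1beta2",
		"kind":       "VirtualNode",
		"metadata":   map[string]interface{}{"name": "mysvc-canary-vn"},
		"spec":       map[string]interface{}{},
	}}

	// Write the selector labels, including the rollout hash, into the spec.
	labels := map[string]interface{}{
		"app":                        "mysvc",
		"rollouts-pod-template-hash": "canary-hash",
	}
	if err := unstructured.SetNestedMap(vnode.Object, labels, "spec", "podSelector", "matchLabels"); err != nil {
		panic(err)
	}

	got, _, _ := unstructured.NestedMap(vnode.Object, "spec", "podSelector", "matchLabels")
	fmt.Println(got) // map[app:mysvc rollouts-pod-template-hash:canary-hash]
}
```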
+ err = setPodSelectorMatchLabels(newVnode, ml) + if err != nil { + return err + } + + _, err = r.client.UpdateVirtualNodeCR(ctx, newVnode) + if err != nil { + return err + } + + return nil +} + +// VerifyWeight returns true if the canary is at the desired weight and additonalDestinations are at the weights specified +// Returns nil if weight verification is not supported or not applicable +func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (*bool, error) { + return nil, nil +} + +// Type returns the type of the traffic routing reconciler +func (r *Reconciler) Type() string { + return Type +} + +func getPodSelectorMatchLabels(vnode *unstructured.Unstructured) (map[string]interface{}, error) { + m, found, err := unstructured.NestedMap(vnode.Object, "spec", "podSelector", "matchLabels") + if err != nil { + return nil, err + } + if !found || m == nil { + return make(map[string]interface{}), nil + } + return m, nil +} + +func setPodSelectorMatchLabels(vnode *unstructured.Unstructured, ml map[string]interface{}) error { + return unstructured.SetNestedMap(vnode.Object, ml, "spec", "podSelector", "matchLabels") +} + +func toInt64(obj interface{}) (int64, error) { + switch i := obj.(type) { + case float64: + return int64(i), nil + case float32: + return int64(i), nil + case int64: + return i, nil + case int32: + return int64(i), nil + case int16: + return int64(i), nil + case int8: + return int64(i), nil + case uint64: + return int64(i), nil + case uint32: + return int64(i), nil + case uint16: + return int64(i), nil + case uint8: + return int64(i), nil + case int: + return int64(i), nil + case uint: + return int64(i), nil + default: + return 0, fmt.Errorf("toInt64: unknown value %v that is incompatible with int64", obj) + } +} + +func GetRouteRule(route map[string]interface{}) (map[string]interface{}, string, error) { + var routeRule map[string]interface{} + var routeType string + for _, rType := range supportedRouteTypes { + r, found, err := unstructured.NestedMap(route, rType) + if err != nil { + return nil, "", err + } + if found { + routeRule = r + routeType = rType + break + } + } + + if routeRule == nil { + return nil, "", errors.New("Route has unsupported route type") + } + + return routeRule, routeType, nil +} + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + return nil +} + +func (r *Reconciler) RemoveManagedRoutes() error { + return nil +} diff --git a/rollout/trafficrouting/appmesh/appmesh_test.go b/rollout/trafficrouting/appmesh/appmesh_test.go new file mode 100644 index 0000000000..5f7ed41843 --- /dev/null +++ b/rollout/trafficrouting/appmesh/appmesh_test.go @@ -0,0 +1,886 @@ +package appmesh + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/tj/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + k8stesting "k8s.io/client-go/testing" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + testutil "github.com/argoproj/argo-rollouts/test/util" + "github.com/argoproj/argo-rollouts/utils/record" + unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" +) + +const ( + sampleOldCanaryHash = "canary-old" + sampleNewCanaryHash = "canary-new" + sampleOldStableHash = "stable-old" + sampleNewStableHash = "stable-new" +) + +func fakeRollout() *v1alpha1.Rollout { + return &v1alpha1.Rollout{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "rollout", + Namespace: "myns", + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{ + VirtualService: &v1alpha1.AppMeshVirtualService{ + Name: "mysvc", + }, + VirtualNodeGroup: &v1alpha1.AppMeshVirtualNodeGroup{ + CanaryVirtualNodeRef: &v1alpha1.AppMeshVirtualNodeReference{ + Name: "mysvc-canary-vn", + }, + StableVirtualNodeRef: &v1alpha1.AppMeshVirtualNodeReference{ + Name: "mysvc-stable-vn", + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestSetWeightWithMissingVsvc(t *testing.T) { + client := testutil.NewFakeDynamicClient() + ro := fakeRollout() + cfg := ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + err := r.SetWeight(0) + assert.EqualError(t, err, ErrVirtualServiceMissing) + actions := client.Actions() + assert.Len(t, actions, 1) + assert.True(t, actions[0].Matches("get", "virtualservices")) +} + +func TestSetWeightVsvcWithVnodeProvider(t *testing.T) { + vsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVnode) + client := testutil.NewFakeDynamicClient(vsvc) + ro := fakeRollout() + cfg := ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + err := r.SetWeight(0) + assert.EqualError(t, err, ErrVirtualServiceNotUsingVirtualRouter) + actions := client.Actions() + assert.Len(t, actions, 1) + assert.True(t, actions[0].Matches("get", "virtualservices")) +} + +func TestSetWeightForVsvcWithMissingVrouter(t *testing.T) { + vsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter) + client := testutil.NewFakeDynamicClient(vsvc) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + err := r.SetWeight(50) + assert.EqualError(t, err, ErrVirtualRouterMissing) + actions := client.Actions() + assert.Len(t, actions, 2) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) +} + +func TestSetWeightForVsvcWithVrouter(t *testing.T) { + type args struct { + vsvc *unstructured.Unstructured + vrouter *unstructured.Unstructured + routeType string + rollout *v1alpha1.Rollout + } + + fixtures := []struct { + name string + args args + }{ + { + name: "http", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTPRoutes), + routeType: "httpRoute", + rollout: fakeRollout(), + }, + }, + { + name: "tcp", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithTCPRoutes), + routeType: "tcpRoute", + rollout: fakeRollout(), + }, + }, + { + name: "http2", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTP2Routes), + routeType: "http2Route", + rollout: fakeRollout(), + }, + }, + { + name: "grpc", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithGRPCRoutes), + routeType: "grpcRoute", + rollout: fakeRollout(), + }, + }, + } + + for _, wantUpdate := range []bool{true, false} { + for _, f := range fixtures { + fixture := f + t.Run(fmt.Sprintf("%s-%t", 
fixture.name, wantUpdate), func(t *testing.T) { + t.Parallel() + client := testutil.NewFakeDynamicClient(fixture.args.vsvc, fixture.args.vrouter) + cfg := ReconcilerConfig{ + Rollout: fixture.args.rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + desiredWeight := 0 + if wantUpdate { + desiredWeight = 55 + } + err := r.SetWeight(int32(desiredWeight)) + assert.Nil(t, err) + actions := client.Actions() + if wantUpdate { + assert.Len(t, actions, 3) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) + assert.True(t, actions[2].Matches("update", "virtualrouters")) + assertSetWeightAction(t, actions[2], int64(desiredWeight), fixture.args.routeType) + } else { + assert.Len(t, actions, 2) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) + } + }) + } + } +} + +func TestSetWeightWithUpdateVirtualRouterError(t *testing.T) { + vsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter) + vrouter := unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTPRoutes) + client := testutil.NewFakeDynamicClient(vsvc, vrouter) + updateError := errors.New("Failed to update virtual-router") + client.PrependReactor("update", "virtualrouters", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, ret, updateError + }) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + desiredWeight := 50 + err := r.SetWeight(int32(desiredWeight)) + assert.Equal(t, updateError.Error(), err.Error()) + actions := client.Actions() + assert.Len(t, actions, 3) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) + assert.True(t, actions[2].Matches("update", "virtualrouters")) + assertSetWeightAction(t, actions[2], int64(desiredWeight), "httpRoute") +} + +func TestSetWeightWithInvalidRoutes(t *testing.T) { + type args struct { + routes []interface{} + fieldPathWithError string + } + + fixtures := []struct { + name string + args args + }{ + { + name: "missing routes", + args: args{ + routes: nil, + fieldPathWithError: field.NewPath("spec", "routes").String(), + }, + }, + { + name: "route with malformed content", + args: args{ + routes: []interface{}{ + "malformed-content", + }, + fieldPathWithError: field.NewPath("spec", "routes").Index(0).String(), + }, + }, + { + name: "route with no name", + args: args{ + routes: []interface{}{ + map[string]interface{}{ + "httpRoute": map[string]interface{}{}, + }, + }, + fieldPathWithError: field.NewPath("spec", "routes").Index(0).Child("name").String(), + }, + }, + { + name: "route with bad route-type", + args: args{ + routes: []interface{}{ + map[string]interface{}{ + "name": "primary", + "badRoute": map[string]interface{}{}, + }, + }, + fieldPathWithError: field.NewPath("spec", "routes").Index(0).String(), + }, + }, + { + name: "route with no targets", + args: args{ + routes: []interface{}{ + map[string]interface{}{ + "name": "primary", + "httpRoute": map[string]interface{}{}, + }, + }, + fieldPathWithError: field.NewPath("spec", "routes").Index(0).Child("httpRoute").Child("action").Child("weightedTargets").String(), + }, + }, + } + + for _, f := range fixtures { + fixture := f + t.Run(f.name, func(t *testing.T) { + var err error + vsvc := 
unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter) + vrouter := unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTPRoutes) + if fixture.args.routes == nil { + unstructured.RemoveNestedField(vrouter.Object, "spec", "routes") + } else { + err = unstructured.SetNestedSlice(vrouter.Object, fixture.args.routes, "spec", "routes") + } + assert.Nil(t, err) + client := testutil.NewFakeDynamicClient(vsvc, vrouter) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + desiredWeight := 55 + err = r.SetWeight(int32(desiredWeight)) + assert.NotNil(t, err) + assert.Equal(t, (err.(*field.Error)).Field, fixture.args.fieldPathWithError) + actions := client.Actions() + assert.Len(t, actions, 2) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) + }) + } +} + +func TestSetWeightForRolloutWithRouteFilter(t *testing.T) { + type args struct { + vsvc *unstructured.Unstructured + vrouter *unstructured.Unstructured + routeType string + rollout *v1alpha1.Rollout + routeFilters []string + wantUpdate bool + } + + fixtures := []struct { + name string + args args + }{ + { + name: "with matched route-filter", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTPRoutes), + routeType: "httpRoute", + rollout: fakeRollout(), + routeFilters: []string{"primary"}, + wantUpdate: true, + }, + }, + { + name: "with mismatched route-filter", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTPRoutes), + routeType: "httpRoute", + rollout: fakeRollout(), + routeFilters: []string{"unknown"}, + wantUpdate: false, + }, + }, + { + name: "with multiple route-filter", + args: args{ + vsvc: unstructuredutil.StrToUnstructuredUnsafe(vsvcWithVrouter), + vrouter: unstructuredutil.StrToUnstructuredUnsafe(vrouterWithHTTPRoutes), + routeType: "httpRoute", + rollout: fakeRollout(), + routeFilters: []string{"unknown", "primary"}, + wantUpdate: true, + }, + }, + } + + for _, f := range fixtures { + fixture := f + t.Run(fixture.name, func(t *testing.T) { + t.Parallel() + client := testutil.NewFakeDynamicClient(fixture.args.vsvc, fixture.args.vrouter) + ro := fixture.args.rollout + ro.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualService.Routes = fixture.args.routeFilters + cfg := ReconcilerConfig{ + Rollout: fixture.args.rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + desiredWeight := 55 + err := r.SetWeight(int32(desiredWeight)) + assert.Nil(t, err) + actions := client.Actions() + if fixture.args.wantUpdate { + assert.Len(t, actions, 3) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) + assert.True(t, actions[2].Matches("update", "virtualrouters")) + assertSetWeightAction(t, actions[2], int64(desiredWeight), fixture.args.routeType) + } else { + assert.Len(t, actions, 2) + assert.True(t, actions[0].Matches("get", "virtualservices")) + assert.True(t, actions[1].Matches("get", "virtualrouters")) + } + }) + } +} + +func TestUpdateHash(t *testing.T) { + type args struct { + newCanaryHash string + newStableHash string + existingCanaryHash string + existingStableHash string + expectedCanaryHash string + expectedStableHash string + rollout *v1alpha1.Rollout + } + + 
fixtures := []struct { + name string + args args + }{ + { + name: "with no existing hashes", + args: args{ + newCanaryHash: sampleNewCanaryHash, + expectedCanaryHash: sampleNewCanaryHash, + newStableHash: sampleNewStableHash, + expectedStableHash: sampleNewStableHash, + rollout: fakeRollout(), + }, + }, + { + name: "with different existing hashes", + args: args{ + newCanaryHash: sampleNewCanaryHash, + existingCanaryHash: sampleOldCanaryHash, + expectedCanaryHash: sampleNewCanaryHash, + newStableHash: sampleNewStableHash, + existingStableHash: sampleOldStableHash, + expectedStableHash: sampleNewStableHash, + rollout: fakeRollout(), + }, + }, + { + name: "with existing hashes cleared", + args: args{ + newCanaryHash: "", + existingCanaryHash: sampleOldCanaryHash, + expectedCanaryHash: defaultCanaryHash, + newStableHash: "", + existingStableHash: sampleOldStableHash, + expectedStableHash: defaultStableHash, + rollout: fakeRollout(), + }, + }, + { + name: "with canaryHash == stableHash", + args: args{ + newCanaryHash: "12345", + existingCanaryHash: sampleOldCanaryHash, + expectedCanaryHash: defaultCanaryHash, + existingStableHash: sampleOldStableHash, + newStableHash: "12345", + expectedStableHash: "12345", + rollout: fakeRollout(), + }, + }, + } + + for _, f := range fixtures { + fixture := f + t.Run(fixture.name, func(t *testing.T) { + t.Parallel() + canaryVnode := createVnodeWithHash(baselineCanaryVnode, fixture.args.existingCanaryHash) + stableVnode := createVnodeWithHash(baselineStableVnode, fixture.args.existingStableHash) + client := testutil.NewFakeDynamicClient(canaryVnode, stableVnode) + cfg := ReconcilerConfig{ + Rollout: fixture.args.rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.UpdateHash(fixture.args.newCanaryHash, fixture.args.newStableHash) + assert.Nil(t, err) + actions := client.Actions() + assert.Len(t, actions, 4) + assert.True(t, actions[0].Matches("get", "virtualnodes")) + assert.True(t, actions[1].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[1], fixture.args.expectedStableHash) + assert.True(t, actions[2].Matches("get", "virtualnodes")) + assert.True(t, actions[3].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[3], fixture.args.expectedCanaryHash) + }) + } +} + +func TestSetHeaderRoute(t *testing.T) { + t.Run("not implemented check", func(t *testing.T) { + t.Parallel() + client := testutil.NewFakeDynamicClient() + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "set-header", + Match: []v1alpha1.HeaderRoutingMatch{{ + HeaderName: "header-name", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "value", + }, + }}, + }) + assert.Nil(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + + actions := client.Actions() + assert.Len(t, actions, 0) + }) +} + +func TestSetMirrorRoute(t *testing.T) { + t.Run("not implemented check", func(t *testing.T) { + t.Parallel() + client := testutil.NewFakeDynamicClient() + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.SetMirrorRoute(&v1alpha1.SetMirrorRoute{ + Name: "mirror-route", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{Exact: "GET"}, + }}, + }) + assert.Nil(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + + actions := 
client.Actions() + assert.Len(t, actions, 0) + }) +} + +func TestUpdateHashWhenGetStableVirtualNodeFails(t *testing.T) { + canaryHash := sampleNewCanaryHash + stableHash := sampleNewStableHash + + canaryVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineCanaryVnode) + client := testutil.NewFakeDynamicClient(canaryVnode) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.UpdateHash(canaryHash, stableHash) + assert.Equal(t, ErrVirtualNodeMissing, err.Error()) + actions := client.Actions() + assert.Len(t, actions, 1) + assert.True(t, actions[0].Matches("get", "virtualnodes")) +} + +func TestUpdateHashWhenGetCanaryVirtualNodeFails(t *testing.T) { + canaryHash := sampleNewCanaryHash + stableHash := sampleNewStableHash + + stableVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineStableVnode) + client := testutil.NewFakeDynamicClient(stableVnode) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.UpdateHash(canaryHash, stableHash) + assert.Equal(t, ErrVirtualNodeMissing, err.Error()) + actions := client.Actions() + assert.Len(t, actions, 3) + assert.True(t, actions[0].Matches("get", "virtualnodes")) + assert.True(t, actions[1].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[1], stableHash) + assert.True(t, actions[2].Matches("get", "virtualnodes")) +} + +func TestUpdateHashWhenUpdateStableVirtualNodeFails(t *testing.T) { + canaryHash := sampleNewCanaryHash + stableHash := sampleNewStableHash + + canaryVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineCanaryVnode) + stableVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineStableVnode) + client := testutil.NewFakeDynamicClient(canaryVnode, stableVnode) + updateError := errors.New("Failed to update virtual-node") + client.PrependReactor("update", "virtualnodes", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + updateAction := action.(k8stesting.UpdateAction) + uVnode, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(updateAction.GetObject()) + matchLabels, _, _ := unstructured.NestedMap(uVnode, "spec", "podSelector", "matchLabels") + if matchLabels[v1alpha1.DefaultRolloutUniqueLabelKey].(string) == stableHash { + return true, nil, updateError + } + return false, ret, nil + }) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.UpdateHash(canaryHash, stableHash) + assert.Equal(t, updateError.Error(), err.Error()) + actions := client.Actions() + assert.Len(t, actions, 2) + assert.True(t, actions[0].Matches("get", "virtualnodes")) + assert.True(t, actions[1].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[1], stableHash) +} + +func TestUpdateHashWhenUpdateCanaryVirtualNodeFails(t *testing.T) { + canaryHash := sampleNewCanaryHash + stableHash := sampleNewStableHash + + canaryVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineCanaryVnode) + stableVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineStableVnode) + client := testutil.NewFakeDynamicClient(canaryVnode, stableVnode) + updateError := errors.New("Failed to update virtual-node") + client.PrependReactor("update", "virtualnodes", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + updateAction := action.(k8stesting.UpdateAction) + 
uVnode, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(updateAction.GetObject()) + matchLabels, _, _ := unstructured.NestedMap(uVnode, "spec", "podSelector", "matchLabels") + if matchLabels[v1alpha1.DefaultRolloutUniqueLabelKey].(string) == canaryHash { + return true, nil, updateError + } + return false, ret, nil + }) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + err := r.UpdateHash(canaryHash, stableHash) + assert.Equal(t, updateError.Error(), err.Error()) + actions := client.Actions() + assert.Len(t, actions, 4) + assert.True(t, actions[0].Matches("get", "virtualnodes")) + assert.True(t, actions[1].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[1], stableHash) + assert.True(t, actions[2].Matches("get", "virtualnodes")) + assert.True(t, actions[3].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[3], canaryHash) + +} + +func TestUpdateHashWithVirtualNodeMissingMatchLabels(t *testing.T) { + canaryVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineCanaryVnode) + unstructured.SetNestedMap(canaryVnode.Object, make(map[string]interface{}), "spec", "podSelector") + stableVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineStableVnode) + unstructured.SetNestedMap(stableVnode.Object, make(map[string]interface{}), "spec", "podSelector") + client := testutil.NewFakeDynamicClient(canaryVnode, stableVnode) + cfg := ReconcilerConfig{ + Rollout: fakeRollout(), + Client: client, + Recorder: record.NewFakeEventRecorder(), + } + r := NewReconciler(cfg) + + canaryHash := sampleNewCanaryHash + stableHash := sampleNewStableHash + err := r.UpdateHash(canaryHash, stableHash) + assert.Nil(t, err) + actions := client.Actions() + assert.Len(t, actions, 4) + assert.True(t, actions[0].Matches("get", "virtualnodes")) + assert.True(t, actions[1].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[1], stableHash) + assert.True(t, actions[2].Matches("get", "virtualnodes")) + assert.True(t, actions[3].Matches("update", "virtualnodes")) + assertUpdateHashAction(t, actions[3], canaryHash) +} + +func createVnodeWithHash(vnodeStr string, hash string) *unstructured.Unstructured { + vnode := unstructuredutil.StrToUnstructuredUnsafe(vnodeStr) + ml, _ := getPodSelectorMatchLabels(vnode) + ml[v1alpha1.DefaultRolloutUniqueLabelKey] = hash + setPodSelectorMatchLabels(vnode, ml) + return vnode +} + +func assertUpdateHashAction(t *testing.T, action k8stesting.Action, hash string) { + updateAction := action.(k8stesting.UpdateAction) + uVnode, err := runtime.DefaultUnstructuredConverter.ToUnstructured(updateAction.GetObject()) + assert.Nil(t, err) + matchLabels, found, err := unstructured.NestedMap(uVnode, "spec", "podSelector", "matchLabels") + assert.True(t, found, "Virtual-node's podSelector is missing matchLabels") + assert.Nil(t, err) + assert.Equal(t, matchLabels[v1alpha1.DefaultRolloutUniqueLabelKey].(string), hash) +} + +func assertSetWeightAction(t *testing.T, action k8stesting.Action, desiredWeight int64, routeType string) { + updateAction := action.(k8stesting.UpdateAction) + uVr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(updateAction.GetObject()) + assert.Nil(t, err) + routesI, _, err := unstructured.NestedSlice(uVr, "spec", "routes") + assert.Nil(t, err) + for _, routeI := range routesI { + route, _ := routeI.(map[string]interface{}) + weightedTargetsI, found, err := unstructured.NestedSlice(route, routeType, "action", 
"weightedTargets") + assert.Nil(t, err) + assert.True(t, found, "Did not find weightedTargets in route") + assert.Len(t, weightedTargetsI, 2) + for _, wtI := range weightedTargetsI { + wt, _ := wtI.(map[string]interface{}) + vnodeName, _, err := unstructured.NestedString(wt, "virtualNodeRef", "name") + assert.Nil(t, err) + weight, err := toInt64(wt["weight"]) + assert.Nil(t, err) + if strings.Contains(vnodeName, "canary") { + assert.Equal(t, weight, desiredWeight) + } else { + assert.Equal(t, weight, 100-desiredWeight) + } + } + } +} + +const vsvcWithVnode = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + name: mysvc + namespace: myns +spec: + awsName: mysvc.myns.svc.cluster.local + provider: + virtualNode: + virtualNodeRef: + name: mysvc-vnode` + +const vsvcWithVrouter = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + namespace: myns + name: mysvc +spec: + awsName: mysvc.myns.svc.cluster.local + provider: + virtualRouter: + virtualRouterRef: + name: mysvc-vrouter` + +const vrouterWithHTTPRoutes = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + listeners: + - portMapping: + port: 8080 + protocol: http + routes: + - name: primary + httpRoute: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: mysvc-canary-vn + weight: 0 + - virtualNodeRef: + name: mysvc-stable-vn + weight: 100` + +const vrouterWithGRPCRoutes = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + listeners: + - portMapping: + port: 8080 + protocol: http + routes: + - name: primary + grpcRoute: + match: + methodName: GetItem + serviceName: MySvc + action: + weightedTargets: + - virtualNodeRef: + name: mysvc-canary-vn + weight: 0 + - virtualNodeRef: + name: mysvc-stable-vn + weight: 100` + +const vrouterWithHTTP2Routes = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + listeners: + - portMapping: + port: 8080 + protocol: http + routes: + - name: primary + http2Route: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: mysvc-canary-vn + weight: 0 + - virtualNodeRef: + name: mysvc-stable-vn + weight: 100` + +const vrouterWithTCPRoutes = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + namespace: myns + name: mysvc-vrouter +spec: + listeners: + - portMapping: + port: 8080 + protocol: http + routes: + - name: primary + tcpRoute: + action: + weightedTargets: + - virtualNodeRef: + name: mysvc-canary-vn + weight: 0 + - virtualNodeRef: + name: mysvc-stable-vn + weight: 100` + +const baselineCanaryVnode = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + namespace: myns + name: mysvc-canary-vn +spec: + podSelector: + matchLabels: + app: mysvc-pod + listeners: + - portMapping: + port: 8080 + protocol: http + serviceDiscovery: + dns: + hostname: mysvc.myns.svc.cluster.local` + +const baselineStableVnode = ` +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + namespace: myns + name: mysvc-stable-vn +spec: + podSelector: + matchLabels: + app: mysvc-pod + listeners: + - portMapping: + port: 8080 + protocol: http + serviceDiscovery: + dns: + hostname: mysvc.myns.svc.cluster.local` diff --git a/rollout/trafficrouting/appmesh/resource_client.go b/rollout/trafficrouting/appmesh/resource_client.go new file mode 100644 index 0000000000..4171b9a385 --- /dev/null +++ 
b/rollout/trafficrouting/appmesh/resource_client.go @@ -0,0 +1,74 @@ +package appmesh + +import ( + "context" + "errors" + + appmeshutil "github.com/argoproj/argo-rollouts/utils/appmesh" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" +) + +type ResourceClient struct { + client dynamic.Interface +} + +func NewResourceClient(client dynamic.Interface) *ResourceClient { + return &ResourceClient{ + client: client, + } +} + +func (rc *ResourceClient) GetVirtualServiceCR(ctx context.Context, namespace string, name string) (*unstructured.Unstructured, error) { + return rc.client.Resource(appmeshutil.GetAppMeshVirtualServiceGVR()). + Namespace(namespace). + Get(ctx, name, metav1.GetOptions{}) +} + +func (rc *ResourceClient) GetVirtualRouterCR(ctx context.Context, namespace string, name string) (*unstructured.Unstructured, error) { + return rc.client.Resource(appmeshutil.GetAppMeshVirtualRouterGVR()). + Namespace(namespace). + Get(ctx, name, metav1.GetOptions{}) +} + +func (rc *ResourceClient) GetVirtualNodeCR(ctx context.Context, namespace string, name string) (*unstructured.Unstructured, error) { + return rc.client.Resource(appmeshutil.GetAppMeshVirtualNodeGVR()). + Namespace(namespace). + Get(ctx, name, metav1.GetOptions{}) +} + +func (rc *ResourceClient) UpdateVirtualRouterCR(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + client := rc.client.Resource(appmeshutil.GetAppMeshVirtualRouterGVR()).Namespace(obj.GetNamespace()) + return client.Update(ctx, obj, metav1.UpdateOptions{}) +} + +func (rc *ResourceClient) UpdateVirtualNodeCR(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + client := rc.client.Resource(appmeshutil.GetAppMeshVirtualNodeGVR()).Namespace(obj.GetNamespace()) + return client.Update(ctx, obj, metav1.UpdateOptions{}) +} + +func (rc *ResourceClient) GetVirtualRouterCRForVirtualService(ctx context.Context, uVsvc *unstructured.Unstructured) (*unstructured.Unstructured, error) { + virtualRouterRefMap, found, err := unstructured.NestedMap(uVsvc.Object, "spec", "provider", "virtualRouter", "virtualRouterRef") + if !found { + return nil, errors.New(ErrVirtualServiceNotUsingVirtualRouter) + } + if err != nil { + return nil, err + } + namespace := defaultIfEmpty(virtualRouterRefMap["namespace"], uVsvc.GetNamespace()) + name := virtualRouterRefMap["name"].(string) + return rc.GetVirtualRouterCR(ctx, namespace, name) +} + +func defaultIfEmpty(strI interface{}, defaultStr string) string { + if strI == nil { + return defaultStr + } else { + str, _ := strI.(string) + if str == "" { + return defaultStr + } + return str + } +} diff --git a/rollout/trafficrouting/istio/istio.go b/rollout/trafficrouting/istio/istio.go index 108ece240c..c03bc41f41 100644 --- a/rollout/trafficrouting/istio/istio.go +++ b/rollout/trafficrouting/istio/istio.go @@ -26,6 +26,8 @@ const Http = "http" const Tls = "tls" const Type = "Istio" +const SpecHttpNotFound = "spec.http not found" + // NewReconciler returns a reconciler struct that brings the Virtual Service into the desired state func NewReconciler(r *v1alpha1.Rollout, client dynamic.Interface, recorder record.EventRecorder, virtualServiceLister, destinationRuleLister dynamiclister.Lister) *Reconciler { return &Reconciler{ @@ -125,6 +127,21 @@ func (r *Reconciler) generateVirtualServicePatches(rolloutVsvcRouteNames []strin stableSubset = 
r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.StableSubsetName } + // Go through all the routes on the Istio Virtual Service looking for routes that are Istio mirror routes as well as on the + // managedRoutes field on the rollout object so that we can update the Istio mirror upon set weight calls + if r.rollout.Spec.Strategy.Canary.TrafficRouting != nil && r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes != nil { + for _, httpRoute := range httpRoutes { + if httpRoute.Mirror != nil { + for _, managedRoute := range r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes { + //Make sure we only add mirror routes from the managedRoutes field to the list of routes to update for setWeight + if managedRoute.Name == httpRoute.Name { + rolloutVsvcRouteNames = append(rolloutVsvcRouteNames, httpRoute.Name) + } + } + } + } + } + // err can be ignored because we already called ValidateHTTPRoutes earlier httpRouteIndexesToPatch, _ := getHttpRouteIndexesToPatch(rolloutVsvcRouteNames, httpRoutes) tlsRouteIndexesToPatch, _ := getTlsRouteIndexesToPatch(rolloutVsvcTLSRoutes, tlsRoutes) @@ -218,7 +235,7 @@ func (r *Reconciler) reconcileVirtualService(obj *unstructured.Unstructured, vsv var httpRoutes []VirtualServiceHTTPRoute httpRoutesI, err := GetHttpRoutesI(newObj) if err == nil { - routes, err := GetHttpRoutes(newObj, httpRoutesI) + routes, err := GetHttpRoutes(httpRoutesI) httpRoutes = routes if err != nil { return nil, false, err @@ -242,16 +259,21 @@ func (r *Reconciler) reconcileVirtualService(obj *unstructured.Unstructured, vsv } } + // Generate Patches patches := r.generateVirtualServicePatches(vsvcRouteNames, httpRoutes, vsvcTLSRoutes, tlsRoutes, int64(desiredWeight), additionalDestinations...) err = patches.patchVirtualService(httpRoutesI, tlsRoutesI) if err != nil { return nil, false, err } - err = unstructured.SetNestedSlice(newObj.Object, httpRoutesI, "spec", Http) - if err != nil { - return newObj, len(patches) > 0, err + // Set HTTP Route Slice + if len(httpRoutes) > 0 { + if err := unstructured.SetNestedSlice(newObj.Object, httpRoutesI, "spec", Http); err != nil { + return newObj, len(patches) > 0, err + } } + + // Set TLS Route Slice if tlsRoutesI != nil { err = unstructured.SetNestedSlice(newObj.Object, tlsRoutesI, "spec", Tls) } @@ -266,20 +288,7 @@ func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestina ctx := context.TODO() client := r.client.Resource(istioutil.GetIstioDestinationRuleGVR()).Namespace(r.rollout.Namespace) - var dRuleUn *unstructured.Unstructured - var err error - if r.destinationRuleLister != nil { - dRuleUn, err = r.destinationRuleLister.Namespace(r.rollout.Namespace).Get(dRuleSpec.Name) - } else { - dRuleUn, err = client.Get(ctx, dRuleSpec.Name, metav1.GetOptions{}) - } - if err != nil { - if k8serrors.IsNotFound(err) { - r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "DestinationRuleNotFound"}, "DestinationRule `%s` not found", dRuleSpec.Name) - } - return err - } - origBytes, dRule, dRuleNew, err := unstructuredToDestinationRules(dRuleUn) + origBytes, dRule, dRuleNew, err := r.getDestinationRule(dRuleSpec, client, ctx) if err != nil { return err } @@ -372,6 +381,7 @@ func destinationRuleReplaceExtraMarshal(dRule *DestinationRule) []byte { } dRuleNew["spec"] = map[string]interface{}{ "subsets": subsets, + "host": dRule.Spec.Host, } dRuleNewBytes, _ := json.Marshal(dRuleNew) @@ -525,7 +535,7 @@ func jsonBytesToDestinationRule(dRuleBytes []byte) (*DestinationRule, error) { func 
GetHttpRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { httpRoutesI, notFound, err := unstructured.NestedSlice(obj.Object, "spec", Http) if !notFound { - return nil, fmt.Errorf(".spec.http is not defined") + return nil, fmt.Errorf(SpecHttpNotFound) } if err != nil { return nil, err @@ -536,7 +546,7 @@ func GetHttpRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { func GetTlsRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { tlsRoutesI, notFound, err := unstructured.NestedSlice(obj.Object, "spec", Tls) if !notFound { - return nil, fmt.Errorf(".spec.tls is not defined") + return nil, fmt.Errorf(SpecHttpNotFound) } if err != nil { return nil, err @@ -544,7 +554,7 @@ func GetTlsRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { return tlsRoutesI, nil } -func GetHttpRoutes(obj *unstructured.Unstructured, httpRoutesI []interface{}) ([]VirtualServiceHTTPRoute, error) { +func GetHttpRoutes(httpRoutesI []interface{}) ([]VirtualServiceHTTPRoute, error) { routeBytes, err := json.Marshal(httpRoutesI) if err != nil { return nil, err @@ -582,15 +592,7 @@ func (r *Reconciler) Type() string { // SetWeight modifies Istio resources to reach desired state func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { ctx := context.TODO() - var vsvc *unstructured.Unstructured - var virtualServices []v1alpha1.IstioVirtualService - - if istioutil.MultipleVirtualServiceConfigured(r.rollout) { - virtualServices = r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualServices - } else { - virtualServices = []v1alpha1.IstioVirtualService{*r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService} - } - + virtualServices := r.getVirtualServices() for _, virtualService := range virtualServices { name := virtualService.Name namespace, vsvcName := istioutil.GetVirtualServiceNamespaceName(name) @@ -598,17 +600,9 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 namespace = r.rollout.Namespace } - var err error client := r.client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(namespace) - if r.virtualServiceLister != nil { - vsvc, err = r.virtualServiceLister.Namespace(namespace).Get(vsvcName) - } else { - vsvc, err = client.Get(ctx, vsvcName, metav1.GetOptions{}) - } + vsvc, err := r.getVirtualService(namespace, vsvcName, client, ctx) if err != nil { - if k8serrors.IsNotFound(err) { - r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "VirtualServiceNotFound"}, "VirtualService `%s` not found", vsvcName) - } return err } modifiedVirtualService, modified, err := r.reconcileVirtualService(vsvc, virtualService.Routes, virtualService.TLSRoutes, desiredWeight, additionalDestinations...) 
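Aside (not part of the patch): one of the istio.go hunks above extends generateVirtualServicePatches so that SetWeight also re-weights the destinations behind any mirror route the rollout manages, that is, a route that both carries a mirror block and is named under spec.strategy.canary.trafficRouting.managedRoutes. A minimal, self-contained sketch of that selection rule follows; the types and function names here are simplified stand-ins for illustration, not the istio package's real types.

```go
package main

import "fmt"

// httpRoute is a pared-down stand-in for an Istio VirtualService HTTP route.
type httpRoute struct {
	Name      string
	HasMirror bool // true when the route carries a mirror block
}

// routesToPatch sketches the idea: start from the routes named in the rollout
// spec, then add every mirror route that is also listed under managedRoutes,
// so a SetWeight call updates the weights behind the managed mirror as well.
func routesToPatch(configured []string, routes []httpRoute, managed []string) []string {
	out := append([]string{}, configured...)
	for _, r := range routes {
		if !r.HasMirror {
			continue
		}
		for _, m := range managed {
			if m == r.Name {
				out = append(out, r.Name)
			}
		}
	}
	return out
}

func main() {
	routes := []httpRoute{
		{Name: "primary"},
		{Name: "mirror-route", HasMirror: true},      // created via setMirrorRoute
		{Name: "user-added-mirror", HasMirror: true}, // not managed, left alone
	}
	fmt.Println(routesToPatch([]string{"primary"}, routes, []string{"mirror-route"}))
	// Output: [primary mirror-route]
}
```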
@@ -618,6 +612,10 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 if !modified { continue } + + if err := r.orderRoutes(modifiedVirtualService); err != nil && err.Error() != SpecHttpNotFound { + return fmt.Errorf("[SetWeight] failed to order routes: %w", err) + } _, err = client.Update(ctx, modifiedVirtualService, metav1.UpdateOptions{}) if err == nil { r.log.Debugf("Updated VirtualService: %s", modifiedVirtualService) @@ -629,14 +627,212 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 return nil } +func (r *Reconciler) getVirtualServices() []v1alpha1.IstioVirtualService { + if istioutil.MultipleVirtualServiceConfigured(r.rollout) { + return r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualServices + } else { + return []v1alpha1.IstioVirtualService{*r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService} + } +} + +func (r *Reconciler) getVirtualService(namespace string, vsvcName string, client dynamic.ResourceInterface, ctx context.Context) (*unstructured.Unstructured, error) { + var vsvc *unstructured.Unstructured + var err error + if r.virtualServiceLister != nil { + vsvc, err = r.virtualServiceLister.Namespace(namespace).Get(vsvcName) + } else { + vsvc, err = client.Get(ctx, vsvcName, metav1.GetOptions{}) + } + if err != nil { + if k8serrors.IsNotFound(err) { + r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "VirtualServiceNotFound"}, "VirtualService `%s` not found", vsvcName) + } + return nil, err + } + return vsvc, err +} + +func (r *Reconciler) reconcileVirtualServiceHeaderRoutes(virtualService v1alpha1.IstioVirtualService, obj *unstructured.Unstructured, headerRouting *v1alpha1.SetHeaderRoute) error { + // HTTP Routes + httpRoutesI, err := GetHttpRoutesI(obj) + if err != nil { + return err + } + destRuleHost, err := r.getDestinationRuleHost() + if err != nil { + return err + } + + canarySvc := r.rollout.Spec.Strategy.Canary.CanaryService + if destRuleHost != "" { + canarySvc = destRuleHost + } + var canarySubset string + if r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule != nil { + canarySubset = r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.CanarySubsetName + } + + if headerRouting.Match == nil { + //Remove mirror route + err := removeRoute(obj, headerRouting.Name) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceHeaderRoutes] failed to remove route from virtual service: %w", err) + } + return nil + } + + //Remove route first to avoid duplicates + err = removeRoute(obj, headerRouting.Name) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceHeaderRoutes] failed to remove http route from virtual service: %w", err) + } + + httpRoutesI = append(httpRoutesI, createHeaderRoute(virtualService, obj, headerRouting, canarySvc, canarySubset)) + + err = unstructured.SetNestedSlice(obj.Object, httpRoutesI, "spec", Http) + if err != nil { + return err + } + return nil +} + +func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) error { + ctx := context.TODO() + virtualServices := r.getVirtualServices() + for _, virtualService := range virtualServices { + name := virtualService.Name + namespace, vsvcName := istioutil.GetVirtualServiceNamespaceName(name) + if namespace == "" { + namespace = r.rollout.Namespace + } + + client := r.client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(namespace) + vsvc, err := r.getVirtualService(namespace, vsvcName, client, ctx) + if err != nil { + return 
fmt.Errorf("[SetHeaderRoute] failed to get istio virtual service: %w", err) + } + + err = r.reconcileVirtualServiceHeaderRoutes(virtualService, vsvc, headerRouting) + if err != nil { + return fmt.Errorf("[SetHeaderRoute] failed to reconcile header routes: %w", err) + } + + if err := r.orderRoutes(vsvc); err != nil && err.Error() != SpecHttpNotFound { + return fmt.Errorf("[SetHeaderRoute] failed to order routes: %w", err) + } + _, err = client.Update(ctx, vsvc, metav1.UpdateOptions{}) + if err == nil { + r.log.Debugf("Updated VirtualService: %s", vsvc) + r.recorder.Eventf(r.rollout, record.EventOptions{EventReason: "Updated VirtualService"}, "VirtualService `%s` set headerRoute '%v'", vsvcName, headerRouting.Name) + } else { + return fmt.Errorf("[SetHeaderRoute] failed to update routes: %w", err) + } + } + return nil +} + +func (r *Reconciler) getDestinationRuleHost() (string, error) { + if r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule != nil { + ctx := context.TODO() + client := r.client.Resource(istioutil.GetIstioDestinationRuleGVR()).Namespace(r.rollout.Namespace) + _, dRule, _, err := r.getDestinationRule(r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule, client, ctx) + if err != nil { + return "", err + } + return dRule.Spec.Host, nil + } + return "", nil +} + +func (r *Reconciler) getDestinationRule(dRuleSpec *v1alpha1.IstioDestinationRule, client dynamic.ResourceInterface, ctx context.Context) ([]byte, *DestinationRule, *DestinationRule, error) { + var dRuleUn *unstructured.Unstructured + var err error + if r.destinationRuleLister != nil { + dRuleUn, err = r.destinationRuleLister.Namespace(r.rollout.Namespace).Get(dRuleSpec.Name) + } else { + dRuleUn, err = client.Get(ctx, dRuleSpec.Name, metav1.GetOptions{}) + } + if err != nil { + if k8serrors.IsNotFound(err) { + r.recorder.Warnf(r.rollout, record.EventOptions{EventReason: "DestinationRuleNotFound"}, "DestinationRule `%s` not found", dRuleSpec.Name) + } + return nil, nil, nil, err + } + origBytes, dRule, dRuleNew, err := unstructuredToDestinationRules(dRuleUn) + if err != nil { + return nil, nil, nil, err + } + return origBytes, dRule, dRuleNew, nil +} + +func createHeaderRoute(virtualService v1alpha1.IstioVirtualService, unVsvc *unstructured.Unstructured, headerRouting *v1alpha1.SetHeaderRoute, host string, subset string) map[string]interface{} { + var routeMatches []interface{} + for _, hrm := range headerRouting.Match { + routeMatches = append(routeMatches, createHeaderRouteMatch(hrm)) + } + + port, err := getVirtualServiceCanaryPort(unVsvc, virtualService) + if err != nil { + port = Port{Number: 0} + } + + canaryDestination := routeDestination(host, port.Number, subset, 100) + + return map[string]interface{}{ + "name": headerRouting.Name, + "match": routeMatches, + "route": []interface{}{canaryDestination}, + } +} + +func createHeaderRouteMatch(hrm v1alpha1.HeaderRoutingMatch) interface{} { + res := map[string]interface{}{} + value := hrm.HeaderValue + setMapValueIfNotEmpty(res, "exact", value.Exact) + setMapValueIfNotEmpty(res, "regex", value.Regex) + setMapValueIfNotEmpty(res, "prefix", value.Prefix) + return map[string]interface{}{ + "headers": map[string]interface{}{hrm.HeaderName: res}, + } +} + +func setMapValueIfNotEmpty(m map[string]interface{}, key string, value string) { + if value != "" { + m[key] = value + } +} + +func routeDestination(host string, port uint32, subset string, weight int64) map[string]interface{} { + dest := map[string]interface{}{ + "host": host, + } + if port 
> 0 { + dest["port"] = map[string]interface{}{"number": int64(port)} + } + if subset != "" { + dest["subset"] = subset + } + routeValue := map[string]interface{}{ + "weight": float64(weight), + "destination": dest, + } + return routeValue +} + func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (*bool, error) { return nil, nil } // getHttpRouteIndexesToPatch returns array indices of the httpRoutes which need to be patched when updating weights func getHttpRouteIndexesToPatch(routeNames []string, httpRoutes []VirtualServiceHTTPRoute) ([]int, error) { + //We have no routes listed in spec.strategy.canary.trafficRouting.istio.virtualService.routes so find index + //of the first empty named route if len(routeNames) == 0 { - return []int{0}, nil + for i, route := range httpRoutes { + if route.Name == "" { + return []int{i}, nil + } + } } var routeIndexesToPatch []int @@ -652,14 +848,12 @@ func getHttpRouteIndexesToPatch(routeNames []string, httpRoutes []VirtualService } func searchHttpRoute(routeName string, httpRoutes []VirtualServiceHTTPRoute) int { - routeIndex := -1 for i, route := range httpRoutes { if route.Name == routeName { - routeIndex = i - break + return i } } - return routeIndex + return -1 } // getTlsRouteIndexesToPatch returns array indices of the tlsRoutes which need to be patched when updating weights @@ -735,7 +929,23 @@ func ValidateHTTPRoutes(r *v1alpha1.Rollout, routeNames []string, httpRoutes []V return err } } - if len(routeNames) == 0 && len(httpRoutes) > 1 { + + httpRoutesBytes, err := json.Marshal(httpRoutes) + if err != nil { + return fmt.Errorf("[ValidateHTTPRoutes] failed to marshal http routes: %w", err) + } + var httpRoutesI []interface{} + err = json.Unmarshal(httpRoutesBytes, &httpRoutesI) + if err != nil { + return fmt.Errorf("[ValidateHTTPRoutes] failed to marshal http routes to []interface{}: %w", err) + } + + _, httpRoutesNotWithinManagedRoutes, err := splitManagedRoutesAndNonManagedRoutes(r.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, httpRoutesI) + if err != nil { + return fmt.Errorf("[ValidateHTTPRoutes] failed to split managed and non-managed routes: %w", err) + } + + if len(routeNames) == 0 && len(httpRoutesNotWithinManagedRoutes) > 1 { return fmt.Errorf("spec.http[] should be set in VirtualService and it must have exactly one route when omitting spec.strategy.canary.trafficRouting.istio.virtualService.routes") } return nil @@ -809,3 +1019,395 @@ func validateDestinationRule(dRule *v1alpha1.IstioDestinationRule, hasCanarySubs } return nil } + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + ctx := context.TODO() + virtualServices := r.getVirtualServices() + + for _, virtualService := range virtualServices { + name := virtualService.Name + namespace, vsvcName := istioutil.GetVirtualServiceNamespaceName(name) + if namespace == "" { + namespace = r.rollout.Namespace + } + + client := r.client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(namespace) + istioVirtualSvc, err := r.getVirtualService(namespace, vsvcName, client, ctx) + if err != nil { + return fmt.Errorf("[SetMirrorRoute] failed to get virtual service: %w", err) + } + + err = r.reconcileVirtualServiceMirrorRoutes(virtualService, istioVirtualSvc, setMirrorRoute) + if err != nil { + return fmt.Errorf("[SetMirrorRoute] failed reconcile virtual service for mirror routes: %w", err) + } + + if err := r.orderRoutes(istioVirtualSvc); err != nil && err.Error() != SpecHttpNotFound { + return 
fmt.Errorf("[SetMirrorRoute] failed to order routes based on managedRoute order: %w", err) + } + _, err = client.Update(ctx, istioVirtualSvc, metav1.UpdateOptions{}) + if err == nil { + r.log.Debugf("Updated VirtualService: %s", istioVirtualSvc) + r.recorder.Eventf(r.rollout, record.EventOptions{EventReason: "Updated VirtualService"}, "VirtualService `%s` set mirrorRoute '%v'", vsvcName, setMirrorRoute.Name) + } else { + return fmt.Errorf("[SetMirrorRoute] failed to update virtual service %w", err) + } + + } + return nil +} + +func (r *Reconciler) reconcileVirtualServiceMirrorRoutes(virtualService v1alpha1.IstioVirtualService, istioVirtualService *unstructured.Unstructured, mirrorRoute *v1alpha1.SetMirrorRoute) error { + destRuleHost, err := r.getDestinationRuleHost() + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to get destination rule host: %w", err) + } + canarySvc := r.rollout.Spec.Strategy.Canary.CanaryService + if destRuleHost != "" { + canarySvc = destRuleHost + } + var canarySubset string + if r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule != nil { + canarySubset = r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.CanarySubsetName + } + + //Remove mirror route when there is no match rules we require a match on routes for safety so a none listed match + //acts like a removal of the route instead of say routing all traffic + if mirrorRoute.Match == nil { + //Remove mirror route + err := removeRoute(istioVirtualService, mirrorRoute.Name) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to remove route from virtual service: %w", err) + } + return nil + } + + //Remove route first to avoid duplicates + err = removeRoute(istioVirtualService, mirrorRoute.Name) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to remove http route from virtual service: %w", err) + } + + httpRoutes, _, err := getVirtualServiceHttpRoutes(istioVirtualService) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to get virtual service http routes: %w", err) + } + + mR, err := createMirrorRoute(virtualService, httpRoutes, mirrorRoute, canarySvc, canarySubset) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to create mirror route: %w", err) + } + + vsRoutes, found, err := unstructured.NestedSlice(istioVirtualService.Object, "spec", Http) + if err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to get http routes from virtual service: %w", err) + } + if !found { + return fmt.Errorf(SpecHttpNotFound) + } + vsRoutes = append([]interface{}{mR}, vsRoutes...) 
+ if err := unstructured.SetNestedSlice(istioVirtualService.Object, vsRoutes, "spec", Http); err != nil { + return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to update virtual service routes via set nested slice: %w", err) + } + + return nil +} + +// getVirtualServiceHttpRoutes This returns all the http routes from an istio virtual service as both a rollouts wrapped type +// []VirtualServiceHTTPRoute and a []interface{} of VirtualServiceHTTPRoute +func getVirtualServiceHttpRoutes(obj *unstructured.Unstructured) ([]VirtualServiceHTTPRoute, []interface{}, error) { + httpRoutesI, err := GetHttpRoutesI(obj) + if err != nil { + return nil, nil, fmt.Errorf("[getVirtualServiceHttpRoutes] failed to get http route interfaces: %w", err) + } + routes, err := GetHttpRoutes(httpRoutesI) + if err != nil { + return nil, httpRoutesI, fmt.Errorf("[getVirtualServiceHttpRoutes] failed to get http route types: %w", err) + } + return routes, httpRoutesI, nil +} + +// createMirrorRoute This returns a map[string]interface{} of an istio virtual service mirror route configuration using the last +// set weight as values for the non-matching destinations and canary service for the matching destination. +func createMirrorRoute(virtualService v1alpha1.IstioVirtualService, httpRoutes []VirtualServiceHTTPRoute, mirrorRouting *v1alpha1.SetMirrorRoute, canarySvc string, subset string) (map[string]interface{}, error) { + var percent int32 + if mirrorRouting.Percentage == nil { + percent = 100 + } else { + percent = *mirrorRouting.Percentage + } + + route, err := getVirtualServiceSetWeightRoute(virtualService.Routes, httpRoutes) + if err != nil { + return nil, fmt.Errorf("[createMirrorRoute] failed to get virtual service http route for keeping non-mirror weights set: %w", err) + } + + var istioMatch []RouteMatch + for _, match := range mirrorRouting.Match { + istioMatch = append(istioMatch, RouteMatch{ + Method: match.Method, + Uri: match.Path, + Headers: match.Headers, + }) + } + + mirrorDestinations := VirtualServiceDestination{ + Host: canarySvc, + Subset: subset, + } + if len(route) > 0 && route[0].Destination.Port != nil { + // We try to pull the port from any of the routes destinations that are supposed to be updated via SetWeight + mirrorDestinations.Port = &Port{Number: route[0].Destination.Port.Number} + } + + mirrorRoute := map[string]interface{}{ + "name": mirrorRouting.Name, + "match": istioMatch, + "route": route, + "mirror": mirrorDestinations, + "mirrorPercentage": map[string]interface{}{"value": float64(percent)}, + } + + mirrorRouteBytes, err := json.Marshal(mirrorRoute) + if err != nil { + return nil, fmt.Errorf("[createMirrorRoute] failed to marshal mirror route: %w", err) + } + + var mirrorRouteI map[string]interface{} + err = json.Unmarshal(mirrorRouteBytes, &mirrorRouteI) + if err != nil { + return nil, fmt.Errorf("[createMirrorRoute] failed to unmarshal mirror route: %w", err) + } + + return mirrorRouteI, nil +} + +// getVirtualServiceSetWeightRoute This function goes through the list of Istio Virtual service routes and finds the first +// match from the trafficRouting.istio.virtualService[s].routes field and returns the []VirtualServiceRouteDestination array +// from the istio virtual service; this can be useful to get the last set destination percentages on the canary route.
+func getVirtualServiceSetWeightRoute(rolloutVsvcRouteNames []string, httpRoutes []VirtualServiceHTTPRoute) ([]VirtualServiceRouteDestination, error) { + routeIndexesToPatch, err := getHttpRouteIndexesToPatch(rolloutVsvcRouteNames, httpRoutes) + if err != nil { + return nil, fmt.Errorf("[getVirtualServiceSetWeightRoute] failed to get routes that need to be patched when set weight is called: %w", err) + } + for _, routeIndex := range routeIndexesToPatch { + route := httpRoutes[routeIndex] + return route.Route, nil + } + return nil, nil +} + +// removeRoute This function removes the `routeName` route from the Istio Virtual Service +func removeRoute(istioVirtualService *unstructured.Unstructured, routeName string) error { + vsRoutes, found, err := unstructured.NestedSlice(istioVirtualService.Object, "spec", Http) + if err != nil { + return fmt.Errorf("[removeRoute] failed to get http routes from virtual service: %w", err) + } + if !found { + return fmt.Errorf(SpecHttpNotFound) + } + + var newVsRoutes []interface{} + for _, route := range vsRoutes { + routeMap, ok := route.(map[string]interface{}) + if !ok { + return fmt.Errorf("Could not cast type to map[string]interface{} to find route name in Istio Virtual Service") + } + routeNameIstioSvc, ok := routeMap["name"].(string) + if !ok { + log.Debugf("Could not cast type to string to find route name in Istio Virtual Service, route probably has no name set") + } + if routeName != routeNameIstioSvc { + newVsRoutes = append(newVsRoutes, route) + } + } + if err := unstructured.SetNestedSlice(istioVirtualService.Object, newVsRoutes, "spec", Http); err != nil { + return fmt.Errorf("[removeRoute] failed to set http routes on virtual service: %w", err) + } + return nil +} + +// orderRoutes orders the routes based on the managedRoute field in the rollout spec. It then places +// the sorted routes on top of any other route that is already defined on the Istio Virtual Service.
+func (r *Reconciler) orderRoutes(istioVirtualService *unstructured.Unstructured) error { + httpRouteI, found, err := unstructured.NestedSlice(istioVirtualService.Object, "spec", Http) + if err != nil { + return fmt.Errorf("[orderRoutes] failed to get virtual service http routes: %w", err) + } + if !found { + return fmt.Errorf(SpecHttpNotFound) + } + + if r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes == nil { + return nil //Not really an error; there is just nothing to sort on + } + + managedRoutes := r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes + httpRoutesWithinManagedRoutes, httpRoutesNotWithinManagedRoutes, err := splitManagedRoutesAndNonManagedRoutes(managedRoutes, httpRouteI) + if err != nil { + return fmt.Errorf("[orderRoutes] could not split routes between managed and non managed: %w", err) + } + + finalRoutes, err := getOrderedVirtualServiceRoutes(httpRouteI, managedRoutes, httpRoutesWithinManagedRoutes, httpRoutesNotWithinManagedRoutes) + if err != nil { + return fmt.Errorf("[orderRoutes] could not get ordered virtual service routes: %w", err) + } + + if err := unstructured.SetNestedSlice(istioVirtualService.Object, finalRoutes, "spec", Http); err != nil { + return fmt.Errorf("[orderRoutes] set nested slice failed: %w", err) + } + + return nil +} + +// splitManagedRoutesAndNonManagedRoutes This splits the routes from an istio virtual service into two slices: +// one slice contains all the routes that are also in the rollouts managedRoutes object and one that contains routes +// that were only in the virtual service (aka routes that were manually added by the user) +func splitManagedRoutesAndNonManagedRoutes(managedRoutes []v1alpha1.MangedRoutes, httpRouteI []interface{}) (httpRoutesWithinManagedRoutes []VirtualServiceHTTPRoute, httpRoutesNotWithinManagedRoutes []VirtualServiceHTTPRoute, err error) { + var httpRoutes []VirtualServiceHTTPRoute + + jsonHttpRoutes, err := json.Marshal(httpRouteI) + if err != nil { + return nil, nil, fmt.Errorf("[splitManagedRoutesAndNonManagedRoutes] failed to marshal http route interface: %w", err) + } + + if err := json.Unmarshal(jsonHttpRoutes, &httpRoutes); err != nil { + return nil, nil, fmt.Errorf("[splitManagedRoutesAndNonManagedRoutes] failed to unmarshal http route interface: %w", err) + } + + for _, route := range httpRoutes { + var found bool = false + for _, managedRoute := range managedRoutes { + if route.Name == managedRoute.Name { + httpRoutesWithinManagedRoutes = append(httpRoutesWithinManagedRoutes, route) + found = true + break + } + } + if !found { + httpRoutesNotWithinManagedRoutes = append(httpRoutesNotWithinManagedRoutes, route) + } + } + + return httpRoutesWithinManagedRoutes, httpRoutesNotWithinManagedRoutes, nil +} + +// getOrderedVirtualServiceRoutes This returns an []interface{} of istio virtual routes where the routes are ordered based +// on the rollouts managedRoutes field. We take the routes from the rollouts managedRoutes field, order them, and place them on top +// of routes that are manually defined within the virtual service (aka 
routes that users have defined manually) +func getOrderedVirtualServiceRoutes(httpRouteI []interface{}, managedRoutes []v1alpha1.MangedRoutes, httpRoutesWithinManagedRoutes []VirtualServiceHTTPRoute, httpRoutesNotWithinManagedRoutes []VirtualServiceHTTPRoute) ([]interface{}, error) { + var orderedManagedRoutes []VirtualServiceHTTPRoute + for _, route := range managedRoutes { + for _, managedRoute := range httpRoutesWithinManagedRoutes { + if route.Name == managedRoute.Name { + orderedManagedRoutes = append(orderedManagedRoutes, managedRoute) + } + } + } + + orderedVirtualServiceHTTPRoutes := append(orderedManagedRoutes, httpRoutesNotWithinManagedRoutes...) + + var orderedInterfaceVSVCHTTPRoutes []interface{} + for _, routeTyped := range orderedVirtualServiceHTTPRoutes { + for _, route := range httpRouteI { + r := route.(map[string]interface{}) + + // No need to check if it exists because the empty string returned on cast failure is good for this check + name, _ := r["name"].(string) + if name == routeTyped.Name { + orderedInterfaceVSVCHTTPRoutes = append(orderedInterfaceVSVCHTTPRoutes, route) + } + } + } + + return orderedInterfaceVSVCHTTPRoutes, nil +} + +// getVirtualServiceCanaryPort This function returns the port that the canary service is running on. It does this by looking at the +// istio Virtual Service and finding any port from a destination that is supposed to be updated via SetWeight. +func getVirtualServiceCanaryPort(unVsvc *unstructured.Unstructured, virtualService v1alpha1.IstioVirtualService) (Port, error) { + httpRoutes, _, err := getVirtualServiceHttpRoutes(unVsvc) + if err != nil { + return Port{}, fmt.Errorf("[getVirtualServiceCanaryPort] failed to get virtual service http routes: %w", err) + } + + route, err := getVirtualServiceSetWeightRoute(virtualService.Routes, httpRoutes) + if err != nil { + return Port{}, fmt.Errorf("[getVirtualServiceCanaryPort] failed to get virtual service set weight route: %w", err) + } + + var port uint32 = 0 + if len(route) > 0 && route[0].Destination.Port != nil { + port = route[0].Destination.Port.Number + } + + return Port{ + Number: port, + }, nil +} + +// RemoveManagedRoutes removes all the routes in all the istio virtual services rollouts is managing by getting two slices +// from the splitManagedRoutesAndNonManagedRoutes function and setting the Istio Virtual Service routes to just the ones not managed +// by rollouts +func (r *Reconciler) RemoveManagedRoutes() error { + ctx := context.TODO() + virtualServices := r.getVirtualServices() + + for _, virtualService := range virtualServices { + name := virtualService.Name + namespace, vsvcName := istioutil.GetVirtualServiceNamespaceName(name) + if namespace == "" { + namespace = r.rollout.Namespace + } + + client := r.client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(namespace) + istioVirtualService, err := r.getVirtualService(namespace, vsvcName, client, ctx) + if err != nil { + return fmt.Errorf("[RemoveManagedRoutes] failed to get virtual service: %w", err) + } + + httpRouteI, found, err := unstructured.NestedSlice(istioVirtualService.Object, "spec", Http) + if err != nil { + return fmt.Errorf("[RemoveManagedRoutes] failed to get http routes from virtual service: %w", err) + } + if !found { + return fmt.Errorf("[RemoveManagedRoutes] %s", SpecHttpNotFound) + } + + managedRoutes := r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes + if len(managedRoutes) == 0 { + return nil + } + httpRoutesWithinManagedRoutes, httpRoutesNotWithinManagedRoutes, err := 
splitManagedRoutesAndNonManagedRoutes(managedRoutes, httpRouteI) + if err != nil { + return fmt.Errorf("[RemoveManagedRoutes] failed to split managed and non-managed routes: %w", err) + } + + if len(httpRoutesWithinManagedRoutes) == 0 { + //no routes to remove + return nil + } + + jsonNonManagedRoutes, err := json.Marshal(httpRoutesNotWithinManagedRoutes) + if err != nil { + return fmt.Errorf("[RemoveManagedRoutes] failed to marshal non-managed routes: %w", err) + } + var nonManagedRoutesI []interface{} + if err := json.Unmarshal(jsonNonManagedRoutes, &nonManagedRoutesI); err != nil { + return fmt.Errorf("[RemoveManagedRoutes] failed to unmarshal non-managed routes: %w", err) + } + + if err := unstructured.SetNestedSlice(istioVirtualService.Object, nonManagedRoutesI, "spec", Http); err != nil { + return fmt.Errorf("[RemoveManagedRoutes] failed to set nested slice on virtual service to remove managed routes: %w", err) + } + + _, err = client.Update(ctx, istioVirtualService, metav1.UpdateOptions{}) + if err == nil { + r.log.Debugf("Updated VirtualService: %s", istioVirtualService) + r.recorder.Eventf(r.rollout, record.EventOptions{EventReason: "Updated VirtualService"}, "VirtualService `%s` removed all managed routes.", vsvcName) + } else { + return fmt.Errorf("[RemoveManagedRoutes] failed to update kubernetes virtual service: %w", err) + } + } + return nil +} diff --git a/rollout/trafficrouting/istio/istio_test.go b/rollout/trafficrouting/istio/istio_test.go index bad150869a..bde05ff034 100644 --- a/rollout/trafficrouting/istio/istio_test.go +++ b/rollout/trafficrouting/istio/istio_test.go @@ -335,6 +335,27 @@ spec: host: canary weight: 0` +const singleRouteSubsetVsvc = `apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: vsvc + namespace: default +spec: + gateways: + - istio-rollout-gateway + hosts: + - istio-rollout.dev.argoproj.io + http: + - route: + - destination: + host: 'rollout-service' + subset: 'stable-subset' + weight: 100 + - destination: + host: rollout-service + subset: 'canary-subset' + weight: 0` + const singleRouteTlsVsvc = `apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: @@ -465,6 +486,233 @@ func TestHttpReconcileWeightsBaseCase(t *testing.T) { } } +func TestHttpReconcileHeaderRouteHostBased(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + obj := unstructuredutil.StrToUnstructuredUnsafe(regularVsvc) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + const headerName = "test-header-route" + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: headerName, + }, + }...) 
+ + // Test for both the HTTP VS & Mixed VS + hr := &v1alpha1.SetHeaderRoute{ + Name: headerName, + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{Exact: "firefox"}, + }, + }, + } + + err := r.SetHeaderRoute(hr) + assert.Nil(t, err) + + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + + // Assertions + assert.Equal(t, httpRoutes[0].Name, headerName) + checkDestination(t, httpRoutes[0].Route, "canary", 100) + assert.Equal(t, len(httpRoutes[0].Route), 1) + assert.Equal(t, httpRoutes[1].Name, "primary") + checkDestination(t, httpRoutes[1].Route, "stable", 100) + assert.Equal(t, httpRoutes[2].Name, "secondary") + + err = r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: headerName, + }) + assert.Nil(t, err) + + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes = extractHttpRoutes(t, iVirtualService) + // Assertions + assert.Equal(t, httpRoutes[0].Name, "primary") + assert.Equal(t, httpRoutes[1].Name, "secondary") +} + +func TestHttpReconcileHeaderRouteSubsetBased(t *testing.T) { + ro := rolloutWithDestinationRule() + const RolloutService = "rollout-service" + const StableSubsetName = "stable-subset" + const CanarySubsetName = "canary-subset" + ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name = "vsvc" + ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Routes = nil + ro.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.StableSubsetName = StableSubsetName + ro.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.CanarySubsetName = CanarySubsetName + dRule := unstructuredutil.StrToUnstructuredUnsafe(` +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: istio-destrule + namespace: default +spec: + host: rollout-service + subsets: + - name: stable-subset + - name: canary-subset +`) + + obj := unstructuredutil.StrToUnstructuredUnsafe(singleRouteSubsetVsvc) + client := testutil.NewFakeDynamicClient(obj, dRule) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + const headerName = "test-header-route" + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: headerName, + }, + }...) 
+ + hr := &v1alpha1.SetHeaderRoute{ + Name: headerName, + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{ + Regex: "firefox", + }, + }, + }, + } + + err := r.SetHeaderRoute(hr) + assert.Nil(t, err) + + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + + // Assertions + assert.Equal(t, httpRoutes[0].Name, headerName) + assert.Equal(t, httpRoutes[0].Route[0].Destination.Host, "rollout-service") + assert.Equal(t, httpRoutes[0].Route[0].Destination.Subset, "canary-subset") +} + +func TestHttpReconcileHeaderRouteWithExtra(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + obj := unstructuredutil.StrToUnstructuredUnsafe(regularVsvcWithExtra) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + const headerName = "test-header-route" + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: headerName, + }, + }...) + + // Test for both the HTTP VS & Mixed VS + hr := &v1alpha1.SetHeaderRoute{ + Name: headerName, + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "agent", + HeaderValue: &v1alpha1.StringMatch{Exact: "firefox"}, + }, + }, + } + + err := r.SetHeaderRoute(hr) + assert.Nil(t, err) + + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + + // Assertions + assert.Equal(t, httpRoutes[0].Name, headerName) + checkDestination(t, httpRoutes[0].Route, "canary", 100) + assert.Equal(t, len(httpRoutes[0].Route), 1) + assert.Equal(t, httpRoutes[1].Name, "primary") + checkDestination(t, httpRoutes[1].Route, "stable", 100) + assert.Equal(t, httpRoutes[2].Name, "secondary") + + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes = extractHttpRoutes(t, iVirtualService) + // Assertions + assert.Equal(t, httpRoutes[0].Name, headerName) + assert.Equal(t, httpRoutes[1].Name, "primary") + assert.Equal(t, httpRoutes[2].Name, "secondary") + + routes, found, err := unstructured.NestedSlice(iVirtualService.Object, "spec", "http") + assert.NoError(t, err) + assert.True(t, found) + + r0 := routes[0].(map[string]interface{}) + route, found := r0["route"].([]interface{}) + assert.True(t, found) + + port1 := route[0].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] + assert.True(t, port1 == int64(8443)) + + r1 := routes[1].(map[string]interface{}) + _, found = r1["retries"] + assert.True(t, found) + + r2 := routes[2].(map[string]interface{}) + _, found = r2["retries"] + assert.True(t, found) + _, found = r2["corsPolicy"] + assert.True(t, found) + +} + 
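For orientation, the header-route tests above drive the reconciler directly through `v1alpha1.SetHeaderRoute`; the user-facing configuration that reaches this code path is a Rollout that declares the route under `managedRoutes` and then populates it with a `setHeaderRoute` canary step. A minimal sketch follows, with field names assumed from the v1alpha1 types used in these tests (`MangedRoutes`, `SetHeaderRoute`, `HeaderRoutingMatch`) rather than taken from the generated CRD:

```yaml
# Illustrative sketch only: field names are inferred from the v1alpha1 types
# referenced in the tests above and should be verified against the CRD schema.
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: rollout-example
spec:
  strategy:
    canary:
      canaryService: canary
      stableService: stable
      trafficRouting:
        managedRoutes:
          - name: test-header-route
        istio:
          virtualService:
            name: vsvc
            routes:
              - primary
      steps:
        - setHeaderRoute:
            name: test-header-route
            match:
              - headerName: agent
                headerValue:
                  exact: firefox
        - pause: {}
        # Issuing setHeaderRoute with only a name clears the match, which is
        # the removal behavior the second half of the test asserts.
        - setHeaderRoute:
            name: test-header-route
```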
+func TestReconcileUpdateHeader(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, v1alpha1.MangedRoutes{ + Name: "test-mirror-1", + }) + AssertReconcileUpdateHeader(t, regularVsvc, ro) +} + +func AssertReconcileUpdateHeader(t *testing.T, vsvc string, ro *v1alpha1.Rollout) *dynamicfake.FakeDynamicClient { + obj := unstructuredutil.StrToUnstructuredUnsafe(vsvc) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + var setHeader = &v1alpha1.SetHeaderRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "browser", + HeaderValue: &v1alpha1.StringMatch{ + Prefix: "Firefox", + }, + }, + }, + } + err := r.SetHeaderRoute(setHeader) + + assert.Nil(t, err) + + actions := client.Actions() + assert.Len(t, actions, 1) + assert.Equal(t, "update", actions[0].GetVerb()) + return client +} + func TestTlsReconcileWeightsBaseCase(t *testing.T) { r := &Reconciler{ rollout: rolloutWithTlsRoutes("stable", "canary", "vsvc", @@ -1157,7 +1405,7 @@ spec: jsonBytes, err := json.Marshal(dRule) assert.NoError(t, err) - assert.Equal(t, `{"metadata":{"name":"istio-destrule","namespace":"default","creationTimestamp":null,"annotations":{"argo-rollouts.argoproj.io/managed-by-rollouts":"rollout"}},"spec":{"subsets":[{"name":"stable","labels":{"rollouts-pod-template-hash":"def456","version":"v3"}},{"name":"canary","labels":{"rollouts-pod-template-hash":"abc123"},"Extra":{"trafficPolicy":{"loadBalancer":{"simple":"ROUND_ROBIN"}}}}]}}`, + assert.Equal(t, `{"metadata":{"name":"istio-destrule","namespace":"default","creationTimestamp":null,"annotations":{"argo-rollouts.argoproj.io/managed-by-rollouts":"rollout"}},"spec":{"host":"ratings.prod.svc.cluster.local","subsets":[{"name":"stable","labels":{"rollouts-pod-template-hash":"def456","version":"v3"}},{"name":"canary","labels":{"rollouts-pod-template-hash":"abc123"},"Extra":{"trafficPolicy":{"loadBalancer":{"simple":"ROUND_ROBIN"}}}}]}}`, string(jsonBytes)) } @@ -1207,6 +1455,7 @@ metadata: annotations: argo-rollouts.argoproj.io/managed-by-rollouts: rollout spec: + host: ratings.prod.svc.cluster.local subsets: - name: stable labels: @@ -1468,6 +1717,60 @@ spec: host: canary weight: 0` +const regularVsvcWithExtra = `apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: vsvc + namespace: default +spec: + gateways: + - istio-rollout-gateway + hosts: + - istio-rollout.dev.argoproj.io + http: + - name: primary + retries: + attempts: 3 + perTryTimeout: 10s + retryOn: 'gateway-error,connect-failure,refused-stream' + route: + - destination: + host: 'stable' + port: + number: 8443 + weight: 100 + - destination: + host: canary + port: + number: 8443 + weight: 0 + - name: secondary + retries: + attempts: 3 + perTryTimeout: 10s + retryOn: 'gateway-error,connect-failure,refused-stream' + corsPolicy: + allowOrigins: + - exact: https://example.com + allowMethods: + - POST + - GET + allowCredentials: false + allowHeaders: + - X-Foo-Bar + maxAge: "24h" + route: + - destination: + host: 'stable' + port: + number: 8443 + weight: 100 + - destination: + host: canary + port: + number: 8443 + weight: 0` + func TestMultipleVirtualServiceConfigured(t *testing.T) { multipleVirtualService := 
[]v1alpha1.IstioVirtualService{{Name: "vsvc1", Routes: []string{"primary", "secondary"}}, {Name: "vsvc2", Routes: []string{"blue-green"}}} ro := multiVsRollout("stable", "canary", multipleVirtualService) @@ -1611,3 +1914,437 @@ func TestMultipleVirtualServiceReconcileInferredSingleRoute(t *testing.T) { assertHttpRouteWeightChanges(t, httpRoutes[0], "", 10, 90) } } + +func TestHttpReconcileMirrorRoute(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + obj := unstructuredutil.StrToUnstructuredUnsafe(regularVsvc) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + // Test for both the HTTP VS & Mixed VS + setMirror1 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + } + var percentage int32 = 90 + setMirror2 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-2", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + Percentage: &percentage, + } + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: "test-mirror-1", + }, { + Name: "test-mirror-2", + }, + }...) + + err := r.SetMirrorRoute(setMirror1) + assert.Nil(t, err) + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 3) + + // Assertions + assert.Equal(t, httpRoutes[0].Name, "test-mirror-1") + checkDestination(t, httpRoutes[0].Route, "canary", 0) + assert.Equal(t, httpRoutes[0].Mirror.Host, "canary") + assert.Equal(t, httpRoutes[0].Mirror.Subset, "") + assert.Equal(t, httpRoutes[0].MirrorPercentage.Value, float64(100)) + assert.Equal(t, len(httpRoutes[0].Route), 2) + assert.Equal(t, httpRoutes[1].Name, "primary") + checkDestination(t, httpRoutes[1].Route, "stable", 100) + assert.Equal(t, httpRoutes[2].Name, "secondary") + checkDestination(t, httpRoutes[2].Route, "stable", 100) + + //Delete mirror route + deleteSetMirror := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + } + err = r.SetMirrorRoute(deleteSetMirror) + assert.Nil(t, err) + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 2) + assert.Equal(t, httpRoutes[0].Name, "primary") + assert.Equal(t, httpRoutes[1].Name, "secondary") + + //Test adding two routes using fake client then cleaning them up with RemoveManagedRoutes + err = r.SetMirrorRoute(setMirror1) + assert.Nil(t, err) + err = r.SetMirrorRoute(setMirror2) + assert.Nil(t, err) + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 
4) + assert.Equal(t, httpRoutes[1].MirrorPercentage.Value, float64(90)) + + r.RemoveManagedRoutes() + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 2) + +} + +func TestHttpReconcileMirrorRouteWithExtraFields(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + obj := unstructuredutil.StrToUnstructuredUnsafe(regularVsvcWithExtra) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + // Test for both the HTTP VS & Mixed VS + setMirror1 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + } + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: "test-mirror-1", + }, + }...) + + err := r.SetMirrorRoute(setMirror1) + assert.Nil(t, err) + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + routes, found, err := unstructured.NestedSlice(iVirtualService.Object, "spec", "http") + assert.NoError(t, err) + assert.True(t, found) + + r0 := routes[0].(map[string]interface{}) + mirrorRoute, found := r0["route"].([]interface{}) + assert.True(t, found) + + port1 := mirrorRoute[0].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] + port2 := mirrorRoute[1].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] + assert.True(t, port1 == float64(8443)) + assert.True(t, port2 == float64(8443)) + + r1 := routes[1].(map[string]interface{}) + _, found = r1["retries"] + assert.True(t, found) + + r2 := routes[2].(map[string]interface{}) + _, found = r2["retries"] + assert.True(t, found) + _, found = r2["corsPolicy"] + assert.True(t, found) + +} + +func TestHttpReconcileMirrorRouteOrder(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary", "secondary"}) + obj := unstructuredutil.StrToUnstructuredUnsafe(regularVsvc) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + setMirror1 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + } + var percentage int32 = 90 + setMirror2 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-2", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "POST", + }, + }}, + Percentage: &percentage, + } + setMirror3 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-3", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + Percentage: &percentage, + } + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = 
append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: "test-mirror-2", + }, { + Name: "test-mirror-3", + }, { + Name: "test-mirror-1", + }, + }...) + + err := r.SetMirrorRoute(setMirror1) + assert.Nil(t, err) + err = r.SetMirrorRoute(setMirror2) + assert.Nil(t, err) + err = r.SetMirrorRoute(setMirror3) + assert.Nil(t, err) + err = r.SetWeight(40) + assert.Nil(t, err) + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 5) + assert.Equal(t, httpRoutes[0].Name, "test-mirror-2") + checkDestination(t, httpRoutes[0].Route, "canary", 40) + checkDestination(t, httpRoutes[0].Route, "stable", 60) + assert.Equal(t, httpRoutes[1].Name, "test-mirror-3") + assert.Equal(t, httpRoutes[2].Name, "test-mirror-1") + assert.Equal(t, httpRoutes[3].Name, "primary") + assert.Equal(t, httpRoutes[4].Name, "secondary") + + //Delete mirror route + deleteSetMirror := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-3", + } + err = r.SetMirrorRoute(deleteSetMirror) + assert.Nil(t, err) + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 4) + assert.Equal(t, httpRoutes[0].Name, "test-mirror-2") + assert.Equal(t, httpRoutes[1].Name, "test-mirror-1") + assert.Equal(t, httpRoutes[2].Name, "primary") + assert.Equal(t, httpRoutes[3].Name, "secondary") + + r.RemoveManagedRoutes() + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 2) +} + +func TestHttpReconcileMirrorRouteOrderSingleRouteNoName(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{}) + obj := unstructuredutil.StrToUnstructuredUnsafe(singleRouteVsvc) + client := testutil.NewFakeDynamicClient(obj) + _, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), nil, druleLister) + client.ClearActions() + + setMirror1 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + } + var percentage int32 = 90 + setMirror2 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-2", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "POST", + }, + }}, + Percentage: &percentage, + } + setMirror3 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-3", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + Percentage: &percentage, + } + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: "test-mirror-2", + }, { + Name: "test-mirror-3", + }, { + Name: "test-mirror-1", + }, + }...) 
+ + err := r.SetWeight(30) + assert.Nil(t, err) + err = r.SetMirrorRoute(setMirror1) + assert.Nil(t, err) + err = r.SetMirrorRoute(setMirror2) + assert.Nil(t, err) + err = r.SetMirrorRoute(setMirror3) + assert.Nil(t, err) + + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 4) + assert.Equal(t, httpRoutes[0].Name, "test-mirror-2") + assert.Equal(t, httpRoutes[1].Name, "test-mirror-3") + assert.Equal(t, httpRoutes[2].Name, "test-mirror-1") + assert.Equal(t, httpRoutes[3].Name, "") + assert.Equal(t, httpRoutes[3].Route[0].Weight, int64(70)) + assert.Equal(t, httpRoutes[3].Route[1].Weight, int64(30)) + checkDestination(t, httpRoutes[0].Route, "canary", 30) + checkDestination(t, httpRoutes[1].Route, "stable", 70) + + err = r.SetWeight(40) + assert.Nil(t, err) + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes = extractHttpRoutes(t, iVirtualService) + checkDestination(t, httpRoutes[0].Route, "canary", 40) + checkDestination(t, httpRoutes[1].Route, "stable", 60) + + //Delete mirror route + deleteSetMirror := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-3", + } + err = r.SetMirrorRoute(deleteSetMirror) + assert.Nil(t, err) + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 3) + assert.Equal(t, httpRoutes[0].Name, "test-mirror-2") + assert.Equal(t, httpRoutes[1].Name, "test-mirror-1") + assert.Equal(t, httpRoutes[2].Name, "") + + r.RemoveManagedRoutes() + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 1) +} + +func TestHttpReconcileMirrorRouteSubset(t *testing.T) { + + ro := rolloutWithDestinationRule() + const RolloutService = "rollout-service" + const StableSubsetName = "stable-subset" + const CanarySubsetName = "canary-subset" + ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name = "vsvc" + ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Routes = nil + ro.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.StableSubsetName = StableSubsetName + ro.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.CanarySubsetName = CanarySubsetName + dRule := unstructuredutil.StrToUnstructuredUnsafe(` +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: istio-destrule + namespace: default +spec: + host: rollout-service + subsets: + - name: stable-subset + - name: canary-subset +`) + + //ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + obj := unstructuredutil.StrToUnstructuredUnsafe(singleRouteSubsetVsvc) + client := 
testutil.NewFakeDynamicClient(obj, dRule) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + // Test for both the HTTP VS & Mixed VS + setMirror1 := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + } + r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(r.rollout.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, []v1alpha1.MangedRoutes{{ + Name: "test-mirror-1", + }, + }...) + + err := r.SetMirrorRoute(setMirror1) + assert.Nil(t, err) + iVirtualService, err := client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + // HTTP Routes + httpRoutes := extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 2) + + // Assertions + assert.Equal(t, httpRoutes[0].Name, "test-mirror-1") + assert.Equal(t, httpRoutes[0].Mirror.Host, RolloutService) + assert.Equal(t, httpRoutes[0].Mirror.Subset, CanarySubsetName) + assert.Equal(t, httpRoutes[0].Route[0].Destination.Host, RolloutService) + assert.Equal(t, httpRoutes[0].Route[0].Destination.Subset, StableSubsetName) + assert.Equal(t, httpRoutes[0].Route[1].Destination.Host, RolloutService) + assert.Equal(t, httpRoutes[0].Route[1].Destination.Subset, CanarySubsetName) + assert.Equal(t, len(httpRoutes[0].Route), 2) + + assert.Equal(t, httpRoutes[1].Name, "") + assert.Nil(t, httpRoutes[1].Mirror) + assert.Equal(t, httpRoutes[1].Route[0].Destination.Host, RolloutService) + assert.Equal(t, httpRoutes[1].Route[0].Destination.Subset, StableSubsetName) + assert.Equal(t, httpRoutes[1].Route[1].Destination.Host, RolloutService) + assert.Equal(t, httpRoutes[1].Route[1].Destination.Subset, CanarySubsetName) + assert.Equal(t, len(httpRoutes[1].Route), 2) + + r.RemoveManagedRoutes() + iVirtualService, err = client.Resource(istioutil.GetIstioVirtualServiceGVR()).Namespace(r.rollout.Namespace).Get(context.TODO(), ro.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name, metav1.GetOptions{}) + assert.NoError(t, err) + httpRoutes = extractHttpRoutes(t, iVirtualService) + assert.Equal(t, len(httpRoutes), 1) +} + +func TestReconcileUpdateMirror(t *testing.T) { + ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) + ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = append(ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, v1alpha1.MangedRoutes{ + Name: "test-mirror-1", + }) + AssertReconcileUpdateMirror(t, regularVsvc, ro) +} +func AssertReconcileUpdateMirror(t *testing.T, vsvc string, ro *v1alpha1.Rollout) *dynamicfake.FakeDynamicClient { + obj := unstructuredutil.StrToUnstructuredUnsafe(vsvc) + client := testutil.NewFakeDynamicClient(obj) + vsvcLister, druleLister := getIstioListers(client) + r := NewReconciler(ro, client, record.NewFakeEventRecorder(), vsvcLister, druleLister) + client.ClearActions() + + setMirror := &v1alpha1.SetMirrorRoute{ + Name: "test-mirror-1", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{ + Exact: "GET", + }, + }}, + } + err := r.SetMirrorRoute(setMirror) + assert.Nil(t, err) + + actions := client.Actions() + assert.Len(t, actions, 1) + assert.Equal(t, "update", actions[0].GetVerb()) + return client +} diff --git a/rollout/trafficrouting/istio/istio_types.go 
b/rollout/trafficrouting/istio/istio_types.go index 63d361f42e..bb09693177 100644 --- a/rollout/trafficrouting/istio/istio_types.go +++ b/rollout/trafficrouting/istio/istio_types.go @@ -1,6 +1,7 @@ package istio import ( + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -17,8 +18,27 @@ type VirtualServiceSpec struct { // VirtualServiceHTTPRoute is a HTTP route in a VirtualService type VirtualServiceHTTPRoute struct { - Name string `json:"name,omitempty"` - Route []VirtualServiceRouteDestination `json:"route,omitempty"` + Name string `json:"name,omitempty"` + Match []RouteMatch `json:"match,omitempty"` + Route []VirtualServiceRouteDestination `json:"route,omitempty"` + Mirror *VirtualServiceDestination `json:"mirror,omitempty"` + MirrorPercentage *Percent `json:"mirrorPercentage,omitempty"` +} + +type RouteMatch struct { + // Method What http methods should be mirrored + // +optional + Method *v1alpha1.StringMatch `json:"method,omitempty" protobuf:"bytes,1,opt,name=method"` + // Uri What url paths should be mirrored + // +optional + Uri *v1alpha1.StringMatch `json:"uri,omitempty" protobuf:"bytes,2,opt,name=uri"` + // Headers What request with matching headers should be mirrored + // +optional + Headers map[string]v1alpha1.StringMatch `json:"headers,omitempty" protobuf:"bytes,3,opt,name=headers"` +} + +type Percent struct { + Value float64 `json:"value,omitempty"` } // VirtualServiceTLSRoute is a TLS route in a VirtualService @@ -50,6 +70,11 @@ type VirtualServiceRouteDestination struct { type VirtualServiceDestination struct { Host string `json:"host,omitempty"` Subset string `json:"subset,omitempty"` + Port *Port `json:"port,omitempty"` +} + +type Port struct { + Number uint32 `json:"number,omitempty"` } // DestinationRule is an Istio DestinationRule containing only the fields which we care about @@ -59,6 +84,7 @@ type DestinationRule struct { } type DestinationRuleSpec struct { + Host string `json:"host,omitempty"` Subsets []Subset `json:"subsets,omitempty"` } diff --git a/rollout/trafficrouting/nginx/nginx.go b/rollout/trafficrouting/nginx/nginx.go index 0892f7c175..52a687a496 100644 --- a/rollout/trafficrouting/nginx/nginx.go +++ b/rollout/trafficrouting/nginx/nginx.go @@ -222,88 +222,110 @@ func (r *Reconciler) canaryIngress(stableIngress *ingressutil.Ingress, name stri // SetWeight modifies Nginx Ingress resources to reach desired state func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { - ctx := context.TODO() - stableIngressName := r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress - canaryIngressName := ingressutil.GetCanaryIngressName(r.cfg.Rollout) - - // Check if stable ingress exists (from lister, which has a cache), error if it does not - stableIngress, err := r.cfg.IngressWrapper.GetCached(r.cfg.Rollout.Namespace, stableIngressName) - if err != nil { - r.log.WithField(logutil.IngressKey, stableIngressName).WithField("err", err.Error()).Error("error retrieving stableIngress") - return fmt.Errorf("error retrieving stableIngress `%s` from cache: %v", stableIngressName, err) - } - // Check if canary ingress exists (from lister which has a cache), determines whether we later call Create() or Update() - canaryIngress, err := r.cfg.IngressWrapper.GetCached(r.cfg.Rollout.Namespace, canaryIngressName) - - canaryIngressExists := true - if err != nil { - if !k8serrors.IsNotFound(err) { - // An error other than "not found" occurred - 
r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error retrieving canary ingress") - return fmt.Errorf("error retrieving canary ingress `%s` from cache: %v", canaryIngressName, err) + // Set weight for additional ingresses if present + if ingresses := r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses; ingresses != nil { + // Fail out if there is an issue setting weight on additional ingresses. + // Fundamental assumption is that each additional Ingress is equal in importance + // as primary Ingress resource. + if err := r.SetWeightPerIngress(desiredWeight, ingresses); err != nil { + return err } - r.log.WithField(logutil.IngressKey, canaryIngressName).Infof("canary ingress not found") - canaryIngressExists = false - } - // Construct the desired canary Ingress resource - desiredCanaryIngress, err := r.canaryIngress(stableIngress, canaryIngressName, desiredWeight) - if err != nil { - r.log.WithField(logutil.IngressKey, canaryIngressName).Error(err.Error()) - return err } - if !canaryIngressExists { - r.cfg.Recorder.Eventf(r.cfg.Rollout, record.EventOptions{EventReason: "CreatingCanaryIngress"}, "Creating canary ingress `%s` with weight `%d`", canaryIngressName, desiredWeight) - _, err = r.cfg.IngressWrapper.Create(ctx, r.cfg.Rollout.Namespace, desiredCanaryIngress, metav1.CreateOptions{}) - if err == nil { - return nil + return r.SetWeightPerIngress(desiredWeight, []string{r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress}) +} + +// SetWeightPerIngress modifies each Nginx Ingress resource to reach desired state in the scenario of a rollout +// having multiple Nginx Ingress resources. +func (r *Reconciler) SetWeightPerIngress(desiredWeight int32, ingresses []string) error { + for _, ingress := range ingresses { + ctx := context.TODO() + stableIngressName := ingress + canaryIngressName := ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), stableIngressName) + + // Check if stable ingress exists (from lister, which has a cache), error if it does not + stableIngress, err := r.cfg.IngressWrapper.GetCached(r.cfg.Rollout.Namespace, stableIngressName) + if err != nil { + r.log.WithField(logutil.IngressKey, stableIngressName).WithField("err", err.Error()).Error("error retrieving stableIngress") + return fmt.Errorf("error retrieving stableIngress `%s` from cache: %v", stableIngressName, err) } - if !k8serrors.IsAlreadyExists(err) { - r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error creating canary ingress") - return fmt.Errorf("error creating canary ingress `%s`: %v", canaryIngressName, err) + // Check if canary ingress exists (from lister which has a cache), determines whether we later call Create() or Update() + canaryIngress, err := r.cfg.IngressWrapper.GetCached(r.cfg.Rollout.Namespace, canaryIngressName) + + canaryIngressExists := true + if err != nil { + if !k8serrors.IsNotFound(err) { + // An error other than "not found" occurred + r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error retrieving canary ingress") + return fmt.Errorf("error retrieving canary ingress `%s` from cache: %v", canaryIngressName, err) + } + r.log.WithField(logutil.IngressKey, canaryIngressName).Infof("canary ingress not found") + canaryIngressExists = false + } - // Canary ingress was created by a different reconcile call before this one could complete (race) - // This means we just read it from the API now (instead of cache)
and continue with the normal - // flow we take when the canary already existed. - canaryIngress, err = r.cfg.IngressWrapper.Get(ctx, r.cfg.Rollout.Namespace, canaryIngressName, metav1.GetOptions{}) + + // Construct the desired canary Ingress resource + desiredCanaryIngress, err := r.canaryIngress(stableIngress, canaryIngressName, desiredWeight) if err != nil { r.log.WithField(logutil.IngressKey, canaryIngressName).Error(err.Error()) - return fmt.Errorf("error retrieving canary ingress `%s` from api: %v", canaryIngressName, err) + return err } - } - // Canary Ingress already exists, apply a patch if needed + if !canaryIngressExists { + r.cfg.Recorder.Eventf(r.cfg.Rollout, record.EventOptions{EventReason: "CreatingCanaryIngress"}, "Creating canary ingress `%s` with weight `%d`", canaryIngressName, desiredWeight) + _, err = r.cfg.IngressWrapper.Create(ctx, r.cfg.Rollout.Namespace, desiredCanaryIngress, metav1.CreateOptions{}) + if err == nil { + return nil + } + if !k8serrors.IsAlreadyExists(err) { + r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error creating canary ingress") + return fmt.Errorf("error creating canary ingress `%s`: %v", canaryIngressName, err) + } + // Canary ingress was created by a different reconcile call before this one could complete (race) + // This means we just read it from the API now (instead of cache) and continue with the normal + // flow we take when the canary already existed. + canaryIngress, err = r.cfg.IngressWrapper.Get(ctx, r.cfg.Rollout.Namespace, canaryIngressName, metav1.GetOptions{}) + if err != nil { + r.log.WithField(logutil.IngressKey, canaryIngressName).Error(err.Error()) + return fmt.Errorf("error retrieving canary ingress `%s` from api: %v", canaryIngressName, err) + } + } - // Only modify canaryIngress if it is controlled by this Rollout - if !metav1.IsControlledBy(canaryIngress.GetObjectMeta(), r.cfg.Rollout) { - r.log.WithField(logutil.IngressKey, canaryIngressName).Error("canary ingress controlled by different object") - return fmt.Errorf("canary ingress `%s` controlled by different object", canaryIngressName) - } + // Canary Ingress already exists, apply a patch if needed - // Make patches - patch, modified, err := ingressutil.BuildIngressPatch(canaryIngress.Mode(), canaryIngress, - desiredCanaryIngress, ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) + // Only modify canaryIngress if it is controlled by this Rollout + if !metav1.IsControlledBy(canaryIngress.GetObjectMeta(), r.cfg.Rollout) { + r.log.WithField(logutil.IngressKey, canaryIngressName).Error("canary ingress controlled by different object") + return fmt.Errorf("canary ingress `%s` controlled by different object", canaryIngressName) + } - if err != nil { - r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error constructing canary ingress patch") - return fmt.Errorf("error constructing canary ingress patch for `%s`: %v", canaryIngressName, err) - } - if !modified { - r.log.WithField(logutil.IngressKey, canaryIngressName).Info("No changes to canary ingress - skipping patch") - return nil - } + // Make patches + patch, modified, err := ingressutil.BuildIngressPatch(canaryIngress.Mode(), canaryIngress, + desiredCanaryIngress, ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) - r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("patch", string(patch)).Debug("applying canary Ingress patch") - 
r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("desiredWeight", desiredWeight).Info("updating canary Ingress") - r.cfg.Recorder.Eventf(r.cfg.Rollout, record.EventOptions{EventReason: "PatchingCanaryIngress"}, "Updating Ingress `%s` to desiredWeight '%d'", canaryIngressName, desiredWeight) + if err != nil { + r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error constructing canary ingress patch") + return fmt.Errorf("error constructing canary ingress patch for `%s`: %v", canaryIngressName, err) + } + if !modified { + r.log.WithField(logutil.IngressKey, canaryIngressName).Info("No changes to canary ingress - skipping patch") + return nil + } - _, err = r.cfg.IngressWrapper.Patch(ctx, r.cfg.Rollout.Namespace, canaryIngressName, types.MergePatchType, patch, metav1.PatchOptions{}) - if err != nil { - r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error patching canary ingress") - return fmt.Errorf("error patching canary ingress `%s`: %v", canaryIngressName, err) + r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("patch", string(patch)).Debug("applying canary Ingress patch") + r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("desiredWeight", desiredWeight).Info("updating canary Ingress") + r.cfg.Recorder.Eventf(r.cfg.Rollout, record.EventOptions{EventReason: "PatchingCanaryIngress"}, "Updating Ingress `%s` to desiredWeight '%d'", canaryIngressName, desiredWeight) + + _, err = r.cfg.IngressWrapper.Patch(ctx, r.cfg.Rollout.Namespace, canaryIngressName, types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + r.log.WithField(logutil.IngressKey, canaryIngressName).WithField("err", err.Error()).Error("error patching canary ingress") + return fmt.Errorf("error patching canary ingress `%s`: %v", canaryIngressName, err) + } } + return nil +} +func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) error { return nil } @@ -315,3 +337,11 @@ func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations .. 
func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { return nil } + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + return nil +} + +func (r *Reconciler) RemoveManagedRoutes() error { + return nil +} diff --git a/rollout/trafficrouting/nginx/nginx_test.go b/rollout/trafficrouting/nginx/nginx_test.go index 2b3a7cf328..6edfbb8141 100644 --- a/rollout/trafficrouting/nginx/nginx_test.go +++ b/rollout/trafficrouting/nginx/nginx_test.go @@ -137,6 +137,12 @@ func fakeRollout(stableSvc, canarySvc, stableIng string) *v1alpha1.Rollout { } } +func fakeRolloutWithMultiIngress(stableSvc, canarySvc, stableIng, addStableIng string) *v1alpha1.Rollout { + rollout := fakeRollout(stableSvc, canarySvc, stableIng) + rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses = []string{addStableIng} + return rollout +} + func checkBackendService(t *testing.T, ing *ingressutil.Ingress, serviceName string) { t.Helper() switch ing.Mode() { @@ -188,7 +194,7 @@ func TestCanaryIngressCreate(t *testing.T) { stableIngress.Spec.IngressClassName = pointer.StringPtr("nginx-ext") i := ingressutil.NewLegacyIngress(stableIngress) - desiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout), 10) + desiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 10) assert.Nil(t, err, "No error returned when calling canaryIngress") checkBackendService(t, desiredCanaryIngress, "canary-service") @@ -203,6 +209,45 @@ func TestCanaryIngressCreate(t *testing.T) { assert.Equal(t, "nginx-ext", *desired.Spec.IngressClassName) } +func TestCanaryIngressCreateMultiIngress(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress"), + }, + } + stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") + stableIngress.Spec.IngressClassName = pointer.StringPtr("nginx-ext") + i := ingressutil.NewLegacyIngress(stableIngress) + + desiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 10) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, desiredCanaryIngress, "canary-service") + desired, err := desiredCanaryIngress.GetExtensionsIngress() + if err != nil { + t.Error(err) + t.FailNow() + } + assert.Equal(t, "true", desired.Annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "10", desired.Annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.NotNil(t, desired.Spec.IngressClassName) + assert.Equal(t, "nginx-ext", *desired.Spec.IngressClassName) + + additionalDesiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses[0]), 10) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, additionalDesiredCanaryIngress, "canary-service") + desired, err = additionalDesiredCanaryIngress.GetExtensionsIngress() + if err != nil { + t.Error(err) + t.FailNow() + } + assert.Equal(t, "true", 
desired.Annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "10", desired.Annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.NotNil(t, desired.Spec.IngressClassName) + assert.Equal(t, "nginx-ext", *desired.Spec.IngressClassName) +} + func TestCanaryIngressPatchWeight(t *testing.T) { r := Reconciler{ cfg: ReconcilerConfig{ @@ -218,7 +263,7 @@ func TestCanaryIngressPatchWeight(t *testing.T) { stableIngress := ingressutil.NewLegacyIngress(stable) canaryIngress := ingressutil.NewLegacyIngress(canary) - desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout), 15) + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) assert.Nil(t, err, "No error returned when calling canaryIngress") checkBackendService(t, desiredCanaryIngress, "canary-service") @@ -230,6 +275,53 @@ func TestCanaryIngressPatchWeight(t *testing.T) { assert.Equal(t, "{\"metadata\":{\"annotations\":{\"nginx.ingress.kubernetes.io/canary-weight\":\"15\"}}}", string(patch), "compareCanaryIngresses returns expected patch") } +func TestCanaryIngressPatchWeightMultiIngress(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress"), + }, + } + stable := extensionsIngress("stable-ingress", 80, "stable-service") + canary := extensionsIngress("canary-ingress", 80, "canary-service") + canary.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "10", + }) + stableIngress := ingressutil.NewLegacyIngress(stable) + canaryIngress := ingressutil.NewLegacyIngress(canary) + + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, desiredCanaryIngress, "canary-service") + + patch, modified, err := ingressutil.BuildIngressPatch(canaryIngress.Mode(), canaryIngress, desiredCanaryIngress, + ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) + assert.Nil(t, err, "compareCanaryIngresses returns no error") + assert.True(t, modified, "compareCanaryIngresses returns modified=true") + assert.Equal(t, "{\"metadata\":{\"annotations\":{\"nginx.ingress.kubernetes.io/canary-weight\":\"15\"}}}", string(patch), "compareCanaryIngresses returns expected patch") + + addStable := extensionsIngress("additional-stable-ingress", 80, "stable-service") + addCanary := extensionsIngress("additional-canary-ingress", 80, "canary-service") + addCanary.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "10", + }) + addStableIngress := ingressutil.NewLegacyIngress(addStable) + addCanaryIngress := ingressutil.NewLegacyIngress(addCanary) + + addDesiredCanaryIngress, err := r.canaryIngress(addStableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses[0]), 15) + assert.Nil(t, err, "No error returned when calling addCanaryIngress") + + checkBackendService(t, 
addDesiredCanaryIngress, "canary-service") + + patch, modified, err = ingressutil.BuildIngressPatch(addCanaryIngress.Mode(), addCanaryIngress, addDesiredCanaryIngress, + ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) + assert.Nil(t, err, "compareCanaryIngresses returns no error") + assert.True(t, modified, "compareCanaryIngresses returns modified=true") + assert.Equal(t, "{\"metadata\":{\"annotations\":{\"nginx.ingress.kubernetes.io/canary-weight\":\"15\"}}}", string(patch), "compareCanaryIngresses returns expected patch") +} + func TestCanaryIngressUpdatedRoute(t *testing.T) { r := Reconciler{ cfg: ReconcilerConfig{ @@ -246,7 +338,36 @@ func TestCanaryIngressUpdatedRoute(t *testing.T) { stableIngress := ingressutil.NewLegacyIngress(stable) canaryIngress := ingressutil.NewLegacyIngress(canary) - desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout), 15) + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, desiredCanaryIngress, "canary-service") + + patch, modified, err := ingressutil.BuildIngressPatch(canaryIngress.Mode(), canaryIngress, desiredCanaryIngress, + ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) + assert.Nil(t, err, "compareCanaryIngresses returns no error") + assert.True(t, modified, "compareCanaryIngresses returns modified=true") + assert.Equal(t, "{\"spec\":{\"rules\":[{\"host\":\"fakehost.example.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"canary-service\",\"servicePort\":80},\"path\":\"/bar\"}]}}]}}", string(patch), "compareCanaryIngresses returns expected patch") +} + +func TestCanaryIngressUpdatedRouteMultiIngress(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress"), + }, + } + + stable := extensionsIngress("stable-ingress", 80, "stable-service") + stable.Spec.Rules[0].HTTP.Paths[0].Path = "/bar" + canary := extensionsIngress("canary-ingress", 80, "canary-service") + canary.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + stableIngress := ingressutil.NewLegacyIngress(stable) + canaryIngress := ingressutil.NewLegacyIngress(canary) + + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) assert.Nil(t, err, "No error returned when calling canaryIngress") checkBackendService(t, desiredCanaryIngress, "canary-service") @@ -256,6 +377,27 @@ func TestCanaryIngressUpdatedRoute(t *testing.T) { assert.Nil(t, err, "compareCanaryIngresses returns no error") assert.True(t, modified, "compareCanaryIngresses returns modified=true") assert.Equal(t, "{\"spec\":{\"rules\":[{\"host\":\"fakehost.example.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"canary-service\",\"servicePort\":80},\"path\":\"/bar\"}]}}]}}", string(patch), "compareCanaryIngresses returns expected patch") + + addStable := extensionsIngress("stable-ingress", 80, "stable-service") + addStable.Spec.Rules[0].HTTP.Paths[0].Path = "/bar" + addCanary := extensionsIngress("canary-ingress", 80, 
"canary-service") + addCanary.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + addStableIngress := ingressutil.NewLegacyIngress(addStable) + addCanaryIngress := ingressutil.NewLegacyIngress(addCanary) + + addDesiredCanaryIngress, err := r.canaryIngress(addStableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses[0]), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, addDesiredCanaryIngress, "canary-service") + + patch, modified, err = ingressutil.BuildIngressPatch(addCanaryIngress.Mode(), addCanaryIngress, addDesiredCanaryIngress, + ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) + assert.Nil(t, err, "compareCanaryIngresses returns no error") + assert.True(t, modified, "compareCanaryIngresses returns modified=true") + assert.Equal(t, "{\"spec\":{\"rules\":[{\"host\":\"fakehost.example.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"canary-service\",\"servicePort\":80},\"path\":\"/bar\"}]}}]}}", string(patch), "compareCanaryIngresses returns expected patch") } func TestCanaryIngressRetainIngressClass(t *testing.T) { @@ -270,7 +412,30 @@ func TestCanaryIngressRetainIngressClass(t *testing.T) { }) stableIngress := ingressutil.NewLegacyIngress(stable) - desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout), 15) + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, desiredCanaryIngress, "canary-service") + + annotations := desiredCanaryIngress.GetAnnotations() + assert.Equal(t, "true", annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "15", annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.Equal(t, "nginx-foo", annotations["kubernetes.io/ingress.class"], "ingress-class annotation retained") +} + +func TestCanaryIngressRetainIngressClassMultiIngress(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress"), + }, + } + stable := extensionsIngress("stable-ingress", 80, "stable-service") + stable.SetAnnotations(map[string]string{ + "kubernetes.io/ingress.class": "nginx-foo", + }) + stableIngress := ingressutil.NewLegacyIngress(stable) + + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) assert.Nil(t, err, "No error returned when calling canaryIngress") checkBackendService(t, desiredCanaryIngress, "canary-service") @@ -279,6 +444,22 @@ func TestCanaryIngressRetainIngressClass(t *testing.T) { assert.Equal(t, "true", annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") assert.Equal(t, "15", annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") assert.Equal(t, "nginx-foo", annotations["kubernetes.io/ingress.class"], "ingress-class annotation retained") + + addStable := 
extensionsIngress("stable-ingress", 80, "stable-service") + addStable.SetAnnotations(map[string]string{ + "kubernetes.io/ingress.class": "nginx-foo", + }) + addStableIngress := ingressutil.NewLegacyIngress(addStable) + + addDesiredCanaryIngress, err := r.canaryIngress(addStableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses[0]), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, addDesiredCanaryIngress, "canary-service") + + annotations = addDesiredCanaryIngress.GetAnnotations() + assert.Equal(t, "true", annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "15", annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.Equal(t, "nginx-foo", annotations["kubernetes.io/ingress.class"], "ingress-class annotation retained") } func TestCanaryIngressAdditionalAnnotations(t *testing.T) { @@ -294,7 +475,7 @@ func TestCanaryIngressAdditionalAnnotations(t *testing.T) { stable := extensionsIngress("stable-ingress", 80, "stable-service") stableIngress := ingressutil.NewLegacyIngress(stable) - desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout), 15) + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) assert.Nil(t, err, "No error returned when calling canaryIngress") checkBackendService(t, desiredCanaryIngress, "canary-service") @@ -306,6 +487,46 @@ func TestCanaryIngressAdditionalAnnotations(t *testing.T) { assert.Equal(t, "DoCanary", annotations["nginx.ingress.kubernetes.io/canary-by-header-value"], "canary-by-header-value annotation set") } +func TestCanaryIngressAdditionalAnnotationsMultiIngress(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress"), + }, + } + r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalIngressAnnotations = map[string]string{ + "canary-by-header": "X-Foo", + "canary-by-header-value": "DoCanary", + } + + stable := extensionsIngress("stable-ingress", 80, "stable-service") + stableIngress := ingressutil.NewLegacyIngress(stable) + + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, desiredCanaryIngress, "canary-service") + + annotations := desiredCanaryIngress.GetAnnotations() + assert.Equal(t, "true", annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "15", annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.Equal(t, "X-Foo", annotations["nginx.ingress.kubernetes.io/canary-by-header"], "canary-by-header annotation set") + assert.Equal(t, "DoCanary", annotations["nginx.ingress.kubernetes.io/canary-by-header-value"], "canary-by-header-value annotation set") + + addStable := extensionsIngress("additional-stable-ingress", 80, "stable-service") + addStableIngress := ingressutil.NewLegacyIngress(addStable) + + addDesiredCanaryIngress, err := 
r.canaryIngress(addStableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses[0]), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, addDesiredCanaryIngress, "canary-service") + + annotations = addDesiredCanaryIngress.GetAnnotations() + assert.Equal(t, "true", annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "15", annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.Equal(t, "X-Foo", annotations["nginx.ingress.kubernetes.io/canary-by-header"], "canary-by-header annotation set") + assert.Equal(t, "DoCanary", annotations["nginx.ingress.kubernetes.io/canary-by-header-value"], "canary-by-header-value annotation set") +} + func TestReconciler_canaryIngress(t *testing.T) { t.Run("will build desired networking ingress successfully", func(t *testing.T) { // given @@ -320,7 +541,7 @@ func TestReconciler_canaryIngress(t *testing.T) { i := ingressutil.NewIngress(stableIngress) // when - desiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout), 10) + desiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 10) // then assert.Nil(t, err, "No error returned when calling canaryIngress") @@ -336,6 +557,56 @@ func TestReconciler_canaryIngress(t *testing.T) { }) } +func TestReconciler_canaryIngressWithMultiIngress(t *testing.T) { + t.Run("will build desired networking ingress successfully", func(t *testing.T) { + // given + t.Parallel() + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress"), + }, + } + + stableIngress := networkingIngress("stable-ingress", 80, "stable-service") + stableIngress.Spec.IngressClassName = pointer.StringPtr("nginx-ext") + i := ingressutil.NewIngress(stableIngress) + + // when + desiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), 10) + + // then + assert.Nil(t, err, "No error returned when calling canaryIngress") + checkBackendService(t, desiredCanaryIngress, "canary-service") + desired, err := desiredCanaryIngress.GetNetworkingIngress() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, "true", desired.Annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "10", desired.Annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.NotNil(t, desired.Spec.IngressClassName) + assert.Equal(t, "nginx-ext", *desired.Spec.IngressClassName) + + addStableIngress := networkingIngress("additional-stable-ingress", 80, "stable-service") + addStableIngress.Spec.IngressClassName = pointer.StringPtr("nginx-ext") + i = ingressutil.NewIngress(addStableIngress) + + // when + addDesiredCanaryIngress, err := r.canaryIngress(i, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses[0]), 10) + + // then + assert.Nil(t, err, "No error returned when calling canaryIngress") + checkBackendService(t, desiredCanaryIngress, "canary-service") + desired, err = 
addDesiredCanaryIngress.GetNetworkingIngress() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, "true", desired.Annotations["nginx.ingress.kubernetes.io/canary"], "canary annotation set to true") + assert.Equal(t, "10", desired.Annotations["nginx.ingress.kubernetes.io/canary-weight"], "canary-weight annotation set to expected value") + assert.NotNil(t, desired.Spec.IngressClassName) + assert.Equal(t, "nginx-ext", *desired.Spec.IngressClassName) + }) +} + func TestType(t *testing.T) { client := fake.NewSimpleClientset() rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") @@ -398,6 +669,41 @@ func TestReconcileStableIngressFound(t *testing.T) { } } +func TestReconcileStableIngressFoundMultiIngress(t *testing.T) { + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") + addStableIngress := extensionsIngress("additional-stable-ingress", 80, "stable-service") + + client := fake.NewSimpleClientset() + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + err = r.SetWeight(10) + assert.Nil(t, err, "Reconcile returns no error") + actions := client.Actions() + assert.Len(t, actions, 2) + if !t.Failed() { + // Avoid "index out of range" errors + assert.Equal(t, "create", actions[0].GetVerb(), "action: create canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[0].GetResource(), "action: create canary ingress") + + assert.Equal(t, "create", actions[1].GetVerb(), "action: create canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[1].GetResource(), "action: create canary ingress") + } +} + func TestReconcileStableIngressFoundWrongBackend(t *testing.T) { rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") stableIngress := extensionsIngress("stable-ingress", 80, "other-service") @@ -422,6 +728,34 @@ func TestReconcileStableIngressFoundWrongBackend(t *testing.T) { assert.Contains(t, err.Error(), "has no rules using service", "correct error is returned") } +func TestReconcileStableIngressFoundWrongBackendMultiIngress(t *testing.T) { + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + // this one will work + stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") + // This is the one that should error out + addStableIngress := extensionsIngress("additional-stable-ingress", 80, "other-service") + + client := fake.NewSimpleClientset() + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + ingressWrapper, err := 
ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + err = r.SetWeight(10) + assert.NotNil(t, err, "Reconcile returns error") + assert.Contains(t, err.Error(), "has no rules using service", "correct error is returned") +} + func TestReconcileStableAndCanaryIngressFoundNoOwner(t *testing.T) { rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") @@ -511,6 +845,54 @@ func TestReconcileStableAndCanaryIngressFoundPatch(t *testing.T) { } } +func TestReconcileStableAndCanaryIngressFoundPatchMultiIngress(t *testing.T) { + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") + addStableIngress := extensionsIngress("additional-stable-ingress", 80, "stable-service") + canaryIngress := extensionsIngress("rollout-stable-ingress-canary", 80, "canary-service") + canaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + addCanaryIngress := extensionsIngress("rollout-additional-stable-ingress-canary", 80, "canary-service") + addCanaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + setIngressOwnerRef(canaryIngress, rollout) + setIngressOwnerRef(addCanaryIngress, rollout) + client := fake.NewSimpleClientset(canaryIngress, addCanaryIngress) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(canaryIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addCanaryIngress) + + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + err = r.SetWeight(10) + assert.Nil(t, err, "Reconcile returns no error") + actions := client.Actions() + assert.Len(t, actions, 2) + if !t.Failed() { + // Avoid "index out of range" errors + assert.Equal(t, "patch", actions[0].GetVerb(), "action: patch canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[0].GetResource(), "action: patch canary ingress") + assert.Equal(t, "patch", actions[1].GetVerb(), "action: patch canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[1].GetResource(), "action: patch canary ingress") + } +} + func TestReconcileWillInvokeNetworkingIngress(t *testing.T) { // given rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") @@ -551,6 
+933,57 @@ func TestReconcileWillInvokeNetworkingIngress(t *testing.T) { } } +func TestReconcileWillInvokeNetworkingIngressMultiIngress(t *testing.T) { + // given + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + stableIngress := networkingIngress("stable-ingress", 80, "stable-service") + addStableIngress := networkingIngress("additional-stable-ingress", 80, "stable-service") + canaryIngress := networkingIngress("rollout-stable-ingress-canary", 80, "canary-service") + canaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + addCanaryIngress := networkingIngress("rollout-additional-stable-ingress-canary", 80, "canary-service") + addCanaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + canaryIngress.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(rollout, schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Rollout"})}) + addCanaryIngress.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(rollout, schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Rollout"})}) + client := fake.NewSimpleClientset(stableIngress, canaryIngress, addStableIngress, addCanaryIngress) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(canaryIngress) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(addCanaryIngress) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) + if err != nil { + t.Fatal(err) + } + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + // when + err = r.SetWeight(10) + + // then + assert.Nil(t, err, "Reconcile returns no error") + actions := client.Actions() + assert.Len(t, actions, 2) + if !t.Failed() { + // Avoid "index out of range" errors + assert.Equal(t, "patch", actions[0].GetVerb(), "action: patch canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}, actions[0].GetResource(), "action: patch canary ingress") + assert.Equal(t, "patch", actions[1].GetVerb(), "action: patch canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}, actions[1].GetResource(), "action: patch canary ingress") + } +} + func TestReconcileStableAndCanaryIngressFoundNoChange(t *testing.T) { rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") @@ -582,6 +1015,46 @@ func TestReconcileStableAndCanaryIngressFoundNoChange(t *testing.T) { assert.Len(t, actions, 0) } +func TestReconcileStableAndCanaryIngressFoundNoChangeMultiIngress(t *testing.T) { + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + stableIngress := extensionsIngress("stable-ingress", 80, 
"stable-service") + addStableIngress := extensionsIngress("additional-stable-ingress", 80, "stable-service") + canaryIngress := extensionsIngress("rollout-stable-ingress-canary", 80, "canary-service") + addCanaryIngress := extensionsIngress("rollout-additional-stable-ingress-canary", 80, "canary-service") + setIngressOwnerRef(canaryIngress, rollout) + setIngressOwnerRef(addCanaryIngress, rollout) + canaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "10", + }) + addCanaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "10", + }) + client := fake.NewSimpleClientset() + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(canaryIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addCanaryIngress) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + err = r.SetWeight(10) + assert.Nil(t, err, "Reconcile returns no error") + actions := client.Actions() + assert.Len(t, actions, 0) +} + func TestReconcileCanaryCreateError(t *testing.T) { rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") @@ -622,6 +1095,48 @@ func TestReconcileCanaryCreateError(t *testing.T) { } } +func TestReconcileCanaryCreateErrorMultiIngress(t *testing.T) { + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") + addStableIngress := extensionsIngress("additional-stable-ingress", 80, "stable-service") + + client := fake.NewSimpleClientset() + client.ReactionChain = nil + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + + // stableIngress exists + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + // Return with AlreadyExists error to create for canary + r.cfg.Client.(*fake.Clientset).Fake.AddReactor("create", "ingresses", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("fake error") + }) + + err = r.SetWeight(10) + assert.NotNil(t, err, "Reconcile returns error") + assert.Equal(t, "error creating canary ingress `rollout-additional-stable-ingress-canary`: fake error", err.Error()) + actions := client.Actions() + assert.Len(t, actions, 1) 
+ if !t.Failed() { + // Avoid "index out of range" errors + assert.Equal(t, "create", actions[0].GetVerb(), "action: create canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[0].GetResource(), "action: create canary ingress") + } +} + func TestReconcileCanaryCreateErrorAlreadyExistsPatch(t *testing.T) { rollout := fakeRollout("stable-service", "canary-service", "stable-ingress") stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") @@ -678,3 +1193,114 @@ func TestReconcileCanaryCreateErrorAlreadyExistsPatch(t *testing.T) { assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[2].GetResource(), "action: patch canary ingress") } } + +func TestReconcileCanaryCreateErrorAlreadyExistsPatchMultiIngress(t *testing.T) { + rollout := fakeRolloutWithMultiIngress("stable-service", "canary-service", "stable-ingress", "additional-stable-ingress") + stableIngress := extensionsIngress("stable-ingress", 80, "stable-service") + addStableIngress := extensionsIngress("additional-stable-ingress", 80, "stable-service") + canaryIngress := extensionsIngress("rollout-stable-ingress-canary", 80, "canary-service") + addCanaryIngress := extensionsIngress("rollout-additional-stable-ingress-canary", 80, "canary-service") + canaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + addCanaryIngress.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "15", + }) + setIngressOwnerRef(canaryIngress, rollout) + setIngressOwnerRef(addCanaryIngress, rollout) + + client := fake.NewSimpleClientset() + client.ReactionChain = nil + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + + // stableIngress exists + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(stableIngress) + k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(addStableIngress) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + if err != nil { + t.Fatal(err) + } + + r := NewReconciler(ReconcilerConfig{ + Rollout: rollout, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + + // Return with AlreadyExists error to create for canary + r.cfg.Client.(*fake.Clientset).Fake.AddReactor("create", "ingresses", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, k8serrors.NewAlreadyExists(schema.GroupResource{ + Group: "extensions", + Resource: "ingresses", + }, "rollout-stable-ingress-canary") + }) + + // Respond with canaryIngress on GET + r.cfg.Client.(*fake.Clientset).Fake.AddReactor("get", "ingresses", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, canaryIngress, nil + }) + + err = r.SetWeight(10) + assert.Nil(t, err, "Reconcile returns no error") + actions := client.Actions() + assert.Len(t, actions, 6) + if !t.Failed() { + // Avoid "index out of range" errors + // primary ingress + assert.Equal(t, "create", actions[0].GetVerb(), "action: create canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[0].GetResource(), "action: create 
canary ingress") + assert.Equal(t, "get", actions[1].GetVerb(), "action: get canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[1].GetResource(), "action: get canary ingress") + assert.Equal(t, "patch", actions[2].GetVerb(), "action: patch canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[2].GetResource(), "action: patch canary ingress") + // additional ingress + assert.Equal(t, "create", actions[3].GetVerb(), "action: create canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[0].GetResource(), "action: create canary ingress") + assert.Equal(t, "get", actions[4].GetVerb(), "action: get canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[1].GetResource(), "action: get canary ingress") + assert.Equal(t, "patch", actions[5].GetVerb(), "action: patch canary ingress") + assert.Equal(t, schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, actions[2].GetResource(), "action: patch canary ingress") + } + +func TestSetHeaderRoute(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRollout("stable-service", "canary-service", "stable-ingress"), + }, + } + err := r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "set-header", + Match: []v1alpha1.HeaderRoutingMatch{{ + HeaderName: "header-name", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "value", + }, + }}, + }) + assert.Nil(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) +} + +func TestSetMirrorRoute(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRollout("stable-service", "canary-service", "stable-ingress"), + }, + } + err := r.SetMirrorRoute(&v1alpha1.SetMirrorRoute{ + Name: "mirror-route", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{Exact: "GET"}, + }}, + }) + assert.Nil(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) +} diff --git a/rollout/trafficrouting/service_helper.go b/rollout/trafficrouting/service_helper.go new file mode 100644 index 0000000000..15d03cad53 --- /dev/null +++ b/rollout/trafficrouting/service_helper.go @@ -0,0 +1,33 @@ +package trafficrouting + +import ( + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" +) + +// GetStableAndCanaryServices return a service names for current stable and canary services. +// If ping-pong feature enabled then the current ping or pong service will be returned. Which is a stable is defined +// based on a rollout status field Status.Canary.StablePingPong +func GetStableAndCanaryServices(ro *v1alpha1.Rollout) (string, string) { + if IsPingPongEnabled(ro) { + canary := ro.Spec.Strategy.Canary + if IsStablePing(ro) { + return canary.PingPong.PingService, canary.PingPong.PongService + } else { + return canary.PingPong.PongService, canary.PingPong.PingService + } + } else { + return ro.Spec.Strategy.Canary.StableService, ro.Spec.Strategy.Canary.CanaryService + } +} + +// IsStablePing return true if the 'ping' service is pointing to the stable replica set. +// Which of the service currently is using is stored in a status.canary.stablePingPong. +// Return true in a case if status StablePingPong value equal to 'ping'. 
Return false in +// case when the status value is 'pong' or empty +func IsStablePing(ro *v1alpha1.Rollout) bool { + return ro.Status.Canary.StablePingPong == v1alpha1.PPPing +} + +func IsPingPongEnabled(ro *v1alpha1.Rollout) bool { + return ro.Spec.Strategy.Canary != nil && ro.Spec.Strategy.Canary.PingPong != nil +} diff --git a/rollout/trafficrouting/smi/smi.go b/rollout/trafficrouting/smi/smi.go index 8d91bf9579..c781799f87 100644 --- a/rollout/trafficrouting/smi/smi.go +++ b/rollout/trafficrouting/smi/smi.go @@ -215,11 +215,11 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 if !isControlledBy { return fmt.Errorf("Rollout does not own TrafficSplit `%s`", trafficSplitName) } - err = r.patchTrafficSplit(existingTrafficSplit, trafficSplits) - if err == nil { - r.cfg.Recorder.Eventf(r.cfg.Rollout, record.EventOptions{EventReason: "TrafficSplitModified"}, "TrafficSplit `%s` modified", trafficSplitName) - } - return err + return r.patchTrafficSplit(existingTrafficSplit, trafficSplits) +} + +func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) error { + return nil } func (r *Reconciler) generateTrafficSplits(trafficSplitName string, desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) VersionedTrafficSplits { @@ -351,3 +351,11 @@ func trafficSplitV1Alpha3(ro *v1alpha1.Rollout, objectMeta metav1.ObjectMeta, ro func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { return nil } + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + return nil +} + +func (r *Reconciler) RemoveManagedRoutes() error { + return nil +} diff --git a/rollout/trafficrouting/smi/smi_test.go b/rollout/trafficrouting/smi/smi_test.go index 22a69d6098..136e006a14 100644 --- a/rollout/trafficrouting/smi/smi_test.go +++ b/rollout/trafficrouting/smi/smi_test.go @@ -576,3 +576,62 @@ func TestCreateTrafficSplitForMultipleBackends(t *testing.T) { assert.Equal(t, 80, ts3.Spec.Backends[3].Weight) }) } + +func TestReconcileSetHeaderRoute(t *testing.T) { + t.Run("not implemented", func(t *testing.T) { + ro := fakeRollout("stable-service", "canary-service", "", "") + client := fake.NewSimpleClientset() + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{}, + }) + assert.Nil(t, err) + + err = r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "set-header", + Match: []v1alpha1.HeaderRoutingMatch{{ + HeaderName: "header-name", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "value", + }, + }}, + }) + assert.Nil(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + + actions := client.Actions() + assert.Len(t, actions, 0) + }) +} + +func TestReconcileSetMirrorRoute(t *testing.T) { + t.Run("not implemented", func(t *testing.T) { + ro := fakeRollout("stable-service", "canary-service", "", "") + client := fake.NewSimpleClientset() + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{}, + }) + assert.Nil(t, err) + + err = r.SetMirrorRoute(&v1alpha1.SetMirrorRoute{ + Name: "mirror-route", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{Exact: "GET"}, + }}, + }) + assert.Nil(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + + actions := client.Actions() + assert.Len(t, actions, 0) + }) +} diff --git 
a/rollout/trafficrouting/traefik/mocks/traefik.go b/rollout/trafficrouting/traefik/mocks/traefik.go new file mode 100644 index 0000000000..e1ce30a648 --- /dev/null +++ b/rollout/trafficrouting/traefik/mocks/traefik.go @@ -0,0 +1,99 @@ +package mocks + +import ( + "context" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + + argoRecord "github.com/argoproj/argo-rollouts/utils/record" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/record" +) + +type FakeDynamicClient struct{} + +type FakeClient struct { + IsGetError bool + IsGetErrorManifest bool + UpdateError bool +} + +type FakeService struct { + Weight int +} + +type FakeRecorder struct{} + +var ( + TraefikServiceObj *unstructured.Unstructured + ErrorTraefikServiceObj *unstructured.Unstructured +) + +func (f *FakeRecorder) Eventf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...interface{}) { +} + +func (f *FakeRecorder) Warnf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...interface{}) { +} + +func (f *FakeRecorder) K8sRecorder() record.EventRecorder { + return nil +} + +func (f *FakeClient) Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { + return nil, nil +} + +func (f *FakeClient) Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { + if f.IsGetError { + return TraefikServiceObj, errors.New("Traefik get error") + } + if f.IsGetErrorManifest { + return ErrorTraefikServiceObj, nil + } + return TraefikServiceObj, nil +} + +func (f *FakeClient) Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { + if f.UpdateError { + return obj, errors.New("Traefik update error") + } + return obj, nil +} + +func (f *FakeClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error) { + return nil, nil +} + +func (f *FakeClient) Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error { + return nil +} + +func (f *FakeClient) DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return nil +} + +func (f *FakeClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + return nil, nil +} + +func (f *FakeClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func (f *FakeClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { + return nil, nil +} + +func (f *FakeClient) Namespace(string) dynamic.ResourceInterface { + return f +} + +func (f *FakeDynamicClient) Resource(schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { + return &FakeClient{} +} diff --git a/rollout/trafficrouting/traefik/traefik.go b/rollout/trafficrouting/traefik/traefik.go new file mode 100644 index 0000000000..f5b507207f --- /dev/null +++ b/rollout/trafficrouting/traefik/traefik.go @@ -0,0 +1,177 @@ +package traefik + 
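+// The Reconciler in this file shifts canary traffic by rewriting the weights under
+// spec.weighted.services of the referenced TraefikService object and updating it
+// through the dynamic client.
+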
+import ( + "context" + "fmt" + "strings" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/record" +) + +// Type holds this controller type +const Type = "Traefik" + +const traefikServices = "traefikservices" +const TraefikServiceUpdateError = "TraefikServiceUpdateError" + +var ( + apiGroupToResource = map[string]string{ + defaults.DefaultTraefikAPIGroup: traefikServices, + } +) + +type ReconcilerConfig struct { + Rollout *v1alpha1.Rollout + Client ClientInterface + Recorder record.EventRecorder +} + +type Reconciler struct { + Rollout *v1alpha1.Rollout + Client ClientInterface + Recorder record.EventRecorder +} + +func (r *Reconciler) sendWarningEvent(id, msg string) { + r.sendEvent(corev1.EventTypeWarning, id, msg) +} + +func (r *Reconciler) sendEvent(eventType, id, msg string) { + r.Recorder.Eventf(r.Rollout, record.EventOptions{EventType: eventType, EventReason: id}, msg) +} + +type ClientInterface interface { + Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) + Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) +} + +func NewReconciler(cfg *ReconcilerConfig) *Reconciler { + reconciler := &Reconciler{ + Rollout: cfg.Rollout, + Client: cfg.Client, + Recorder: cfg.Recorder, + } + return reconciler +} + +func NewDynamicClient(di dynamic.Interface, namespace string) dynamic.ResourceInterface { + return di.Resource(GetMappingGVR()).Namespace(namespace) +} + +func GetMappingGVR() schema.GroupVersionResource { + group := defaults.DefaultTraefikAPIGroup + parts := strings.Split(defaults.DefaultTraefikVersion, "/") + version := parts[len(parts)-1] + resourceName := apiGroupToResource[group] + return schema.GroupVersionResource{ + Group: group, + Version: version, + Resource: resourceName, + } +} + +func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { + return nil +} + +func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + ctx := context.TODO() + rollout := r.Rollout + traefikServiceName := rollout.Spec.Strategy.Canary.TrafficRouting.Traefik.WeightedTraefikServiceName + traefikService, err := r.Client.Get(ctx, traefikServiceName, metav1.GetOptions{}) + if err != nil { + return err + } + canaryServiceName := rollout.Spec.Strategy.Canary.CanaryService + stableServiceName := rollout.Spec.Strategy.Canary.StableService + services, isFound, err := unstructured.NestedSlice(traefikService.Object, "spec", "weighted", "services") + if err != nil { + return err + } + if !isFound { + return errors.New("spec.weighted.services was not found in traefik service manifest") + } + canaryService, err := getService(canaryServiceName, services) + if err != nil { + return err + } + if canaryService == nil { + return errors.New("traefik canary service was not found") + } + err = unstructured.SetNestedField(canaryService, int64(desiredWeight), "weight") + if err != nil { + return err + } + stableService, err := getService(stableServiceName, services) + if err != nil { + return 
err + } + if stableService == nil { + return errors.New("traefik stable service was not found") + } + err = unstructured.SetNestedField(stableService, int64(100-desiredWeight), "weight") + if err != nil { + return err + } + err = unstructured.SetNestedSlice(traefikService.Object, services, "spec", "weighted", "services") + if err != nil { + return err + } + _, err = r.Client.Update(ctx, traefikService, metav1.UpdateOptions{}) + if err != nil { + msg := fmt.Sprintf("Error updating traefik service %q: %s", traefikService.GetName(), err) + r.sendWarningEvent(TraefikServiceUpdateError, msg) + } + return err +} + +func getService(serviceName string, services []interface{}) (map[string]interface{}, error) { + var selectedService map[string]interface{} + for _, service := range services { + typedService, ok := service.(map[string]interface{}) + if !ok { + return nil, errors.New("Failed type assertion setting weight for traefik service") + } + nameOfCurrentService, isFound, err := unstructured.NestedString(typedService, "name") + if err != nil { + return nil, err + } + if !isFound { + return nil, errors.New("name field was not found in service") + } + if nameOfCurrentService == serviceName { + selectedService = typedService + break + } + } + return selectedService, nil +} + +func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) error { + return nil +} + +func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (*bool, error) { + return nil, nil +} + +func (r *Reconciler) Type() string { + return Type +} + +func (r *Reconciler) SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error { + return nil +} + +func (r *Reconciler) RemoveManagedRoutes() error { + return nil +} diff --git a/rollout/trafficrouting/traefik/traefik_test.go b/rollout/trafficrouting/traefik/traefik_test.go new file mode 100644 index 0000000000..d7dbbbd297 --- /dev/null +++ b/rollout/trafficrouting/traefik/traefik_test.go @@ -0,0 +1,384 @@ +package traefik + +import ( + "testing" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/traefik/mocks" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" +) + +const traefikService = ` +apiVersion: mocks.containo.us/v1alpha1 +kind: TraefikService +metadata: + labels: + service: argo-mocks + name: mocks-service +spec: + weighted: + services: + - name: stable-rollout + weight: 100 + port: 80 + - name: canary-rollout + weight: 0 + port: 80 +` + +const errorTraefikService = ` +apiVersion: mocks.containo.us/v1alpha1 +kind: TraefikService +metadata: + labels: + service: argo-mocks + name: mocks-service +` + +var ( + client *mocks.FakeClient = &mocks.FakeClient{} +) + +const ( + stableServiceName string = "stable-rollout" + fakeStableServiceName string = "fake-stable-rollout" + canaryServiceName string = "canary-rollout" + fakeCanaryServiceName string = "fake-canary-rollout" + traefikServiceName string = "mocks-service" +) + +func TestNewDynamicClient(t *testing.T) { + t.Run("NewDynamicClient", func(t *testing.T) { + // Given + t.Parallel() + fakeDynamicClient := &mocks.FakeDynamicClient{} + + // When + NewDynamicClient(fakeDynamicClient, "default") + }) +} + +func TestUpdateHash(t *testing.T) { + t.Run("UpdateHash", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: 
newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + err := r.UpdateHash("", "") + + // Then + assert.NoError(t, err) + }) +} + +func TestSetWeight(t *testing.T) { + mocks.TraefikServiceObj = toUnstructured(t, traefikService) + mocks.ErrorTraefikServiceObj = toUnstructured(t, errorTraefikService) + t.Run("SetWeight", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + err := r.SetWeight(30) + + // Then + assert.NoError(t, err) + services, isFound, err := unstructured.NestedSlice(mocks.TraefikServiceObj.Object, "spec", "weighted", "services") + assert.NoError(t, err) + assert.Equal(t, isFound, true) + stableService, err := getService(stableServiceName, services) + assert.NoError(t, err) + stableServiceWeight, isFound, err := unstructured.NestedInt64(stableService, "weight") + assert.NoError(t, err) + assert.Equal(t, isFound, true) + canaryService, err := getService(canaryServiceName, services) + assert.NoError(t, err) + canaryServiceWeight, isFound, err := unstructured.NestedInt64(canaryService, "weight") + assert.Equal(t, isFound, true) + assert.NoError(t, err) + assert.Equal(t, int64(70), stableServiceWeight) + assert.Equal(t, int64(30), canaryServiceWeight) + }) + t.Run("SetWeightWithError", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: &mocks.FakeClient{ + IsGetError: true, + }, + } + r := NewReconciler(&cfg) + + // When + err := r.SetWeight(30) + + // Then + assert.Error(t, err) + }) + t.Run("SetWeightWithErrorManifest", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: &mocks.FakeClient{ + IsGetErrorManifest: true, + }, + } + r := NewReconciler(&cfg) + + // When + err := r.SetWeight(30) + + // Then + assert.Error(t, err) + }) + t.Run("SetWeightWithErrorStableName", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(fakeStableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + err := r.SetWeight(30) + + // Then + assert.Error(t, err) + }) + t.Run("SetWeightWithErrorCanaryName", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, fakeCanaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + err := r.SetWeight(30) + + // Then + assert.Error(t, err) + }) + t.Run("TraefikUpdateError", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: &mocks.FakeClient{ + UpdateError: true, + }, + Recorder: &mocks.FakeRecorder{}, + } + r := NewReconciler(&cfg) + + // When + err := r.SetWeight(30) + + // Then + assert.Error(t, err) + }) +} + +func TestSetHeaderRoute(t *testing.T) { + t.Run("SetHeaderRoute", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + err := r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "set-header", + Match: 
[]v1alpha1.HeaderRoutingMatch{{ + HeaderName: "header-name", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "value", + }, + }}, + }) + + // Then + assert.NoError(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + }) +} + +func TestSetMirrorRoute(t *testing.T) { + t.Run("SetMirrorRoute", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + err := r.SetMirrorRoute(&v1alpha1.SetMirrorRoute{ + Name: "mirror-route", + Match: []v1alpha1.RouteMatch{{ + Method: &v1alpha1.StringMatch{Exact: "GET"}, + }}, + }) + + // Then + assert.NoError(t, err) + + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + }) +} + +func toUnstructured(t *testing.T, manifest string) *unstructured.Unstructured { + t.Helper() + obj := &unstructured.Unstructured{} + + dec := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + _, _, err := dec.Decode([]byte(manifest), nil, obj) + if err != nil { + t.Fatal(err) + } + return obj +} + +func TestVerifyWeight(t *testing.T) { + t.Run("VerifyWeight", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + isSynced, err := r.VerifyWeight(32) + + // Then + assert.Nil(t, isSynced) + assert.Nil(t, err) + }) +} + +func TestType(t *testing.T) { + mocks.TraefikServiceObj = toUnstructured(t, traefikService) + t.Run("Type", func(t *testing.T) { + // Given + t.Parallel() + cfg := ReconcilerConfig{ + Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), + Client: client, + } + r := NewReconciler(&cfg) + + // When + reconcilerType := r.Type() + + // Then + assert.Equal(t, Type, reconcilerType) + }) +} + +func TestGetService(t *testing.T) { + t.Run("ErrorGetServiceFromStruct ", func(t *testing.T) { + // Given + t.Parallel() + services := []interface{}{ + mocks.FakeService{Weight: 12}, + } + + // When + selectedServices, err := getService("default", services) + + // Then + assert.Nil(t, selectedServices) + assert.Error(t, err) + }) + t.Run("ErrorGetServiceFromMap", func(t *testing.T) { + // Given + t.Parallel() + services := map[string]interface{}{ + "weight": 100, + } + + // When + selectedServices, err := getService("default", []interface{}{services}) + + // Then + assert.Nil(t, selectedServices) + assert.Error(t, err) + }) + t.Run("GetServiceFromMap", func(t *testing.T) { + // Given + t.Parallel() + const serviceName string = "default" + services := map[string]interface{}{ + "name": serviceName, + } + + // When + selectedServices, err := getService(serviceName, []interface{}{services}) + + // Then + assert.NotNil(t, selectedServices) + assert.NoError(t, err) + }) + t.Run("ErrorGetServiceFromNil", func(t *testing.T) { + // Given + t.Parallel() + services := map[string]interface{}{ + "name": nil, + } + + // When + selectedServices, err := getService("default", []interface{}{services}) + + // Then + assert.Nil(t, selectedServices) + assert.Error(t, err) + }) +} + +func newRollout(stableSvc, canarySvc, traefikServiceName string) *v1alpha1.Rollout { + return &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout", + Namespace: "default", + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + StableService: stableSvc, + CanaryService: canarySvc, + 
TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Traefik: &v1alpha1.TraefikTrafficRouting{ + WeightedTraefikServiceName: traefikServiceName, + }, + }, + }, + }, + }, + } +} diff --git a/rollout/trafficrouting/trafficroutingutil.go b/rollout/trafficrouting/trafficroutingutil.go index 53c20ffd2b..3ff9c9c4ec 100644 --- a/rollout/trafficrouting/trafficroutingutil.go +++ b/rollout/trafficrouting/trafficroutingutil.go @@ -10,9 +10,15 @@ type TrafficRoutingReconciler interface { UpdateHash(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error // SetWeight sets the canary weight to the desired weight SetWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error + // SetHeaderRoute sets the header routing step + SetHeaderRoute(setHeaderRoute *v1alpha1.SetHeaderRoute) error + // SetMirrorRoute sets up the traffic router to mirror traffic to a service + SetMirrorRoute(setMirrorRoute *v1alpha1.SetMirrorRoute) error // VerifyWeight returns true if the canary is at the desired weight and additionalDestinations are at the weights specified // Returns nil if weight verification is not supported or not applicable VerifyWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (*bool, error) + // RemoveAllRoutes Removes all routes that are managed by rollouts by looking at spec.strategy.canary.trafficRouting.managedRoutes + RemoveManagedRoutes() error // Type returns the type of the traffic routing reconciler Type() string } diff --git a/rollout/trafficrouting_test.go b/rollout/trafficrouting_test.go index a7e83557ea..6c1e49f476 100644 --- a/rollout/trafficrouting_test.go +++ b/rollout/trafficrouting_test.go @@ -17,45 +17,38 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/rollout/mocks" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/alb" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/appmesh" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/nginx" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/smi" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/traefik" + traefikMocks "github.com/argoproj/argo-rollouts/rollout/trafficrouting/traefik/mocks" testutil "github.com/argoproj/argo-rollouts/test/util" "github.com/argoproj/argo-rollouts/utils/conditions" istioutil "github.com/argoproj/argo-rollouts/utils/istio" logutil "github.com/argoproj/argo-rollouts/utils/log" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) -// newFakeTrafficRoutingReconciler returns a fake TrafficRoutingReconciler with mocked success return values -func newFakeTrafficRoutingReconciler() []*mocks.TrafficRoutingReconciler { - reconcilerList := []*mocks.TrafficRoutingReconciler{} - for _, trafficRoutingReconciler := range reconcilerList { - trafficRoutingReconciler.On("Type").Return("fake") - trafficRoutingReconciler.On("SetWeight", mock.Anything, mock.Anything).Return(nil) - trafficRoutingReconciler.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) - trafficRoutingReconciler.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - } - return reconcilerList -} - // newFakeTrafficRoutingReconciler returns a fake TrafficRoutingReconciler with mocked success return values func newFakeSingleTrafficRoutingReconciler() *mocks.TrafficRoutingReconciler { trafficRoutingReconciler := mocks.TrafficRoutingReconciler{} 
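+ // In addition to the previous stubs, the new SetHeaderRoute, SetMirrorRoute and
+ // RemoveManagedRoutes methods are mocked so the extended TrafficRoutingReconciler
+ // interface is fully satisfied by this fake.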
trafficRoutingReconciler.On("Type").Return("fake") trafficRoutingReconciler.On("SetWeight", mock.Anything, mock.Anything).Return(nil) + trafficRoutingReconciler.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + trafficRoutingReconciler.On("SetMirrorRoute", mock.Anything, mock.Anything).Return(nil) trafficRoutingReconciler.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) trafficRoutingReconciler.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + trafficRoutingReconciler.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil) return &trafficRoutingReconciler } // newUnmockedFakeTrafficRoutingReconciler returns a fake TrafficRoutingReconciler with unmocked // methods (except Type() mocked) -func newUnmockedFakeTrafficRoutingReconciler() *[]*mocks.TrafficRoutingReconciler { - reconcilerList := []*mocks.TrafficRoutingReconciler{} - for _, trafficRoutingReconciler := range reconcilerList { - trafficRoutingReconciler.On("Type").Return("fake") - } - return &reconcilerList +func newUnmockedFakeTrafficRoutingReconciler() *mocks.TrafficRoutingReconciler { + trafficRoutingReconciler := mocks.TrafficRoutingReconciler{} + trafficRoutingReconciler.On("Type").Return("fake") + return &trafficRoutingReconciler } func newTrafficWeightFixture(t *testing.T) (*fixture, *v1alpha1.Rollout) { @@ -95,45 +88,42 @@ func newTrafficWeightFixture(t *testing.T) (*fixture, *v1alpha1.Rollout) { func TestReconcileTrafficRoutingSetWeightErr(t *testing.T) { f, ro := newTrafficWeightFixture(t) defer f.Close() - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(errors.New("Error message")) - f.runExpectError(getKey(ro, t), true) - } + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(errors.New("Error message")) + f.runExpectError(getKey(ro, t), true) } // verify error is not returned when VerifyWeight returns error (so that we can continue reconciling) func TestReconcileTrafficRoutingVerifyWeightErr(t *testing.T) { f, ro := newTrafficWeightFixture(t) defer f.Close() - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(false), errors.New("Error message")) - f.runExpectError(getKey(ro, t), true) - } + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(false), errors.New("Error message")) + f.expectPatchRolloutAction(ro) + f.run(getKey(ro, t)) } // verify we requeue when VerifyWeight returns false func TestReconcileTrafficRoutingVerifyWeightFalse(t *testing.T) { 
f, ro := newTrafficWeightFixture(t) defer f.Close() - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(false), nil) - c, i, k8sI := f.newController(noResyncPeriodFunc) - enqueued := false - c.enqueueRolloutAfter = func(obj interface{}, duration time.Duration) { - enqueued = true - } - f.expectPatchRolloutAction(ro) - f.runController(getKey(ro, t), true, false, c, i, k8sI) - assert.True(t, enqueued) + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(false), nil) + c, i, k8sI := f.newController(noResyncPeriodFunc) + enqueued := false + c.enqueueRolloutAfter = func(obj interface{}, duration time.Duration) { + enqueued = true } + f.expectPatchRolloutAction(ro) + f.runController(getKey(ro, t), true, false, c, i, k8sI) + assert.True(t, enqueued) } func TestRolloutUseDesiredWeight(t *testing.T) { @@ -179,16 +169,64 @@ func TestRolloutUseDesiredWeight(t *testing.T) { f.expectPatchRolloutAction(r2) - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { - // make sure SetWeight was called with correct value - assert.Equal(t, int32(10), desiredWeight) - return nil - }) - fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(true, nil) + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(10), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) + f.run(getKey(r2, t)) +} + +func TestRolloutUseDesiredWeight100(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: pointer.Int32Ptr(10), + }, + { + Pause: &v1alpha1.RolloutPause{}, + }, } + r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(2), intstr.FromInt(1), intstr.FromInt(0)) + r2 := bumpVersion(r1) + r2.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{} + r2.Spec.Strategy.Canary.CanaryService = "canary" + r2.Spec.Strategy.Canary.StableService = "stable" + + rs1 := newReplicaSetWithStatus(r1, 10, 10) + rs2 := newReplicaSetWithStatus(r2, 10, 10) + + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := 
rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + canarySelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} + stableSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} + canarySvc := newService("canary", 80, canarySelector, r2) + stableSvc := newService("stable", 80, stableSelector, r2) + + f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + + r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 10, 0, 10, false) + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + f.expectPatchRolloutAction(r2) + + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(100), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) f.run(getKey(r2, t)) } @@ -245,45 +283,43 @@ func TestRolloutWithExperimentStep(t *testing.T) { t.Run("Experiment Running - WeightDestination created", func(t *testing.T) { ex.Status.Phase = v1alpha1.AnalysisPhaseRunning - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { - // make sure SetWeight was called with correct value - assert.Equal(t, int32(10), desiredWeight) - assert.Equal(t, int32(5), weightDestinations[0].Weight) - assert.Equal(t, ex.Status.TemplateStatuses[0].ServiceName, weightDestinations[0].ServiceName) - assert.Equal(t, ex.Status.TemplateStatuses[0].PodTemplateHash, weightDestinations[0].PodTemplateHash) - return nil - }) - fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { - assert.Equal(t, int32(10), desiredWeight) - assert.Equal(t, int32(5), weightDestinations[0].Weight) - assert.Equal(t, ex.Status.TemplateStatuses[0].ServiceName, weightDestinations[0].ServiceName) - assert.Equal(t, ex.Status.TemplateStatuses[0].PodTemplateHash, weightDestinations[0].PodTemplateHash) - return nil - }) - } + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(10), desiredWeight) + assert.Equal(t, int32(5), weightDestinations[0].Weight) + assert.Equal(t, ex.Status.TemplateStatuses[0].ServiceName, weightDestinations[0].ServiceName) + assert.Equal(t, ex.Status.TemplateStatuses[0].PodTemplateHash, weightDestinations[0].PodTemplateHash) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + 
f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { + assert.Equal(t, int32(10), desiredWeight) + assert.Equal(t, int32(5), weightDestinations[0].Weight) + assert.Equal(t, ex.Status.TemplateStatuses[0].ServiceName, weightDestinations[0].ServiceName) + assert.Equal(t, ex.Status.TemplateStatuses[0].PodTemplateHash, weightDestinations[0].PodTemplateHash) + return nil + }) f.run(getKey(r2, t)) }) t.Run("Experiment Pending - no WeightDestination created", func(t *testing.T) { ex.Status.Phase = v1alpha1.AnalysisPhasePending - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { - // make sure SetWeight was called with correct value - assert.Equal(t, int32(10), desiredWeight) - assert.Len(t, weightDestinations, 0) - return nil - }) - fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { - assert.Equal(t, int32(10), desiredWeight) - assert.Len(t, weightDestinations, 0) - return nil - }) - } + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(10), desiredWeight) + assert.Len(t, weightDestinations, 0) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { + assert.Equal(t, int32(10), desiredWeight) + assert.Len(t, weightDestinations, 0) + return nil + }) f.run(getKey(r2, t)) }) } @@ -326,18 +362,98 @@ func TestRolloutUsePreviousSetWeight(t *testing.T) { f.expectUpdateReplicaSetAction(rs2) f.expectPatchRolloutAction(r2) - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { - // make sure SetWeight was called with correct value - assert.Equal(t, int32(10), desiredWeight) + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(10), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything, mock.Anything).Return(pointer.BoolPtr(true), nil) + f.fakeTrafficRouting.On("error patching 
alb ingress", mock.Anything, mock.Anything).Return(true, nil) + f.run(getKey(r2, t)) +} + +func TestRolloutUseDynamicWeightOnPromoteFull(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: pointer.Int32Ptr(5), + }, + { + SetWeight: pointer.Int32Ptr(25), + }, + } + r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(0)) + r2 := bumpVersion(r1) + r2.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{} + r2.Spec.Strategy.Canary.CanaryService = "canary" + r2.Spec.Strategy.Canary.StableService = "stable" + + rs1 := newReplicaSetWithStatus(r1, 5, 5) + rs2 := newReplicaSetWithStatus(r2, 10, 5) + + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + canarySelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} + stableSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} + canarySvc := newService("canary", 80, canarySelector, r2) + stableSvc := newService("stable", 80, stableSelector, r2) + + r2.Status.Canary.Weights = &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 5, + ServiceName: "canary", + PodTemplateHash: rs2PodHash, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 95, + ServiceName: "stable", + PodTemplateHash: rs1PodHash, + }, + } + + f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + + r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 15, 0, 10, false) + r2.Status.PromoteFull = true + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + f.expectUpdateReplicaSetAction(rs2) + f.expectPatchRolloutAction(r2) + + t.Run("DynamicStableScale true", func(t *testing.T) { + r2.Spec.Strategy.Canary.DynamicStableScale = true + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + assert.Equal(t, int32(50), desiredWeight) return nil }) - fakeTrafficRouting.On("VerifyWeight", mock.Anything, mock.Anything).Return(pointer.BoolPtr(true), nil) - fakeTrafficRouting.On("error patching alb ingress", mock.Anything, mock.Anything).Return(true, nil) - } - f.run(getKey(r2, t)) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) + f.run(getKey(r2, t)) + }) + + t.Run("DynamicStableScale false", func(t *testing.T) { + r2.Spec.Strategy.Canary.DynamicStableScale = false + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + assert.Equal(t, int32(5), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil) + 
f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) + f.run(getKey(r2, t)) + }) } func TestRolloutSetWeightToZeroWhenFullyRolledOut(t *testing.T) { @@ -371,16 +487,16 @@ func TestRolloutSetWeightToZeroWhenFullyRolledOut(t *testing.T) { f.expectPatchRolloutAction(r1) - f.fakeTrafficRouting = *newUnmockedFakeTrafficRoutingReconciler() - for _, fakeTrafficRouting := range f.fakeTrafficRouting { - fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) - fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { - // make sure SetWeight was called with correct value - assert.Equal(t, int32(0), desiredWeight) - return nil - }) - fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) - } + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(0), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) f.run(getKey(r1, t)) } @@ -508,6 +624,46 @@ func TestNewTrafficRoutingReconciler(t *testing.T) { assert.Equal(t, smi.Type, networkReconciler.Type()) } } + { + tsController := Controller{} + r := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(0)) + r.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{}, + } + roCtx := &rolloutContext{ + rollout: r, + log: logutil.WithRollout(r), + } + networkReconcilerList, err := tsController.NewTrafficRoutingReconciler(roCtx) + for _, networkReconciler := range networkReconcilerList { + assert.Nil(t, err) + assert.NotNil(t, networkReconciler) + assert.Equal(t, appmesh.Type, networkReconciler.Type()) + } + } + { + tsController := Controller{ + reconcilerBase: reconcilerBase{ + dynamicclientset: &traefikMocks.FakeDynamicClient{}, + }, + } + r := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(0)) + r.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + Traefik: &v1alpha1.TraefikTrafficRouting{ + WeightedTraefikServiceName: "traefik-service", + }, + } + roCtx := &rolloutContext{ + rollout: r, + log: logutil.WithRollout(r), + } + networkReconcilerList, err := tsController.NewTrafficRoutingReconciler(roCtx) + for _, networkReconciler := range networkReconcilerList { + assert.Nil(t, err) + assert.NotNil(t, networkReconciler) + assert.Equal(t, traefik.Type, networkReconciler.Type()) + } + } { // (2) Multiple Reconcilers (Nginx + SMI) tsController := Controller{} @@ -567,7 +723,9 @@ func TestCanaryWithTrafficRoutingAddScaleDownDelay(t *testing.T) { f := newFixture(t) defer f.Close() - r1 := newCanaryRollout("foo", 1, nil, nil, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }}, 
pointer.Int32Ptr(0), intstr.FromInt(1), intstr.FromInt(1)) r1.Spec.Strategy.Canary.CanaryService = "canary" r1.Spec.Strategy.Canary.StableService = "stable" r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ @@ -579,9 +737,10 @@ func TestCanaryWithTrafficRoutingAddScaleDownDelay(t *testing.T) { rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] r2 = updateCanaryRolloutStatus(r2, rs2PodHash, 2, 1, 2, false) r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation)) - r2.Status.CurrentStepIndex = nil availableCondition, _ := newAvailableCondition(true) conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(true) + conditions.SetRolloutCondition(&r2.Status, completedCondition) _, r2.Status.Canary.Weights = calculateWeightStatus(r2, rs2PodHash, rs2PodHash, 0) selector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} @@ -605,9 +764,11 @@ func TestCanaryWithTrafficRoutingScaleDownLimit(t *testing.T) { f := newFixture(t) defer f.Close() - inTheFuture := metav1.Now().Add(10 * time.Second).UTC().Format(time.RFC3339) + inTheFuture := timeutil.MetaNow().Add(10 * time.Second).UTC().Format(time.RFC3339) - r1 := newCanaryRollout("foo", 1, nil, nil, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(1)) + r1 := newCanaryRollout("foo", 1, nil, []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32Ptr(10), + }}, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(1)) rs1 := newReplicaSetWithStatus(r1, 1, 1) rs1.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = inTheFuture r1.Spec.Strategy.Canary.ScaleDownDelayRevisionLimit = pointer.Int32Ptr(1) @@ -646,3 +807,145 @@ func TestCanaryWithTrafficRoutingScaleDownLimit(t *testing.T) { _, ok := rs1Updated.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] assert.False(t, ok, "annotation not removed") } + +// TestDynamicScalingDontIncreaseWeightWhenAborted verifies we don't increase the traffic weight if +// we are aborted, using dynamic scaling, and available stable replicas is less than desired +func TestDynamicScalingDontIncreaseWeightWhenAborted(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: pointer.Int32Ptr(50), + }, + { + Pause: &v1alpha1.RolloutPause{}, + }, + } + r1 := newCanaryRollout("foo", 5, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(1)) + r1.Spec.Strategy.Canary.DynamicStableScale = true + r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + SMI: &v1alpha1.SMITrafficRouting{}, + } + r1.Spec.Strategy.Canary.CanaryService = "canary" + r1.Spec.Strategy.Canary.StableService = "stable" + r1.Status.ReadyReplicas = 4 + r1.Status.AvailableReplicas = 4 + r1.Status.Abort = true + r1.Status.AbortedAt = &metav1.Time{Time: time.Now().Add(-1 * time.Minute)} + r2 := bumpVersion(r1) + + rs1 := newReplicaSetWithStatus(r1, 5, 4) // have less available than desired to test calculation + rs2 := newReplicaSetWithStatus(r2, 0, 0) + + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + canarySelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} + stableSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} + canarySvc := newService("canary", 80, canarySelector, r1) + stableSvc := newService("stable", 80, stableSelector, r1) + r2.Status.StableRS = rs1PodHash + 
r2.Status.Canary.Weights = &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 0, + ServiceName: "canary", + PodTemplateHash: rs2PodHash, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 100, + ServiceName: "stable", + PodTemplateHash: rs1PodHash, + }, + } + + f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + f.expectPatchRolloutAction(r2) + + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(0), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) + f.run(getKey(r1, t)) +} + +// TestDynamicScalingDecreaseWeightAccordingToStableAvailabilityWhenAborted verifies we decrease the weight +// to the canary depending on the availability of the stable ReplicaSet when aborting +func TestDynamicScalingDecreaseWeightAccordingToStableAvailabilityWhenAborted(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: pointer.Int32Ptr(50), + }, + { + Pause: &v1alpha1.RolloutPause{}, + }, + } + r1 := newCanaryRollout("foo", 5, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(1)) + r1.Spec.Strategy.Canary.DynamicStableScale = true + r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + SMI: &v1alpha1.SMITrafficRouting{}, + } + r1.Spec.Strategy.Canary.CanaryService = "canary" + r1.Spec.Strategy.Canary.StableService = "stable" + r1.Status.ReadyReplicas = 5 + r1.Status.AvailableReplicas = 5 + r1.Status.Abort = true + r1.Status.AbortedAt = &metav1.Time{Time: time.Now().Add(-1 * time.Minute)} + r2 := bumpVersion(r1) + + rs1 := newReplicaSetWithStatus(r1, 5, 1) + rs2 := newReplicaSetWithStatus(r2, 4, 4) + + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + canarySelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} + stableSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} + canarySvc := newService("canary", 80, canarySelector, r1) + stableSvc := newService("stable", 80, stableSelector, r1) + r2.Status.StableRS = rs1PodHash + r2.Status.Canary.Weights = &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 100, + ServiceName: "canary", + PodTemplateHash: rs2PodHash, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 0, + ServiceName: "stable", + PodTemplateHash: rs1PodHash, + }, + } + + f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + f.expectPatchRolloutAction(r2) + + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, 
mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(80), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil) + f.run(getKey(r1, t)) +} diff --git a/server/server.go b/server/server.go index e0dca4bbbd..62c8714b9f 100644 --- a/server/server.go +++ b/server/server.go @@ -7,7 +7,9 @@ import ( "io/fs" "net" "net/http" - "os" + "path" + "regexp" + "strings" "time" "github.com/argoproj/pkg/errors" @@ -24,12 +26,14 @@ import ( "k8s.io/client-go/dynamic" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + appslisters "k8s.io/client-go/listers/apps/v1" "k8s.io/client-go/tools/cache" "github.com/argoproj/argo-rollouts/pkg/apiclient/rollout" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" rolloutclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" rolloutinformers "github.com/argoproj/argo-rollouts/pkg/client/informers/externalversions" + listers "github.com/argoproj/argo-rollouts/pkg/client/listers/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/abort" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/get" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/promote" @@ -58,6 +62,7 @@ type ServerOptions struct { RolloutsClientset rolloutclientset.Interface DynamicClientset dynamic.Interface Namespace string + RootPath string } const ( @@ -76,16 +81,11 @@ func NewServer(o ServerOptions) *ArgoRolloutsServer { return &ArgoRolloutsServer{Options: o} } -type spaFileSystem struct { - root http.FileSystem -} +var re = regexp.MustCompile(``) -func (fs *spaFileSystem) Open(name string) (http.File, error) { - f, err := fs.root.Open(name) - if os.IsNotExist(err) { - return fs.root.Open("index.html") - } - return f, err +func withRootPath(fileContent []byte, rootpath string) []byte { + var temp = re.ReplaceAllString(string(fileContent), ``) + return []byte(temp) } func (s *ArgoRolloutsServer) newHTTPServer(ctx context.Context, port int) *http.Server { @@ -99,7 +99,11 @@ func (s *ArgoRolloutsServer) newHTTPServer(ctx context.Context, port int) *http. gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(json.JSONMarshaler)) gwmux := runtime.NewServeMux(gwMuxOpts, - runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) { return key, true }), + runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) { + // Dropping "Connection" header as a workaround for https://github.com/grpc-ecosystem/grpc-gateway/issues/2447 + // The fix is part of grpc-gateway v2.x but not available in v1.x, so workaround should be removed after upgrading to grpc v2.x + return key, strings.ToLower(key) != "connection" + }), runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler), ) @@ -115,16 +119,105 @@ func (s *ArgoRolloutsServer) newHTTPServer(ctx context.Context, port int) *http. 
var handler http.Handler = gwmux - ui, err := fs.Sub(static, "static") + mux.Handle("/api/", handler) + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + requestedURI := path.Clean(r.RequestURI) + rootPath := path.Clean("/" + s.Options.RootPath) + + if requestedURI == "/" { + http.Redirect(w, r, rootPath+"/", http.StatusFound) + return + } + + //If the rootPath is not in the prefix 404 + if !strings.HasPrefix(requestedURI, rootPath) { + http.NotFound(w, r) + return + } + //If the rootPath is the requestedURI, serve index.html + if requestedURI == rootPath { + fileBytes, openErr := s.readIndexHtml() + if openErr != nil { + log.Errorf("Error opening file index.html: %v", openErr) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Write(fileBytes) + return + } + + embedPath := path.Join("static", strings.TrimPrefix(requestedURI, rootPath)) + file, openErr := static.Open(embedPath) + if openErr != nil { + fErr := openErr.(*fs.PathError) + //If the file is not found, serve index.html + if fErr.Err == fs.ErrNotExist { + fileBytes, openErr := s.readIndexHtml() + if openErr != nil { + log.Errorf("Error opening file index.html: %v", openErr) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Write(fileBytes) + return + } else { + log.Errorf("Error opening file %s: %v", embedPath, openErr) + w.WriteHeader(http.StatusInternalServerError) + return + } + } + defer file.Close() + + stat, statErr := file.Stat() + if statErr != nil { + log.Errorf("Failed to stat file or dir %s: %v", embedPath, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + fileBytes := make([]byte, stat.Size()) + _, err = file.Read(fileBytes) + if err != nil { + log.Errorf("Failed to read file %s: %v", embedPath, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Write(fileBytes) + }) + + return &httpS +} + +func (s *ArgoRolloutsServer) readIndexHtml() ([]byte, error) { + file, err := static.Open("static/index.html") if err != nil { - log.Error("Could not load UI static files") - panic(err) + log.Errorf("Failed to open file %s: %v", "static/index.html", err) + return nil, err } + defer func() { + if file != nil { + if err := file.Close(); err != nil { + log.Errorf("Error closing file: %v", err) + } + } + }() - mux.Handle("/api/", handler) - mux.Handle("/", http.FileServer(&spaFileSystem{http.FS(ui)})) + stat, err := file.Stat() + if err != nil { + log.Errorf("Failed to stat file or dir %s: %v", "static/index.html", err) + return nil, err + } - return &httpS + fileBytes := make([]byte, stat.Size()) + _, err = file.Read(fileBytes) + if err != nil { + log.Errorf("Failed to read file %s: %v", "static/index.html", err) + return nil, err + } + + return withRootPath(fileBytes, s.Options.RootPath), nil } func (s *ArgoRolloutsServer) newGRPCServer() *grpc.Server { @@ -166,7 +259,7 @@ func (s *ArgoRolloutsServer) Run(ctx context.Context, port int, dashboard bool) startupMessage := fmt.Sprintf("Argo Rollouts api-server serving on port %d (namespace: %s)", port, s.Options.Namespace) if dashboard { - startupMessage = fmt.Sprintf("Argo Rollouts Dashboard is now available at localhost %d", port) + startupMessage = fmt.Sprintf("Argo Rollouts Dashboard is now available at http://localhost:%d/%s", port, s.Options.RootPath) } log.Info(startupMessage) @@ -267,7 +360,7 @@ func (s *ArgoRolloutsServer) ListRolloutInfos(ctx context.Context, q *rollout.Ro var riList []*rollout.RolloutInfo for i := range rolloutList.Items { cur := rolloutList.Items[i] - ri := 
info.NewRolloutInfo(&cur, nil, nil, nil, nil) + ri := info.NewRolloutInfo(&cur, nil, nil, nil, nil, nil) ri.ReplicaSets = info.GetReplicaSetInfo(cur.UID, &cur, allReplicaSets, allPods) riList = append(riList, ri) } @@ -293,65 +386,61 @@ func (s *ArgoRolloutsServer) WatchRolloutInfos(q *rollout.RolloutInfoListQuery, } } ctx := ws.Context() - rolloutIf := s.Options.RolloutsClientset.ArgoprojV1alpha1().Rollouts(q.GetNamespace()) + + rolloutsInformerFactory := rolloutinformers.NewSharedInformerFactoryWithOptions(s.Options.RolloutsClientset, 0, rolloutinformers.WithNamespace(q.Namespace)) + rolloutsLister := rolloutsInformerFactory.Argoproj().V1alpha1().Rollouts().Lister().Rollouts(q.Namespace) + rolloutInformer := rolloutsInformerFactory.Argoproj().V1alpha1().Rollouts().Informer() kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(s.Options.KubeClientset, 0, kubeinformers.WithNamespace(q.Namespace)) podsLister := kubeInformerFactory.Core().V1().Pods().Lister().Pods(q.GetNamespace()) rsLister := kubeInformerFactory.Apps().V1().ReplicaSets().Lister().ReplicaSets(q.GetNamespace()) kubeInformerFactory.Start(ws.Context().Done()) + podsInformer := kubeInformerFactory.Core().V1().Pods().Informer() + + rolloutUpdateChan := make(chan *v1alpha1.Rollout) + + rolloutInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + rolloutUpdateChan <- obj.(*v1alpha1.Rollout) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + rolloutUpdateChan <- newObj.(*v1alpha1.Rollout) + }, + }) + podsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + DeleteFunc: func(obj interface{}) { + podUpdated(obj.(*corev1.Pod), rsLister, rolloutsLister, rolloutUpdateChan) + }, + }) + + go rolloutInformer.Run(ctx.Done()) cache.WaitForCacheSync( ws.Context().Done(), - kubeInformerFactory.Core().V1().Pods().Informer().HasSynced, + podsInformer.HasSynced, kubeInformerFactory.Apps().V1().ReplicaSets().Informer().HasSynced, + rolloutInformer.HasSynced, ) - watchIf, err := rolloutIf.Watch(ctx, v1.ListOptions{}) - if err != nil { - return err - } - - var ro *v1alpha1.Rollout - retries := 0 -L: for { select { - case next := <-watchIf.ResultChan(): - ro, _ = next.Object.(*v1alpha1.Rollout) case <-ctx.Done(): - break L - } - if ro == nil { - watchIf.Stop() - newWatchIf, err := rolloutIf.Watch(ctx, v1.ListOptions{}) + return nil + case ro := <-rolloutUpdateChan: + allPods, err := podsLister.List(labels.Everything()) if err != nil { - if retries > 5 { - return err - } - log.Warn(err) - time.Sleep(time.Second) - retries++ - } else { - watchIf = newWatchIf - retries = 0 + return err + } + allReplicaSets, err := rsLister.List(labels.Everything()) + if err != nil { + return err } - continue - } - allPods, err := podsLister.List(labels.Everything()) - if err != nil { - return err - } - allReplicaSets, err := rsLister.List(labels.Everything()) - if err != nil { - return err - } - // get shallow rollout info - ri := info.NewRolloutInfo(ro, allReplicaSets, allPods, nil, nil) - send(ri) + // get shallow rollout info + ri := info.NewRolloutInfo(ro, allReplicaSets, allPods, nil, nil, nil) + send(ri) + } } - watchIf.Stop() - return nil } func (s *ArgoRolloutsServer) RolloutToRolloutInfo(ro *v1alpha1.Rollout) (*rollout.RolloutInfo, error) { @@ -360,7 +449,7 @@ func (s *ArgoRolloutsServer) RolloutToRolloutInfo(ro *v1alpha1.Rollout) (*rollou if err != nil { return nil, err } - return info.NewRolloutInfo(ro, allReplicaSets, allPods, nil, nil), nil + return info.NewRolloutInfo(ro, 
allReplicaSets, allPods, nil, nil, nil), nil } func (s *ArgoRolloutsServer) GetNamespace(ctx context.Context, e *empty.Empty) (*rollout.NamespaceInfo, error) { @@ -399,7 +488,10 @@ func (s *ArgoRolloutsServer) getRollout(namespace string, name string) (*v1alpha func (s *ArgoRolloutsServer) SetRolloutImage(ctx context.Context, q *rollout.SetImageRequest) (*v1alpha1.Rollout, error) { imageString := fmt.Sprintf("%s:%s", q.GetImage(), q.GetTag()) - set.SetImage(s.Options.DynamicClientset, q.GetNamespace(), q.GetRollout(), q.GetContainer(), imageString) + _, err := set.SetImage(s.Options.DynamicClientset, q.GetNamespace(), q.GetRollout(), q.GetContainer(), imageString) + if err != nil { + return nil, err + } return s.getRollout(q.GetNamespace(), q.GetRollout()) } @@ -428,3 +520,22 @@ func (s *ArgoRolloutsServer) Version(ctx context.Context, _ *empty.Empty) (*roll RolloutsVersion: version.String(), }, nil } + +func podUpdated(pod *corev1.Pod, rsLister appslisters.ReplicaSetNamespaceLister, + rolloutLister listers.RolloutNamespaceLister, rolloutUpdated chan *v1alpha1.Rollout) { + for _, podOwner := range pod.GetOwnerReferences() { + if podOwner.Kind == "ReplicaSet" { + rs, err := rsLister.Get(podOwner.Name) + if err == nil { + for _, rsOwner := range rs.GetOwnerReferences() { + if rsOwner.APIVersion == v1alpha1.SchemeGroupVersion.String() && rsOwner.Kind == "Rollout" { + ro, err := rolloutLister.Get(rsOwner.Name) + if err == nil { + rolloutUpdated <- ro + } + } + } + } + } + } +} diff --git a/test/e2e/alb/rollout-alb-experiment-no-setweight.yaml b/test/e2e/alb/rollout-alb-experiment-no-setweight.yaml index cb80c86468..544da2799d 100644 --- a/test/e2e/alb/rollout-alb-experiment-no-setweight.yaml +++ b/test/e2e/alb/rollout-alb-experiment-no-setweight.yaml @@ -40,7 +40,7 @@ spec: selector: app: alb-rollout --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: alb-rollout-ingress @@ -51,9 +51,12 @@ spec: - http: paths: - path: /* + pathType: ImplementationSpecific backend: - serviceName: alb-rollout-root - servicePort: use-annotation + service: + name: alb-rollout-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/alb/rollout-alb-experiment.yaml b/test/e2e/alb/rollout-alb-experiment.yaml index e7b975701c..c718e11069 100644 --- a/test/e2e/alb/rollout-alb-experiment.yaml +++ b/test/e2e/alb/rollout-alb-experiment.yaml @@ -40,7 +40,7 @@ spec: selector: app: alb-rollout --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: alb-rollout-ingress @@ -51,9 +51,12 @@ spec: - http: paths: - path: /* + pathType: ImplementationSpecific backend: - serviceName: alb-rollout-root - servicePort: use-annotation + service: + name: alb-rollout-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/appmesh/appmesh-canary-rollout.yaml b/test/e2e/appmesh/appmesh-canary-rollout.yaml new file mode 100644 index 0000000000..f3bc3a0de1 --- /dev/null +++ b/test/e2e/appmesh/appmesh-canary-rollout.yaml @@ -0,0 +1,163 @@ +apiVersion: appmesh.k8s.aws/v1beta2 +kind: Mesh +metadata: + name: appmesh-canary-rollout +spec: + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: default + +--- +# This service is used by virtual-service to resolve initial dns requests done by app container +apiVersion: v1 +kind: Service +metadata: + name: my-svc +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: 
http + selector: + app: my-app + +--- +apiVersion: v1 +kind: Service +metadata: + name: my-svc-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + # This selector will be updated with the pod-template-hash of the canary ReplicaSet. + app: my-app + +--- +apiVersion: v1 +kind: Service +metadata: + name: my-svc-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + # This selector will be updated with the pod-template-hash of the stable ReplicaSet. + app: my-app + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualService +metadata: + name: my-svc +spec: + provider: + virtualRouter: + virtualRouterRef: + name: my-vrouter + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualRouter +metadata: + name: my-vrouter +spec: + listeners: + - portMapping: + port: 80 + protocol: http + routes: + - name: primary + httpRoute: + match: + prefix: / + action: + weightedTargets: + - virtualNodeRef: + name: my-vn-canary + weight: 0 + - virtualNodeRef: + name: my-vn-stable + weight: 100 + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + name: my-vn-canary +spec: + podSelector: + matchLabels: + app: my-app + rollouts-pod-template-hash: canary-tbd + listeners: + - portMapping: + port: 80 + protocol: http + serviceDiscovery: + dns: + hostname: my-svc-canary.appmesh-canary-rollout.svc.cluster.local + +--- +apiVersion: appmesh.k8s.aws/v1beta2 +kind: VirtualNode +metadata: + name: my-vn-stable +spec: + podSelector: + matchLabels: + app: my-app + rollouts-pod-template-hash: stable-tbd + listeners: + - portMapping: + port: 80 + protocol: http + serviceDiscovery: + dns: + hostname: my-svc-stable.appmesh-canary-rollout.svc.cluster.local + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: my-rollout +spec: + replicas: 2 + selector: + matchLabels: + app: my-app + template: + metadata: + labels: + app: my-app + spec: + containers: + - name: demo + image: argoproj/rollouts-demo:blue + imagePullPolicy: Always + ports: + - name: http + containerPort: 8080 + strategy: + canary: + canaryService: my-svc-canary + stableService: my-svc-stable + trafficRouting: + appMesh: + virtualService: + name: my-svc + virtualNodeGroup: + canaryVirtualNodeRef: + name: my-vn-canary + stableVirtualNodeRef: + name: my-vn-stable + steps: + - setWeight: 50 + - pause: {} diff --git a/test/e2e/appmesh_test.go b/test/e2e/appmesh_test.go new file mode 100644 index 0000000000..c2ab47f290 --- /dev/null +++ b/test/e2e/appmesh_test.go @@ -0,0 +1,97 @@ +// +build e2e + +package e2e + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/argoproj/argo-rollouts/test/fixtures" + "github.com/stretchr/testify/suite" + "github.com/tj/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +type AppMeshSuite struct { + fixtures.E2ESuite +} + +func TestAppMeshSuite(t *testing.T) { + suite.Run(t, new(AppMeshSuite)) +} + +func (s *AppMeshSuite) SetupSuite() { + s.E2ESuite.SetupSuite() + if !s.AppMeshEnabled { + s.T().SkipNow() + } +} + +func (s *AppMeshSuite) TestAppMeshCanaryRollout() { + s.Given(). + RolloutObjects(`@appmesh/appmesh-canary-rollout.yaml`). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + //Before rollout canary should be 0 weight + Assert(func(t *fixtures.Then) { + uVr := t.GetAppMeshVirtualRouter() + canaryWeight := int64(0) + s.assertWeightedTargets(uVr, canaryWeight) + }). + When(). + UpdateSpec(). 
+ WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + //During deployment canary should increment to stepWeight + Assert(func(t *fixtures.Then) { + uVr := t.GetAppMeshVirtualRouter() + canaryWeight := int64(*(t.Rollout().Spec.Strategy.Canary.Steps[0].SetWeight)) + s.assertWeightedTargets(uVr, canaryWeight) + }). + When(). + PromoteRollout(). + WaitForRolloutStatus("Healthy") +} + +func (s *AppMeshSuite) assertWeightedTargets(uVr *unstructured.Unstructured, canaryWeight int64) { + wtMap := s.getWeightedTargets(uVr) + for routeName, wt := range wtMap { + assert.Equal(s.T(), canaryWeight, wt.canaryWeight, fmt.Sprintf("Route %s has wrong weight for canary", routeName)) + assert.Equal(s.T(), 100-canaryWeight, wt.stableWeight, fmt.Sprintf("Route %s has wrong weight for stable", routeName)) + } +} + +func (s *AppMeshSuite) getWeightedTargets(uVr *unstructured.Unstructured) map[string]weightedTargets { + result := make(map[string]weightedTargets) + routesI, _, _ := unstructured.NestedSlice(uVr.Object, "spec", "routes") + for _, rI := range routesI { + route, _ := rI.(map[string]interface{}) + routeName, _ := route["name"].(string) + wtsI, _, _ := unstructured.NestedSlice(route, "httpRoute", "action", "weightedTargets") + wtStruct := weightedTargets{} + for _, wtI := range wtsI { + wt, _ := wtI.(map[string]interface{}) + vnodeName, _, _ := unstructured.NestedString(wt, "virtualNodeRef", "name") + weight, _, _ := unstructured.NestedInt64(wt, "weight") + fmt.Printf("Found wt %+v with vnodeName (%s), weight (%d)", wt, vnodeName, weight) + if strings.Contains(vnodeName, "canary") { + wtStruct.canaryWeight = weight + } else { + wtStruct.stableWeight = weight + } + } + result[routeName] = wtStruct + } + return result +} + +type weightedTargets struct { + canaryWeight int64 + stableWeight int64 +} diff --git a/test/e2e/aws_test.go b/test/e2e/aws_test.go index f367da7e7c..a2a4434781 100644 --- a/test/e2e/aws_test.go +++ b/test/e2e/aws_test.go @@ -1,8 +1,10 @@ +//go:build e2e // +build e2e package e2e import ( + "encoding/json" "fmt" "os" "testing" @@ -10,8 +12,10 @@ import ( "github.com/stretchr/testify/suite" "github.com/tj/assert" + "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/test/fixtures" + ingress2 "github.com/argoproj/argo-rollouts/utils/ingress" ) type AWSSuite struct { @@ -22,12 +26,9 @@ func TestAWSSuite(t *testing.T) { suite.Run(t, new(AWSSuite)) } -const actionTemplate = `{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d}]}}` - const actionTemplateWithExperiment = `{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d}]}}` const actionTemplateWithExperiments = `{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d},{"ServiceName":"%s","ServicePort":"%d","Weight":%d}]}}` - // TestALBUpdate is a simple integration test which verifies the controller can work in a real AWS // environment. It is intended to be run with the `--aws-verify-target-group` controller flag. 
Success of // this test against a controller using that flag, indicates that the controller was able to perform @@ -56,6 +57,55 @@ func (s *AWSSuite) TestALBBlueGreenUpdate() { WaitForRolloutStatus("Healthy") } +func (s *AWSSuite) TestALBPingPongUpdate() { + s.Given(). + RolloutObjects("@functional/alb-pingpong-rollout.yaml"). + When().ApplyManifests().WaitForRolloutStatus("Healthy"). + Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)). + // Update 1. Test the weight switch from ping => pong + When().UpdateSpec(). + WaitForRolloutCanaryStepIndex(1).Sleep(1 * time.Second).Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 75, 25)). + When().PromoteRollout(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 0, 100)). + // Update 2. Test the weight switch from pong => ping + When().UpdateSpec(). + WaitForRolloutCanaryStepIndex(1).Sleep(1 * time.Second).Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 25, 75)). + When().PromoteRollout(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)) +} + +func assertWeights(s *AWSSuite, groupA, groupB string, weightA, weightB int64) func(t *fixtures.Then) { + return func(t *fixtures.Then) { + ingress := t.GetALBIngress() + action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] + assert.True(s.T(), ok) + + var albAction ingress2.ALBAction + if err := json.Unmarshal([]byte(action), &albAction); err != nil { + panic(err) + } + for _, targetGroup := range albAction.ForwardConfig.TargetGroups { + switch targetGroup.ServiceName { + case groupA: + assert.True(s.T(), *targetGroup.Weight == weightA, fmt.Sprintf("Weight doesn't match: %d and %d", *targetGroup.Weight, weightA)) + case groupB: + assert.True(s.T(), *targetGroup.Weight == weightB, fmt.Sprintf("Weight doesn't match: %d and %d", *targetGroup.Weight, weightB)) + default: + assert.True(s.T(), false, "Service is not expected in the target group: "+targetGroup.ServiceName) + } + } + } +} + func (s *AWSSuite) TestALBExperimentStep() { s.Given(). RolloutObjects("@alb/rollout-alb-experiment.yaml"). @@ -63,24 +113,16 @@ func (s *AWSSuite) TestALBExperimentStep() { ApplyManifests(). WaitForRolloutStatus("Healthy"). Then(). - Assert(func(t *fixtures.Then) { - ingress := t.GetALBIngress() - action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] - assert.True(s.T(), ok) - - port := 80 - expectedAction := fmt.Sprintf(actionTemplate, "alb-rollout-canary", port, 0, "alb-rollout-stable", port, 100) - assert.Equal(s.T(), expectedAction, action) - }). + Assert(assertWeights(s, "alb-rollout-canary", "alb-rollout-stable", 0, 100)). ExpectExperimentCount(0). When(). UpdateSpec(). WaitForRolloutCanaryStepIndex(1). - Sleep(10*time.Second). + Sleep(10 * time.Second). Then(). Assert(func(t *fixtures.Then) { ingress := t.GetALBIngress() - action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] + action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] assert.True(s.T(), ok) ex := t.GetRolloutExperiments().Items[0] @@ -93,17 +135,9 @@ func (s *AWSSuite) TestALBExperimentStep() { When(). PromoteRollout(). WaitForRolloutStatus("Healthy"). - Sleep(1*time.Second). // stable is currently set first, and then changes made to VirtualServices/DestinationRules + Sleep(1 * time.Second). 
// stable is currently set first, and then changes made to VirtualServices/DestinationRules Then(). - Assert(func(t *fixtures.Then) { - ingress := t.GetALBIngress() - action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] - assert.True(s.T(), ok) - - port := 80 - expectedAction := fmt.Sprintf(actionTemplate, "alb-rollout-canary", port, 0, "alb-rollout-stable", port, 100) - assert.Equal(s.T(), expectedAction, action) - }) + Assert(assertWeights(s, "alb-rollout-canary", "alb-rollout-stable", 0, 100)) } func (s *AWSSuite) TestALBExperimentStepNoSetWeight() { @@ -113,23 +147,15 @@ func (s *AWSSuite) TestALBExperimentStepNoSetWeight() { ApplyManifests(). WaitForRolloutStatus("Healthy"). Then(). - Assert(func(t *fixtures.Then) { - ingress := t.GetALBIngress() - action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] - assert.True(s.T(), ok) - - port := 80 - expectedAction := fmt.Sprintf(actionTemplate, "alb-rollout-canary", port, 0, "alb-rollout-stable", port, 100) - assert.Equal(s.T(), expectedAction, action) - }). + Assert(assertWeights(s, "alb-rollout-canary", "alb-rollout-stable", 0, 100)). ExpectExperimentCount(0). When(). UpdateSpec(). - Sleep(10*time.Second). + Sleep(10 * time.Second). Then(). Assert(func(t *fixtures.Then) { ingress := t.GetALBIngress() - action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] + action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] assert.True(s.T(), ok) experiment := t.GetRolloutExperiments().Items[0] @@ -142,16 +168,78 @@ func (s *AWSSuite) TestALBExperimentStepNoSetWeight() { When(). PromoteRollout(). WaitForRolloutStatus("Healthy"). - Sleep(1*time.Second). // stable is currently set first, and then changes made to VirtualServices/DestinationRules + Sleep(1 * time.Second). // stable is currently set first, and then changes made to VirtualServices/DestinationRules Then(). - Assert(func(t *fixtures.Then) { - ingress := t.GetALBIngress() - action, ok := ingress.Annotations["alb.ingress.kubernetes.io/actions.alb-rollout-root"] - assert.True(s.T(), ok) + Assert(assertWeights(s, "alb-rollout-canary", "alb-rollout-stable", 0, 100)) +} - port := 80 - expectedAction := fmt.Sprintf(actionTemplate, "alb-rollout-canary", port, 0, "alb-rollout-stable", port, 100) - assert.Equal(s.T(), expectedAction, action) +func (s *AWSSuite) TestAlbHeaderRoute() { + s.Given(). + RolloutObjects("@header-routing/alb-header-route.yaml"). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Then(). + Assert(func(t *fixtures.Then) { + assertAlbActionDoesNotExist(t, s, "header-route") + assertAlbActionServiceWeight(t, s, "action1", "canary-service", 0) + assertAlbActionServiceWeight(t, s, "action1", "stable-service", 100) + }). + When(). + UpdateSpec(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + assertAlbActionDoesNotExist(t, s, "header-route") + assertAlbActionServiceWeight(t, s, "action1", "canary-service", 20) + assertAlbActionServiceWeight(t, s, "action1", "stable-service", 80) + }). + When(). + PromoteRollout(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + assertAlbActionServiceWeight(t, s, "header-route", "canary-service", 100) + assertAlbActionServiceWeight(t, s, "action1", "canary-service", 20) + assertAlbActionServiceWeight(t, s, "action1", "stable-service", 80) + }). + When(). + PromoteRollout(). 
+ WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + assertAlbActionDoesNotExist(t, s, "header-route") }) } +func assertAlbActionServiceWeight(t *fixtures.Then, s *AWSSuite, actionName, serviceName string, expectedWeight int64) { + ingress := t.GetALBIngress() + key := "alb.ingress.kubernetes.io/actions." + actionName + actionStr, ok := ingress.Annotations[key] + assert.True(s.T(), ok, "Annotation for action was not found: %s", key) + + var albAction ingress2.ALBAction + err := json.Unmarshal([]byte(actionStr), &albAction) + if err != nil { + panic(err) + } + + found := false + for _, group := range albAction.ForwardConfig.TargetGroups { + if group.ServiceName == serviceName { + assert.Equal(s.T(), pointer.Int64(expectedWeight), group.Weight) + found = true + } + } + assert.True(s.T(), found, "Service %s was not found", serviceName) +} + +func assertAlbActionDoesNotExist(t *fixtures.Then, s *AWSSuite, actionName string) { + ingress := t.GetALBIngress() + key := "alb.ingress.kubernetes.io/actions." + actionName + _, ok := ingress.Annotations[key] + assert.False(s.T(), ok, "Annotation for action should not exist: %s", key) +} diff --git a/test/e2e/bluegreen_test.go b/test/e2e/bluegreen_test.go index 0d1e38bf5d..5ca433c3c7 100644 --- a/test/e2e/bluegreen_test.go +++ b/test/e2e/bluegreen_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e @@ -111,6 +112,9 @@ spec: requests: memory: 16Mi cpu: 1m + volumes: + - name: cache-volume + emptyDir: {} `). When(). ApplyManifests(). diff --git a/test/e2e/canary_test.go b/test/e2e/canary_test.go index dd6a9c1c75..8732db50da 100644 --- a/test/e2e/canary_test.go +++ b/test/e2e/canary_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e diff --git a/test/e2e/crds/istio.yaml b/test/e2e/crds/istio.yaml index 7491f5b3d5..c2999ea163 100644 --- a/test/e2e/crds/istio.yaml +++ b/test/e2e/crds/istio.yaml @@ -1,5 +1,102 @@ # DO NOT EDIT - Generated by Cue OpenAPI generator based on Istio APIs. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: wasmplugins.extensions.istio.io +spec: + group: extensions.istio.io + names: + categories: + - istio-io + - extensions-istio-io + kind: WasmPlugin + listKind: WasmPluginList + plural: wasmplugins + singular: wasmplugin + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Extend the functionality provided by the Istio proxy through + WebAssembly filters. See more details at: https://istio.io/docs/reference/config/proxy_extensions/wasm-plugin.html' + properties: + imagePullPolicy: + description: The pull behaviour to be applied when fetching an OCI + image. 
+ enum: + - UNSPECIFIED_POLICY + - IfNotPresent + - Always + type: string + imagePullSecret: + description: Credentials to use for OCI image pulling. + type: string + phase: + description: Determines where in the filter chain this `WasmPlugin` + is to be injected. + enum: + - UNSPECIFIED_PHASE + - AUTHN + - AUTHZ + - STATS + type: string + pluginConfig: + description: The configuration that will be passed on to the plugin. + type: object + x-kubernetes-preserve-unknown-fields: true + pluginName: + type: string + priority: + description: Determines ordering of `WasmPlugins` in the same `phase`. + nullable: true + type: integer + selector: + properties: + matchLabels: + additionalProperties: + type: string + type: object + type: object + sha256: + description: SHA256 checksum that will be used to verify Wasm module + or OCI container. + type: string + url: + description: URL of a Wasm module or OCI container. + type: string + verificationKey: + type: string + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + +--- +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: @@ -11,19 +108,6 @@ metadata: release: istio name: destinationrules.networking.istio.io spec: - additionalPrinterColumns: - - JSONPath: .spec.host - description: The name of a service from the service registry - name: Host - type: string - - JSONPath: .metadata.creationTimestamp - description: 'CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - name: Age - type: date group: networking.istio.io names: categories: @@ -35,376 +119,122 @@ spec: shortNames: - dr singular: destinationrule - preserveUnknownFields: false scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration affecting load balancing, outlier detection, - etc. See more details at: https://istio.io/docs/reference/config/networking/destination-rule.html' - properties: - exportTo: - description: A list of namespaces to which this destination rule is - exported. - items: - format: string + versions: + - additionalPrinterColumns: + - description: The name of a service from the service registry + jsonPath: .spec.host + name: Host + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting load balancing, outlier detection, + etc. See more details at: https://istio.io/docs/reference/config/networking/destination-rule.html' + properties: + exportTo: + description: A list of namespaces to which this destination rule is + exported. 
+ items: + type: string + type: array + host: + description: The name of a service from the service registry. type: string - type: array - host: - description: The name of a service from the service registry. - format: string - type: string - subsets: - items: - properties: - labels: - additionalProperties: - format: string + subsets: + items: + properties: + labels: + additionalProperties: + type: string + type: object + name: + description: Name of the subset. type: string - type: object - name: - description: Name of the subset. - format: string - type: string - trafficPolicy: - description: Traffic policies that apply to this subset. - properties: - connectionPool: - properties: - http: - description: HTTP connection pool settings. - properties: - h2UpgradePolicy: - description: Specify if http1.1 connection should - be upgraded to http2 for the associated destination. - enum: - - DEFAULT - - DO_NOT_UPGRADE - - UPGRADE - type: string - http1MaxPendingRequests: - description: Maximum number of pending HTTP requests - to a destination. - format: int32 - type: integer - http2MaxRequests: - description: Maximum number of requests to a backend. - format: int32 - type: integer - idleTimeout: - description: The idle timeout for upstream connection - pool connections. - type: string - maxRequestsPerConnection: - description: Maximum number of requests per connection - to a backend. - format: int32 - type: integer - maxRetries: - format: int32 - type: integer - useClientProtocol: - description: If set to true, client protocol will - be preserved while initiating connection to backend. - type: boolean - type: object - tcp: - description: Settings common to both HTTP and TCP upstream - connections. - properties: - connectTimeout: - description: TCP connection timeout. - type: string - maxConnections: - description: Maximum number of HTTP1 /TCP connections - to a destination host. - format: int32 - type: integer - tcpKeepalive: - description: If set then set SO_KEEPALIVE on the socket - to enable TCP Keepalives. - properties: - interval: - description: The time duration between keep-alive - probes. - type: string - probes: - type: integer - time: - type: string - type: object - type: object - type: object - loadBalancer: - description: Settings controlling the load balancer algorithms. - oneOf: - - not: - anyOf: - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash - properties: - consistentHash: - properties: - httpCookie: - description: Hash based on HTTP cookie. - properties: - name: - description: Name of the cookie. - format: string - type: string - path: - description: Path to set for the cookie. - format: string - type: string - ttl: - description: Lifetime of the cookie. - type: string - type: object - httpHeaderName: - description: Hash based on a specific HTTP header. 
- format: string - type: string - httpQueryParameterName: - description: Hash based on a specific HTTP query parameter. - format: string - type: string - minimumRingSize: - type: integer - useSourceIp: - description: Hash based on the source IP address. - type: boolean - type: object - localityLbSetting: - properties: - distribute: - description: 'Optional: only one of distribute or - failover can be set.' - items: - properties: - from: - description: Originating locality, '/' separated, - e.g. - format: string - type: string - to: - additionalProperties: - type: integer - description: Map of upstream localities to traffic - distribution weights. - type: object - type: object - type: array - enabled: - description: enable locality load balancing, this - is DestinationRule-level and will override mesh - wide settings in entirety. - nullable: true - type: boolean - failover: - description: 'Optional: only failover or distribute - can be set.' - items: - properties: - from: - description: Originating region. - format: string - type: string - to: - format: string - type: string - type: object - type: array - type: object - simple: - enum: - - ROUND_ROBIN - - LEAST_CONN - - RANDOM - - PASSTHROUGH - type: string - type: object - outlierDetection: - properties: - baseEjectionTime: - description: Minimum ejection duration. - type: string - consecutive5xxErrors: - description: Number of 5xx errors before a host is ejected - from the connection pool. - nullable: true - type: integer - consecutiveErrors: - format: int32 - type: integer - consecutiveGatewayErrors: - description: Number of gateway errors before a host is - ejected from the connection pool. - nullable: true - type: integer - interval: - description: Time interval between ejection sweep analysis. - type: string - maxEjectionPercent: - format: int32 - type: integer - minHealthPercent: - format: int32 - type: integer - type: object - portLevelSettings: - description: Traffic policies specific to individual ports. - items: + trafficPolicy: + description: Traffic policies that apply to this subset. + properties: + connectionPool: properties: - connectionPool: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests + to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol will + be preserved while initiating connection to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. properties: - http: - description: HTTP connection pool settings. + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the + socket to enable TCP Keepalives. 
properties: - h2UpgradePolicy: - description: Specify if http1.1 connection should - be upgraded to http2 for the associated destination. - enum: - - DEFAULT - - DO_NOT_UPGRADE - - UPGRADE - type: string - http1MaxPendingRequests: - description: Maximum number of pending HTTP - requests to a destination. - format: int32 - type: integer - http2MaxRequests: - description: Maximum number of requests to a - backend. - format: int32 - type: integer - idleTimeout: - description: The idle timeout for upstream connection - pool connections. + interval: + description: The time duration between keep-alive + probes. type: string - maxRequestsPerConnection: - description: Maximum number of requests per - connection to a backend. - format: int32 + probes: type: integer - maxRetries: - format: int32 - type: integer - useClientProtocol: - description: If set to true, client protocol - will be preserved while initiating connection - to backend. - type: boolean - type: object - tcp: - description: Settings common to both HTTP and TCP - upstream connections. - properties: - connectTimeout: - description: TCP connection timeout. + time: type: string - maxConnections: - description: Maximum number of HTTP1 /TCP connections - to a destination host. - format: int32 - type: integer - tcpKeepalive: - description: If set then set SO_KEEPALIVE on - the socket to enable TCP Keepalives. - properties: - interval: - description: The time duration between keep-alive - probes. - type: string - probes: - type: integer - time: - type: string - type: object type: object type: object - loadBalancer: - description: Settings controlling the load balancer - algorithms. - oneOf: - - not: - anyOf: - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + oneOf: + - not: + anyOf: - required: - simple - properties: @@ -430,510 +260,267 @@ spec: - httpQueryParameterName required: - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + properties: + consistentHash: properties: - consistentHash: + httpCookie: + description: Hash based on HTTP cookie. properties: - httpCookie: - description: Hash based on HTTP cookie. - properties: - name: - description: Name of the cookie. - format: string - type: string - path: - description: Path to set for the cookie. - format: string - type: string - ttl: - description: Lifetime of the cookie. - type: string - type: object - httpHeaderName: - description: Hash based on a specific HTTP header. - format: string + name: + description: Name of the cookie. type: string - httpQueryParameterName: - description: Hash based on a specific HTTP query - parameter. - format: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. 
type: string - minimumRingSize: - type: integer - useSourceIp: - description: Hash based on the source IP address. - type: boolean - type: object - localityLbSetting: - properties: - distribute: - description: 'Optional: only one of distribute - or failover can be set.' - items: - properties: - from: - description: Originating locality, '/' - separated, e.g. - format: string - type: string - to: - additionalProperties: - type: integer - description: Map of upstream localities - to traffic distribution weights. - type: object - type: object - type: array - enabled: - description: enable locality load balancing, - this is DestinationRule-level and will override - mesh wide settings in entirety. - nullable: true - type: boolean - failover: - description: 'Optional: only failover or distribute - can be set.' - items: - properties: - from: - description: Originating region. - format: string - type: string - to: - format: string - type: string - type: object - type: array type: object - simple: - enum: - - ROUND_ROBIN - - LEAST_CONN - - RANDOM - - PASSTHROUGH + httpHeaderName: + description: Hash based on a specific HTTP header. type: string - type: object - outlierDetection: - properties: - baseEjectionTime: - description: Minimum ejection duration. - type: string - consecutive5xxErrors: - description: Number of 5xx errors before a host - is ejected from the connection pool. - nullable: true - type: integer - consecutiveErrors: - format: int32 - type: integer - consecutiveGatewayErrors: - description: Number of gateway errors before a host - is ejected from the connection pool. - nullable: true - type: integer - interval: - description: Time interval between ejection sweep - analysis. + httpQueryParameterName: + description: Hash based on a specific HTTP query + parameter. type: string - maxEjectionPercent: - format: int32 - type: integer - minHealthPercent: - format: int32 + minimumRingSize: type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean type: object - port: + localityLbSetting: properties: - number: - type: integer - type: object - tls: - description: TLS related settings for connections to - the upstream service. - properties: - caCertificates: - format: string - type: string - clientCertificate: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - credentialName: - format: string - type: string - mode: - enum: - - DISABLE - - SIMPLE - - MUTUAL - - ISTIO_MUTUAL - type: string - privateKey: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - sni: - description: SNI string to present to the server - during TLS handshake. - format: string - type: string - subjectAltNames: + distribute: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' separated, + e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities to + traffic distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, this + is DestinationRule-level and will override mesh + wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. 
+ type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered list + of labels used to sort endpoints to do priority + based load balancing. items: - format: string type: string type: array type: object - type: object - type: array - tls: - description: TLS related settings for connections to the upstream - service. - properties: - caCertificates: - format: string - type: string - clientCertificate: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - credentialName: - format: string - type: string - mode: - enum: - - DISABLE - - SIMPLE - - MUTUAL - - ISTIO_MUTUAL - type: string - privateKey: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - sni: - description: SNI string to present to the server during - TLS handshake. - format: string - type: string - subjectAltNames: - items: - format: string - type: string - type: array - type: object - type: object - type: object - type: array - trafficPolicy: - properties: - connectionPool: - properties: - http: - description: HTTP connection pool settings. - properties: - h2UpgradePolicy: - description: Specify if http1.1 connection should be upgraded - to http2 for the associated destination. - enum: - - DEFAULT - - DO_NOT_UPGRADE - - UPGRADE - type: string - http1MaxPendingRequests: - description: Maximum number of pending HTTP requests to - a destination. - format: int32 - type: integer - http2MaxRequests: - description: Maximum number of requests to a backend. - format: int32 - type: integer - idleTimeout: - description: The idle timeout for upstream connection pool - connections. - type: string - maxRequestsPerConnection: - description: Maximum number of requests per connection to - a backend. - format: int32 - type: integer - maxRetries: - format: int32 - type: integer - useClientProtocol: - description: If set to true, client protocol will be preserved - while initiating connection to backend. - type: boolean - type: object - tcp: - description: Settings common to both HTTP and TCP upstream connections. - properties: - connectTimeout: - description: TCP connection timeout. - type: string - maxConnections: - description: Maximum number of HTTP1 /TCP connections to - a destination host. - format: int32 - type: integer - tcpKeepalive: - description: If set then set SO_KEEPALIVE on the socket - to enable TCP Keepalives. - properties: - interval: - description: The time duration between keep-alive probes. - type: string - probes: - type: integer - time: + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH type: string type: object - type: object - type: object - loadBalancer: - description: Settings controlling the load balancer algorithms. 
- oneOf: - - not: - anyOf: - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash - properties: - consistentHash: - properties: - httpCookie: - description: Hash based on HTTP cookie. + outlierDetection: properties: - name: - description: Name of the cookie. - format: string - type: string - path: - description: Path to set for the cookie. - format: string + baseEjectionTime: + description: Minimum ejection duration. type: string - ttl: - description: Lifetime of the cookie. + consecutive5xxErrors: + description: Number of 5xx errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a host + is ejected from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep analysis. type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish local + origin failures from external errors. + type: boolean type: object - httpHeaderName: - description: Hash based on a specific HTTP header. - format: string - type: string - httpQueryParameterName: - description: Hash based on a specific HTTP query parameter. - format: string - type: string - minimumRingSize: - type: integer - useSourceIp: - description: Hash based on the source IP address. - type: boolean - type: object - localityLbSetting: - properties: - distribute: - description: 'Optional: only one of distribute or failover - can be set.' - items: - properties: - from: - description: Originating locality, '/' separated, - e.g. - format: string - type: string - to: - additionalProperties: - type: integer - description: Map of upstream localities to traffic - distribution weights. - type: object - type: object - type: array - enabled: - description: enable locality load balancing, this is DestinationRule-level - and will override mesh wide settings in entirety. - nullable: true - type: boolean - failover: - description: 'Optional: only failover or distribute can - be set.' + portLevelSettings: + description: Traffic policies specific to individual ports. items: properties: - from: - description: Originating region. - format: string - type: string - to: - format: string - type: string - type: object - type: array - type: object - simple: - enum: - - ROUND_ROBIN - - LEAST_CONN - - RANDOM - - PASSTHROUGH - type: string - type: object - outlierDetection: - properties: - baseEjectionTime: - description: Minimum ejection duration. 
- type: string - consecutive5xxErrors: - description: Number of 5xx errors before a host is ejected from - the connection pool. - nullable: true - type: integer - consecutiveErrors: - format: int32 - type: integer - consecutiveGatewayErrors: - description: Number of gateway errors before a host is ejected - from the connection pool. - nullable: true - type: integer - interval: - description: Time interval between ejection sweep analysis. - type: string - maxEjectionPercent: - format: int32 - type: integer - minHealthPercent: - format: int32 - type: integer - type: object - portLevelSettings: - description: Traffic policies specific to individual ports. - items: - properties: - connectionPool: - properties: - http: - description: HTTP connection pool settings. - properties: - h2UpgradePolicy: - description: Specify if http1.1 connection should - be upgraded to http2 for the associated destination. - enum: - - DEFAULT - - DO_NOT_UPGRADE - - UPGRADE - type: string - http1MaxPendingRequests: - description: Maximum number of pending HTTP requests - to a destination. - format: int32 - type: integer - http2MaxRequests: - description: Maximum number of requests to a backend. - format: int32 - type: integer - idleTimeout: - description: The idle timeout for upstream connection - pool connections. - type: string - maxRequestsPerConnection: - description: Maximum number of requests per connection - to a backend. - format: int32 - type: integer - maxRetries: - format: int32 - type: integer - useClientProtocol: - description: If set to true, client protocol will - be preserved while initiating connection to backend. - type: boolean - type: object - tcp: - description: Settings common to both HTTP and TCP upstream - connections. - properties: - connectTimeout: - description: TCP connection timeout. - type: string - maxConnections: - description: Maximum number of HTTP1 /TCP connections - to a destination host. - format: int32 - type: integer - tcpKeepalive: - description: If set then set SO_KEEPALIVE on the socket - to enable TCP Keepalives. + connectionPool: properties: - interval: - description: The time duration between keep-alive - probes. - type: string - probes: - type: integer - time: - type: string + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection + should be upgraded to http2 for the associated + destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP + requests to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to + a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream + connection pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per + connection to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol + will be preserved while initiating connection + to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and + TCP upstream connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP + connections to a destination host. 
+ format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE + on the socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between + keep-alive probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object type: object - type: object - type: object - loadBalancer: - description: Settings controlling the load balancer algorithms. - oneOf: - - not: - anyOf: - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: + loadBalancer: + description: Settings controlling the load balancer + algorithms. + oneOf: + - not: + anyOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName - required: - httpHeaderName - required: @@ -942,976 +529,2957 @@ spec: - useSourceIp - required: - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash - - required: - - simple - - properties: - consistentHash: - oneOf: - - not: - anyOf: - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - - required: - - httpHeaderName - - required: - - httpCookie - - required: - - useSourceIp - - required: - - httpQueryParameterName - required: - - consistentHash - properties: - consistentHash: - properties: - httpCookie: - description: Hash based on HTTP cookie. + required: + - consistentHash properties: - name: - description: Name of the cookie. - format: string + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP + header. + type: string + httpQueryParameterName: + description: Hash based on a specific HTTP + query parameter. + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + localityLbSetting: + properties: + distribute: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' + separated, e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities + to traffic distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, + this is DestinationRule-level and will override + mesh wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. 
+ type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered + list of labels used to sort endpoints to + do priority based load balancing. + items: + type: string + type: array + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH type: string - path: - description: Path to set for the cookie. - format: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. type: string - ttl: - description: Lifetime of the cookie. + consecutive5xxErrors: + description: Number of 5xx errors before a host + is ejected from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a + host is ejected from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep + analysis. type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish + local origin failures from external errors. + type: boolean type: object - httpHeaderName: - description: Hash based on a specific HTTP header. - format: string - type: string - httpQueryParameterName: - description: Hash based on a specific HTTP query parameter. - format: string - type: string - minimumRingSize: - type: integer - useSourceIp: - description: Hash based on the source IP address. - type: boolean - type: object - localityLbSetting: - properties: - distribute: - description: 'Optional: only one of distribute or - failover can be set.' - items: - properties: - from: - description: Originating locality, '/' separated, - e.g. - format: string - type: string - to: - additionalProperties: - type: integer - description: Map of upstream localities to traffic - distribution weights. - type: object - type: object - type: array - enabled: - description: enable locality load balancing, this - is DestinationRule-level and will override mesh - wide settings in entirety. - nullable: true - type: boolean - failover: - description: 'Optional: only failover or distribute - can be set.' - items: - properties: - from: - description: Originating region. - format: string - type: string - to: - format: string + port: + properties: + number: + type: integer + type: object + tls: + description: TLS related settings for connections + to the upstream service. + properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server + during TLS handshake. + type: string + subjectAltNames: + items: type: string - type: object - type: array + type: array + type: object type: object - simple: - enum: - - ROUND_ROBIN - - LEAST_CONN - - RANDOM - - PASSTHROUGH - type: string - type: object - outlierDetection: + type: array + tls: + description: TLS related settings for connections to the + upstream service. 
+ properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + type: string + subjectAltNames: + items: + type: string + type: array + type: object + type: object + type: object + type: array + trafficPolicy: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. properties: - baseEjectionTime: - description: Minimum ejection duration. + h2UpgradePolicy: + description: Specify if http1.1 connection should be upgraded + to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE type: string - consecutive5xxErrors: - description: Number of 5xx errors before a host is ejected - from the connection pool. - nullable: true - type: integer - consecutiveErrors: + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests to + a destination. format: int32 type: integer - consecutiveGatewayErrors: - description: Number of gateway errors before a host is - ejected from the connection pool. - nullable: true + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 type: integer - interval: - description: Time interval between ejection sweep analysis. + idleTimeout: + description: The idle timeout for upstream connection + pool connections. type: string - maxEjectionPercent: + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. format: int32 type: integer - minHealthPercent: + maxRetries: format: int32 type: integer + useClientProtocol: + description: If set to true, client protocol will be preserved + while initiating connection to backend. + type: boolean type: object - port: - properties: - number: - type: integer - type: object - tls: - description: TLS related settings for connections to the upstream - service. + tcp: + description: Settings common to both HTTP and TCP upstream + connections. properties: - caCertificates: - format: string + connectTimeout: + description: TCP connection timeout. type: string - clientCertificate: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - credentialName: - format: string - type: string - mode: - enum: - - DISABLE - - SIMPLE - - MUTUAL - - ISTIO_MUTUAL - type: string - privateKey: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - sni: - description: SNI string to present to the server during - TLS handshake. - format: string - type: string - subjectAltNames: - items: - format: string - type: string - type: array + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the socket + to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object type: object type: object - type: array - tls: - description: TLS related settings for connections to the upstream - service. 
- properties: - caCertificates: - format: string - type: string - clientCertificate: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - credentialName: - format: string - type: string - mode: - enum: - - DISABLE - - SIMPLE - - MUTUAL - - ISTIO_MUTUAL - type: string - privateKey: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - sni: - description: SNI string to present to the server during TLS - handshake. - format: string - type: string - subjectAltNames: - items: - format: string - type: string - type: array - type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - "helm.sh/resource-policy": keep - labels: - app: istio-pilot - chart: istio - heritage: Tiller - release: istio - name: envoyfilters.networking.istio.io -spec: - group: networking.istio.io - names: - categories: - - istio-io - - networking-istio-io - kind: EnvoyFilter - listKind: EnvoyFilterList - plural: envoyfilters - singular: envoyfilter - preserveUnknownFields: true - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Customizing Envoy configuration generated by Istio. See more - details at: https://istio.io/docs/reference/config/networking/envoy-filter.html' - properties: - configPatches: - description: One or more patches with match conditions. - items: - properties: - applyTo: - enum: - - INVALID - - LISTENER - - FILTER_CHAIN - - NETWORK_FILTER - - HTTP_FILTER - - ROUTE_CONFIGURATION - - VIRTUAL_HOST - - HTTP_ROUTE - - CLUSTER - - EXTENSION_CONFIG - type: string - match: - description: Match on listener/route configuration/cluster. + loadBalancer: + description: Settings controlling the load balancer algorithms. oneOf: - not: anyOf: - required: - - listener - - required: - - routeConfiguration - - required: - - cluster - - required: - - listener - - required: - - routeConfiguration + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash - required: - - cluster + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash properties: - cluster: - description: Match on envoy cluster attributes. - properties: - name: - description: The exact name of the cluster to match. - format: string - type: string - portNumber: - description: The service port for which this cluster was - generated. - type: integer - service: - description: The fully qualified service name for this - cluster. - format: string - type: string - subset: - description: The subset associated with the service. - format: string - type: string - type: object - context: - description: The specific config generation context to match - on. 
- enum: - - ANY - - SIDECAR_INBOUND - - SIDECAR_OUTBOUND - - GATEWAY - type: string - listener: - description: Match on envoy listener attributes. + consistentHash: properties: - filterChain: - description: Match a specific filter chain in a listener. + httpCookie: + description: Hash based on HTTP cookie. properties: - applicationProtocols: - description: Applies only to sidecars. - format: string - type: string - destinationPort: - description: The destination_port value used by a - filter chain's match condition. - type: integer - filter: - description: The name of a specific filter to apply - the patch to. - properties: - name: - description: The filter name to match on. - format: string - type: string - subFilter: - properties: - name: - description: The filter name to match on. - format: string - type: string - type: object - type: object name: - description: The name assigned to the filter chain. - format: string + description: Name of the cookie. type: string - sni: - description: The SNI value used by a filter chain's - match condition. - format: string + path: + description: Path to set for the cookie. type: string - transportProtocol: - description: Applies only to `SIDECAR_INBOUND` context. - format: string + ttl: + description: Lifetime of the cookie. type: string type: object - name: - description: Match a specific listener by its name. - format: string + httpHeaderName: + description: Hash based on a specific HTTP header. type: string - portName: - format: string + httpQueryParameterName: + description: Hash based on a specific HTTP query parameter. type: string - portNumber: + minimumRingSize: type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean type: object - proxy: - description: Match on properties associated with a proxy. + localityLbSetting: properties: - metadata: - additionalProperties: - format: string + distribute: + description: 'Optional: only one of distribute, failover + or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' separated, + e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities to traffic + distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, this is DestinationRule-level + and will override mesh wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, failover + or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. + type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered list of labels + used to sort endpoints to do priority based load balancing. + items: type: string - type: object - proxyVersion: - format: string - type: string - type: object - routeConfiguration: - description: Match on envoy HTTP route configuration attributes. - properties: - gateway: - format: string - type: string - name: - description: Route configuration name to match on. - format: string - type: string - portName: - description: Applicable only for GATEWAY context. - format: string - type: string - portNumber: - type: integer - vhost: - properties: - name: - format: string - type: string - route: - description: Match a specific route within the virtual - host. - properties: - action: - description: Match a route with specific action - type. 
- enum: - - ANY - - ROUTE - - REDIRECT - - DIRECT_RESPONSE - type: string - name: - format: string - type: string - type: object - type: object + type: array type: object - type: object - patch: - description: The patch to apply along with the operation. - properties: - filterClass: - description: Determines the filter insertion order. - enum: - - UNSPECIFIED - - AUTHN - - AUTHZ - - STATS - type: string - operation: - description: Determines how the patch should be applied. + simple: enum: - - INVALID - - MERGE - - ADD - - REMOVE - - INSERT_BEFORE - - INSERT_AFTER - - INSERT_FIRST - - REPLACE + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH type: string - value: - description: The JSON config of the object being patched. - type: object type: object - type: object - type: array - workloadSelector: - properties: - labels: - additionalProperties: - format: string - type: string - type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - "helm.sh/resource-policy": keep - labels: - app: istio-pilot - chart: istio - heritage: Tiller - release: istio - name: gateways.networking.istio.io -spec: - group: networking.istio.io - names: - categories: - - istio-io - - networking-istio-io - kind: Gateway - listKind: GatewayList - plural: gateways - shortNames: - - gw - singular: gateway - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration affecting edge load balancer. See more details - at: https://istio.io/docs/reference/config/networking/gateway.html' - properties: - selector: - additionalProperties: - format: string - type: string - type: object - servers: - description: A list of server specifications. - items: - properties: - bind: - format: string - type: string - defaultEndpoint: - format: string - type: string - hosts: - description: One or more hosts exposed by this gateway. - items: - format: string - type: string - type: array - name: - description: An optional name of the server, when set must be - unique across all servers. - format: string - type: string - port: + outlierDetection: properties: - name: - description: Label assigned to the port. - format: string + baseEjectionTime: + description: Minimum ejection duration. type: string - number: - description: A valid non-negative integer port number. + consecutive5xxErrors: + description: Number of 5xx errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true type: integer - protocol: - description: The protocol exposed on the port. - format: string + interval: + description: Time interval between ejection sweep analysis. type: string - targetPort: + maxEjectionPercent: + format: int32 type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish local origin + failures from external errors. + type: boolean type: object - tls: - description: Set of TLS related options that govern the server's - behavior. 
- properties: - caCertificates: - description: REQUIRED if mode is `MUTUAL`. - format: string - type: string - cipherSuites: - description: 'Optional: If specified, only support the specified - cipher list.' - items: - format: string - type: string - type: array - credentialName: - format: string + portLevelSettings: + description: Traffic policies specific to individual ports. + items: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests + to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol will + be preserved while initiating connection to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the + socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + oneOf: + - not: + anyOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + type: string + httpQueryParameterName: + description: Hash based on a specific HTTP query + parameter. + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. 
+ type: boolean + type: object + localityLbSetting: + properties: + distribute: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' separated, + e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities to + traffic distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, this + is DestinationRule-level and will override mesh + wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. + type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered list + of labels used to sort endpoints to do priority + based load balancing. + items: + type: string + type: array + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutive5xxErrors: + description: Number of 5xx errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a host + is ejected from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish local + origin failures from external errors. + type: boolean + type: object + port: + properties: + number: + type: integer + type: object + tls: + description: TLS related settings for connections to the + upstream service. + properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + type: string + subjectAltNames: + items: + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: type: string - httpsRedirect: - type: boolean - maxProtocolVersion: - description: 'Optional: Maximum TLS protocol version.' - enum: - - TLS_AUTO - - TLSV1_0 - - TLSV1_1 - - TLSV1_2 - - TLSV1_3 + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. type: string - minProtocolVersion: - description: 'Optional: Minimum TLS protocol version.' 
- enum: - - TLS_AUTO - - TLSV1_0 - - TLSV1_1 - - TLSV1_2 - - TLSV1_3 + credentialName: type: string + insecureSkipVerify: + nullable: true + type: boolean mode: enum: - - PASSTHROUGH + - DISABLE - SIMPLE - MUTUAL - - AUTO_PASSTHROUGH - ISTIO_MUTUAL type: string privateKey: - description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. - format: string + description: REQUIRED if mode is `MUTUAL`. type: string - serverCertificate: - description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. - format: string + sni: + description: SNI string to present to the server during TLS + handshake. type: string subjectAltNames: items: - format: string - type: string - type: array - verifyCertificateHash: - items: - format: string - type: string - type: array - verifyCertificateSpki: - items: - format: string type: string type: array type: object type: object - type: array - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: true - - name: v1beta1 - served: true - storage: false - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - "helm.sh/resource-policy": keep - labels: - app: istio-pilot - chart: istio - heritage: Tiller - release: istio - name: serviceentries.networking.istio.io -spec: - additionalPrinterColumns: - - JSONPath: .spec.hosts - description: The hosts associated with the ServiceEntry - name: Hosts - type: string - - JSONPath: .spec.location - description: Whether the service is external to the mesh or part of the mesh (MESH_EXTERNAL - or MESH_INTERNAL) - name: Location - type: string - - JSONPath: .spec.resolution - description: Service discovery mode for the hosts (NONE, STATIC, or DNS) - name: Resolution - type: string - - JSONPath: .metadata.creationTimestamp - description: 'CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - name: Age - type: date - group: networking.istio.io - names: - categories: - - istio-io - - networking-istio-io - kind: ServiceEntry - listKind: ServiceEntryList - plural: serviceentries - shortNames: - - se - singular: serviceentry - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration affecting service registry. See more details - at: https://istio.io/docs/reference/config/networking/service-entry.html' - properties: - addresses: - description: The virtual IP addresses associated with the service. - items: - format: string + subresources: + status: {} + - additionalPrinterColumns: + - description: The name of a service from the service registry + jsonPath: .spec.host + name: Host + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting load balancing, outlier detection, + etc. See more details at: https://istio.io/docs/reference/config/networking/destination-rule.html' + properties: + exportTo: + description: A list of namespaces to which this destination rule is + exported. + items: + type: string + type: array + host: + description: The name of a service from the service registry. type: string - type: array - endpoints: - description: One or more endpoints associated with the service. - items: - properties: - address: - format: string - type: string - labels: - additionalProperties: - format: string + subsets: + items: + properties: + labels: + additionalProperties: + type: string + type: object + name: + description: Name of the subset. type: string - description: One or more labels associated with the endpoint. - type: object - locality: - description: The locality associated with the endpoint. - format: string - type: string - network: - format: string - type: string - ports: - additionalProperties: - type: integer - description: Set of ports associated with the endpoint. - type: object - serviceAccount: - format: string - type: string - weight: - description: The load balancing weight associated with the endpoint. - type: integer - type: object - type: array - exportTo: - description: A list of namespaces to which this service is exported. - items: - format: string - type: string - type: array - hosts: - description: The hosts associated with the ServiceEntry. - items: - format: string - type: string - type: array - location: - enum: - - MESH_EXTERNAL - - MESH_INTERNAL - type: string - ports: - description: The ports associated with the external service. - items: - properties: - name: - description: Label assigned to the port. - format: string - type: string - number: - description: A valid non-negative integer port number. - type: integer - protocol: - description: The protocol exposed on the port. - format: string - type: string - targetPort: - type: integer - type: object - type: array - resolution: - description: Service discovery mode for the hosts. - enum: - - NONE - - STATIC - - DNS - type: string - subjectAltNames: - items: - format: string - type: string - type: array - workloadSelector: - description: Applicable only for MESH_INTERNAL services. - properties: - labels: - additionalProperties: - format: string - type: string - type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - "helm.sh/resource-policy": keep - labels: - app: istio-pilot - chart: istio - heritage: Tiller - release: istio - name: sidecars.networking.istio.io -spec: - group: networking.istio.io - names: - categories: - - istio-io - - networking-istio-io - kind: Sidecar - listKind: SidecarList - plural: sidecars - singular: sidecar - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration affecting network reachability of a sidecar. 
- See more details at: https://istio.io/docs/reference/config/networking/sidecar.html' - properties: - egress: - items: - properties: - bind: - format: string - type: string - captureMode: - enum: - - DEFAULT - - IPTABLES - - NONE - type: string - hosts: + trafficPolicy: + description: Traffic policies that apply to this subset. + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests + to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol will + be preserved while initiating connection to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the + socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + oneOf: + - not: + anyOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + type: string + httpQueryParameterName: + description: Hash based on a specific HTTP query + parameter. + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. 
+ type: boolean + type: object + localityLbSetting: + properties: + distribute: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' separated, + e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities to + traffic distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, this + is DestinationRule-level and will override mesh + wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. + type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered list + of labels used to sort endpoints to do priority + based load balancing. + items: + type: string + type: array + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutive5xxErrors: + description: Number of 5xx errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a host + is ejected from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish local + origin failures from external errors. + type: boolean + type: object + portLevelSettings: + description: Traffic policies specific to individual ports. + items: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection + should be upgraded to http2 for the associated + destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP + requests to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to + a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream + connection pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per + connection to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol + will be preserved while initiating connection + to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and + TCP upstream connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP + connections to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE + on the socket to enable TCP Keepalives. 
+ properties: + interval: + description: The time duration between + keep-alive probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer + algorithms. + oneOf: + - not: + anyOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP + header. + type: string + httpQueryParameterName: + description: Hash based on a specific HTTP + query parameter. + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + localityLbSetting: + properties: + distribute: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' + separated, e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities + to traffic distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, + this is DestinationRule-level and will override + mesh wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. + type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered + list of labels used to sort endpoints to + do priority based load balancing. + items: + type: string + type: array + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutive5xxErrors: + description: Number of 5xx errors before a host + is ejected from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a + host is ejected from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep + analysis. 
+ type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish + local origin failures from external errors. + type: boolean + type: object + port: + properties: + number: + type: integer + type: object + tls: + description: TLS related settings for connections + to the upstream service. + properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server + during TLS handshake. + type: string + subjectAltNames: + items: + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections to the + upstream service. + properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + type: string + subjectAltNames: + items: + type: string + type: array + type: object + type: object + type: object + type: array + trafficPolicy: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should be upgraded + to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests to + a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol will be preserved + while initiating connection to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the socket + to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. 
+ oneOf: + - not: + anyOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + type: string + httpQueryParameterName: + description: Hash based on a specific HTTP query parameter. + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + localityLbSetting: + properties: + distribute: + description: 'Optional: only one of distribute, failover + or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' separated, + e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities to traffic + distribution weights. + type: object + type: object + type: array + enabled: + description: enable locality load balancing, this is DestinationRule-level + and will override mesh wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, failover + or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. + type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered list of labels + used to sort endpoints to do priority based load balancing. + items: + type: string + type: array + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutive5xxErrors: + description: Number of 5xx errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish local origin + failures from external errors. + type: boolean + type: object + portLevelSettings: + description: Traffic policies specific to individual ports. 
items: - format: string + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests + to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + useClientProtocol: + description: If set to true, client protocol will + be preserved while initiating connection to backend. + type: boolean + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the + socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + oneOf: + - not: + anyOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + - required: + - simple + - properties: + consistentHash: + oneOf: + - not: + anyOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + - required: + - httpQueryParameterName + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + type: string + httpQueryParameterName: + description: Hash based on a specific HTTP query + parameter. + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + localityLbSetting: + properties: + distribute: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating locality, '/' separated, + e.g. + type: string + to: + additionalProperties: + type: integer + description: Map of upstream localities to + traffic distribution weights. 
+ type: object + type: object + type: array + enabled: + description: enable locality load balancing, this + is DestinationRule-level and will override mesh + wide settings in entirety. + nullable: true + type: boolean + failover: + description: 'Optional: only one of distribute, + failover or failoverPriority can be set.' + items: + properties: + from: + description: Originating region. + type: string + to: + type: string + type: object + type: array + failoverPriority: + description: failoverPriority is an ordered list + of labels used to sort endpoints to do priority + based load balancing. + items: + type: string + type: array + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutive5xxErrors: + description: Number of 5xx errors before a host is ejected + from the connection pool. + nullable: true + type: integer + consecutiveErrors: + format: int32 + type: integer + consecutiveGatewayErrors: + description: Number of gateway errors before a host + is ejected from the connection pool. + nullable: true + type: integer + consecutiveLocalOriginFailures: + nullable: true + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + splitExternalLocalOriginErrors: + description: Determines whether to distinguish local + origin failures from external errors. + type: boolean + type: object + port: + properties: + number: + type: integer + type: object + tls: + description: TLS related settings for connections to the + upstream service. + properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + type: string + subjectAltNames: + items: + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + type: string + credentialName: + type: string + insecureSkipVerify: + nullable: true + type: boolean + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + type: string + sni: + description: SNI string to present to the server during TLS + handshake. 
+ type: string + subjectAltNames: + items: + type: string + type: array + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: envoyfilters.networking.istio.io +spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: EnvoyFilter + listKind: EnvoyFilterList + plural: envoyfilters + singular: envoyfilter + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Customizing Envoy configuration generated by Istio. See + more details at: https://istio.io/docs/reference/config/networking/envoy-filter.html' + properties: + configPatches: + description: One or more patches with match conditions. + items: + properties: + applyTo: + enum: + - INVALID + - LISTENER + - FILTER_CHAIN + - NETWORK_FILTER + - HTTP_FILTER + - ROUTE_CONFIGURATION + - VIRTUAL_HOST + - HTTP_ROUTE + - CLUSTER + - EXTENSION_CONFIG + - BOOTSTRAP + type: string + match: + description: Match on listener/route configuration/cluster. + oneOf: + - not: + anyOf: + - required: + - listener + - required: + - routeConfiguration + - required: + - cluster + - required: + - listener + - required: + - routeConfiguration + - required: + - cluster + properties: + cluster: + description: Match on envoy cluster attributes. + properties: + name: + description: The exact name of the cluster to match. + type: string + portNumber: + description: The service port for which this cluster + was generated. + type: integer + service: + description: The fully qualified service name for this + cluster. + type: string + subset: + description: The subset associated with the service. + type: string + type: object + context: + description: The specific config generation context to match + on. + enum: + - ANY + - SIDECAR_INBOUND + - SIDECAR_OUTBOUND + - GATEWAY + type: string + listener: + description: Match on envoy listener attributes. + properties: + filterChain: + description: Match a specific filter chain in a listener. + properties: + applicationProtocols: + description: Applies only to sidecars. + type: string + destinationPort: + description: The destination_port value used by + a filter chain's match condition. + type: integer + filter: + description: The name of a specific filter to apply + the patch to. + properties: + name: + description: The filter name to match on. + type: string + subFilter: + properties: + name: + description: The filter name to match on. + type: string + type: object + type: object + name: + description: The name assigned to the filter chain. + type: string + sni: + description: The SNI value used by a filter chain's + match condition. + type: string + transportProtocol: + description: Applies only to `SIDECAR_INBOUND` context. + type: string + type: object + name: + description: Match a specific listener by its name. + type: string + portName: + type: string + portNumber: + type: integer + type: object + proxy: + description: Match on properties associated with a proxy. 
+ properties: + metadata: + additionalProperties: + type: string + type: object + proxyVersion: + type: string + type: object + routeConfiguration: + description: Match on envoy HTTP route configuration attributes. + properties: + gateway: + type: string + name: + description: Route configuration name to match on. + type: string + portName: + description: Applicable only for GATEWAY context. + type: string + portNumber: + type: integer + vhost: + properties: + name: + type: string + route: + description: Match a specific route within the virtual + host. + properties: + action: + description: Match a route with specific action + type. + enum: + - ANY + - ROUTE + - REDIRECT + - DIRECT_RESPONSE + type: string + name: + type: string + type: object + type: object + type: object + type: object + patch: + description: The patch to apply along with the operation. + properties: + filterClass: + description: Determines the filter insertion order. + enum: + - UNSPECIFIED + - AUTHN + - AUTHZ + - STATS + type: string + operation: + description: Determines how the patch should be applied. + enum: + - INVALID + - MERGE + - ADD + - REMOVE + - INSERT_BEFORE + - INSERT_AFTER + - INSERT_FIRST + - REPLACE + type: string + value: + description: The JSON config of the object being patched. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + type: array + priority: + description: Priority defines the order in which patch sets are applied + within a context. + format: int32 + type: integer + workloadSelector: + properties: + labels: + additionalProperties: + type: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: gateways.networking.istio.io +spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: Gateway + listKind: GatewayList + plural: gateways + shortNames: + - gw + singular: gateway + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting edge load balancer. See more details + at: https://istio.io/docs/reference/config/networking/gateway.html' + properties: + selector: + additionalProperties: + type: string + type: object + servers: + description: A list of server specifications. + items: + properties: + bind: + type: string + defaultEndpoint: + type: string + hosts: + description: One or more hosts exposed by this gateway. + items: + type: string + type: array + name: + description: An optional name of the server, when set must be + unique across all servers. + type: string + port: + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: + type: integer + type: object + tls: + description: Set of TLS related options that govern the server's + behavior. + properties: + caCertificates: + description: REQUIRED if mode is `MUTUAL`. + type: string + cipherSuites: + description: 'Optional: If specified, only support the specified + cipher list.' 
+ items: + type: string + type: array + credentialName: + type: string + httpsRedirect: + type: boolean + maxProtocolVersion: + description: 'Optional: Maximum TLS protocol version.' + enum: + - TLS_AUTO + - TLSV1_0 + - TLSV1_1 + - TLSV1_2 + - TLSV1_3 + type: string + minProtocolVersion: + description: 'Optional: Minimum TLS protocol version.' + enum: + - TLS_AUTO + - TLSV1_0 + - TLSV1_1 + - TLSV1_2 + - TLSV1_3 + type: string + mode: + enum: + - PASSTHROUGH + - SIMPLE + - MUTUAL + - AUTO_PASSTHROUGH + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. + type: string + serverCertificate: + description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. + type: string + subjectAltNames: + items: + type: string + type: array + verifyCertificateHash: + items: + type: string + type: array + verifyCertificateSpki: + items: + type: string + type: array + type: object + type: object + type: array + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting edge load balancer. See more details + at: https://istio.io/docs/reference/config/networking/gateway.html' + properties: + selector: + additionalProperties: + type: string + type: object + servers: + description: A list of server specifications. + items: + properties: + bind: + type: string + defaultEndpoint: + type: string + hosts: + description: One or more hosts exposed by this gateway. + items: + type: string + type: array + name: + description: An optional name of the server, when set must be + unique across all servers. + type: string + port: + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: + type: integer + type: object + tls: + description: Set of TLS related options that govern the server's + behavior. + properties: + caCertificates: + description: REQUIRED if mode is `MUTUAL`. + type: string + cipherSuites: + description: 'Optional: If specified, only support the specified + cipher list.' + items: + type: string + type: array + credentialName: + type: string + httpsRedirect: + type: boolean + maxProtocolVersion: + description: 'Optional: Maximum TLS protocol version.' + enum: + - TLS_AUTO + - TLSV1_0 + - TLSV1_1 + - TLSV1_2 + - TLSV1_3 + type: string + minProtocolVersion: + description: 'Optional: Minimum TLS protocol version.' + enum: + - TLS_AUTO + - TLSV1_0 + - TLSV1_1 + - TLSV1_2 + - TLSV1_3 + type: string + mode: + enum: + - PASSTHROUGH + - SIMPLE + - MUTUAL + - AUTO_PASSTHROUGH + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. + type: string + serverCertificate: + description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. 
+ type: string + subjectAltNames: + items: + type: string + type: array + verifyCertificateHash: + items: + type: string + type: array + verifyCertificateSpki: + items: + type: string + type: array + type: object + type: object + type: array + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: serviceentries.networking.istio.io +spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: ServiceEntry + listKind: ServiceEntryList + plural: serviceentries + shortNames: + - se + singular: serviceentry + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The hosts associated with the ServiceEntry + jsonPath: .spec.hosts + name: Hosts + type: string + - description: Whether the service is external to the mesh or part of the mesh + (MESH_EXTERNAL or MESH_INTERNAL) + jsonPath: .spec.location + name: Location + type: string + - description: Service discovery mode for the hosts (NONE, STATIC, or DNS) + jsonPath: .spec.resolution + name: Resolution + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting service registry. See more details + at: https://istio.io/docs/reference/config/networking/service-entry.html' + properties: + addresses: + description: The virtual IP addresses associated with the service. + items: + type: string + type: array + endpoints: + description: One or more endpoints associated with the service. + items: + properties: + address: + type: string + labels: + additionalProperties: + type: string + description: One or more labels associated with the endpoint. + type: object + locality: + description: The locality associated with the endpoint. + type: string + network: + type: string + ports: + additionalProperties: + type: integer + description: Set of ports associated with the endpoint. + type: object + serviceAccount: + type: string + weight: + description: The load balancing weight associated with the endpoint. + type: integer + type: object + type: array + exportTo: + description: A list of namespaces to which this service is exported. + items: + type: string + type: array + hosts: + description: The hosts associated with the ServiceEntry. + items: + type: string + type: array + location: + enum: + - MESH_EXTERNAL + - MESH_INTERNAL + type: string + ports: + description: The ports associated with the external service. + items: + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. 
+ type: string + targetPort: + type: integer + type: object + type: array + resolution: + description: Service discovery mode for the hosts. + enum: + - NONE + - STATIC + - DNS + - DNS_ROUND_ROBIN + type: string + subjectAltNames: + items: + type: string + type: array + workloadSelector: + description: Applicable only for MESH_INTERNAL services. + properties: + labels: + additionalProperties: + type: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: The hosts associated with the ServiceEntry + jsonPath: .spec.hosts + name: Hosts + type: string + - description: Whether the service is external to the mesh or part of the mesh + (MESH_EXTERNAL or MESH_INTERNAL) + jsonPath: .spec.location + name: Location + type: string + - description: Service discovery mode for the hosts (NONE, STATIC, or DNS) + jsonPath: .spec.resolution + name: Resolution + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting service registry. See more details + at: https://istio.io/docs/reference/config/networking/service-entry.html' + properties: + addresses: + description: The virtual IP addresses associated with the service. + items: + type: string + type: array + endpoints: + description: One or more endpoints associated with the service. + items: + properties: + address: + type: string + labels: + additionalProperties: + type: string + description: One or more labels associated with the endpoint. + type: object + locality: + description: The locality associated with the endpoint. + type: string + network: + type: string + ports: + additionalProperties: + type: integer + description: Set of ports associated with the endpoint. + type: object + serviceAccount: + type: string + weight: + description: The load balancing weight associated with the endpoint. + type: integer + type: object + type: array + exportTo: + description: A list of namespaces to which this service is exported. + items: + type: string + type: array + hosts: + description: The hosts associated with the ServiceEntry. + items: + type: string + type: array + location: + enum: + - MESH_EXTERNAL + - MESH_INTERNAL + type: string + ports: + description: The ports associated with the external service. + items: + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: + type: integer + type: object + type: array + resolution: + description: Service discovery mode for the hosts. + enum: + - NONE + - STATIC + - DNS + - DNS_ROUND_ROBIN + type: string + subjectAltNames: + items: + type: string + type: array + workloadSelector: + description: Applicable only for MESH_INTERNAL services. 
+ properties: + labels: + additionalProperties: + type: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: sidecars.networking.istio.io +spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: Sidecar + listKind: SidecarList + plural: sidecars + singular: sidecar + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting network reachability of a sidecar. + See more details at: https://istio.io/docs/reference/config/networking/sidecar.html' + properties: + egress: + items: + properties: + bind: type: string - type: array - port: - description: The port associated with the listener. + captureMode: + enum: + - DEFAULT + - IPTABLES + - NONE + type: string + hosts: + items: + type: string + type: array + port: + description: The port associated with the listener. + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: + type: integer + type: object + type: object + type: array + ingress: + items: + properties: + bind: + description: The IP to which the listener should be bound. + type: string + captureMode: + enum: + - DEFAULT + - IPTABLES + - NONE + type: string + defaultEndpoint: + type: string + port: + description: The port associated with the listener. + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: + type: integer + type: object + type: object + type: array + outboundTrafficPolicy: + description: Configuration for the outbound traffic policy. + properties: + egressProxy: properties: - name: - description: Label assigned to the port. - format: string + host: + description: The name of a service from the service registry. type: string - number: - description: A valid non-negative integer port number. - type: integer - protocol: - description: The protocol exposed on the port. - format: string + port: + description: Specifies the port on the host that is being + addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. type: string - targetPort: - type: integer type: object - type: object - type: array - ingress: - items: - properties: - bind: - description: The IP to which the listener should be bound. - format: string - type: string - captureMode: + mode: enum: - - DEFAULT - - IPTABLES - - NONE + - REGISTRY_ONLY + - ALLOW_ANY type: string - defaultEndpoint: - format: string - type: string - port: - description: The port associated with the listener. - properties: - name: - description: Label assigned to the port. - format: string - type: string - number: - description: A valid non-negative integer port number. - type: integer - protocol: - description: The protocol exposed on the port. 
- format: string - type: string - targetPort: - type: integer + type: object + workloadSelector: + properties: + labels: + additionalProperties: + type: string type: object type: object - type: array - outboundTrafficPolicy: - description: Configuration for the outbound traffic policy. - properties: - egressProxy: + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting network reachability of a sidecar. + See more details at: https://istio.io/docs/reference/config/networking/sidecar.html' + properties: + egress: + items: properties: - host: - description: The name of a service from the service registry. - format: string + bind: type: string + captureMode: + enum: + - DEFAULT + - IPTABLES + - NONE + type: string + hosts: + items: + type: string + type: array port: - description: Specifies the port on the host that is being addressed. + description: The port associated with the listener. properties: + name: + description: Label assigned to the port. + type: string number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: type: integer type: object - subset: - description: The name of a subset within the service. - format: string + type: object + type: array + ingress: + items: + properties: + bind: + description: The IP to which the listener should be bound. + type: string + captureMode: + enum: + - DEFAULT + - IPTABLES + - NONE + type: string + defaultEndpoint: type: string + port: + description: The port associated with the listener. + properties: + name: + description: Label assigned to the port. + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + type: string + targetPort: + type: integer + type: object type: object - mode: - enum: - - REGISTRY_ONLY - - ALLOW_ANY - type: string - type: object - workloadSelector: - properties: - labels: - additionalProperties: - format: string + type: array + outboundTrafficPolicy: + description: Configuration for the outbound traffic policy. + properties: + egressProxy: + properties: + host: + description: The name of a service from the service registry. + type: string + port: + description: Specifies the port on the host that is being + addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. 
+ type: string + type: object + mode: + enum: + - REGISTRY_ONLY + - ALLOW_ANY type: string - type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 - served: true - storage: true - - name: v1beta1 + type: object + workloadSelector: + properties: + labels: + additionalProperties: + type: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: false + subresources: + status: {} --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: @@ -1923,23 +3491,6 @@ metadata: release: istio name: virtualservices.networking.istio.io spec: - additionalPrinterColumns: - - JSONPath: .spec.gateways - description: The names of gateways and sidecars that should apply these routes - name: Gateways - type: string - - JSONPath: .spec.hosts - description: The destination hosts to which traffic is being sent - name: Hosts - type: string - - JSONPath: .metadata.creationTimestamp - description: 'CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - name: Age - type: date group: networking.istio.io names: categories: @@ -1951,268 +3502,974 @@ spec: shortNames: - vs singular: virtualservice - preserveUnknownFields: false scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration affecting label/content routing, sni routing, - etc. See more details at: https://istio.io/docs/reference/config/networking/virtual-service.html' - properties: - exportTo: - description: A list of namespaces to which this virtual service is exported. - items: - format: string - type: string - type: array - gateways: - description: The names of gateways and sidecars that should apply these - routes. - items: - format: string - type: string - type: array - hosts: - description: The destination hosts to which traffic is being sent. - items: - format: string - type: string - type: array - http: - description: An ordered list of route rules for HTTP traffic. - items: - properties: - corsPolicy: - description: Cross-Origin Resource Sharing policy (CORS). - properties: - allowCredentials: - nullable: true - type: boolean - allowHeaders: - items: - format: string + versions: + - additionalPrinterColumns: + - description: The names of gateways and sidecars that should apply these routes + jsonPath: .spec.gateways + name: Gateways + type: string + - description: The destination hosts to which traffic is being sent + jsonPath: .spec.hosts + name: Hosts + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting label/content routing, sni routing, + etc. See more details at: https://istio.io/docs/reference/config/networking/virtual-service.html' + properties: + exportTo: + description: A list of namespaces to which this virtual service is + exported. + items: + type: string + type: array + gateways: + description: The names of gateways and sidecars that should apply + these routes. + items: + type: string + type: array + hosts: + description: The destination hosts to which traffic is being sent. + items: + type: string + type: array + http: + description: An ordered list of route rules for HTTP traffic. + items: + properties: + corsPolicy: + description: Cross-Origin Resource Sharing policy (CORS). + properties: + allowCredentials: + nullable: true + type: boolean + allowHeaders: + items: + type: string + type: array + allowMethods: + description: List of HTTP methods allowed to access the + resource. + items: + type: string + type: array + allowOrigin: + description: The list of origins that are allowed to perform + CORS requests. + items: + type: string + type: array + allowOrigins: + description: String patterns that match allowed origins. + items: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + type: array + exposeHeaders: + items: + type: string + type: array + maxAge: type: string - type: array - allowMethods: - description: List of HTTP methods allowed to access the resource. - items: - format: string + type: object + delegate: + properties: + name: + description: Name specifies the name of the delegate VirtualService. type: string - type: array - allowOrigin: - description: The list of origins that are allowed to perform - CORS requests. - items: - format: string + namespace: + description: Namespace specifies the namespace where the + delegate VirtualService resides. type: string - type: array - allowOrigins: - description: String patterns that match allowed origins. - items: + type: object + fault: + description: Fault injection policy to apply on HTTP traffic + at the client side. + properties: + abort: + oneOf: + - not: + anyOf: + - required: + - httpStatus + - required: + - grpcStatus + - required: + - http2Error + - required: + - httpStatus + - required: + - grpcStatus + - required: + - http2Error + properties: + grpcStatus: + type: string + http2Error: + type: string + httpStatus: + description: HTTP status code to use to abort the Http + request. + format: int32 + type: integer + percentage: + description: Percentage of requests to be aborted with + the error code provided. + properties: + value: + format: double + type: number + type: object + type: object + delay: oneOf: - not: anyOf: + - required: + - fixedDelay + - required: + - exponentialDelay + - required: + - fixedDelay + - required: + - exponentialDelay + properties: + exponentialDelay: + type: string + fixedDelay: + description: Add a fixed delay before forwarding the + request. 
+ type: string + percent: + description: Percentage of requests on which the delay + will be injected (0-100). + format: int32 + type: integer + percentage: + description: Percentage of requests on which the delay + will be injected. + properties: + value: + format: double + type: number + type: object + type: object + type: object + headers: + properties: + request: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + response: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + type: object + match: + items: + properties: + authority: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + gateways: + description: Names of gateways where the rule should be + applied. + items: + type: string + type: array + headers: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex - required: - exact - required: - prefix - required: - regex - - required: - - exact - - required: - - prefix - - required: - - regex - properties: - exact: - format: string - type: string - prefix: - format: string - type: string - regex: - description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string - type: string - type: object - type: array - exposeHeaders: - items: - format: string - type: string - type: array - maxAge: - type: string - type: object - delegate: - properties: - name: - description: Name specifies the name of the delegate VirtualService. - format: string - type: string - namespace: - description: Namespace specifies the namespace where the delegate - VirtualService resides. - format: string - type: string - type: object - fault: - description: Fault injection policy to apply on HTTP traffic at - the client side. - properties: - abort: - oneOf: - - not: - anyOf: + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + type: object + ignoreUriCase: + description: Flag to specify whether the URI matching + should be case-insensitive. + type: boolean + method: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex - required: - - httpStatus + - exact - required: - - grpcStatus + - prefix - required: - - http2Error - - required: - - httpStatus - - required: - - grpcStatus - - required: - - http2Error - properties: - grpcStatus: - format: string - type: string - http2Error: - format: string + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + name: + description: The name assigned to a match. type: string - httpStatus: - description: HTTP status code to use to abort the Http - request. - format: int32 + port: + description: Specifies the ports on the host that is being + addressed. 
type: integer - percentage: - description: Percentage of requests to be aborted with - the error code provided. + queryParams: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + description: Query parameters for matching. + type: object + scheme: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex properties: - value: - format: double - type: number + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + sourceLabels: + additionalProperties: + type: string + type: object + sourceNamespace: + description: Source namespace constraining the applicability + of a rule to workloads in that namespace. + type: string + uri: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + withoutHeaders: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + description: withoutHeader has the same syntax with the + header, but has opposite meaning. + type: object + type: object + type: array + mirror: + properties: + host: + description: The name of a service from the service registry. + type: string + port: + description: Specifies the port on the host that is being + addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string + type: object + mirror_percent: + description: Percentage of the traffic to be mirrored by the + `mirror` field. + nullable: true + type: integer + mirrorPercent: + description: Percentage of the traffic to be mirrored by the + `mirror` field. + nullable: true + type: integer + mirrorPercentage: + description: Percentage of the traffic to be mirrored by the + `mirror` field. + properties: + value: + format: double + type: number + type: object + name: + description: The name assigned to the route for debugging purposes. + type: string + redirect: + description: A HTTP rule can either redirect or forward (default) + traffic. + oneOf: + - not: + anyOf: + - required: + - port + - required: + - derivePort + - required: + - port + - required: + - derivePort + properties: + authority: + type: string + derivePort: + enum: + - FROM_PROTOCOL_DEFAULT + - FROM_REQUEST_PORT + type: string + port: + description: On a redirect, overwrite the port portion of + the URL with this value. 
+ type: integer + redirectCode: + type: integer + scheme: + description: On a redirect, overwrite the scheme portion + of the URL with this value. + type: string + uri: + type: string + type: object + retries: + description: Retry policy for HTTP requests. + properties: + attempts: + description: Number of retries to be allowed for a given + request. + format: int32 + type: integer + perTryTimeout: + description: Timeout per attempt for a given request, including + the initial call and any retries. + type: string + retryOn: + description: Specifies the conditions under which retry + takes place. + type: string + retryRemoteLocalities: + description: Flag to specify whether the retries should + retry to other localities. + nullable: true + type: boolean + type: object + rewrite: + description: Rewrite HTTP URIs and Authority headers. + properties: + authority: + description: rewrite the Authority/Host header with this + value. + type: string + uri: + type: string + type: object + route: + description: A HTTP rule can either redirect or forward (default) + traffic. + items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string + type: object + headers: + properties: + request: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + response: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object type: object - type: object - delay: - oneOf: - - not: - anyOf: - - required: - - fixedDelay - - required: - - exponentialDelay - - required: - - fixedDelay - - required: - - exponentialDelay - properties: - exponentialDelay: - type: string - fixedDelay: - description: Add a fixed delay before forwarding the request. - type: string - percent: - description: Percentage of requests on which the delay - will be injected (0-100). + weight: format: int32 type: integer - percentage: - description: Percentage of requests on which the delay - will be injected. - properties: - value: - format: double - type: number - type: object type: object - type: object - headers: - properties: - request: + type: array + timeout: + description: Timeout for HTTP requests, default is disabled. + type: string + type: object + type: array + tcp: + description: An ordered list of route rules for opaque TCP traffic. + items: + properties: + match: + items: properties: - add: - additionalProperties: - format: string + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. + items: type: string - type: object - remove: + type: array + gateways: + description: Names of gateways where the rule should be + applied. items: - format: string type: string type: array - set: + port: + description: Specifies the port on the host that is being + addressed. + type: integer + sourceLabels: additionalProperties: - format: string type: string type: object + sourceNamespace: + description: Source namespace constraining the applicability + of a rule to workloads in that namespace. 
+ type: string + sourceSubnet: + description: IPv4 or IPv6 ip address of source with optional + subnet. + type: string type: object - response: + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: properties: - add: - additionalProperties: - format: string - type: string + destination: + properties: + host: + description: The name of a service from the service + registry. + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string type: object - remove: + weight: + format: int32 + type: integer + type: object + type: array + type: object + type: array + tls: + items: + properties: + match: + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should be + applied. + items: + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. + type: integer + sniHosts: + description: SNI (server name indicator) to match on. items: - format: string type: string type: array - set: + sourceLabels: additionalProperties: - format: string type: string type: object + sourceNamespace: + description: Source namespace constraining the applicability + of a rule to workloads in that namespace. + type: string type: object - type: object - match: - items: + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string + type: object + weight: + format: int32 + type: integer + type: object + type: array + type: object + type: array + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: The names of gateways and sidecars that should apply these routes + jsonPath: .spec.gateways + name: Gateways + type: string + - description: The destination hosts to which traffic is being sent + jsonPath: .spec.hosts + name: Hosts + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting label/content routing, sni routing, + etc. See more details at: https://istio.io/docs/reference/config/networking/virtual-service.html' + properties: + exportTo: + description: A list of namespaces to which this virtual service is + exported. 
+ items: + type: string + type: array + gateways: + description: The names of gateways and sidecars that should apply + these routes. + items: + type: string + type: array + hosts: + description: The destination hosts to which traffic is being sent. + items: + type: string + type: array + http: + description: An ordered list of route rules for HTTP traffic. + items: + properties: + corsPolicy: + description: Cross-Origin Resource Sharing policy (CORS). properties: - authority: + allowCredentials: + nullable: true + type: boolean + allowHeaders: + items: + type: string + type: array + allowMethods: + description: List of HTTP methods allowed to access the + resource. + items: + type: string + type: array + allowOrigin: + description: The list of origins that are allowed to perform + CORS requests. + items: + type: string + type: array + allowOrigins: + description: String patterns that match allowed origins. + items: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + type: array + exposeHeaders: + items: + type: string + type: array + maxAge: + type: string + type: object + delegate: + properties: + name: + description: Name specifies the name of the delegate VirtualService. + type: string + namespace: + description: Namespace specifies the namespace where the + delegate VirtualService resides. + type: string + type: object + fault: + description: Fault injection policy to apply on HTTP traffic + at the client side. + properties: + abort: oneOf: - not: anyOf: - required: - - exact + - httpStatus - required: - - prefix + - grpcStatus - required: - - regex + - http2Error - required: - - exact + - httpStatus - required: - - prefix + - grpcStatus - required: - - regex + - http2Error properties: - exact: - format: string + grpcStatus: + type: string + http2Error: type: string - prefix: - format: string + httpStatus: + description: HTTP status code to use to abort the Http + request. + format: int32 + type: integer + percentage: + description: Percentage of requests to be aborted with + the error code provided. + properties: + value: + format: double + type: number + type: object + type: object + delay: + oneOf: + - not: + anyOf: + - required: + - fixedDelay + - required: + - exponentialDelay + - required: + - fixedDelay + - required: + - exponentialDelay + properties: + exponentialDelay: type: string - regex: - description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string + fixedDelay: + description: Add a fixed delay before forwarding the + request. type: string + percent: + description: Percentage of requests on which the delay + will be injected (0-100). + format: int32 + type: integer + percentage: + description: Percentage of requests on which the delay + will be injected. + properties: + value: + format: double + type: number + type: object type: object - gateways: - description: Names of gateways where the rule should be - applied. 
- items: - format: string - type: string - type: array - headers: - additionalProperties: + type: object + headers: + properties: + request: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + response: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + type: object + match: + items: + properties: + authority: oneOf: - not: anyOf: @@ -2230,59 +4487,51 @@ spec: - regex properties: exact: - format: string type: string prefix: - format: string type: string regex: description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string type: string type: object - type: object - ignoreUriCase: - description: Flag to specify whether the URI matching should - be case-insensitive. - type: boolean - method: - oneOf: - - not: - anyOf: + gateways: + description: Names of gateways where the rule should be + applied. + items: + type: string + type: array + headers: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex - required: - exact - required: - prefix - required: - regex - - required: - - exact - - required: - - prefix - - required: - - regex - properties: - exact: - format: string - type: string - prefix: - format: string - type: string - regex: - description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string - type: string - type: object - name: - description: The name assigned to a match. - format: string - type: string - port: - description: Specifies the ports on the host that is being - addressed. - type: integer - queryParams: - additionalProperties: + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + type: object + ignoreUriCase: + description: Flag to specify whether the URI matching + should be case-insensitive. + type: boolean + method: oneOf: - not: anyOf: @@ -2300,86 +4549,82 @@ spec: - regex properties: exact: - format: string type: string prefix: - format: string type: string regex: description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string type: string type: object - description: Query parameters for matching. - type: object - scheme: - oneOf: - - not: - anyOf: + name: + description: The name assigned to a match. + type: string + port: + description: Specifies the ports on the host that is being + addressed. + type: integer + queryParams: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex - required: - exact - required: - prefix - required: - regex - - required: - - exact - - required: - - prefix - - required: - - regex - properties: - exact: - format: string - type: string - prefix: - format: string - type: string - regex: - description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + description: Query parameters for matching. 
+ type: object + scheme: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string + type: object + sourceLabels: + additionalProperties: type: string - type: object - sourceLabels: - additionalProperties: - format: string + type: object + sourceNamespace: + description: Source namespace constraining the applicability + of a rule to workloads in that namespace. type: string - type: object - sourceNamespace: - description: Source namespace constraining the applicability - of a rule to workloads in that namespace. - format: string - type: string - uri: - oneOf: - - not: - anyOf: - - required: - - exact - - required: - - prefix - - required: - - regex - - required: - - exact - - required: - - prefix - - required: - - regex - properties: - exact: - format: string - type: string - prefix: - format: string - type: string - regex: - description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string - type: string - type: object - withoutHeaders: - additionalProperties: + uri: oneOf: - not: anyOf: @@ -2397,340 +4642,357 @@ spec: - regex properties: exact: - format: string type: string prefix: - format: string type: string regex: description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). - format: string type: string type: object - description: withoutHeader has the same syntax with the - header, but has opposite meaning. - type: object - type: object - type: array - mirror: - properties: - host: - description: The name of a service from the service registry. - format: string - type: string - port: - description: Specifies the port on the host that is being - addressed. - properties: - number: - type: integer - type: object - subset: - description: The name of a subset within the service. - format: string - type: string - type: object - mirror_percent: - description: Percentage of the traffic to be mirrored by the `mirror` - field. - nullable: true - type: integer - mirrorPercent: - description: Percentage of the traffic to be mirrored by the `mirror` - field. - nullable: true - type: integer - mirrorPercentage: - description: Percentage of the traffic to be mirrored by the `mirror` - field. - properties: - value: - format: double - type: number - type: object - name: - description: The name assigned to the route for debugging purposes. - format: string - type: string - redirect: - description: A HTTP rule can either redirect or forward (default) - traffic. - properties: - authority: - format: string - type: string - redirectCode: - type: integer - uri: - format: string - type: string - type: object - retries: - description: Retry policy for HTTP requests. - properties: - attempts: - description: Number of retries to be allowed for a given request. - format: int32 - type: integer - perTryTimeout: - description: Timeout per retry attempt for a given request. - type: string - retryOn: - description: Specifies the conditions under which retry takes - place. - format: string - type: string - retryRemoteLocalities: - description: Flag to specify whether the retries should retry - to other localities. - nullable: true - type: boolean - type: object - rewrite: - description: Rewrite HTTP URIs and Authority headers. 
- properties: - authority: - description: rewrite the Authority/Host header with this value. - format: string - type: string - uri: - format: string - type: string - type: object - route: - description: A HTTP rule can either redirect or forward (default) - traffic. - items: - properties: - destination: - properties: - host: - description: The name of a service from the service - registry. - format: string - type: string - port: - description: Specifies the port on the host that is - being addressed. + withoutHeaders: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - exact + - required: + - prefix + - required: + - regex + - required: + - exact + - required: + - prefix + - required: + - regex properties: - number: - type: integer + exact: + type: string + prefix: + type: string + regex: + description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax). + type: string type: object - subset: - description: The name of a subset within the service. - format: string - type: string - type: object - headers: + description: withoutHeader has the same syntax with the + header, but has opposite meaning. + type: object + type: object + type: array + mirror: + properties: + host: + description: The name of a service from the service registry. + type: string + port: + description: Specifies the port on the host that is being + addressed. properties: - request: - properties: - add: - additionalProperties: - format: string - type: string - type: object - remove: - items: - format: string - type: string - type: array - set: - additionalProperties: - format: string - type: string - type: object - type: object - response: - properties: - add: - additionalProperties: - format: string - type: string - type: object - remove: - items: - format: string - type: string - type: array - set: - additionalProperties: - format: string - type: string - type: object - type: object + number: + type: integer type: object - weight: - format: int32 - type: integer + subset: + description: The name of a subset within the service. + type: string type: object - type: array - timeout: - description: Timeout for HTTP requests, default is disabled. - type: string - type: object - type: array - tcp: - description: An ordered list of route rules for opaque TCP traffic. - items: - properties: - match: - items: + mirror_percent: + description: Percentage of the traffic to be mirrored by the + `mirror` field. + nullable: true + type: integer + mirrorPercent: + description: Percentage of the traffic to be mirrored by the + `mirror` field. + nullable: true + type: integer + mirrorPercentage: + description: Percentage of the traffic to be mirrored by the + `mirror` field. properties: - destinationSubnets: - description: IPv4 or IPv6 ip addresses of destination with - optional subnet. - items: - format: string - type: string - type: array - gateways: - description: Names of gateways where the rule should be - applied. - items: - format: string - type: string - type: array + value: + format: double + type: number + type: object + name: + description: The name assigned to the route for debugging purposes. + type: string + redirect: + description: A HTTP rule can either redirect or forward (default) + traffic. 
+ oneOf: + - not: + anyOf: + - required: + - port + - required: + - derivePort + - required: + - port + - required: + - derivePort + properties: + authority: + type: string + derivePort: + enum: + - FROM_PROTOCOL_DEFAULT + - FROM_REQUEST_PORT + type: string port: - description: Specifies the port on the host that is being - addressed. + description: On a redirect, overwrite the port portion of + the URL with this value. type: integer - sourceLabels: - additionalProperties: - format: string - type: string - type: object - sourceNamespace: - description: Source namespace constraining the applicability - of a rule to workloads in that namespace. - format: string + redirectCode: + type: integer + scheme: + description: On a redirect, overwrite the scheme portion + of the URL with this value. type: string - sourceSubnet: - description: IPv4 or IPv6 ip address of source with optional - subnet. - format: string + uri: type: string type: object - type: array - route: - description: The destination to which the connection should be - forwarded to. - items: + retries: + description: Retry policy for HTTP requests. properties: - destination: - properties: - host: - description: The name of a service from the service - registry. - format: string - type: string - port: - description: Specifies the port on the host that is - being addressed. - properties: - number: - type: integer - type: object - subset: - description: The name of a subset within the service. - format: string - type: string - type: object - weight: + attempts: + description: Number of retries to be allowed for a given + request. format: int32 type: integer + perTryTimeout: + description: Timeout per attempt for a given request, including + the initial call and any retries. + type: string + retryOn: + description: Specifies the conditions under which retry + takes place. + type: string + retryRemoteLocalities: + description: Flag to specify whether the retries should + retry to other localities. + nullable: true + type: boolean type: object - type: array - type: object - type: array - tls: - items: - properties: - match: - items: + rewrite: + description: Rewrite HTTP URIs and Authority headers. properties: - destinationSubnets: - description: IPv4 or IPv6 ip addresses of destination with - optional subnet. - items: - format: string - type: string - type: array - gateways: - description: Names of gateways where the rule should be - applied. - items: - format: string - type: string - type: array - port: - description: Specifies the port on the host that is being - addressed. - type: integer - sniHosts: - description: SNI (server name indicator) to match on. - items: - format: string - type: string - type: array - sourceLabels: - additionalProperties: - format: string - type: string - type: object - sourceNamespace: - description: Source namespace constraining the applicability - of a rule to workloads in that namespace. - format: string + authority: + description: rewrite the Authority/Host header with this + value. + type: string + uri: type: string type: object - type: array - route: - description: The destination to which the connection should be - forwarded to. - items: - properties: - destination: - properties: - host: - description: The name of a service from the service - registry. - format: string - type: string - port: - description: Specifies the port on the host that is - being addressed. - properties: - number: - type: integer - type: object - subset: - description: The name of a subset within the service. 
- format: string + route: + description: A HTTP rule can either redirect or forward (default) + traffic. + items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string + type: object + headers: + properties: + request: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + response: + properties: + add: + additionalProperties: + type: string + type: object + remove: + items: + type: string + type: array + set: + additionalProperties: + type: string + type: object + type: object + type: object + weight: + format: int32 + type: integer + type: object + type: array + timeout: + description: Timeout for HTTP requests, default is disabled. + type: string + type: object + type: array + tcp: + description: An ordered list of route rules for opaque TCP traffic. + items: + properties: + match: + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should be + applied. + items: + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. + type: integer + sourceLabels: + additionalProperties: + type: string + type: object + sourceNamespace: + description: Source namespace constraining the applicability + of a rule to workloads in that namespace. + type: string + sourceSubnet: + description: IPv4 or IPv6 ip address of source with optional + subnet. + type: string + type: object + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string + type: object + weight: + format: int32 + type: integer + type: object + type: array + type: object + type: array + tls: + items: + properties: + match: + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. + items: type: string - type: object - weight: - format: int32 - type: integer - type: object - type: array - type: object - type: array - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 - served: true - storage: true - - name: v1beta1 + type: array + gateways: + description: Names of gateways where the rule should be + applied. + items: + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. + type: integer + sniHosts: + description: SNI (server name indicator) to match on. + items: + type: string + type: array + sourceLabels: + additionalProperties: + type: string + type: object + sourceNamespace: + description: Source namespace constraining the applicability + of a rule to workloads in that namespace. 
+ type: string + type: object + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + type: string + type: object + weight: + format: int32 + type: integer + type: object + type: array + type: object + type: array + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: false + subresources: + status: {} --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: @@ -2742,19 +5004,6 @@ metadata: release: istio name: workloadentries.networking.istio.io spec: - additionalPrinterColumns: - - JSONPath: .metadata.creationTimestamp - description: 'CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - name: Age - type: date - - JSONPath: .spec.address - description: Address associated with the network endpoint. - name: Address - type: string group: networking.istio.io names: categories: @@ -2766,59 +5015,115 @@ spec: shortNames: - we singular: workloadentry - preserveUnknownFields: false scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration affecting VMs onboarded into the mesh. See more - details at: https://istio.io/docs/reference/config/networking/workload-entry.html' - properties: - address: - format: string - type: string - labels: - additionalProperties: - format: string + versions: + - additionalPrinterColumns: + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Address associated with the network endpoint. + jsonPath: .spec.address + name: Address + type: string + name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting VMs onboarded into the mesh. See + more details at: https://istio.io/docs/reference/config/networking/workload-entry.html' + properties: + address: + type: string + labels: + additionalProperties: + type: string + description: One or more labels associated with the endpoint. + type: object + locality: + description: The locality associated with the endpoint. + type: string + network: + type: string + ports: + additionalProperties: + type: integer + description: Set of ports associated with the endpoint. + type: object + serviceAccount: type: string - description: One or more labels associated with the endpoint. 
- type: object - locality: - description: The locality associated with the endpoint. - format: string - type: string - network: - format: string - type: string - ports: - additionalProperties: + weight: + description: The load balancing weight associated with the endpoint. type: integer - description: Set of ports associated with the endpoint. - type: object - serviceAccount: - format: string - type: string - weight: - description: The load balancing weight associated with the endpoint. - type: integer - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: true - - name: v1beta1 + subresources: + status: {} + - additionalPrinterColumns: + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Address associated with the network endpoint. + jsonPath: .spec.address + name: Address + type: string + name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting VMs onboarded into the mesh. See + more details at: https://istio.io/docs/reference/config/networking/workload-entry.html' + properties: + address: + type: string + labels: + additionalProperties: + type: string + description: One or more labels associated with the endpoint. + type: object + locality: + description: The locality associated with the endpoint. + type: string + network: + type: string + ports: + additionalProperties: + type: integer + description: Set of ports associated with the endpoint. + type: object + serviceAccount: + type: string + weight: + description: The load balancing weight associated with the endpoint. + type: integer + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: false + subresources: + status: {} --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -2828,15 +5133,6 @@ metadata: release: istio name: workloadgroups.networking.istio.io spec: - additionalPrinterColumns: - - JSONPath: .metadata.creationTimestamp - description: 'CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - name: Age - type: date group: networking.istio.io names: categories: @@ -2848,167 +5144,162 @@ spec: shortNames: - wg singular: workloadgroup - preserveUnknownFields: false scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Describes a collection of workload instances. 
See more details - at: https://istio.io/docs/reference/config/networking/workload-group.html' - properties: - metadata: - description: Metadata that will be used for all corresponding `WorkloadEntries`. - properties: - annotations: - additionalProperties: - format: string - type: string - type: object - labels: - additionalProperties: - format: string - type: string - type: object - type: object - probe: - description: '`ReadinessProbe` describes the configuration the user - must provide for healthchecking on their workload.' - oneOf: - - not: - anyOf: - - required: - - httpGet - - required: - - tcpSocket - - required: - - exec - - required: - - httpGet - - required: - - tcpSocket - - required: - - exec - properties: - exec: - description: Health is determined by how the command that is executed - exited. - properties: - command: - description: Command to run. - items: - format: string - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to be considered - failed after having succeeded. - format: int32 - type: integer - httpGet: - properties: - host: - description: Host name to connect to, defaults to the pod IP. - format: string - type: string - httpHeaders: - description: Headers the proxy will pass on to make the request. - items: - properties: - name: - format: string - type: string - value: - format: string - type: string - type: object - type: array - path: - description: Path to access on the HTTP server. - format: string + versions: + - additionalPrinterColumns: + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Describes a collection of workload instances. See more details + at: https://istio.io/docs/reference/config/networking/workload-group.html' + properties: + metadata: + description: Metadata that will be used for all corresponding `WorkloadEntries`. + properties: + annotations: + additionalProperties: type: string - port: - description: Port on which the endpoint lives. - type: integer - scheme: - format: string + type: object + labels: + additionalProperties: type: string - type: object - initialDelaySeconds: - description: Number of seconds after the container has started before - readiness probes are initiated. - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered - successful after having failed. - format: int32 - type: integer - tcpSocket: - description: Health is determined by if the proxy is able to connect. - properties: - host: - format: string + type: object + type: object + probe: + description: '`ReadinessProbe` describes the configuration the user + must provide for healthchecking on their workload.' 
+ oneOf: + - not: + anyOf: + - required: + - httpGet + - required: + - tcpSocket + - required: + - exec + - required: + - httpGet + - required: + - tcpSocket + - required: + - exec + properties: + exec: + description: Health is determined by how the command that is executed + exited. + properties: + command: + description: Command to run. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. + format: int32 + type: integer + httpGet: + properties: + host: + description: Host name to connect to, defaults to the pod + IP. + type: string + httpHeaders: + description: Headers the proxy will pass on to make the request. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Port on which the endpoint lives. + type: integer + scheme: + type: string + type: object + initialDelaySeconds: + description: Number of seconds after the container has started + before readiness probes are initiated. + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. + format: int32 + type: integer + tcpSocket: + description: Health is determined by if the proxy is able to connect. + properties: + host: + type: string + port: + type: integer + type: object + timeoutSeconds: + description: Number of seconds after which the probe times out. + format: int32 + type: integer + type: object + template: + description: Template to be used for the generation of `WorkloadEntry` + resources that belong to this `WorkloadGroup`. + properties: + address: + type: string + labels: + additionalProperties: type: string - port: + description: One or more labels associated with the endpoint. + type: object + locality: + description: The locality associated with the endpoint. + type: string + network: + type: string + ports: + additionalProperties: type: integer - type: object - timeoutSeconds: - description: Number of seconds after which the probe times out. - format: int32 - type: integer - type: object - template: - description: Template to be used for the generation of `WorkloadEntry` - resources that belong to this `WorkloadGroup`. - properties: - address: - format: string - type: string - labels: - additionalProperties: - format: string + description: Set of ports associated with the endpoint. + type: object + serviceAccount: type: string - description: One or more labels associated with the endpoint. - type: object - locality: - description: The locality associated with the endpoint. - format: string - type: string - network: - format: string - type: string - ports: - additionalProperties: + weight: + description: The load balancing weight associated with the endpoint. type: integer - description: Set of ports associated with the endpoint. - type: object - serviceAccount: - format: string - type: string - weight: - description: The load balancing weight associated with the endpoint. 
- type: integer - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1alpha3 + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: true + subresources: + status: {} --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: @@ -3026,221 +5317,197 @@ spec: categories: - istio-io - security-istio-io - kind: AuthorizationPolicy - listKind: AuthorizationPolicyList - plural: authorizationpolicies - singular: authorizationpolicy - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: 'Configuration for access control on workloads. See more details - at: https://istio.io/docs/reference/config/security/authorization-policy.html' - oneOf: - - not: - anyOf: - - required: - - provider - - required: - - provider - properties: - action: - description: Optional. - enum: - - ALLOW - - DENY - - AUDIT - - CUSTOM - type: string - provider: - description: Specifies detailed configuration of the CUSTOM action. - properties: - name: - description: Specifies the name of the extension provider. - format: string - type: string - type: object - rules: - description: Optional. - items: - properties: - from: - description: Optional. - items: - properties: - source: - description: Source specifies the source of a request. - properties: - ipBlocks: - description: Optional. - items: - format: string - type: string - type: array - namespaces: - description: Optional. - items: - format: string - type: string - type: array - notIpBlocks: - description: Optional. - items: - format: string - type: string - type: array - notNamespaces: - description: Optional. - items: - format: string - type: string - type: array - notPrincipals: - description: Optional. - items: - format: string - type: string - type: array - notRemoteIpBlocks: - description: Optional. - items: - format: string - type: string - type: array - notRequestPrincipals: - description: Optional. - items: - format: string - type: string - type: array - principals: - description: Optional. - items: - format: string - type: string - type: array - remoteIpBlocks: - description: Optional. - items: - format: string - type: string - type: array - requestPrincipals: - description: Optional. - items: - format: string - type: string - type: array - type: object - type: object - type: array - to: - description: Optional. - items: - properties: - operation: - description: Operation specifies the operation of a request. - properties: - hosts: - description: Optional. - items: - format: string - type: string - type: array - methods: - description: Optional. - items: - format: string - type: string - type: array - notHosts: - description: Optional. - items: - format: string - type: string - type: array - notMethods: - description: Optional. - items: - format: string - type: string - type: array - notPaths: - description: Optional. - items: - format: string - type: string - type: array - notPorts: - description: Optional. - items: - format: string - type: string - type: array - paths: - description: Optional. - items: - format: string - type: string - type: array - ports: - description: Optional. - items: - format: string - type: string - type: array - type: object - type: object - type: array - when: - description: Optional. 
- items: - properties: - key: - description: The name of an Istio attribute. - format: string - type: string - notValues: - description: Optional. - items: - format: string - type: string - type: array - values: - description: Optional. - items: - format: string - type: string - type: array - type: object - type: array - type: object - type: array - selector: - description: Optional. - properties: - matchLabels: - additionalProperties: - format: string - type: string - type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object + kind: AuthorizationPolicy + listKind: AuthorizationPolicyList + plural: authorizationpolicies + singular: authorizationpolicy + scope: Namespaced versions: - name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Configuration for access control on workloads. See more + details at: https://istio.io/docs/reference/config/security/authorization-policy.html' + oneOf: + - not: + anyOf: + - required: + - provider + - required: + - provider + properties: + action: + description: Optional. + enum: + - ALLOW + - DENY + - AUDIT + - CUSTOM + type: string + provider: + description: Specifies detailed configuration of the CUSTOM action. + properties: + name: + description: Specifies the name of the extension provider. + type: string + type: object + rules: + description: Optional. + items: + properties: + from: + description: Optional. + items: + properties: + source: + description: Source specifies the source of a request. + properties: + ipBlocks: + description: Optional. + items: + type: string + type: array + namespaces: + description: Optional. + items: + type: string + type: array + notIpBlocks: + description: Optional. + items: + type: string + type: array + notNamespaces: + description: Optional. + items: + type: string + type: array + notPrincipals: + description: Optional. + items: + type: string + type: array + notRemoteIpBlocks: + description: Optional. + items: + type: string + type: array + notRequestPrincipals: + description: Optional. + items: + type: string + type: array + principals: + description: Optional. + items: + type: string + type: array + remoteIpBlocks: + description: Optional. + items: + type: string + type: array + requestPrincipals: + description: Optional. + items: + type: string + type: array + type: object + type: object + type: array + to: + description: Optional. + items: + properties: + operation: + description: Operation specifies the operation of a request. + properties: + hosts: + description: Optional. + items: + type: string + type: array + methods: + description: Optional. + items: + type: string + type: array + notHosts: + description: Optional. + items: + type: string + type: array + notMethods: + description: Optional. + items: + type: string + type: array + notPaths: + description: Optional. + items: + type: string + type: array + notPorts: + description: Optional. + items: + type: string + type: array + paths: + description: Optional. + items: + type: string + type: array + ports: + description: Optional. + items: + type: string + type: array + type: object + type: object + type: array + when: + description: Optional. + items: + properties: + key: + description: The name of an Istio attribute. + type: string + notValues: + description: Optional. + items: + type: string + type: array + values: + description: Optional. 
+ items: + type: string + type: array + type: object + type: array + type: object + type: array + selector: + description: Optional. + properties: + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: true + subresources: + status: {} --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: @@ -3253,19 +5520,6 @@ metadata: release: istio name: peerauthentications.security.istio.io spec: - additionalPrinterColumns: - - JSONPath: .spec.mtls.mode - description: Defines the mTLS mode used for peer authentication. - name: Mode - type: string - - JSONPath: .metadata.creationTimestamp - description: 'CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - name: Age - type: date group: security.istio.io names: categories: @@ -3277,31 +5531,31 @@ spec: shortNames: - pa singular: peerauthentication - preserveUnknownFields: false scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: PeerAuthentication defines how traffic will be tunneled (or - not) to the sidecar. - properties: - mtls: - description: Mutual TLS settings for workload. - properties: - mode: - description: Defines the mTLS mode used for peer authentication. - enum: - - UNSET - - DISABLE - - PERMISSIVE - - STRICT - type: string - type: object - portLevelMtls: - additionalProperties: + versions: + - additionalPrinterColumns: + - description: Defines the mTLS mode used for peer authentication. + jsonPath: .spec.mtls.mode + name: Mode + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: PeerAuthentication defines how traffic will be tunneled (or + not) to the sidecar. + properties: + mtls: + description: Mutual TLS settings for workload. properties: mode: description: Defines the mTLS mode used for peer authentication. @@ -3312,30 +5566,41 @@ spec: - STRICT type: string type: object - description: Port specific mutual TLS settings. - type: object - selector: - description: The selector determines the workloads to apply the ChannelAuthentication - on. - properties: - matchLabels: - additionalProperties: - format: string - type: string + portLevelMtls: + additionalProperties: + properties: + mode: + description: Defines the mTLS mode used for peer authentication. 
+ enum: + - UNSET + - DISABLE + - PERMISSIVE + - STRICT + type: string type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1beta1 + description: Port specific mutual TLS settings. + type: object + selector: + description: The selector determines the workloads to apply the ChannelAuthentication + on. + properties: + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: true + subresources: + status: {} --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: @@ -3359,90 +5624,318 @@ spec: shortNames: - ra singular: requestauthentication - preserveUnknownFields: false scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - spec: - description: RequestAuthentication defines what request authentication methods - are supported by a workload. - properties: - jwtRules: - description: Define the list of JWTs that can be validated at the selected - workloads' proxy. - items: + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + spec: + description: RequestAuthentication defines what request authentication + methods are supported by a workload. + properties: + jwtRules: + description: Define the list of JWTs that can be validated at the + selected workloads' proxy. + items: + properties: + audiences: + items: + type: string + type: array + forwardOriginalToken: + description: If set to true, the original token will be kept + for the upstream request. + type: boolean + fromHeaders: + description: List of header locations from which JWT is expected. + items: + properties: + name: + description: The HTTP header name. + type: string + prefix: + description: The prefix that should be stripped before + decoding the token. + type: string + type: object + type: array + fromParams: + description: List of query parameters from which JWT is expected. + items: + type: string + type: array + issuer: + description: Identifies the issuer that issued the JWT. + type: string + jwks: + description: JSON Web Key Set of public keys to validate signature + of the JWT. + type: string + jwks_uri: + type: string + jwksUri: + type: string + outputPayloadToHeader: + type: string + type: object + type: array + selector: + description: Optional. properties: - audiences: - items: - format: string + matchLabels: + additionalProperties: type: string - type: array - forwardOriginalToken: - description: If set to true, the orginal token will be kept for - the ustream request. - type: boolean - fromHeaders: - description: List of header locations from which JWT is expected. - items: - properties: - name: - description: The HTTP header name. - format: string - type: string - prefix: - description: The prefix that should be stripped before decoding - the token. - format: string - type: string - type: object - type: array - fromParams: - description: List of query parameters from which JWT is expected. 
- items: - format: string + type: object + type: object + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + istio: telemetry + release: istio + name: telemetries.telemetry.istio.io +spec: + group: telemetry.istio.io + names: + categories: + - istio-io + - telemetry-istio-io + kind: Telemetry + listKind: TelemetryList + plural: telemetries + shortNames: + - telemetry + singular: telemetry + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + spec: + description: 'Telemetry configuration for workloads. See more details + at: https://istio.io/docs/reference/config/telemetry.html' + properties: + accessLogging: + description: Optional. + items: + properties: + disabled: + description: Controls logging. + nullable: true + type: boolean + providers: + description: Optional. + items: + properties: + name: + description: Required. + type: string + type: object + type: array + type: object + type: array + metrics: + description: Optional. + items: + properties: + overrides: + description: Optional. + items: + properties: + disabled: + description: Optional. + nullable: true + type: boolean + match: + description: Match allows provides the scope of the override. + oneOf: + - not: + anyOf: + - required: + - metric + - required: + - customMetric + - required: + - metric + - required: + - customMetric + properties: + customMetric: + description: Allows free-form specification of a metric. + type: string + metric: + description: One of the well-known Istio Standard + Metrics. + enum: + - ALL_METRICS + - REQUEST_COUNT + - REQUEST_DURATION + - REQUEST_SIZE + - RESPONSE_SIZE + - TCP_OPENED_CONNECTIONS + - TCP_CLOSED_CONNECTIONS + - TCP_SENT_BYTES + - TCP_RECEIVED_BYTES + - GRPC_REQUEST_MESSAGES + - GRPC_RESPONSE_MESSAGES + type: string + mode: + description: 'Controls which mode of metrics generation + is selected: CLIENT and/or SERVER.' + enum: + - CLIENT_AND_SERVER + - CLIENT + - SERVER + type: string + type: object + tagOverrides: + additionalProperties: + properties: + operation: + description: Operation controls whether or not to + update/add a tag, or to remove it. + enum: + - UPSERT + - REMOVE + type: string + value: + description: Value is only considered if the operation + is `UPSERT`. + type: string + type: object + description: Optional. + type: object + type: object + type: array + providers: + description: Optional. + items: + properties: + name: + description: Required. + type: string + type: object + type: array + type: object + type: array + selector: + description: Optional. + properties: + matchLabels: + additionalProperties: type: string - type: array - issuer: - description: Identifies the issuer that issued the JWT. 
- format: string - type: string - jwks: - description: JSON Web Key Set of public keys to validate signature - of the JWT. - format: string - type: string - jwks_uri: - format: string - type: string - jwksUri: - format: string - type: string - outputPayloadToHeader: - format: string - type: string + type: object type: object - type: array - selector: - description: The selector determines the workloads to apply the RequestAuthentication - on. - properties: - matchLabels: - additionalProperties: - format: string - type: string + tracing: + description: Optional. + items: + properties: + customTags: + additionalProperties: + oneOf: + - not: + anyOf: + - required: + - literal + - required: + - environment + - required: + - header + - required: + - literal + - required: + - environment + - required: + - header + properties: + environment: + description: Environment adds the value of an environment + variable to each span. + properties: + defaultValue: + description: Optional. + type: string + name: + description: Name of the environment variable from + which to extract the tag value. + type: string + type: object + header: + description: RequestHeader adds the value of an header + from the request to each span. + properties: + defaultValue: + description: Optional. + type: string + name: + description: Name of the header from which to extract + the tag value. + type: string + type: object + literal: + description: Literal adds the same, hard-coded value to + each span. + properties: + value: + description: The tag value to use. + type: string + type: object + type: object + description: Optional. + type: object + disableSpanReporting: + description: Controls span reporting. + nullable: true + type: boolean + providers: + description: Optional. + items: + properties: + name: + description: Required. + type: string + type: object + type: array + randomSamplingPercentage: + nullable: true + type: number type: object - type: object - type: object - status: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - versions: - - name: v1beta1 + type: array + type: object + status: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object served: true storage: true + subresources: + status: {} ---- \ No newline at end of file +--- diff --git a/test/e2e/crds/split.yaml b/test/e2e/crds/split.yaml index 04aa266b71..716b8552b9 100644 --- a/test/e2e/crds/split.yaml +++ b/test/e2e/crds/split.yaml @@ -1,25 +1,265 @@ --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: trafficsplits.split.smi-spec.io spec: group: split.smi-spec.io - version: v1alpha1 - scope: Namespaced names: kind: TrafficSplit listKind: TrafficSplitList + plural: trafficsplits shortNames: - ts - plural: trafficsplits singular: trafficsplit + scope: Namespaced versions: - name: v1alpha1 + schema: + openAPIV3Schema: + description: TrafficSplit is the Schema for the trafficsplits API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrafficSplitSpec defines the desired state of TrafficSplit + properties: + backends: + items: + description: TrafficSplitBackend defines a backend + properties: + service: + type: string + weight: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: array + service: + type: string + type: object + status: + description: TrafficSplitStatus defines the observed state of TrafficSplit + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha2 + schema: + openAPIV3Schema: + description: TrafficSplit is the Schema for the trafficsplits API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrafficSplitSpec defines the desired state of TrafficSplit + properties: + backends: + items: + description: TrafficSplitBackend defines a backend + properties: + service: + type: string + weight: + type: integer + required: + - service + - weight + type: object + type: array + service: + type: string + type: object + status: + description: TrafficSplitStatus defines the observed state of TrafficSplit + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha3 + schema: + openAPIV3Schema: + description: TrafficSplit is the Schema for the trafficsplits API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrafficSplitSpec defines the desired state of TrafficSplit + properties: + backends: + description: Backends defines a list of Kubernetes services used as + the traffic split destination + items: + description: TrafficSplitBackend defines a backend + properties: + service: + description: Service is the name of a Kubernetes service + type: string + weight: + description: Weight defines the traffic split percentage + type: integer + required: + - service + - weight + type: object + type: array + matches: + description: Matches allows defining a list of HTTP route groups that + this traffic split object should match + items: + description: TypedLocalObjectReference contains enough information + to let you locate the typed referenced object inside the same + namespace. + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in + the core API group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + type: array + service: + description: Service represents the apex service + type: string + required: + - backends + - service + type: object + status: + description: TrafficSplitStatus defines the observed state of TrafficSplit + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha4 + schema: + openAPIV3Schema: + description: TrafficSplit is the Schema for the trafficsplits API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrafficSplitSpec defines the desired state of TrafficSplit + properties: + backends: + description: Backends defines a list of Kubernetes services used as + the traffic split destination + items: + description: TrafficSplitBackend defines a backend + properties: + service: + description: Service is the name of a Kubernetes service + type: string + weight: + description: Weight defines the traffic split percentage + type: integer + required: + - service + - weight + type: object + type: array + matches: + description: Matches allows defining a list of HTTP route groups that + this traffic split object should match + items: + description: TypedLocalObjectReference contains enough information + to let you locate the typed referenced object inside the same + namespace. + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in + the core API group. 
For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + type: array + service: + description: Service represents the apex service + type: string + required: + - backends + - service + type: object + status: + description: TrafficSplitStatus defines the observed state of TrafficSplit + type: object + type: object served: true storage: true - additionalPrinterColumns: - - name: Service - type: string - description: The apex service of this split. - JSONPath: .spec.service + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/test/e2e/experiment_test.go b/test/e2e/experiment_test.go index 2994c35f8a..4ff844cb50 100644 --- a/test/e2e/experiment_test.go +++ b/test/e2e/experiment_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e @@ -16,6 +17,7 @@ import ( type ExperimentSuite struct { fixtures.E2ESuite } + // TestRolloutWithExperimentAndAnalysis this tests the ability for a rollout to launch an experiment, // and use self-referencing features/pass metadata arguments to the experiment and analysis, such as: // * specRef: stable @@ -97,6 +99,26 @@ func (s *ExperimentSuite) TestExperimentWithServiceAndScaleDownDelay() { ExpectExperimentServiceCount("experiment-with-service", 0) } +func (s *ExperimentSuite) TestExperimentWithDryRunMetrics() { + g := s.Given() + g.ApplyManifests("@functional/experiment-dry-run-analysis.yaml") + g.When(). + WaitForExperimentPhase("experiment-with-dry-run", "Successful"). + Sleep(time.Second*3). + Then(). + ExpectExperimentDryRunSummary(1, 0, 1, "experiment-with-dry-run") +} + +func (s *ExperimentSuite) TestExperimentWithMeasurementRetentionMetrics() { + g := s.Given() + g.ApplyManifests("@functional/experiment-measurement-retention-analysis.yaml") + g.When(). + WaitForExperimentPhase("experiment-with-mr", "Successful"). + Sleep(time.Second*3). + Then(). 
+ ExpectExperimentMeasurementsLength(0, 2, "experiment-with-mr") +} + func TestExperimentSuite(t *testing.T) { suite.Run(t, new(ExperimentSuite)) } diff --git a/test/e2e/functional/alb-bluegreen-rollout.yaml b/test/e2e/functional/alb-bluegreen-rollout.yaml index e06c2b5bea..3eedb62a8d 100644 --- a/test/e2e/functional/alb-bluegreen-rollout.yaml +++ b/test/e2e/functional/alb-bluegreen-rollout.yaml @@ -26,7 +26,7 @@ spec: selector: app: alb-bluegreen --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: alb-bluegreen-ingress @@ -37,9 +37,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: alb-bluegreen-stable - servicePort: 80 + service: + name: alb-bluegreen-stable + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional/alb-canary-rollout.yaml b/test/e2e/functional/alb-canary-rollout.yaml index df90ecc3cd..e05f73d2c2 100644 --- a/test/e2e/functional/alb-canary-rollout.yaml +++ b/test/e2e/functional/alb-canary-rollout.yaml @@ -40,7 +40,7 @@ spec: selector: app: alb-canary --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: alb-canary-ingress @@ -51,9 +51,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: alb-canary-root - servicePort: use-annotation + service: + name: alb-canary-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional/alb-pingpong-rollout.yaml b/test/e2e/functional/alb-pingpong-rollout.yaml new file mode 100644 index 0000000000..ddb7904e76 --- /dev/null +++ b/test/e2e/functional/alb-pingpong-rollout.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ping-service +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-canary +--- +apiVersion: v1 +kind: Service +metadata: + name: pong-service +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-canary +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: alb-canary-ingress + annotations: + kubernetes.io/ingress.class: alb +spec: + rules: + - http: + paths: + - path: /* + backend: + service: + name: alb-rollout-root + port: + name: use-annotation + pathType: ImplementationSpecific +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: alb-canary +spec: + replicas: 2 + selector: + matchLabels: + app: alb-canary + template: + metadata: + labels: + app: alb-canary + spec: + containers: + - name: alb-canary + image: "argoproj/rollouts-demo:red" + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + strategy: + canary: + scaleDownDelaySeconds: 2 + pingPong: + pingService: ping-service + pongService: pong-service + trafficRouting: + alb: + ingress: alb-canary-ingress + rootService: alb-rollout-root + servicePort: 80 + steps: + - setWeight: 25 + - pause: {duration: 5s} diff --git a/test/e2e/functional/canary-dynamic-stable-scale.yaml b/test/e2e/functional/canary-dynamic-stable-scale.yaml index 612f684dea..aae5e8c324 100644 --- a/test/e2e/functional/canary-dynamic-stable-scale.yaml +++ b/test/e2e/functional/canary-dynamic-stable-scale.yaml @@ -38,7 +38,7 @@ spec: selector: app: dynamic-stable-scale --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: 
dynamic-stable-scale-ingress @@ -49,9 +49,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: dynamic-stable-scale-root - servicePort: use-annotation + service: + name: dynamic-stable-scale-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional/canary-scaledowndelay.yaml b/test/e2e/functional/canary-scaledowndelay.yaml index 21fc6a4d42..7d469fe929 100644 --- a/test/e2e/functional/canary-scaledowndelay.yaml +++ b/test/e2e/functional/canary-scaledowndelay.yaml @@ -40,7 +40,7 @@ spec: selector: app: canary-scaledowndelay --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: canary-scaledowndelay-ingress @@ -51,9 +51,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: canary-scaledowndelay-root - servicePort: use-annotation + service: + name: canary-scaledowndelay-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional/canary-scaledownonabort.yaml b/test/e2e/functional/canary-scaledownonabort.yaml index 8d5d05aff3..308becf609 100644 --- a/test/e2e/functional/canary-scaledownonabort.yaml +++ b/test/e2e/functional/canary-scaledownonabort.yaml @@ -40,7 +40,7 @@ spec: selector: app: canary-scaledowndelay --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: canary-scaledowndelay-ingress @@ -51,9 +51,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: canary-scaledowndelay-root - servicePort: use-annotation + service: + name: canary-scaledowndelay-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional/canary-unscaledownonabort.yaml b/test/e2e/functional/canary-unscaledownonabort.yaml index cf9ce26438..a4b4955988 100644 --- a/test/e2e/functional/canary-unscaledownonabort.yaml +++ b/test/e2e/functional/canary-unscaledownonabort.yaml @@ -40,7 +40,7 @@ spec: selector: app: canary-scaledowndelay --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: canary-scaledowndelay-ingress @@ -51,9 +51,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: canary-scaledowndelay-root - servicePort: use-annotation + service: + name: canary-scaledowndelay-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional/experiment-dry-run-analysis.yaml b/test/e2e/functional/experiment-dry-run-analysis.yaml new file mode 100644 index 0000000000..466e38f612 --- /dev/null +++ b/test/e2e/functional/experiment-dry-run-analysis.yaml @@ -0,0 +1,51 @@ +kind: AnalysisTemplate +apiVersion: argoproj.io/v1alpha1 +metadata: + name: dry-run-job +spec: + metrics: + - name: test-1 + provider: + job: + spec: + template: + spec: + containers: + - name: sleep + image: alpine:3.8 + command: [exit, "1"] + restartPolicy: Never + backoffLimit: 0 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: experiment-with-dry-run +spec: + duration: 30s + progressDeadlineSeconds: 30 + templates: + - name: baseline + replicas: 1 + service: {} + selector: + matchLabels: + app: experiment-with-dry-run + template: + metadata: + labels: + app: experiment-with-dry-run + spec: + containers: + - name: experiment-with-dry-run + image: nginx:1.19-alpine + resources: + requests: + memory: 16Mi + cpu: 1m + analyses: + - name: 
dry-run-job + templateName: dry-run-job + dryRun: + - metricName: test.* diff --git a/test/e2e/functional/experiment-measurement-retention-analysis.yaml b/test/e2e/functional/experiment-measurement-retention-analysis.yaml new file mode 100644 index 0000000000..95da9274f7 --- /dev/null +++ b/test/e2e/functional/experiment-measurement-retention-analysis.yaml @@ -0,0 +1,55 @@ +kind: AnalysisTemplate +apiVersion: argoproj.io/v1alpha1 +metadata: + name: measurement-retention-job +spec: + metrics: + - name: test-1 + interval: 3s + failureLimit: 1 + provider: + job: + spec: + template: + spec: + containers: + - name: sleep + image: alpine:3.8 + command: [sh, -c] + args: [exit 0] + restartPolicy: Never + backoffLimit: 0 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: experiment-with-mr +spec: + duration: 30s + progressDeadlineSeconds: 30 + templates: + - name: baseline + replicas: 1 + service: {} + selector: + matchLabels: + app: experiment-with-mr + template: + metadata: + labels: + app: experiment-with-mr + spec: + containers: + - name: experiment-with-mr + image: nginx:1.19-alpine + resources: + requests: + memory: 16Mi + cpu: 1m + analyses: + - name: measurement-retention-job + templateName: measurement-retention-job + measurementRetention: + - metricName: test.* + limit: 2 diff --git a/test/e2e/functional/nginx-template.yaml b/test/e2e/functional/nginx-template.yaml index 85058636f7..9fa3335af1 100644 --- a/test/e2e/functional/nginx-template.yaml +++ b/test/e2e/functional/nginx-template.yaml @@ -26,7 +26,7 @@ spec: selector: app: REPLACEME --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: REPLACEME-ingress @@ -37,9 +37,12 @@ spec: - http: paths: - path: /* + pathType: Prefix backend: - serviceName: REPLACEME-stable - servicePort: 80 + service: + name: REPLACEME-stable + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index d5e44f49f3..8ca5a026ac 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e @@ -89,10 +90,12 @@ spec: ExpectRevisionPodCount("1", 0). ExpectRevisionPodCount("2", 1). ExpectRolloutEvents([]string{ + "RolloutNotCompleted", // Rollout not completed, started update to revision 0 (7fd9b5545c) "RolloutUpdated", // Rollout updated to revision 1 "NewReplicaSetCreated", // Created ReplicaSet abort-retry-promote-698fbfb9dc (revision 1) "ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-698fbfb9dc (revision 1) from 0 to 1 "RolloutCompleted", // Rollout completed update to revision 1 (698fbfb9dc): Initial deploy + "RolloutNotCompleted", "RolloutUpdated", // Rollout updated to revision 2 "NewReplicaSetCreated", // Created ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) "ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 0 to 1 @@ -643,7 +646,8 @@ spec: maxUnavailable: 0 steps: - setWeight: 50 - - pause: {} + - pause: + duration: 5s selector: matchLabels: app: bad2good-setweight @@ -670,10 +674,10 @@ spec: containers: - name: bad2good-setweight command: null`). - WaitForRolloutStatus("Progressing"). - WaitForRolloutStatus("Degraded"). + WaitForRolloutStatus("Healthy"). Then(). - ExpectCanaryStablePodCount(2, 2) + ExpectRevisionPodCount("2", 4). 
+ ExpectRevisionPodCount("1", 0) } // TestBlueGreenUpdate @@ -704,6 +708,7 @@ func (s *FunctionalSuite) TestBlueGreenUpdate() { "SwitchService", // Switched selector for service 'bluegreen' from '' to '7dcd8f8869' "RolloutUpdated", // Rollout updated to revision 2 "NewReplicaSetCreated", // Created ReplicaSet bluegreen-5498785cd6 (revision 2) + "RolloutNotCompleted", // Rollout went to not completed state started update to revision 2 (85c6899) "ScalingReplicaSet", // Scaled up ReplicaSet bluegreen-5498785cd6 (revision 2) from 0 to 3 "SwitchService", // Switched selector for service 'bluegreen' from '7dcd8f8869' to '6c779b88b6' "RolloutCompleted", // Rollout completed update to revision 2 (6c779b88b6): Completed blue-green update @@ -756,6 +761,7 @@ spec: - pause: {} `). WaitForRolloutStatus("Paused"). + WaitForRolloutAvailableReplicas(2). Then(). ExpectReplicaCounts(2, 2, 1, 2, 2). // desired, current, updated, ready, available ExpectServiceSelector("bluegreen-to-canary", map[string]string{"app": "bluegreen-to-canary"}, false) @@ -956,7 +962,7 @@ spec: Then(). ExpectRevisionPodCount("2", 0). ExpectRollout("Abort=True", func(r *v1alpha1.Rollout) bool { - return r.Status.Abort == true && len(r.Status.Conditions) == 3 + return r.Status.Abort == true && len(r.Status.Conditions) == 4 }) } @@ -1074,16 +1080,53 @@ spec: ExpectActiveRevision("2") } +func (s *FunctionalSuite) TestCompleteRolloutRestart() { + s.Given(). + HealthyRollout(` +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-restart +spec: + progressDeadlineAbort: true + progressDeadlineSeconds: 15 + replicas: 2 + selector: + matchLabels: + app: ollout-restart + template: + metadata: + labels: + app: ollout-restart + spec: + containers: + - name: ollout-restart + image: nginx:1.19-alpine + imagePullPolicy: Always + strategy: + canary: + steps: + - setWeight: 20 +`). + When(). + WatchRolloutStatus("Healthy"). + Sleep(16 * time.Second). // give it enough time to pass the progressDeadlineSeconds + Then(). + When(). + RestartRollout(). + WatchRolloutStatus("Healthy") +} + func (s *FunctionalSuite) TestKubectlWaitForCompleted() { s.Given(). HealthyRollout(` kind: Service apiVersion: v1 metadata: - name: kubectl-wait-completed + name: kubectl-wait-healthy spec: selector: - app: kubectl-wait-completed + app: kubectl-wait-healthy ports: - protocol: TCP port: 80 @@ -1092,19 +1135,19 @@ spec: apiVersion: argoproj.io/v1alpha1 kind: Rollout metadata: - name: kubectl-wait-completed + name: kubectl-wait-healthy spec: replicas: 1 selector: matchLabels: - app: kubectl-wait-completed + app: kubectl-wait-healthy template: metadata: labels: - app: kubectl-wait-completed + app: kubectl-wait-healthy spec: containers: - - name: kubectl-wait-completed + - name: kubectl-wait-healthy image: nginx:1.19-alpine imagePullPolicy: Always ports: @@ -1118,21 +1161,21 @@ spec: strategy: blueGreen: - activeService: kubectl-wait-completed + activeService: kubectl-wait-healthy autoPromotionEnabled: true `). When(). UpdateSpec(). Then(). - ExpectRollout("Completed=False", func(r *v1alpha1.Rollout) bool { - cmd := exec.Command("kubectl", "wait", "--for=condition=Completed=False", fmt.Sprintf("rollout/%s", r.Name)) + ExpectRollout("Healthy=False", func(r *v1alpha1.Rollout) bool { + cmd := exec.Command("kubectl", "wait", "--for=condition=Healthy=False", fmt.Sprintf("rollout/%s", r.Name)) out, err := cmd.CombinedOutput() return err == nil && strings.Contains(string(out), fmt.Sprintf("rollout.argoproj.io/%s condition met", r.Name)) }). 
ExpectRolloutStatus("Progressing"). ExpectActiveRevision("1"). - ExpectRollout("Completed=True", func(r *v1alpha1.Rollout) bool { - cmd := exec.Command("kubectl", "wait", "--for=condition=Completed=True", fmt.Sprintf("rollout/%s", r.Name)) + ExpectRollout("Healthy=True", func(r *v1alpha1.Rollout) bool { + cmd := exec.Command("kubectl", "wait", "--for=condition=Healthy=True", fmt.Sprintf("rollout/%s", r.Name)) out, err := cmd.CombinedOutput() return err == nil && strings.Contains(string(out), fmt.Sprintf("rollout.argoproj.io/%s condition met", r.Name)) }). diff --git a/test/e2e/header-routing/alb-header-route.yaml b/test/e2e/header-routing/alb-header-route.yaml new file mode 100644 index 0000000000..71c9e7aa6f --- /dev/null +++ b/test/e2e/header-routing/alb-header-route.yaml @@ -0,0 +1,107 @@ +apiVersion: v1 +kind: Service +metadata: + name: canary-service +spec: + type: NodePort + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: alb-rollout +--- +apiVersion: v1 +kind: Service +metadata: + name: stable-service +spec: + type: NodePort + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: alb-rollout +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: alb-rollout-ingress + annotations: + alb.ingress.kubernetes.io/security-groups: 'iks-intuit-cidr-ingress-tcp-443' + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-2:795188202216:certificate/27d920c5-a8a6-4210-9f31-bd4a2d439039 + alb.ingress.kubernetes.io/load-balancer-attributes: access_logs.s3.enabled=false + alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS-1-2-2017-01 + kubernetes.io/ingress.class: aws-alb + alb.ingress.kubernetes.io/load-balancer-name: rollouts-sample + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/healthcheck-protocol: HTTP + alb.ingress.kubernetes.io/healthcheck-port: traffic-port + alb.ingress.kubernetes.io/healthcheck-path: /color + alb.ingress.kubernetes.io/backend-protocol: HTTP + alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}]' + alb.ingress.kubernetes.io/ssl-redirect: '443' + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/subnets: IngressSubnetAz1, IngressSubnetAz2, IngressSubnetAz3 +spec: + rules: + - http: + paths: + - path: /* + pathType: ImplementationSpecific + backend: + service: + name: action1 + port: + name: use-annotation + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + replicas: 5 + selector: + matchLabels: + app: alb-rollout + template: + metadata: + labels: + app: alb-rollout + spec: + containers: + - name: alb-rollout + image: "argoproj/rollouts-demo:yellow" + ports: + - name: http + containerPort: 8080 + protocol: TCP + strategy: + canary: + scaleDownDelaySeconds: 5 + stableService: stable-service + canaryService: canary-service + trafficRouting: + managedRoutes: + - name: header-route + alb: + ingress: alb-rollout-ingress + rootService: action1 + servicePort: 8080 + steps: + - setWeight: 20 + - pause: {} + - setHeaderRoute: + name: header-route + match: + - headerName: Custom-Header + headerValue: + exact: Mozilla* + - pause: {} + - setHeaderRoute: + name: header-route + - pause: {} diff --git a/test/e2e/header-routing/istio-hr-host.yaml b/test/e2e/header-routing/istio-hr-host.yaml new file mode 100644 index 0000000000..8b1036fb42 --- /dev/null +++ b/test/e2e/header-routing/istio-hr-host.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: Service +metadata: + name: 
canary-service +spec: + selector: + app: rollouts-demo + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http +--- +apiVersion: v1 +kind: Service +metadata: + name: stable-service +spec: + selector: + app: rollouts-demo + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: rollouts-demo-vsvc +spec: + gateways: + - rollouts-demo-gateway + hosts: + - rollouts-demo.com + http: +# - name: argo-rollouts-header-route +# route: +# - destination: +# host: canary-service +# weight: 100 + - name: primary + route: + - destination: + host: stable-service + weight: 80 + - destination: + host: canary-service + weight: 20 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + replicas: 5 + selector: + matchLabels: + app: rollouts-demo + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - name: rollouts-demo + image: "nginx:1.19-alpine" + ports: + - name: http + containerPort: 8080 + protocol: TCP + strategy: + canary: + canaryService: canary-service + stableService: stable-service + trafficRouting: + managedRoutes: + - name: set-header-1 + istio: + virtualService: + name: rollouts-demo-vsvc + routes: + - primary + steps: + - setWeight: 20 + - setHeaderRoute: + name: set-header-1 + match: + - headerName: agent + headerValue: + regex: firefox(.*) + - pause: { } + - setHeaderRoute: + name: set-header-1 + match: + - headerName: agent + headerValue: + regex: chrome(.*) + - pause: { } + - setWeight: 40 + - setHeaderRoute: + name: set-header-1 + - pause: {} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: rollouts-demo-gateway +spec: + selector: + istio: ingressgateway + servers: + - hosts: + - '*' + port: + name: http + number: 80 + protocol: HTTP diff --git a/test/e2e/header_route_test.go b/test/e2e/header_route_test.go new file mode 100644 index 0000000000..5b9fe7cd16 --- /dev/null +++ b/test/e2e/header_route_test.go @@ -0,0 +1,91 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/tj/assert" + + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" + + "github.com/argoproj/argo-rollouts/test/fixtures" +) + +type HeaderRouteSuite struct { + fixtures.E2ESuite +} + +func TestHeaderRoutingSuite(t *testing.T) { + suite.Run(t, new(HeaderRouteSuite)) +} + +func (s *HeaderRouteSuite) SetupSuite() { + s.E2ESuite.SetupSuite() + if !s.IstioEnabled { + s.T().SkipNow() + } +} + +func (s *HeaderRouteSuite) TestIstioHostHeaderRoute() { + s.Given(). + RolloutObjects("@header-routing/istio-hr-host.yaml"). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), "primary", vsvc.Spec.HTTP[0].Name) + }). + When(). + UpdateSpec(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), "set-header-1", vsvc.Spec.HTTP[0].Name) + assertDestination(s, vsvc.Spec.HTTP[0], "canary-service", int64(100)) + assertDestination(s, vsvc.Spec.HTTP[1], "stable-service", int64(80)) + assertDestination(s, vsvc.Spec.HTTP[1], "canary-service", int64(20)) + }). + When(). + PromoteRollout(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + When(). + PromoteRollout(). + WaitForRolloutStatus("Paused"). 
+ Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assertDestination(s, vsvc.Spec.HTTP[0], "stable-service", int64(60)) + assertDestination(s, vsvc.Spec.HTTP[0], "canary-service", int64(40)) + }). + When(). + PromoteRolloutFull(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assertDestination(s, vsvc.Spec.HTTP[0], "stable-service", int64(100)) + assertDestination(s, vsvc.Spec.HTTP[0], "canary-service", int64(0)) + }) +} + +func assertDestination(s *HeaderRouteSuite, route istio.VirtualServiceHTTPRoute, service string, weight int64) { + for _, destination := range route.Route { + if destination.Destination.Host == service { + assert.Equal(s.T(), weight, destination.Weight) + return + } + } + assert.Fail(s.T(), "Could not find the destination for service: %s", service) +} diff --git a/test/e2e/istio/istio-host-split-update-in-middle.yaml b/test/e2e/istio/istio-host-split-update-in-middle.yaml new file mode 100644 index 0000000000..f1d6821b18 --- /dev/null +++ b/test/e2e/istio/istio-host-split-update-in-middle.yaml @@ -0,0 +1,86 @@ +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-canary +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: istio-host-split-stable +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split + http: + - name: primary + route: + - destination: + host: istio-host-split-stable + weight: 100 + - destination: + host: istio-host-split-canary + weight: 0 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-host-split +spec: + replicas: 2 + strategy: + canary: + canaryService: istio-host-split-canary + stableService: istio-host-split-stable + trafficRouting: + istio: + virtualService: + name: istio-host-split-vsvc + routes: + - primary + steps: + - setWeight: 0 + - setCanaryScale: + replicas: 1 + - pause: {} + selector: + matchLabels: + app: istio-host-split + template: + metadata: + labels: + app: istio-host-split + spec: + containers: + - name: istio-host-split + image: nginx:1.19-alpine + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m diff --git a/test/e2e/istio_test.go b/test/e2e/istio_test.go index 47710b5110..2a0b2fe7c2 100644 --- a/test/e2e/istio_test.go +++ b/test/e2e/istio_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e @@ -257,9 +258,9 @@ func (s *IstioSuite) TestIstioAbortUpdate() { ExpectRevisionPodCount("2", 1) } -func (s *IstioSuite) TestIstioAbortUpdateDeleteAllCanaryPods() { +func (s *IstioSuite) TestIstioUpdateInMiddleZeroCanaryReplicas() { s.Given(). - RolloutObjects("@istio/istio-rollout-abort-delete-all-canary-pods.yaml"). + RolloutObjects("@istio/istio-host-split-update-in-middle.yaml"). When(). ApplyManifests(). WaitForRolloutStatus("Healthy"). @@ -268,11 +269,26 @@ func (s *IstioSuite) TestIstioAbortUpdateDeleteAllCanaryPods() { UpdateSpec(). WaitForRolloutStatus("Paused"). Then(). - ExpectRevisionPodCount("2", 2). + ExpectRevisionPodCount("2", 1). When(). - PromoteRollout(). + UpdateSpec(). WaitForRolloutStatus("Paused"). Then(). 
+ ExpectRevisionPodCount("3", 1) +} + +func (s *IstioSuite) TestIstioAbortUpdateDeleteAllCanaryPods() { + s.Given(). + RolloutObjects("@istio/istio-rollout-abort-delete-all-canary-pods.yaml"). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Then(). + When(). + UpdateSpec(). + WaitForRolloutStatus("Paused"). + Then(). + ExpectRevisionPodCount("2", 2). When(). PromoteRollout(). WaitForRolloutStatus("Paused"). @@ -361,17 +377,17 @@ func (s *IstioSuite) TestIstioSubsetSplitExperimentStep() { WaitForRolloutStatus("Healthy"). Then(). Assert(func(t *fixtures.Then) { - vsvc := t.GetVirtualService() - assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) // stable - assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) // canary + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) // stable + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) // canary - rs1 := t.GetReplicaSetByRevision("1") - destrule := t.GetDestinationRule() - assert.Len(s.T(), destrule.Spec.Subsets, 2) - assert.Equal(s.T(), rs1.Spec.Template.Labels[v1alpha1.DefaultRolloutUniqueLabelKey], destrule.Spec.Subsets[0].Labels[v1alpha1.DefaultRolloutUniqueLabelKey]) // stable - assert.Equal(s.T(), rs1.Spec.Template.Labels[v1alpha1.DefaultRolloutUniqueLabelKey], destrule.Spec.Subsets[1].Labels[v1alpha1.DefaultRolloutUniqueLabelKey]) // canary + rs1 := t.GetReplicaSetByRevision("1") + destrule := t.GetDestinationRule() + assert.Len(s.T(), destrule.Spec.Subsets, 2) + assert.Equal(s.T(), rs1.Spec.Template.Labels[v1alpha1.DefaultRolloutUniqueLabelKey], destrule.Spec.Subsets[0].Labels[v1alpha1.DefaultRolloutUniqueLabelKey]) // stable + assert.Equal(s.T(), rs1.Spec.Template.Labels[v1alpha1.DefaultRolloutUniqueLabelKey], destrule.Spec.Subsets[1].Labels[v1alpha1.DefaultRolloutUniqueLabelKey]) // canary - }). + }). When(). UpdateSpec(). WaitForRolloutCanaryStepIndex(1). 
@@ -401,7 +417,7 @@ func (s *IstioSuite) TestIstioSubsetSplitExperimentStep() { Assert(func(t *fixtures.Then) { vsvc := t.GetVirtualService() assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) // stable - assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) // canary + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) // canary destrule := t.GetDestinationRule() rs2 := t.GetReplicaSetByRevision("2") @@ -413,5 +429,3 @@ func (s *IstioSuite) TestIstioSubsetSplitExperimentStep() { s.TearDownSuite() } - - diff --git a/test/e2e/mirror-route/istio-mirror-host.yaml b/test/e2e/mirror-route/istio-mirror-host.yaml new file mode 100644 index 0000000000..d3f20b0352 --- /dev/null +++ b/test/e2e/mirror-route/istio-mirror-host.yaml @@ -0,0 +1,124 @@ +apiVersion: v1 +kind: Service +metadata: + name: canary-service +spec: + selector: + app: rollouts-demo + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http +--- +apiVersion: v1 +kind: Service +metadata: + name: stable-service +spec: + selector: + app: rollouts-demo + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: rollouts-demo-vsvc +spec: + gateways: + - rollouts-demo-gateway + hosts: + - rollouts-demo.com + http: + - name: primary + route: + - destination: + host: stable-service + weight: 80 + - destination: + host: canary-service + weight: 20 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + replicas: 5 + selector: + matchLabels: + app: rollouts-demo + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - name: rollouts-demo + image: "nginx:1.19-alpine" + ports: + - name: http + containerPort: 8080 + protocol: TCP + strategy: + canary: + canaryService: canary-service + stableService: stable-service + trafficRouting: + managedRoutes: + - name: mirror-route-1 + - name: mirror-route-2 + istio: + virtualService: + name: rollouts-demo-vsvc + routes: + - primary + steps: + - setWeight: 20 + - setMirrorRoute: + name: mirror-route-1 + percentage: 100 + match: + - path: + prefix: / + - setMirrorRoute: + name: mirror-route-2 + percentage: 80 + match: + - path: + prefix: / + method: + exact: GET + - pause: { } + - setMirrorRoute: + name: mirror-route-1 + percentage: 100 + match: + - path: + prefix: /rewrite + - pause: { } + - setWeight: 40 + - setMirrorRoute: + name: mirror-route-1 + - pause: {} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: rollouts-demo-gateway +spec: + selector: + istio: ingressgateway + servers: + - hosts: + - '*' + port: + name: http + number: 80 + protocol: HTTP diff --git a/test/e2e/mirror_route_test.go b/test/e2e/mirror_route_test.go new file mode 100644 index 0000000000..5190a985f0 --- /dev/null +++ b/test/e2e/mirror_route_test.go @@ -0,0 +1,102 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/tj/assert" + + "github.com/argoproj/argo-rollouts/test/fixtures" +) + +type MirrorRouteSuite struct { + fixtures.E2ESuite +} + +func TestMirrorRouteSuite(t *testing.T) { + suite.Run(t, new(MirrorRouteSuite)) +} + +func (s *MirrorRouteSuite) SetupSuite() { + s.E2ESuite.SetupSuite() + if !s.IstioEnabled { + s.T().SkipNow() + } +} + +func (s *MirrorRouteSuite) TestIstioHostMirrorRoute() { + s.Given(). 
+ RolloutObjects("@mirror-route/istio-mirror-host.yaml"). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), "primary", vsvc.Spec.HTTP[0].Name) + }). + When(). + UpdateSpec(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), "mirror-route-1", vsvc.Spec.HTTP[0].Name) + assert.Equal(s.T(), float64(100), vsvc.Spec.HTTP[0].MirrorPercentage.Value) + assert.Equal(s.T(), "mirror-route-2", vsvc.Spec.HTTP[1].Name) + assert.Equal(s.T(), float64(80), vsvc.Spec.HTTP[1].MirrorPercentage.Value) + assertMirrorDestination(s, vsvc.Spec.HTTP[0], "stable-service", int64(80)) + assertMirrorDestination(s, vsvc.Spec.HTTP[0], "canary-service", int64(20)) + assertMirrorDestination(s, vsvc.Spec.HTTP[1], "stable-service", int64(80)) + assertMirrorDestination(s, vsvc.Spec.HTTP[1], "canary-service", int64(20)) + + assertMirrorDestination(s, vsvc.Spec.HTTP[2], "stable-service", int64(80)) + assertMirrorDestination(s, vsvc.Spec.HTTP[2], "canary-service", int64(20)) + }). + When(). + PromoteRollout(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + When(). + PromoteRollout(). + WaitForRolloutStatus("Paused"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), "mirror-route-2", vsvc.Spec.HTTP[0].Name) + assertMirrorDestination(s, vsvc.Spec.HTTP[0], "stable-service", int64(60)) + assertMirrorDestination(s, vsvc.Spec.HTTP[0], "canary-service", int64(40)) + assert.Equal(s.T(), "primary", vsvc.Spec.HTTP[1].Name) + assertMirrorDestination(s, vsvc.Spec.HTTP[1], "stable-service", int64(60)) + assertMirrorDestination(s, vsvc.Spec.HTTP[1], "canary-service", int64(40)) + }). + When(). + PromoteRolloutFull(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), 1, len(vsvc.Spec.HTTP)) + assertMirrorDestination(s, vsvc.Spec.HTTP[0], "stable-service", int64(100)) + assertMirrorDestination(s, vsvc.Spec.HTTP[0], "canary-service", int64(0)) + }) +} + +func assertMirrorDestination(s *MirrorRouteSuite, route istio.VirtualServiceHTTPRoute, service string, weight int64) { + for _, destination := range route.Route { + if destination.Destination.Host == service { + assert.Equal(s.T(), weight, destination.Weight) + return + } + } + assert.Fail(s.T(), "Could not find the destination for service: %s", service) +} diff --git a/test/e2e/smi/rollout-smi-experiment.yaml b/test/e2e/smi/rollout-smi-experiment.yaml index 88e5a529e4..a71a759805 100644 --- a/test/e2e/smi/rollout-smi-experiment.yaml +++ b/test/e2e/smi/rollout-smi-experiment.yaml @@ -28,7 +28,7 @@ spec: # This selector will be updated with the pod-template-hash of the stable ReplicaSet. 
e.g.: # rollouts-pod-template-hash: 789746c88d --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollout-smi-experiment-stable @@ -40,10 +40,13 @@ spec: http: paths: - path: / + pathType: ImplementationSpecific backend: # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field - serviceName: rollout-smi-experiment-stable - servicePort: 80 + service: + name: rollout-smi-experiment-stable + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/smi_ingress/rollout-smi-ingress-canary.yaml b/test/e2e/smi_ingress/rollout-smi-ingress-canary.yaml index 024203b9f3..c9b71c5fcc 100644 --- a/test/e2e/smi_ingress/rollout-smi-ingress-canary.yaml +++ b/test/e2e/smi_ingress/rollout-smi-ingress-canary.yaml @@ -28,7 +28,7 @@ spec: # This selector will be updated with the pod-template-hash of the stable ReplicaSet. e.g.: # rollouts-pod-template-hash: 789746c88d --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollout-smi-ingress-canary-stable @@ -40,10 +40,13 @@ spec: http: paths: - path: / + pathType: Prefix backend: # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field - serviceName: rollout-smi-ingress-canary-stable - servicePort: 80 + service: + name: rollout-smi-ingress-canary-stable + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/test/e2e/smi_ingress_test.go b/test/e2e/smi_ingress_test.go index 2086935f84..ca08bd4248 100644 --- a/test/e2e/smi_ingress_test.go +++ b/test/e2e/smi_ingress_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e diff --git a/test/e2e/smi_test.go b/test/e2e/smi_test.go index ff92cbcc8e..28f50f15e8 100644 --- a/test/e2e/smi_test.go +++ b/test/e2e/smi_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e @@ -60,7 +61,7 @@ func (s *SMISuite) TestSMIExperimentStep() { Assert(func(t *fixtures.Then) { ts := t.GetTrafficSplit() - assert.Len(s.T(), ts.Spec.Backends, 3) + assert.Len(s.T(), ts.Spec.Backends, 3) assert.Equal(s.T(), "rollout-smi-experiment-canary", ts.Spec.Backends[0].Service) assert.Equal(s.T(), int64(5), ts.Spec.Backends[0].Weight.Value()) @@ -87,7 +88,7 @@ func (s *SMISuite) TestSMIExperimentStep() { Assert(func(t *fixtures.Then) { ts := t.GetTrafficSplit() - assert.Len(s.T(), ts.Spec.Backends, 2) + assert.Len(s.T(), ts.Spec.Backends, 2) assert.Equal(s.T(), "rollout-smi-experiment-canary", ts.Spec.Backends[0].Service) assert.Equal(s.T(), int64(0), ts.Spec.Backends[0].Weight.Value()) diff --git a/test/fixtures/common.go b/test/fixtures/common.go index fa0c5b7def..370c80724e 100644 --- a/test/fixtures/common.go +++ b/test/fixtures/common.go @@ -21,7 +21,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -37,6 +37,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/get" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options" "github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/viewcontroller" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting/appmesh" "github.com/argoproj/argo-rollouts/rollout/trafficrouting/istio" 
"github.com/argoproj/argo-rollouts/utils/annotations" istioutil "github.com/argoproj/argo-rollouts/utils/istio" @@ -527,26 +528,26 @@ func (c *Common) GetServices() (*corev1.Service, *corev1.Service) { return desiredSvc, stableSvc } -func (c *Common) GetALBIngress() *extensionsv1beta1.Ingress { +func (c *Common) GetALBIngress() *networkingv1.Ingress { ro := c.Rollout() name := ro.Spec.Strategy.Canary.TrafficRouting.ALB.Ingress - ingress, err := c.kubeClient.ExtensionsV1beta1().Ingresses(c.namespace).Get(c.Context, name, metav1.GetOptions{}) + ingress, err := c.kubeClient.NetworkingV1().Ingresses(c.namespace).Get(c.Context, name, metav1.GetOptions{}) c.CheckError(err) return ingress } -func (c *Common) GetNginxIngressStable() *extensionsv1beta1.Ingress { +func (c *Common) GetNginxIngressStable() *networkingv1.Ingress { ro := c.Rollout() name := ro.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress - ingress, err := c.kubeClient.ExtensionsV1beta1().Ingresses(c.namespace).Get(c.Context, name, metav1.GetOptions{}) + ingress, err := c.kubeClient.NetworkingV1().Ingresses(c.namespace).Get(c.Context, name, metav1.GetOptions{}) c.CheckError(err) return ingress } -func (c *Common) GetNginxIngressCanary() *extensionsv1beta1.Ingress { +func (c *Common) GetNginxIngressCanary() *networkingv1.Ingress { ro := c.Rollout() name := ro.Name + "-" + ro.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress + "-canary" - ingress, err := c.kubeClient.ExtensionsV1beta1().Ingresses(c.namespace).Get(c.Context, name, metav1.GetOptions{}) + ingress, err := c.kubeClient.NetworkingV1().Ingresses(c.namespace).Get(c.Context, name, metav1.GetOptions{}) c.CheckError(err) return ingress } @@ -571,6 +572,19 @@ func (c *Common) GetVirtualService() *istio.VirtualService { return &vsvc } +func (c *Common) GetAppMeshVirtualRouter() *unstructured.Unstructured { + ro := c.Rollout() + ctx := context.TODO() + resClient := appmesh.NewResourceClient(c.dynamicClient) + name := ro.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualService.Name + c.log.Infof("GetVirtualServiceCR with namespace(%s), name(%s)", c.namespace, name) + uVsvc, err := resClient.GetVirtualServiceCR(ctx, c.namespace, name) + c.CheckError(err) + uVr, err := resClient.GetVirtualRouterCRForVirtualService(ctx, uVsvc) + c.CheckError(err) + return uVr +} + func (c *Common) GetDestinationRule() *istio.DestinationRule { ro := c.Rollout() name := ro.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule.Name diff --git a/test/fixtures/e2e_suite.go b/test/fixtures/e2e_suite.go index e7ac739ab6..6eb059b446 100644 --- a/test/fixtures/e2e_suite.go +++ b/test/fixtures/e2e_suite.go @@ -26,6 +26,8 @@ import ( rov1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" clientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" + appmeshutil "github.com/argoproj/argo-rollouts/utils/appmesh" + "github.com/argoproj/argo-rollouts/utils/defaults" istioutil "github.com/argoproj/argo-rollouts/utils/istio" logutil "github.com/argoproj/argo-rollouts/utils/log" smiutil "github.com/argoproj/argo-rollouts/utils/smi" @@ -34,7 +36,7 @@ import ( const ( // E2E_INSTANCE_ID is the instance id label attached to objects created by the e2e tests EnvVarE2EInstanceID = "E2E_INSTANCE_ID" - // E2E_WAIT_TIMEOUT is a timeout in seconds when waiting for a test condition (default: 60) + // E2E_WAIT_TIMEOUT is a timeout in seconds when waiting for a test condition (default: 90) EnvVarE2EWaitTimeout = "E2E_WAIT_TIMEOUT" // E2E_POD_DELAY slows down pod startup and shutdown by 
the value in seconds (default: 0) // Used humans slow down rollout activity during a test @@ -50,7 +52,7 @@ const ( ) var ( - E2EWaitTimeout time.Duration = time.Second * 60 + E2EWaitTimeout time.Duration = time.Second * 120 E2EPodDelay = 0 E2EALBIngressAnnotations map[string]string @@ -118,8 +120,9 @@ type E2ESuite struct { suite.Suite Common - IstioEnabled bool - SMIEnabled bool + IstioEnabled bool + SMIEnabled bool + AppMeshEnabled bool } func (s *E2ESuite) SetupSuite() { @@ -137,8 +140,8 @@ func (s *E2ESuite) SetupSuite() { restConfig, err := config.ClientConfig() s.CheckError(err) s.Common.kubernetesHost = restConfig.Host - restConfig.Burst = 50 - restConfig.QPS = 20 + restConfig.Burst = defaults.DefaultBurst * 2 + restConfig.QPS = defaults.DefaultQPS * 2 s.namespace, _, err = config.Namespace() s.CheckError(err) s.kubeClient, err = kubernetes.NewForConfig(restConfig) @@ -158,6 +161,10 @@ func (s *E2ESuite) SetupSuite() { if smiutil.DoesSMIExist(s.smiClient, s.namespace) { s.SMIEnabled = true } + + if appmeshutil.DoesAppMeshExist(s.dynamicClient, s.namespace) { + s.AppMeshEnabled = true + } } func (s *E2ESuite) TearDownSuite() { diff --git a/test/fixtures/then.go b/test/fixtures/then.go index b99d65347c..eac4c9419b 100644 --- a/test/fixtures/then.go +++ b/test/fixtures/then.go @@ -210,6 +210,52 @@ func (t *Then) ExpectExperimentTemplateReplicaSet(expectation string, experiment return t } +func (t *Then) ExpectExperimentDryRunSummary(expectedCount, expectedErrorCount, expectedFailureCount int32, experiment string) *Then { + expectation := v1alpha1.RunSummary{ + Count: expectedCount, + Error: expectedErrorCount, + Failed: expectedFailureCount, + } + t.log.Infof("Expected Dry-Run Summary: Count=%d, Successful=%d, Failed=%d, Error=%d, Inconclusive=%d", expectation.Count, expectation.Successful, expectation.Failed, expectation.Error, expectation.Inconclusive) + ex, err := t.rolloutClient.ArgoprojV1alpha1().Experiments(t.namespace).Get(t.Context, experiment, metav1.GetOptions{}) + t.CheckError(err) + ar := t.GetExperimentAnalysisRun(ex) + dryRunSummary := ar.Status.DryRunSummary + if dryRunSummary != nil { + t.log.Infof("Analysis Dry-Run Summary: Count=%d, Successful=%d, Failed=%d, Error=%d, Inconclusive=%d", dryRunSummary.Count, dryRunSummary.Successful, dryRunSummary.Failed, dryRunSummary.Error, dryRunSummary.Inconclusive) + if expectation == *dryRunSummary { + t.log.Infof("Expectation Matches!") + } else { + t.log.Errorf("Dry-Run Summary of AnalysisRun: '%s' doesn't match the expectations", ar.Name) + t.t.FailNow() + } + } else { + t.log.Errorf("Dry-Run Summary not found in the AnalysisRun: '%s'", ar.Name) + t.t.FailNow() + } + return t +} + +func (t *Then) ExpectExperimentMeasurementsLength(metricResultsIndex, expectedMeasurementsLength int, experiment string) *Then { + t.log.Infof("Expected Measurements Length '%d' for MetricResults index '%d'", expectedMeasurementsLength, metricResultsIndex) + ex, err := t.rolloutClient.ArgoprojV1alpha1().Experiments(t.namespace).Get(t.Context, experiment, metav1.GetOptions{}) + t.CheckError(err) + ar := t.GetExperimentAnalysisRun(ex) + if len(ar.Status.MetricResults) <= metricResultsIndex { + t.log.Errorf("MetricResults Array doesn't have given index '%d' in the AnalysisRun: '%s'", metricResultsIndex, ar.Name) + t.t.FailNow() + } + measurementsLength := len(ar.Status.MetricResults[metricResultsIndex].Measurements) + t.log.Infof("Actual Measurements Length at index '%d': '%d'", metricResultsIndex, measurementsLength) + if measurementsLength == 
expectedMeasurementsLength { + t.log.Infof("Expectation Matches!") + } else { + t.log.Errorf("Measurements Length at index '%d' of AnalysisRun: '%s' doesn't match the expectations", metricResultsIndex, ar.Name) + t.t.FailNow() + } + return t +} + func (t *Then) ExpectExperimentTemplateReplicaSetNumReplicas(experiment string, template string, expectedReplicas int) *Then { return t.ExpectExperimentTemplateReplicaSet(fmt.Sprintf("experiment template '%s' num replicas == %d", template, expectedReplicas), experiment, template, func(rs *appsv1.ReplicaSet) bool { return int(rs.Status.Replicas) == expectedReplicas diff --git a/test/fixtures/when.go b/test/fixtures/when.go index 7df2beb810..40998dc73f 100644 --- a/test/fixtures/when.go +++ b/test/fixtures/when.go @@ -75,7 +75,7 @@ func (w *When) injectDelays(un *unstructured.Unstructured) { if E2EPodDelay == 0 { return } - sleepHandler := corev1.Handler{ + sleepHandler := corev1.LifecycleHandler{ Exec: &corev1.ExecAction{ Command: []string{"sleep", strconv.Itoa(E2EPodDelay)}, }, @@ -452,10 +452,17 @@ func (w *When) DeleteRollout() *When { func (w *When) WaitForExperimentCondition(name string, test func(ex *rov1.Experiment) bool, condition string, timeout time.Duration) *When { start := time.Now() w.log.Infof("Waiting for Experiment %s condition: %s", name, condition) - opts := metav1.ListOptions{FieldSelector: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name)).String()} - watch, err := w.rolloutClient.ArgoprojV1alpha1().Experiments(w.namespace).Watch(w.Context, opts) + exIf := w.dynamicClient.Resource(rov1.ExperimentGVR).Namespace(w.namespace) + ex, err := exIf.Get(w.Context, name, metav1.GetOptions{}) w.CheckError(err) - defer watch.Stop() + retryWatcher, err := watchutil.NewRetryWatcher(ex.GetResourceVersion(), &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + opts := metav1.ListOptions{FieldSelector: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name)).String()} + return w.rolloutClient.ArgoprojV1alpha1().Experiments(w.namespace).Watch(w.Context, opts) + }, + }) + w.CheckError(err) + defer retryWatcher.Stop() timeoutCh := make(chan bool, 1) go func() { time.Sleep(timeout) @@ -463,7 +470,7 @@ func (w *When) WaitForExperimentCondition(name string, test func(ex *rov1.Experi }() for { select { - case event := <-watch.ResultChan(): + case event := <-retryWatcher.ResultChan(): ex, ok := event.Object.(*rov1.Experiment) if ok { if test(ex) { @@ -482,10 +489,17 @@ func (w *When) WaitForExperimentCondition(name string, test func(ex *rov1.Experi func (w *When) WaitForAnalysisRunCondition(name string, test func(ar *rov1.AnalysisRun) bool, condition string, timeout time.Duration) *When { start := time.Now() w.log.Infof("Waiting for AnalysisRun %s condition: %s", name, condition) - opts := metav1.ListOptions{FieldSelector: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name)).String()} - watch, err := w.rolloutClient.ArgoprojV1alpha1().AnalysisRuns(w.namespace).Watch(w.Context, opts) + arIf := w.dynamicClient.Resource(rov1.AnalysisRunGVR).Namespace(w.namespace) + ar, err := arIf.Get(w.Context, name, metav1.GetOptions{}) w.CheckError(err) - defer watch.Stop() + retryWatcher, err := watchutil.NewRetryWatcher(ar.GetResourceVersion(), &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + opts := metav1.ListOptions{FieldSelector: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name)).String()} + return 
w.rolloutClient.ArgoprojV1alpha1().AnalysisRuns(w.namespace).Watch(w.Context, opts) + }, + }) + w.CheckError(err) + defer retryWatcher.Stop() timeoutCh := make(chan bool, 1) go func() { time.Sleep(timeout) @@ -493,7 +507,7 @@ func (w *When) WaitForAnalysisRunCondition(name string, test func(ar *rov1.Analy }() for { select { - case event := <-watch.ResultChan(): + case event := <-retryWatcher.ResultChan(): ar, ok := event.Object.(*rov1.AnalysisRun) if ok { if test(ar) { diff --git a/test/kustomize/rollout/expected.yaml b/test/kustomize/rollout/expected.yaml index eebed46dc9..e9dee28f1c 100644 --- a/test/kustomize/rollout/expected.yaml +++ b/test/kustomize/rollout/expected.yaml @@ -351,7 +351,7 @@ spec: host: guestbook-canary-svc weight: 0 --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: @@ -365,5 +365,7 @@ spec: http: paths: - backend: - serviceName: website - servicePort: 80 + service: + name: website + port: + number: 80 diff --git a/test/kustomize/rollout/rollout.yaml b/test/kustomize/rollout/rollout.yaml index 5a0db90149..03b78371a3 100644 --- a/test/kustomize/rollout/rollout.yaml +++ b/test/kustomize/rollout/rollout.yaml @@ -182,7 +182,7 @@ spec: targetPort: 8080 --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: networking-ingress @@ -192,8 +192,10 @@ spec: http: paths: - backend: - serviceName: website - servicePort: 80 + service: + name: website + port: + number: 80 --- apiVersion: extensions/v1beta1 diff --git a/ui/package.json b/ui/package.json index 5a27c50c0a..15627c8da8 100644 --- a/ui/package.json +++ b/ui/package.json @@ -3,16 +3,9 @@ "version": "0.1.0", "private": true, "dependencies": { - "@testing-library/jest-dom": "^5.11.4", - "@testing-library/react": "^11.1.0", - "@testing-library/user-event": "^12.1.10", - "@types/jest": "^26.0.15", - "@types/node": "^12.0.0", - "@types/react": "^16.9.3", - "@types/react-dom": "^16.9.3", - "@types/react-helmet": "^6.1.0", - "@types/react-router-dom": "^5.1.7", "argo-ui": "git+https://github.com/argoproj/argo-ui.git", + "classnames": "2.2.6", + "isomorphic-fetch": "^3.0.0", "moment": "^2.29.1", "moment-timezone": "^0.5.33", "portable-fetch": "^3.0.0", @@ -21,19 +14,17 @@ "react-helmet": "^6.1.0", "react-hot-loader": "^3.1.3", "react-keyhooks": "^0.2.3", - "react-router-dom": "^5.2.0", - "react-scripts": "4.0.3", + "react-router-dom": "5.2.0", "rxjs": "^6.6.6", - "typescript": "^4.1.2", - "web-vitals": "^1.0.1", - "webpack-dev-server": "^3.11.2" + "typescript": "4.3.5", + "web-vitals": "^1.0.1" }, "scripts": { "start": "webpack serve --config ./src/app/webpack.dev.js", "build": "rm -rf dist && webpack --config ./src/app/webpack.prod.js", "test": "react-scripts test", "eject": "react-scripts eject", - "protogen": "swagger-codegen generate -i ../pkg/apiclient/rollout/rollout.swagger.json -l typescript-fetch -o src/models/rollout/generated" + "protogen": "../hack/swagger-codegen.sh generate -i ../pkg/apiclient/rollout/rollout.swagger.json -l typescript-fetch -o src/models/rollout/generated" }, "eslintConfig": { "extends": [ @@ -54,16 +45,28 @@ ] }, "devDependencies": { + "@testing-library/jest-dom": "^5.11.4", + "@testing-library/react": "^11.1.0", + "@testing-library/user-event": "^12.1.10", + "@types/classnames": "2.2.9", + "@types/jest": "^26.0.15", + "@types/node": "^12.0.0", + "@types/react": "^16.9.3", + "@types/react-dom": "^16.9.3", + "@types/react-helmet": "^6.1.0", + "@types/react-router-dom": "^5.1.7", 
"copy-webpack-plugin": "^6.3.2", "mini-css-extract-plugin": "^1.3.9", "raw-loader": "^4.0.2", + "react-scripts": "4.0.3", "sass": "^1.32.8", "ts-loader": "^8.0.17", "webpack-bundle-analyzer": "^4.4.0", "webpack-cli": "^4.5.0", + "webpack-dev-server": "^3.11.2", "webpack-merge": "^5.7.3" }, "resolutions": { "@types/react": "16.9.3" } -} +} \ No newline at end of file diff --git a/ui/src/app/App.tsx b/ui/src/app/App.tsx index bbc5bec71f..b1d28fa5d5 100644 --- a/ui/src/app/App.tsx +++ b/ui/src/app/App.tsx @@ -3,7 +3,7 @@ import {Header} from './components/header/header'; import {createBrowserHistory} from 'history'; import * as React from 'react'; import {Key, KeybindingContext, KeybindingProvider} from 'react-keyhooks'; -import {Redirect, Route, Router, Switch} from 'react-router-dom'; +import {Route, Router, Switch} from 'react-router-dom'; import './App.scss'; import {NamespaceContext, RolloutAPI} from './shared/context/api'; import {Modal} from './components/modal/modal'; @@ -60,12 +60,23 @@ const App = () => { const [namespace, setNamespace] = React.useState(init); const [availableNamespaces, setAvailableNamespaces] = React.useState([]); React.useEffect(() => { - RolloutAPI.rolloutServiceGetNamespace().then((info) => { - if (!namespace) { - setNamespace(info.namespace); - } - setAvailableNamespaces(info.availableNamespaces); - }); + try { + RolloutAPI.rolloutServiceGetNamespace() + .then((info) => { + if (!info) { + throw new Error(); + } + if (!namespace) { + setNamespace(info.namespace); + } + setAvailableNamespaces(info.availableNamespaces); + }) + .catch((e) => { + setAvailableNamespaces([namespace]); + }); + } catch (e) { + setAvailableNamespaces([namespace]); + } }, []); const changeNamespace = (val: string) => { setNamespace(val); @@ -79,11 +90,9 @@ const App = () => { - - } shortcuts={[ {key: '/', description: 'Search'}, @@ -93,9 +102,7 @@ const App = () => { ]} changeNamespace={changeNamespace} /> - } changeNamespace={changeNamespace} /> - - + } changeNamespace={changeNamespace} /> diff --git a/ui/src/app/components/header/header.tsx b/ui/src/app/components/header/header.tsx index ff58a99114..edda274ea8 100644 --- a/ui/src/app/components/header/header.tsx +++ b/ui/src/app/components/header/header.tsx @@ -13,6 +13,7 @@ export const Header = (props: {pageHasShortcuts: boolean; changeNamespace: (val: const history = useHistory(); const namespaceInfo = React.useContext(NamespaceContext); const {name} = useParams<{name: string}>(); + const {namespace} = useParams<{namespace: string}>(); const api = React.useContext(RolloutAPIContext); const [version, setVersion] = React.useState('v?'); const [nsInput, setNsInput] = React.useState(namespaceInfo.namespace); @@ -23,6 +24,12 @@ export const Header = (props: {pageHasShortcuts: boolean; changeNamespace: (val: }; getVersion(); }, []); + React.useEffect(() => { + if (namespace && namespace != namespaceInfo.namespace) { + props.changeNamespace(namespace); + setNsInput(namespace); + } + }, []); return ( @@ -54,8 +61,9 @@ export const Header = (props: {pageHasShortcuts: boolean; changeNamespace: (val: placeholder='Namespace' onChange={(el) => setNsInput(el.target.value)} onItemClick={(val) => { - props.changeNamespace(val ? val : nsInput); - history.push(`/rollouts`); + const selectedNamespace = val ? 
val : nsInput; + props.changeNamespace(selectedNamespace); + history.push(`/${selectedNamespace}`); }} value={nsInput} /> diff --git a/ui/src/app/components/pods/pods.scss b/ui/src/app/components/pods/pods.scss index 29b72ac338..ee3121b111 100644 --- a/ui/src/app/components/pods/pods.scss +++ b/ui/src/app/components/pods/pods.scss @@ -32,6 +32,7 @@ $POD_SIZE: 30px; } } +/*This is used as an icon in pod, analysis job and analysis nonjob */ .pod-icon { width: $POD_SIZE; height: $POD_SIZE; @@ -42,6 +43,7 @@ $POD_SIZE: 30px; background-color: $fog; color: $shine; border-radius: 3px; + margin: 2px; cursor: pointer; &--success { diff --git a/ui/src/app/components/pods/pods.tsx b/ui/src/app/components/pods/pods.tsx index 30b13ed854..b45bd2e168 100644 --- a/ui/src/app/components/pods/pods.tsx +++ b/ui/src/app/components/pods/pods.tsx @@ -1,7 +1,8 @@ -import {Menu, ThemeDiv, Tooltip, WaitFor} from 'argo-ui/v2'; +import {Menu, ThemeDiv, Tooltip, WaitFor, InfoItem} from 'argo-ui/v2'; import * as React from 'react'; +import * as moment from 'moment'; +import {Duration, Ticker} from 'argo-ui'; import {RolloutReplicaSetInfo} from '../../../models/rollout/generated'; -import {Pod} from '../../../models/rollout/rollout'; import {ReplicaSetStatus, ReplicaSetStatusIcon} from '../status-icon/status-icon'; import './pods.scss'; @@ -21,6 +22,7 @@ export const ParsePodStatus = (status: string): PodStatus => { return PodStatus.Pending; case 'Running': case 'Completed': + case 'Successful': return PodStatus.Success; case 'Failed': case 'InvalidImageName': @@ -34,8 +36,8 @@ export const ParsePodStatus = (status: string): PodStatus => { } }; -export const PodIcon = (props: {status: string}) => { - const {status} = props; +export const PodIcon = (props: {status: string; customIcon?: string}) => { + const {status, customIcon} = props; let icon; let spin = false; if (status.startsWith('Init:')) { @@ -51,25 +53,27 @@ export const PodIcon = (props: {status: string}) => { const className = ParsePodStatus(status); - switch (className) { - case PodStatus.Pending: - icon = 'fa-circle-notch'; - spin = true; - break; - case PodStatus.Success: - icon = 'fa-check'; - break; - case PodStatus.Failed: - icon = 'fa-times'; - break; - case PodStatus.Warning: - icon = 'fa-exclamation-triangle'; - break; - default: - spin = false; - icon = 'fa-question-circle'; - break; - } + if (customIcon) icon = customIcon; + else + switch (className) { + case PodStatus.Pending: + icon = 'fa-circle-notch'; + spin = true; + break; + case PodStatus.Success: + icon = 'fa-check'; + break; + case PodStatus.Failed: + icon = 'fa-times'; + break; + case PodStatus.Warning: + icon = 'fa-exclamation-triangle'; + break; + default: + spin = false; + icon = 'fa-question-circle'; + break; + } return ( @@ -107,6 +111,25 @@ export const ReplicaSet = (props: {rs: RolloutReplicaSetInfo; showRevision?: boo {rsName} {props.showRevision &&
Revision {props.rs.revision}
} + {props.rs.scaleDownDeadline && ( +
+ + {(now) => { + const time = moment(props.rs.scaleDownDeadline).diff(now, 'second'); + return time <= 0 ? null : ( + + Scaledown in + + }> + ) as any} icon='fa fa-clock'> + + ); + }} + +
+ )}
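The hunk above adds a "Scaledown in" countdown to the ReplicaSet widget, driven by `moment` and the argo-ui `Ticker`/`Duration` components against the new `scaleDownDeadline` field. A minimal sketch of the underlying calculation, using plain `Date` arithmetic instead of `moment` so it stands alone (that substitution is mine):

```typescript
// Sketch: remaining scale-down time for a ReplicaSet, derived from the
// RFC 3339 `scaleDownDeadline` string carried on RolloutReplicaSetInfo.
// Returns null when the deadline is malformed or already passed (nothing to render).
function secondsUntilScaleDown(scaleDownDeadline: string, now: Date = new Date()): number | null {
    const deadlineMs = Date.parse(scaleDownDeadline);
    if (Number.isNaN(deadlineMs)) {
        return null;
    }
    const remaining = Math.floor((deadlineMs - now.getTime()) / 1000);
    return remaining <= 0 ? null : remaining;
}

// Example: a deadline 30 seconds in the future yields roughly 30.
const deadline = new Date(Date.now() + 30_000).toISOString();
console.log(secondsUntilScaleDown(deadline));
```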
)} @@ -114,7 +137,17 @@ export const ReplicaSet = (props: {rs: RolloutReplicaSetInfo; showRevision?: boo {props.rs.pods.map((pod, i) => ( - + +
Status: {pod.status}
+
{pod.objectMeta?.name}
+ + } + /> ))}
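The pod list above now renders each pod through the reworked `PodWidget`, which resolves a Font Awesome icon from the pod's status and accepts a `customIcon` override. A condensed, non-JSX sketch of that mapping; the enum values and icon class names mirror the hunk, while the standalone function shape is an assumption for illustration:

```typescript
enum PodStatus {
    Pending = 'pending',
    Success = 'success',
    Failed = 'failure',
    Warning = 'warning',
}

// Sketch of the status-to-icon mapping used by PodIcon, with the optional
// customIcon override added in this change taking precedence.
function podIconClass(status: PodStatus, customIcon?: string): {icon: string; spin: boolean} {
    if (customIcon) {
        return {icon: customIcon, spin: false};
    }
    switch (status) {
        case PodStatus.Pending:
            return {icon: 'fa-circle-notch', spin: true};
        case PodStatus.Success:
            return {icon: 'fa-check', spin: false};
        case PodStatus.Failed:
            return {icon: 'fa-times', spin: false};
        case PodStatus.Warning:
            return {icon: 'fa-exclamation-triangle', spin: false};
        default:
            return {icon: 'fa-question-circle', spin: false};
    }
}

console.log(podIconClass(PodStatus.Success));                 // fa-check
console.log(podIconClass(PodStatus.Failed, 'fa-chart-bar'));  // custom override wins
```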
@@ -123,16 +156,10 @@ export const ReplicaSet = (props: {rs: RolloutReplicaSetInfo; showRevision?: boo ); }; -export const PodWidget = (props: {pod: Pod}) => ( - navigator.clipboard.writeText(props.pod.objectMeta?.name), icon: 'fa-clipboard'}]}> - -
Status: {props.pod.status}
-
{props.pod.objectMeta?.name}
- - }> - +export const PodWidget = ({name, status, tooltip, customIcon}: {name: string; status: string; tooltip: React.ReactNode; customIcon?: string}) => ( + navigator.clipboard.writeText(name), icon: 'fa-clipboard'}]}> + + ); diff --git a/ui/src/app/components/rollout-actions/rollout-actions.tsx b/ui/src/app/components/rollout-actions/rollout-actions.tsx index fb7c8321e0..283af8c4ca 100644 --- a/ui/src/app/components/rollout-actions/rollout-actions.tsx +++ b/ui/src/app/components/rollout-actions/rollout-actions.tsx @@ -18,6 +18,7 @@ export const RolloutActionButton = (props: {action: RolloutAction; rollout: Roll const namespaceCtx = React.useContext(NamespaceContext); const restartedAt = formatTimestamp(props.rollout.restartedAt || ''); + const isDeploying = props.rollout.status === RolloutStatus.Progressing || props.rollout.status === RolloutStatus.Paused const actionMap = new Map([ [ @@ -36,6 +37,7 @@ export const RolloutActionButton = (props: {action: RolloutAction; rollout: Roll label: 'RETRY', icon: 'fa-redo-alt', action: api.rolloutServiceRetryRollout, + disabled: props.rollout.status !== RolloutStatus.Degraded, shouldConfirm: true, }, ], @@ -45,6 +47,7 @@ export const RolloutActionButton = (props: {action: RolloutAction; rollout: Roll label: 'ABORT', icon: 'fa-exclamation-circle', action: api.rolloutServiceAbortRollout, + disabled: !isDeploying, shouldConfirm: true, }, ], @@ -55,7 +58,7 @@ export const RolloutActionButton = (props: {action: RolloutAction; rollout: Roll icon: 'fa-chevron-circle-up', action: api.rolloutServicePromoteRollout, body: {full: false}, - disabled: props.rollout.status !== RolloutStatus.Paused, + disabled: !isDeploying, shouldConfirm: true, }, ], @@ -64,9 +67,9 @@ export const RolloutActionButton = (props: {action: RolloutAction; rollout: Roll { label: 'PROMOTE-FULL', icon: 'fa-arrow-circle-up', - body: {full: true}, action: api.rolloutServicePromoteRollout, - disabled: props.rollout.status !== RolloutStatus.Paused, + body: {full: true}, + disabled: !isDeploying, shouldConfirm: true, }, ], diff --git a/ui/src/app/components/rollout/containers.tsx b/ui/src/app/components/rollout/containers.tsx index 6c31e7b9a9..051c8c606c 100644 --- a/ui/src/app/components/rollout/containers.tsx +++ b/ui/src/app/components/rollout/containers.tsx @@ -98,8 +98,12 @@ const ContainerWidget = (props: {container: RolloutContainerInfo; images: ImageI return (
{container.name}
-
- {!editing ? : img.image)} placeholder='New Image' {...newImageInput} />} +
+ {!editing ? ( + + ) : ( + img.image)} placeholder='New Image' {...newImageInput} /> + )}
); diff --git a/ui/src/app/components/rollout/revision.tsx b/ui/src/app/components/rollout/revision.tsx index 014423cbfe..f61a9229c7 100644 --- a/ui/src/app/components/rollout/revision.tsx +++ b/ui/src/app/components/rollout/revision.tsx @@ -2,11 +2,13 @@ import {ActionButton, EffectDiv, formatTimestamp, InfoItemProps, InfoItemRow, Th import * as React from 'react'; import {RolloutAnalysisRunInfo, RolloutExperimentInfo, RolloutReplicaSetInfo} from '../../../models/rollout/generated'; import {IconForTag} from '../../shared/utils/utils'; -import {ReplicaSets} from '../pods/pods'; +import {PodWidget, ReplicaSets} from '../pods/pods'; import {ImageInfo, parseImages} from './rollout'; +import './rollout.scss'; +import '../pods/pods.scss'; export interface Revision { - number: number; + number: string; replicaSets: RolloutReplicaSetInfo[]; experiments: RolloutExperimentInfo[]; analysisRuns: RolloutAnalysisRunInfo[]; @@ -33,6 +35,7 @@ interface RevisionWidgetProps { initCollapsed?: boolean; rollback?: (revision: number) => void; current: boolean; + message: String; } export const RevisionWidget = (props: RevisionWidgetProps) => { @@ -46,7 +49,14 @@ export const RevisionWidget = (props: RevisionWidgetProps) => { Revision {revision.number}
{!props.current && props.rollback && ( - props.rollback(revision.number)} label='ROLLBACK' icon='fa-undo-alt' style={{fontSize: '13px'}} indicateLoading shouldConfirm /> + props.rollback(Number(revision.number))} + label='ROLLBACK' + icon='fa-undo-alt' + style={{fontSize: '13px'}} + indicateLoading + shouldConfirm + /> )} setCollapsed(!collapsed)}> @@ -75,22 +85,181 @@ export const RevisionWidget = (props: RevisionWidgetProps) => { const AnalysisRunWidget = (props: {analysisRuns: RolloutAnalysisRunInfo[]}) => { const {analysisRuns} = props; + const [selection, setSelection] = React.useState(null); + return ( -
Analysis Runs
+
Analysis Runs
- {analysisRuns.map((ar) => ( - -
{ar.objectMeta.name}
-
Created at {formatTimestamp(JSON.stringify(ar.objectMeta.creationTimestamp))}
- - }> - -
- ))} + {analysisRuns.map((ar) => { + let temp = ar.objectMeta.name.split('-'); + let len = temp.length; + return ( + +
+ Name: {ar.objectMeta.name} +
+
+ Created at: + {formatTimestamp(JSON.stringify(ar.objectMeta?.creationTimestamp))} +
+
+ Status: + {ar.status} +
+ + }> +
+ (selection?.objectMeta.name === ar.objectMeta.name ? setSelection(null) : setSelection(ar))} + label={`Analysis ${temp[len - 2] + '-' + temp[len - 1]}`} + /> +
+
+ ); + })}
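Each button label above is derived from the last two segments of the AnalysisRun name (`temp[len - 2]` and `temp[len - 1]`). A small sketch of that derivation; the guard for short names is an addition, not something the hunk does:

```typescript
// Sketch: derive the short button label from a full AnalysisRun name,
// e.g. "guestbook-canary-7d5f6-2-1" -> "Analysis 2-1".
function analysisRunLabel(name: string): string {
    const parts = name.split('-');
    if (parts.length < 2) {
        return name; // defensive fallback; the hunk assumes at least two segments
    }
    return `Analysis ${parts[parts.length - 2]}-${parts[parts.length - 1]}`;
}

console.log(analysisRunLabel('guestbook-canary-7d5f6-2-1')); // "Analysis 2-1"
```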
+ + {selection && ( + +
+ {selection.objectMeta?.name} + +
+ {selection?.jobs && ( +
+
+ {selection.jobs.map((job) => { + return ( + +
job-name: {job.objectMeta?.name}
+
StartedAt: {formatTimestamp(JSON.stringify(job.startedAt))}
+
Status: {job.status}
+
MetricName: {job.metricName}
+
+ } + customIcon='fa-chart-bar' + /> + ); + })} +
+ metric.name === selection.jobs[0].metricName) + .map((metric) => { + return ( + + {metric?.name && ( +
+ MetricName: {metric.name} +
+ )} + {metric?.successCondition && ( +
+ SuccessCondition: + {metric.successCondition} +
+ )} + {metric?.failureLimit && ( +
+ FailureLimit: {metric.failureLimit} +
+ )} + {metric?.inconclusiveLimit && ( +
+ InconclusiveLimit: + {metric.inconclusiveLimit} +
+ )} + {metric?.count && ( +
+ Count: + {metric.count} +
+ )} +
+ ); + })}> + +
+
+ )} + {selection?.nonJobInfo && ( +
+
+ {selection.nonJobInfo.map((nonJob) => { + return ( + +
Value: {JSON.stringify(JSON.parse(nonJob.value), null, 2)}
+
StartedAt: {formatTimestamp(JSON.stringify(nonJob.startedAt))}
+
Status: {nonJob.status}
+
MetricName: {nonJob.metricName}
+
+ } + customIcon='fa-chart-bar' + /> + ); + })} +
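Non-job measurements above are displayed by round-tripping `nonJob.value` through `JSON.parse`/`JSON.stringify`. A hedged sketch of the same formatting with a fallback for values that are not valid JSON (the fallback is an assumption; the hunk parses unconditionally):

```typescript
// Sketch: pretty-print an analysis measurement value for a tooltip,
// falling back to the raw string when it is not JSON.
function formatMeasurementValue(value: string): string {
    try {
        return JSON.stringify(JSON.parse(value), null, 2);
    } catch {
        return value;
    }
}

console.log(formatMeasurementValue('{"result":[1.01]}')); // pretty-printed JSON
console.log(formatMeasurementValue('NaN'));               // not valid JSON, returned as-is
```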
+ metric.name === selection.nonJobInfo[0].metricName) + .map((metric) => { + return ( + + {metric?.name && ( +
+ MetricName: {metric.name} +
+ )} + {metric?.successCondition && ( +
+ SuccessCondition: + {metric.successCondition} +
+ )} + {metric?.failureLimit && ( +
+ FailureLimit: {metric.failureLimit} +
+ )} + {metric?.inconclusiveLimit && ( +
+ InconclusiveLimit: + {metric.inconclusiveLimit} +
+ )} + {metric?.count && ( +
+ Count: + {metric.count} +
+ )} +
+ ); + })}> + +
+
+ )} + + )} ); }; diff --git a/ui/src/app/components/rollout/rollout.scss b/ui/src/app/components/rollout/rollout.scss index 897f43e1ef..88819de47b 100644 --- a/ui/src/app/components/rollout/rollout.scss +++ b/ui/src/app/components/rollout/rollout.scss @@ -76,6 +76,54 @@ border: 2px solid $sherbert; } + &-title { + padding-left: 16px; + padding-right: 16px; + margin: 0 -16px; + display: flex; + align-items: center; + &--experiment { + padding-bottom: 8px; + border-bottom: 1px solid $argo-color-gray-6; + } + & > i:first-child { + margin-right: 5px; + } + } + + &__content { + margin-left: -16px; + margin-right: -16px; + max-height: 160px; + overflow-y: auto; + + &-body { + padding: 8px 16px; + } + + &-header { + display: flex; + align-items: center; + justify-content: space-between; + } + + &-title { + padding-top: 8px; + padding-bottom: 4px; + font-size: 14px; + } + + &-value { + font-weight: 600; + padding-bottom: 4px; + border-bottom: 1px solid $argo-color-gray-4; + &:last-child { + padding-bottom: -8px; + border-bottom: none; + } + } + } + &:hover > &__background { transform: scale(1.02); } @@ -148,30 +196,110 @@ padding-top: 1em; border-top: 1px solid $argo-color-gray-4; &__runs { - margin-top: 1em; + margin: 12px 0px; display: flex; + flex-wrap: wrap; + + &-action { + &.analysis--pending .action-button__background { + background-color: $argo-color-gray-4; + border-color: $argo-color-gray-4; + } + + &.analysis--success .action-button__background { + background-color: $argo-success-color; + border-color: $argo-success-color; + } + + &.analysis--failure .action-button__background { + background-color: rgb(238, 111, 111); + border-color: rgb(238, 111, 111); + } + + .action-button { + font-size: '10px'; + line-height: 1; + border: '1px solid'; + padding: '8px 8px 8px 10px'; + border-radius: '12px'; + color: 'white'; + margin-top: 4px; + } + + &.analysis--success .action-button:hover { + .action-button__background { + background-color: seagreen; + border-color: seagreen; + } + } + + &.analysis--failure .action-button:hover { + .action-button__background { + background-color: red; + border-color: red; + } + } + } } &--dark { border-top: 1px solid $silver-lining; } + &-header { + font-size: 16px; + font-weight: 500; + display: flex; + align-items: center; + & i { + margin-left: 4px; + } + } &__run { width: 40px; + cursor: pointer; margin-right: 5px; + margin-bottom: 12px; height: 10px; - border-radius: 3px; + border-radius: 5px; background-color: $argo-color-gray-4; + &--successful { background-color: $argo-success-color; } - &--failed { background-color: $coral; } - &--running { - background-color: $sky; + &__jobs { + margin-top: 8px; + margin-bottom: 12px; + background-color: $argo-color-gray-3; + border-radius: 3px; + display: flex; + align-items: center; + padding: 7px; + + &-info { + font-size: 20px; + margin-left: 4px; + } + + &-list { + flex: 1 1 0; + display: flex; + align-items: center; + flex-wrap: wrap; + } } } + &--success { + color: $argo-success-color; + margin-left: 4px; + } + + &--failure { + color: $clay; + margin-left: 4px; + } } .containers { diff --git a/ui/src/app/components/rollout/rollout.tsx b/ui/src/app/components/rollout/rollout.tsx index 12a00ba6a5..6c3f3a0393 100644 --- a/ui/src/app/components/rollout/rollout.tsx +++ b/ui/src/app/components/rollout/rollout.tsx @@ -3,7 +3,15 @@ import * as React from 'react'; import {Helmet} from 'react-helmet'; import {Key, KeybindingContext} from 'react-keyhooks'; import {useHistory, useParams} from 'react-router-dom'; -import 
{GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep, RolloutReplicaSetInfo, RolloutRolloutInfo, RolloutServiceApi} from '../../../models/rollout/generated'; +import { + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1HeaderRoutingMatch, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentTemplate, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute, + RolloutReplicaSetInfo, + RolloutRolloutInfo, + RolloutServiceApi, +} from '../../../models/rollout/generated'; import {RolloutInfo} from '../../../models/rollout/rollout'; import {NamespaceContext, RolloutAPIContext} from '../../shared/context/api'; import {useWatchRollout} from '../../shared/services/rollout'; @@ -12,6 +20,7 @@ import {RolloutStatus, StatusIcon} from '../status-icon/status-icon'; import {ContainersWidget} from './containers'; import {Revision, RevisionWidget} from './revision'; import './rollout.scss'; +import {Fragment} from 'react'; const RolloutActions = React.lazy(() => import('../rollout-actions/rollout-actions')); export interface ImageInfo { @@ -121,11 +130,11 @@ export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?: interactive={ interactive ? { - editState: interactive.editState, - setImage: (container, image, tag) => { - interactive.api.rolloutServiceSetRolloutImage({}, interactive.namespace, rollout.objectMeta?.name, container, image, tag); - }, - } + editState: interactive.editState, + setImage: (container, image, tag) => { + interactive.api.rolloutServiceSetRolloutImage({}, interactive.namespace, rollout.objectMeta?.name, container, image, tag); + }, + } : null } /> @@ -144,6 +153,7 @@ export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?: initCollapsed={false} rollback={interactive ? (r) => interactive.api.rolloutServiceUndoRollout({}, interactive.namespace, rollout.objectMeta.name, `${r}`) : null} current={i === 0} + message={rollout.message} /> ))} @@ -153,9 +163,11 @@ export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?: Steps
-                        {rollout.steps.map((step, i) => (
-
-                        ))}
+                        {rollout.steps
+                            .filter((step) => Object.keys(step).length)
+                            .map((step, i, arr) => (
+
+                            ))}
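The reworked steps list filters out empty step objects before rendering, so placeholder entries returned by the API no longer produce blank rows. A standalone sketch of that filter; the loose `Record<string, unknown>` typing stands in for the generated `...CanaryStep` interface:

```typescript
// Sketch: drop empty canary-step objects before rendering, as the
// RolloutWidget hunk does with Object.keys(step).length.
type CanaryStepLike = Record<string, unknown>;

function renderableSteps(steps: CanaryStepLike[]): CanaryStepLike[] {
    return steps.filter((step) => Object.keys(step).length > 0);
}

const steps: CanaryStepLike[] = [{setWeight: 20}, {}, {pause: {duration: '1m'}}];
console.log(renderableSteps(steps).length); // 2 — the empty step is skipped
```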
)} @@ -221,7 +233,7 @@ const ProcessRevisions = (ri: RolloutInfo): Revision[] => { if (!ri) { return; } - const map: {[key: number]: Revision} = {}; + const map: {[key: string]: Revision} = {}; const emptyRevision = {replicaSets: [], experiments: [], analysisRuns: []} as Revision; @@ -266,6 +278,12 @@ const parseDuration = (duration: string): string => { }; const Step = (props: {step: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep; complete?: boolean; current?: boolean; last?: boolean}) => { + const [openedTemplate, setOpenedTemplate] = React.useState(''); + const [openCanary, setOpenCanary] = React.useState(false); + const [openAnalysis, setOpenAnalysis] = React.useState(false); + const [openHeader, setOpenHeader] = React.useState(false); + const [openMirror, setOpenMirror] = React.useState(false); + let icon: string; let content = ''; let unit = ''; @@ -294,13 +312,192 @@ const Step = (props: {step: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1 icon = 'fa-flask'; } + if (props.step.setMirrorRoute) { + content = `Set Mirror: ${props.step.setMirrorRoute.name}`; + if(!props.step.setMirrorRoute.match) { + content = `Remove Mirror: ${props.step.setMirrorRoute.name}`; + } + } + + if (props.step.setHeaderRoute) { + content = `Set Header: ${props.step.setHeaderRoute.name}`; + if (!props.step.setHeaderRoute.match) { + content = `Remove Header: ${props.step.setHeaderRoute.name}`; + } + } + return ( - {content} - {unit} +
+ {icon && } {content} + {unit} + {props.step.setCanaryScale && ( + setOpenCanary(!openCanary)}> + + + )} + {props.step.analysis && ( + setOpenAnalysis(!openAnalysis)}> + + + )} + + {props.step.setHeaderRoute && props.step.setHeaderRoute.match &&( + setOpenHeader(!openHeader)}> + + + )} + {props.step.setMirrorRoute && props.step.setMirrorRoute.match && ( + setOpenMirror(!openMirror)}> + + + )} +
+ {props.step.experiment?.templates && ( +
+ {props.step.experiment?.templates.map((template) => { + return ; + })} +
+ )} + + {props.step.analysis?.templates && openAnalysis && ( +
+
Templates
+
    + {props.step.analysis?.templates.map((template) => { + return ( +
    +
  • {template.templateName}
  • +
    + ); + })} +
+
+ )} + {props.step?.setCanaryScale && openCanary && } + {props.step?.setHeaderRoute && openHeader && } + {props.step?.setMirrorRoute && openMirror && }
{!props.last && }
); }; + +const ExperimentWidget = ({template, opened, onToggle}: { + template: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentTemplate; + opened: boolean; + onToggle: (name: string) => void; +}) => { + const icon = opened ? 'fa-chevron-circle-up' : 'fa-chevron-circle-down'; + return ( + + + {template.name} + onToggle(opened ? '' : template.name)}> + + + + {opened && } + + ); +}; + +const WidgetItem = ({values}: {values: Record}) => { + return ( + + {Object.keys(values).map((val) => { + if (!values[val]) return null; + return ( + +
{val.toUpperCase()}
+
{String(values[val])}
+
+ ); + })} +
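`WidgetItem` above iterates a `Record` of values (its type parameters appear to have been lost in formatting) and skips falsy entries. A non-JSX sketch of the same filtering, assuming `Record<string, any>`:

```typescript
// Sketch: the key/value rows WidgetItem would render, skipping falsy values
// the same way the component returns null for them.
function widgetRows(values: Record<string, any>): Array<[string, string]> {
    return Object.keys(values)
        .filter((key) => Boolean(values[key]))
        .map((key): [string, string] => [key.toUpperCase(), String(values[key])]);
}

// e.g. a setCanaryScale step: weight and matchTrafficWeight are shown,
// the undefined replicas entry is skipped.
console.log(widgetRows({weight: 25, replicas: undefined, matchTrafficWeight: true}));
```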
+ ); +}; + +const WidgetItemSetMirror = ({value}: {value: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute}) => { + if (!value) return null; + return ( + + +
Name
+
{value.name}
+
Percentage
+
{value.percentage}
+ {Object.values(value.match).map((val, index) => { + if (!val) return null; + let stringMatcherValue = "" + let stringMatcherType = "" + let fragments = [] + if (val.path != null) { + if(val.path.exact != null) {stringMatcherValue = val.path.exact; stringMatcherType="Exact"} + if(val.path.prefix != null) {stringMatcherValue = val.path.prefix; stringMatcherType="Prefix"} + if(val.path.regex != null) {stringMatcherValue = val.path.regex; stringMatcherType="Regex"} + fragments.push( + +
{index} - Path ({stringMatcherType})
+
{stringMatcherValue}
+
+ ); + } + if (val.method != null) { + if(val.method.exact != null) {stringMatcherValue = val.method.exact; stringMatcherType="Exact"} + if(val.method.prefix != null) {stringMatcherValue = val.method.prefix; stringMatcherType="Prefix"} + if(val.method.regex != null) {stringMatcherValue = val.method.regex; stringMatcherType="Regex"} + fragments.push( + +
{index} - Method ({stringMatcherType})
+
{stringMatcherValue}
+
+ ); + } + return fragments + })} +
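`WidgetItemSetMirror` inspects a `StringMatch` (exact / prefix / regex) for each path or method matcher and records both the matcher type and its value. A sketch of that classification as a shared helper; the helper itself is an assumption, while the field names come from the generated `StringMatch` interface later in this diff:

```typescript
// Mirrors the generated StringMatch shape: at most one of the fields is set.
interface StringMatch {
    exact?: string;
    prefix?: string;
    regex?: string;
}

// Sketch: classify a StringMatch into its type and value, as the
// WidgetItemSetMirror hunk does inline for path and method matchers.
function classifyStringMatch(match: StringMatch): {type: 'Exact' | 'Prefix' | 'Regex'; value: string} | null {
    if (match.exact != null) return {type: 'Exact', value: match.exact};
    if (match.prefix != null) return {type: 'Prefix', value: match.prefix};
    if (match.regex != null) return {type: 'Regex', value: match.regex};
    return null; // nothing set; render nothing
}

console.log(classifyStringMatch({prefix: '/api/'})); // { type: 'Prefix', value: '/api/' }
```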
+
+ ); +}; + +const WidgetItemSetHeader = ({values}: {values: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1HeaderRoutingMatch[]}) => { + if (!values) return null; + return ( + + {values.map((record) => { + if (!record.headerName) return null; + if (!record.headerValue) return null; + + let headerValue = "" + let headerValueType = "" + if (record.headerValue.regex) { + headerValue = record.headerValue.regex + headerValueType = "Regex" + } + if (record.headerValue.prefix) { + headerValue = record.headerValue.prefix + headerValueType = "Prefix" + } + if (record.headerValue.exact) { + headerValue = record.headerValue.exact + headerValueType = "Exact" + } + return ( + +
Name
+
{record.headerName}
+
{headerValueType}
+
{headerValue}
+
+ ); + })} +
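`WidgetItemSetHeader` performs the same resolution per `HeaderRoutingMatch`, skipping rows without a header name or value. A sketch that flattens the matches into displayable rows; the simplified exact-before-prefix-before-regex ordering differs slightly from the hunk's sequential assignments:

```typescript
interface HeaderStringMatch {
    exact?: string;
    prefix?: string;
    regex?: string;
}

interface HeaderRoutingMatch {
    headerName?: string;
    headerValue?: HeaderStringMatch;
}

// Sketch: flatten setHeaderRoute matches into displayable rows,
// skipping entries without a name or value as the hunk does.
function headerRouteRows(matches: HeaderRoutingMatch[]): Array<{name: string; type: string; value: string}> {
    const rows: Array<{name: string; type: string; value: string}> = [];
    for (const m of matches) {
        if (!m.headerName || !m.headerValue) continue;
        if (m.headerValue.exact) rows.push({name: m.headerName, type: 'Exact', value: m.headerValue.exact});
        else if (m.headerValue.prefix) rows.push({name: m.headerName, type: 'Prefix', value: m.headerValue.prefix});
        else if (m.headerValue.regex) rows.push({name: m.headerName, type: 'Regex', value: m.headerValue.regex});
    }
    return rows;
}

console.log(headerRouteRows([{headerName: 'X-Canary', headerValue: {exact: 'true'}}]));
```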
+ ); +}; diff --git a/ui/src/app/components/rollouts-list/rollouts-list.scss b/ui/src/app/components/rollouts-list/rollouts-list.scss index b5c58c61cd..3892e8cb9a 100644 --- a/ui/src/app/components/rollouts-list/rollouts-list.scss +++ b/ui/src/app/components/rollouts-list/rollouts-list.scss @@ -24,12 +24,12 @@ $colWidth: ($WIDGET_WIDTH + (2 * $widgetPadding)) + $widgetMarginRight; width: 3 * $colWidth; margin: 0 auto; - @media screen and (max-width: 3 * $colWidth) { + @media screen and (max-width: (3 * $colWidth)) { width: 2 * $colWidth; margin: 0 auto; } - @media screen and (max-width: 2 * $colWidth) { + @media screen and (max-width: (2 * $colWidth)) { width: $colWidth; .rollouts-list__widget { @@ -82,7 +82,7 @@ $colWidth: ($WIDGET_WIDTH + (2 * $widgetPadding)) + $widgetMarginRight; } } - @media screen and (max-width: 2 * $colWidth) { + @media screen and (max-width: (2 * $colWidth)) { width: 80%; } } diff --git a/ui/src/app/components/rollouts-list/rollouts-list.tsx b/ui/src/app/components/rollouts-list/rollouts-list.tsx index dfbcb1d515..bd660ff471 100644 --- a/ui/src/app/components/rollouts-list/rollouts-list.tsx +++ b/ui/src/app/components/rollouts-list/rollouts-list.tsx @@ -29,6 +29,12 @@ export const RolloutsList = () => { const [filteredRollouts, setFilteredRollouts] = React.useState(rollouts); const [pos, nav, reset] = useNav(filteredRollouts.length); const [searchString, setSearchString, searchInput] = useAutocomplete(''); + const searchParam = new URLSearchParams(window.location.search).get('q'); + React.useEffect(() => { + if (searchParam && searchParam != searchString) { + setSearchString(searchParam); + } + }, []); const {useKeybinding, keybindingState} = React.useContext(KeybindingContext); @@ -81,6 +87,9 @@ export const RolloutsList = () => { if ((filtered || []).length > 0) { setFilteredRollouts(filtered); } + if (searchString) { + history.replace(`/${namespaceCtx.namespace}?q=${searchString}`); + } }, [searchString, rollouts]); const namespaceCtx = React.useContext(NamespaceContext); @@ -97,7 +106,7 @@ export const RolloutsList = () => { className='rollouts-list__search' placeholder='Search...' 
style={{marginBottom: '1.5em'}} - onItemClick={(item) => history.push(`/rollout/${item}`)} + onItemClick={(item) => history.push(`/rollout/${namespaceCtx.namespace}/${item}`)} icon='fa-search' {...searchInput} /> @@ -176,7 +185,7 @@ export const RolloutWidget = (props: {rollout: RolloutInfo; deselect: () => void return ( - + { diff --git a/ui/src/models/rollout/generated/api.ts b/ui/src/models/rollout/generated/api.ts index 5ad4585a3b..6416533dda 100644 --- a/ui/src/models/rollout/generated/api.ts +++ b/ui/src/models/rollout/generated/api.ts @@ -77,6 +77,31 @@ export class RequiredError extends Error { } } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus { + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus + */ + loadBalancer?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus + */ + canaryTargetGroup?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus + */ + stableTargetGroup?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef; +} /** * * @export @@ -101,6 +126,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBTrafficR * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBTrafficRouting */ rootService?: string; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StickinessConfig} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBTrafficRouting + */ + stickinessConfig?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StickinessConfig; /** * * @type {string} @@ -146,6 +177,25 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRun */ valueFrom?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ArgumentValueFrom; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStrategy + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStrategy { + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStrategy + */ + successfulRunHistoryLimit?: number; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStrategy + */ + unsuccessfulRunHistoryLimit?: number; +} /** * * @export @@ -165,6 +215,76 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AntiAffinit */ requiredDuringSchedulingIgnoredDuringExecution?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RequiredDuringSchedulingIgnoredDuringExecution; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshTrafficRouting + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshTrafficRouting { + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualService} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshTrafficRouting + */ + virtualService?: 
GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualService; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeGroup} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshTrafficRouting + */ + virtualNodeGroup?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeGroup; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeGroup + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeGroup { + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeGroup + */ + canaryVirtualNodeRef?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeGroup + */ + stableVirtualNodeRef?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualNodeReference + */ + name?: string; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualService + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualService { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualService + */ + name?: string; + /** + * Routes is list of HTTP routes within virtual router associated with virtual service to edit. If omitted, virtual service must have a single route of this type. 
+ * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshVirtualService + */ + routes?: Array; +} /** * * @export @@ -184,6 +304,25 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ArgumentVal */ fieldRef?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1FieldRef; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef + */ + name?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AwsResourceRef + */ + arn?: string; +} /** * * @export @@ -305,6 +444,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1BlueGreenSt * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1BlueGreenStrategy */ activeMetadata?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PodTemplateMetadata; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1BlueGreenStrategy + */ + abortScaleDownDelaySeconds?: number; } /** * @@ -330,6 +475,18 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStatu * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStatus */ currentExperiment?: string; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStatus + */ + weights?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStatus + */ + stablePingPong?: string; } /** * CanaryStep defines a step of a canary deployment. @@ -367,6 +524,18 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep */ setCanaryScale?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetHeaderRoute} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep + */ + setHeaderRoute?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetHeaderRoute; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStep + */ + setMirrorRoute?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute; } /** * @@ -446,6 +615,37 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStrat * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStrategy */ scaleDownDelayRevisionLimit?: number; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStrategy + */ + abortScaleDownDelaySeconds?: number; + /** + * DynamicStableScale is a traffic routing feature which dynamically scales the stable ReplicaSet to minimize total pods which are running during an update. This is calculated by scaling down the stable as traffic is increased to canary. When disabled (the default behavior) the stable ReplicaSet remains fully scaled to support instantaneous aborts. 
+ * @type {boolean} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStrategy + */ + dynamicStableScale?: boolean; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PingPongSpec} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CanaryStrategy + */ + pingPong?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PingPongSpec; +} +/** + * DryRun defines the settings for running the analysis in Dry-Run mode. + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DryRun + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DryRun { + /** + * Name of the metric which needs to be evaluated in the Dry-Run mode. Wildcard '*' is supported and denotes all the available metrics. + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DryRun + */ + metricName?: string; } /** * @@ -460,6 +660,25 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1FieldRef { */ fieldPath?: string; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1HeaderRoutingMatch + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1HeaderRoutingMatch { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1HeaderRoutingMatch + */ + headerName?: string; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1HeaderRoutingMatch + */ + headerValue?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch; +} /** * * @export @@ -503,6 +722,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioTraffi * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioTrafficRouting */ destinationRule?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioDestinationRule; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioTrafficRouting + */ + virtualServices?: Array; } /** * @@ -517,11 +742,49 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioVirtua */ name?: string; /** - * + * A list of HTTP routes within VirtualService to edit. If omitted, VirtualService must have a single route of this type. * @type {Array} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioVirtualService */ routes?: Array; + /** + * A list of TLS/HTTPS routes within VirtualService to edit. If omitted, VirtualService must have a single route of this type. + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1IstioVirtualService + */ + tlsRoutes?: Array; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MangedRoutes + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MangedRoutes { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MangedRoutes + */ + name?: string; +} +/** + * MeasurementRetention defines the settings for retaining the number of measurements during the analysis. + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MeasurementRetention + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MeasurementRetention { + /** + * MetricName is the name of the metric on which this retention policy should be applied. 
+ * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MeasurementRetention + */ + metricName?: string; + /** + * Limit is the maximum number of measurements to be retained for this given metric. + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MeasurementRetention + */ + limit?: number; } /** * @@ -547,6 +810,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1NginxTraffi * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1NginxTrafficRouting */ additionalIngressAnnotations?: { [key: string]: string; }; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1NginxTrafficRouting + */ + additionalStableIngresses?: Array; } /** * @@ -592,6 +861,25 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PauseCondit */ startTime?: K8sIoApimachineryPkgApisMetaV1Time; } +/** + * PingPongSpec holds the ping and pong service name. + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PingPongSpec + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PingPongSpec { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PingPongSpec + */ + pingService?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PingPongSpec + */ + pongService?: string; +} /** * * @export @@ -674,6 +962,18 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnal * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysis */ args?: Array; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysis + */ + dryRun?: Array; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysis + */ + measurementRetention?: Array; } /** * @@ -879,6 +1179,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExpe * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentTemplate */ selector?: K8sIoApimachineryPkgApisMetaV1LabelSelector; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentTemplate + */ + weight?: number; } /** * @@ -953,12 +1259,24 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutSpec * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutSpec */ progressDeadlineSeconds?: number; + /** + * + * @type {boolean} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutSpec + */ + progressDeadlineAbort?: boolean; /** * * @type {K8sIoApimachineryPkgApisMetaV1Time} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutSpec */ restartAt?: K8sIoApimachineryPkgApisMetaV1Time; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStrategy} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutSpec + */ + analysis?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStrategy; } /** * @@ -1092,6 +1410,30 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutStat * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutStatus */ promoteFull?: boolean; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutStatus + */ + phase?: string; + /** + * + * @type {string} + * @memberof 
GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutStatus + */ + message?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutStatus + */ + workloadObservedGeneration?: string; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutStatus + */ + alb?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ALBStatus; } /** * @@ -1141,57 +1483,276 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTraf * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting */ - smi?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting; + smi?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AmbassadorTrafficRouting} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting + */ + ambassador?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AmbassadorTrafficRouting; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshTrafficRouting} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting + */ + appMesh?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AppMeshTrafficRouting; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TraefikTrafficRouting} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting + */ + traefik?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TraefikTrafficRouting; + /** + * A list of HTTP routes that Argo Rollouts manages, the order of this array also becomes the precedence in the upstream traffic router. 
+ * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting + */ + managedRoutes?: Array; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RouteMatch + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RouteMatch { + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RouteMatch + */ + method?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RouteMatch + */ + path?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch; + /** + * + * @type {{ [key: string]: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch; }} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RouteMatch + */ + headers?: { [key: string]: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch; }; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting + */ + rootService?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting + */ + trafficSplitName?: string; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale { + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + */ + weight?: number; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + */ + replicas?: number; + /** + * + * @type {boolean} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + */ + matchTrafficWeight?: boolean; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetHeaderRoute + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetHeaderRoute { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetHeaderRoute + */ + name?: string; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetHeaderRoute + */ + match?: Array; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute + */ + name?: string; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute + */ + match?: Array; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetMirrorRoute + */ + percentage?: number; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StickinessConfig + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StickinessConfig { + /** + * + * @type {boolean} + * @memberof 
GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StickinessConfig + */ + enabled?: boolean; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StickinessConfig + */ + durationSeconds?: string; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch + */ + exact?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch + */ + prefix?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1StringMatch + */ + regex?: string; +} +/** + * TLSRoute holds the information on the virtual service's TLS/HTTPS routes that are desired to be matched for changing weights. + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TLSRoute + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TLSRoute { + /** + * Port number of the TLS Route desired to be matched in the given Istio VirtualService. + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TLSRoute + */ + port?: string; + /** + * A list of all the SNI Hosts of the TLS Route desired to be matched in the given Istio VirtualService. + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TLSRoute + */ + sniHosts?: Array; +} +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TraefikTrafficRouting + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TraefikTrafficRouting { /** * - * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AmbassadorTrafficRouting} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TraefikTrafficRouting */ - ambassador?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AmbassadorTrafficRouting; + weightedTraefikServiceName?: string; } /** * * @export - * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights */ -export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting { +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights { /** * - * @type {string} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights */ - rootService?: string; + canary?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination; /** * - * @type {string} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SMITrafficRouting + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights */ - trafficSplitName?: string; + stable?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights + */ + additional?: Array; + /** + * + * @type {boolean} + * @memberof 
GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TrafficWeights + */ + verified?: boolean; } /** * * @export - * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination */ -export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale { +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination { /** * * @type {number} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination */ weight?: number; /** * - * @type {number} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination */ - replicas?: number; + serviceName?: string; /** * - * @type {boolean} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1SetCanaryScale + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WeightDestination */ - matchTrafficWeight?: boolean; + podTemplateHash?: string; } /** * @@ -1411,7 +1972,7 @@ export interface K8sIoApiCoreV1AzureFileVolumeSource { */ export interface K8sIoApiCoreV1CSIVolumeSource { /** - * Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + * driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. * @type {string} * @memberof K8sIoApiCoreV1CSIVolumeSource */ @@ -1554,7 +2115,7 @@ export interface K8sIoApiCoreV1ConfigMapEnvSource { optional?: boolean; } /** - * Selects a key from a ConfigMap. + * * @export * @interface K8sIoApiCoreV1ConfigMapKeySelector */ @@ -1974,7 +2535,7 @@ export interface K8sIoApiCoreV1EnvVarSource { secretKeyRef?: K8sIoApiCoreV1SecretKeySelector; } /** - * An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag. + * An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. 
* @export * @interface K8sIoApiCoreV1EphemeralContainer */ @@ -1986,7 +2547,7 @@ export interface K8sIoApiCoreV1EphemeralContainer { */ ephemeralContainerCommon?: K8sIoApiCoreV1EphemeralContainerCommon; /** - * + * If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. +optional * @type {string} * @memberof K8sIoApiCoreV1EphemeralContainer */ @@ -2029,7 +2590,7 @@ export interface K8sIoApiCoreV1EphemeralContainerCommon { */ workingDir?: string; /** - * Ports are not allowed for ephemeral containers. + * * @type {Array} * @memberof K8sIoApiCoreV1EphemeralContainerCommon */ @@ -2143,12 +2704,6 @@ export interface K8sIoApiCoreV1EphemeralVolumeSource { * @memberof K8sIoApiCoreV1EphemeralVolumeSource */ volumeClaimTemplate?: K8sIoApiCoreV1PersistentVolumeClaimTemplate; - /** - * - * @type {boolean} - * @memberof K8sIoApiCoreV1EphemeralVolumeSource - */ - readOnly?: boolean; } /** * ExecAction describes a \"run in container\" action. @@ -2207,7 +2762,7 @@ export interface K8sIoApiCoreV1FCVolumeSource { */ export interface K8sIoApiCoreV1FlexVolumeSource { /** - * Driver is the name of the driver to use for this volume. + * driver is the name of the driver to use for this volume. * @type {string} * @memberof K8sIoApiCoreV1FlexVolumeSource */ @@ -2287,6 +2842,25 @@ export interface K8sIoApiCoreV1GCEPersistentDiskVolumeSource { */ readOnly?: boolean; } +/** + * + * @export + * @interface K8sIoApiCoreV1GRPCAction + */ +export interface K8sIoApiCoreV1GRPCAction { + /** + * Port number of the gRPC service. Number must be in the range 1 to 65535. + * @type {number} + * @memberof K8sIoApiCoreV1GRPCAction + */ + port?: number; + /** + * Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. +optional +default=\"\" + * @type {string} + * @memberof K8sIoApiCoreV1GRPCAction + */ + service?: string; +} /** * Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. * @export @@ -2393,31 +2967,6 @@ export interface K8sIoApiCoreV1HTTPHeader { */ value?: string; } -/** - * Handler defines a specific action that should be taken TODO: pass structured data to these actions, and document that data here. 
- * @export - * @interface K8sIoApiCoreV1Handler - */ -export interface K8sIoApiCoreV1Handler { - /** - * - * @type {K8sIoApiCoreV1ExecAction} - * @memberof K8sIoApiCoreV1Handler - */ - exec?: K8sIoApiCoreV1ExecAction; - /** - * - * @type {K8sIoApiCoreV1HTTPGetAction} - * @memberof K8sIoApiCoreV1Handler - */ - httpGet?: K8sIoApiCoreV1HTTPGetAction; - /** - * - * @type {K8sIoApiCoreV1TCPSocketAction} - * @memberof K8sIoApiCoreV1Handler - */ - tcpSocket?: K8sIoApiCoreV1TCPSocketAction; -} /** * HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. * @export @@ -2463,19 +3012,19 @@ export interface K8sIoApiCoreV1HostPathVolumeSource { */ export interface K8sIoApiCoreV1ISCSIVolumeSource { /** - * iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + * targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). * @type {string} * @memberof K8sIoApiCoreV1ISCSIVolumeSource */ targetPortal?: string; /** - * Target iSCSI Qualified Name. + * iqn is the target iSCSI Qualified Name. * @type {string} * @memberof K8sIoApiCoreV1ISCSIVolumeSource */ iqn?: string; /** - * iSCSI Target Lun number. + * lun represents iSCSI Target Lun number. * @type {number} * @memberof K8sIoApiCoreV1ISCSIVolumeSource */ @@ -2536,13 +3085,13 @@ export interface K8sIoApiCoreV1ISCSIVolumeSource { */ export interface K8sIoApiCoreV1KeyToPath { /** - * The key to project. + * key is the key to project. * @type {string} * @memberof K8sIoApiCoreV1KeyToPath */ key?: string; /** - * The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + * path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. * @type {string} * @memberof K8sIoApiCoreV1KeyToPath */ @@ -2562,19 +3111,44 @@ export interface K8sIoApiCoreV1KeyToPath { export interface K8sIoApiCoreV1Lifecycle { /** * - * @type {K8sIoApiCoreV1Handler} + * @type {K8sIoApiCoreV1LifecycleHandler} * @memberof K8sIoApiCoreV1Lifecycle */ - postStart?: K8sIoApiCoreV1Handler; + postStart?: K8sIoApiCoreV1LifecycleHandler; /** * - * @type {K8sIoApiCoreV1Handler} + * @type {K8sIoApiCoreV1LifecycleHandler} * @memberof K8sIoApiCoreV1Lifecycle */ - preStop?: K8sIoApiCoreV1Handler; + preStop?: K8sIoApiCoreV1LifecycleHandler; +} +/** + * LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified. + * @export + * @interface K8sIoApiCoreV1LifecycleHandler + */ +export interface K8sIoApiCoreV1LifecycleHandler { + /** + * + * @type {K8sIoApiCoreV1ExecAction} + * @memberof K8sIoApiCoreV1LifecycleHandler + */ + exec?: K8sIoApiCoreV1ExecAction; + /** + * + * @type {K8sIoApiCoreV1HTTPGetAction} + * @memberof K8sIoApiCoreV1LifecycleHandler + */ + httpGet?: K8sIoApiCoreV1HTTPGetAction; + /** + * + * @type {K8sIoApiCoreV1TCPSocketAction} + * @memberof K8sIoApiCoreV1LifecycleHandler + */ + tcpSocket?: K8sIoApiCoreV1TCPSocketAction; } /** - * LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
+ * * @export * @interface K8sIoApiCoreV1LocalObjectReference */ @@ -2631,7 +3205,7 @@ export interface K8sIoApiCoreV1NodeAffinity { preferredDuringSchedulingIgnoredDuringExecution?: Array; } /** - * A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms. + * * @export * @interface K8sIoApiCoreV1NodeSelector */ @@ -2669,7 +3243,7 @@ export interface K8sIoApiCoreV1NodeSelectorRequirement { values?: Array; } /** - * A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + * * @export * @interface K8sIoApiCoreV1NodeSelectorTerm */ @@ -2688,7 +3262,7 @@ export interface K8sIoApiCoreV1NodeSelectorTerm { matchFields?: Array; } /** - * ObjectFieldSelector selects an APIVersioned field of an object. + * * @export * @interface K8sIoApiCoreV1ObjectFieldSelector */ @@ -2754,6 +3328,12 @@ export interface K8sIoApiCoreV1PersistentVolumeClaimSpec { * @memberof K8sIoApiCoreV1PersistentVolumeClaimSpec */ dataSource?: K8sIoApiCoreV1TypedLocalObjectReference; + /** + * + * @type {K8sIoApiCoreV1TypedLocalObjectReference} + * @memberof K8sIoApiCoreV1PersistentVolumeClaimSpec + */ + dataSourceRef?: K8sIoApiCoreV1TypedLocalObjectReference; } /** * PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource. @@ -2806,7 +3386,7 @@ export interface K8sIoApiCoreV1PhotonPersistentDiskVolumeSource { */ pdID?: string; /** - * Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. + * fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. * @type {string} * @memberof K8sIoApiCoreV1PhotonPersistentDiskVolumeSource */ @@ -2855,6 +3435,12 @@ export interface K8sIoApiCoreV1PodAffinityTerm { * @memberof K8sIoApiCoreV1PodAffinityTerm */ topologyKey?: string; + /** + * + * @type {K8sIoApimachineryPkgApisMetaV1LabelSelector} + * @memberof K8sIoApiCoreV1PodAffinityTerm + */ + namespaceSelector?: K8sIoApimachineryPkgApisMetaV1LabelSelector; } /** * Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -2919,6 +3505,19 @@ export interface K8sIoApiCoreV1PodDNSConfigOption { */ value?: string; } +/** + * PodOS defines the OS parameters of a pod. + * @export + * @interface K8sIoApiCoreV1PodOS + */ +export interface K8sIoApiCoreV1PodOS { + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1PodOS + */ + name?: string; +} /** * * @export @@ -2975,7 +3574,7 @@ export interface K8sIoApiCoreV1PodSecurityContext { */ supplementalGroups?: Array; /** - * 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. +optional + * 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. 
+optional * @type {string} * @memberof K8sIoApiCoreV1PodSecurityContext */ @@ -3215,6 +3814,12 @@ export interface K8sIoApiCoreV1PodSpec { * @memberof K8sIoApiCoreV1PodSpec */ setHostnameAsFQDN?: boolean; + /** + * + * @type {K8sIoApiCoreV1PodOS} + * @memberof K8sIoApiCoreV1PodSpec + */ + os?: K8sIoApiCoreV1PodOS; } /** * @@ -3248,7 +3853,7 @@ export interface K8sIoApiCoreV1PortworxVolumeSource { */ volumeID?: string; /** - * FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. + * fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. * @type {string} * @memberof K8sIoApiCoreV1PortworxVolumeSource */ @@ -3287,10 +3892,10 @@ export interface K8sIoApiCoreV1PreferredSchedulingTerm { export interface K8sIoApiCoreV1Probe { /** * - * @type {K8sIoApiCoreV1Handler} + * @type {K8sIoApiCoreV1ProbeHandler} * @memberof K8sIoApiCoreV1Probe */ - handler?: K8sIoApiCoreV1Handler; + handler?: K8sIoApiCoreV1ProbeHandler; /** * * @type {number} @@ -3321,6 +3926,43 @@ export interface K8sIoApiCoreV1Probe { * @memberof K8sIoApiCoreV1Probe */ failureThreshold?: number; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1Probe + */ + terminationGracePeriodSeconds?: string; +} +/** + * ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified. + * @export + * @interface K8sIoApiCoreV1ProbeHandler + */ +export interface K8sIoApiCoreV1ProbeHandler { + /** + * + * @type {K8sIoApiCoreV1ExecAction} + * @memberof K8sIoApiCoreV1ProbeHandler + */ + exec?: K8sIoApiCoreV1ExecAction; + /** + * + * @type {K8sIoApiCoreV1HTTPGetAction} + * @memberof K8sIoApiCoreV1ProbeHandler + */ + httpGet?: K8sIoApiCoreV1HTTPGetAction; + /** + * + * @type {K8sIoApiCoreV1TCPSocketAction} + * @memberof K8sIoApiCoreV1ProbeHandler + */ + tcpSocket?: K8sIoApiCoreV1TCPSocketAction; + /** + * + * @type {K8sIoApiCoreV1GRPCAction} + * @memberof K8sIoApiCoreV1ProbeHandler + */ + grpc?: K8sIoApiCoreV1GRPCAction; } /** * @@ -3354,7 +3996,7 @@ export interface K8sIoApiCoreV1QuobyteVolumeSource { */ registry?: string; /** - * Volume is a string that references an already created Quobyte volume by name. + * volume is a string that references an already created Quobyte volume by name. * @type {string} * @memberof K8sIoApiCoreV1QuobyteVolumeSource */ @@ -3521,13 +4163,13 @@ export interface K8sIoApiCoreV1SELinuxOptions { */ export interface K8sIoApiCoreV1ScaleIOVolumeSource { /** - * The host address of the ScaleIO API Gateway. + * gateway is the host address of the ScaleIO API Gateway. * @type {string} * @memberof K8sIoApiCoreV1ScaleIOVolumeSource */ gateway?: string; /** - * The name of the storage system as configured in ScaleIO. + * system is the name of the storage system as configured in ScaleIO. * @type {string} * @memberof K8sIoApiCoreV1ScaleIOVolumeSource */ @@ -3563,7 +4205,7 @@ export interface K8sIoApiCoreV1ScaleIOVolumeSource { */ storageMode?: string; /** - * The name of a volume already created in the ScaleIO system that is associated with this volume source. + * volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. 
* @type {string} * @memberof K8sIoApiCoreV1ScaleIOVolumeSource */ @@ -3620,7 +4262,7 @@ export interface K8sIoApiCoreV1SecretEnvSource { optional?: boolean; } /** - * SecretKeySelector selects a key of a Secret. + * * @export * @interface K8sIoApiCoreV1SecretKeySelector */ @@ -3792,7 +4434,7 @@ export interface K8sIoApiCoreV1ServiceAccountTokenProjection { */ expirationSeconds?: string; /** - * Path is the path relative to the mount point of the file to project the token into. + * path is the path relative to the mount point of the file to project the token into. * @type {string} * @memberof K8sIoApiCoreV1ServiceAccountTokenProjection */ @@ -3805,7 +4447,7 @@ export interface K8sIoApiCoreV1ServiceAccountTokenProjection { */ export interface K8sIoApiCoreV1StorageOSVolumeSource { /** - * VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + * volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. * @type {string} * @memberof K8sIoApiCoreV1StorageOSVolumeSource */ @@ -3917,19 +4559,19 @@ export interface K8sIoApiCoreV1Toleration { */ export interface K8sIoApiCoreV1TopologySpreadConstraint { /** - * MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: +-------+-------+-------+ | zone1 | zone2 | zone3 | +-------+-------+-------+ | P | P | | +-------+-------+-------+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. + * MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. +-------+-------+-------+ | zone1 | zone2 | zone3 | +-------+-------+-------+ | P P | P P | P | +-------+-------+-------+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. * @type {number} * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ maxSkew?: number; /** - * TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field. 
+ * TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field. * @type {string} * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ topologyKey?: string; /** - * WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assigment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: +-------+-------+-------+ | zone1 | zone2 | zone3 | +-------+-------+-------+ | P P P | P | P | +-------+-------+-------+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. + * WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: +-------+-------+-------+ | zone1 | zone2 | zone3 | +-------+-------+-------+ | P P P | P | P | +-------+-------+-------+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. * @type {string} * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ @@ -3940,9 +4582,15 @@ export interface K8sIoApiCoreV1TopologySpreadConstraint { * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ labelSelector?: K8sIoApimachineryPkgApisMetaV1LabelSelector; + /** + * MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. 
If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: +-------+-------+-------+ | zone1 | zone2 | zone3 | +-------+-------+-------+ | P P | P P | P P | +-------+-------+-------+ The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. +optional + * @type {number} + * @memberof K8sIoApiCoreV1TopologySpreadConstraint + */ + minDomains?: number; } /** - * TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. + * * @export * @interface K8sIoApiCoreV1TypedLocalObjectReference */ @@ -4333,6 +4981,12 @@ export interface K8sIoApiCoreV1WindowsSecurityContextOptions { * @memberof K8sIoApiCoreV1WindowsSecurityContextOptions */ runAsUserName?: string; + /** + * + * @type {boolean} + * @memberof K8sIoApiCoreV1WindowsSecurityContextOptions + */ + hostProcess?: boolean; } /** * Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. 
+protobuf=true +protobuf.embed=string +protobuf.options.marshal=false +protobuf.options.(gogoproto.goproto_stringer)=false +k8s:deepcopy-gen=true +k8s:openapi-gen=true @@ -4446,6 +5100,12 @@ export interface K8sIoApimachineryPkgApisMetaV1ManagedFieldsEntry { * @memberof K8sIoApimachineryPkgApisMetaV1ManagedFieldsEntry */ fieldsV1?: K8sIoApimachineryPkgApisMetaV1FieldsV1; + /** + * Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. + * @type {string} + * @memberof K8sIoApimachineryPkgApisMetaV1ManagedFieldsEntry + */ + subresource?: string; } /** * ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. @@ -4460,7 +5120,7 @@ export interface K8sIoApimachineryPkgApisMetaV1ObjectMeta { */ name?: string; /** - * GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency +optional + * GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency +optional * @type {string} * @memberof K8sIoApimachineryPkgApisMetaV1ObjectMeta */ @@ -4472,7 +5132,7 @@ export interface K8sIoApimachineryPkgApisMetaV1ObjectMeta { */ namespace?: string; /** - * SelfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release. +optional + * * @type {string} * @memberof K8sIoApimachineryPkgApisMetaV1ObjectMeta */ @@ -4538,7 +5198,7 @@ export interface K8sIoApimachineryPkgApisMetaV1ObjectMeta { */ finalizers?: Array; /** - * + * Deprecated: ClusterName is a legacy field that was always cleared by the system and never used; it will be removed completely in 1.25. 
The name in the go struct is changed to help clients detect accidental use. +optional * @type {string} * @memberof K8sIoApimachineryPkgApisMetaV1ObjectMeta */ @@ -4551,7 +5211,7 @@ export interface K8sIoApimachineryPkgApisMetaV1ObjectMeta { managedFields?: Array; } /** - * OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + * * @export * @interface K8sIoApimachineryPkgApisMetaV1OwnerReference */ @@ -4676,10 +5336,10 @@ export interface RolloutAnalysisRunInfo { icon?: string; /** * - * @type {number} + * @type {string} * @memberof RolloutAnalysisRunInfo */ - revision?: number; + revision?: string; /** * * @type {string} @@ -4716,6 +5376,18 @@ export interface RolloutAnalysisRunInfo { * @memberof RolloutAnalysisRunInfo */ jobs?: Array; + /** + * + * @type {Array} + * @memberof RolloutAnalysisRunInfo + */ + nonJobInfo?: Array; + /** + * + * @type {Array} + * @memberof RolloutAnalysisRunInfo + */ + metrics?: Array; } /** * @@ -4756,10 +5428,10 @@ export interface RolloutExperimentInfo { icon?: string; /** * - * @type {number} + * @type {string} * @memberof RolloutExperimentInfo */ - revision?: number; + revision?: string; /** * * @type {string} @@ -4809,6 +5481,55 @@ export interface RolloutJobInfo { * @memberof RolloutJobInfo */ icon?: string; + /** + * + * @type {string} + * @memberof RolloutJobInfo + */ + metricName?: string; + /** + * + * @type {K8sIoApimachineryPkgApisMetaV1Time} + * @memberof RolloutJobInfo + */ + startedAt?: K8sIoApimachineryPkgApisMetaV1Time; +} +/** + * + * @export + * @interface RolloutMetrics + */ +export interface RolloutMetrics { + /** + * + * @type {string} + * @memberof RolloutMetrics + */ + name?: string; + /** + * + * @type {string} + * @memberof RolloutMetrics + */ + successCondition?: string; + /** + * + * @type {number} + * @memberof RolloutMetrics + */ + count?: number; + /** + * + * @type {number} + * @memberof RolloutMetrics + */ + inconclusiveLimit?: number; + /** + * + * @type {number} + * @memberof RolloutMetrics + */ + failureLimit?: number; } /** * @@ -4823,12 +5544,43 @@ export interface RolloutNamespaceInfo { */ namespace?: string; /** - * + * * @type {Array} * @memberof RolloutNamespaceInfo */ availableNamespaces?: Array; } +/** + * + * @export + * @interface RolloutNonJobInfo + */ +export interface RolloutNonJobInfo { + /** + * + * @type {string} + * @memberof RolloutNonJobInfo + */ + value?: string; + /** + * + * @type {string} + * @memberof RolloutNonJobInfo + */ + status?: string; + /** + * + * @type {string} + * @memberof RolloutNonJobInfo + */ + metricName?: string; + /** + * + * @type {K8sIoApimachineryPkgApisMetaV1Time} + * @memberof RolloutNonJobInfo + */ + startedAt?: K8sIoApimachineryPkgApisMetaV1Time; +} /** * * @export @@ -4917,10 +5669,10 @@ export interface RolloutReplicaSetInfo { icon?: string; /** * - * @type {number} + * @type {string} * @memberof RolloutReplicaSetInfo */ - revision?: number; + revision?: string; /** * * @type {boolean} @@ -4981,6 +5733,18 @@ export interface RolloutReplicaSetInfo { * @memberof RolloutReplicaSetInfo */ pods?: Array; + /** + * + * @type {boolean} + * @memberof RolloutReplicaSetInfo + */ + ping?: boolean; + /** + * + * @type {boolean} + * @memberof RolloutReplicaSetInfo + */ + pong?: boolean; } /** * diff --git a/ui/yarn.lock b/ui/yarn.lock index 3551c8ca01..649423d763 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -1157,13 +1157,20 @@ dependencies: 
regenerator-runtime "^0.13.4" -"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.1", "@babel/runtime@^7.12.5", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": +"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": version "7.14.0" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.14.0.tgz#46794bc20b612c5f75e62dd071e24dfd95f1cbe6" integrity sha512-JELkvo/DlpNdJ7dlyw/eY7E0suy5i5GQH+Vlxaq1nsNJ+H7f4Vtv3jMeCEgRhZZQFXTjldYfQgv2qmM6M1v5wA== dependencies: regenerator-runtime "^0.13.4" +"@babel/runtime@^7.12.1": + version "7.17.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.17.2.tgz#66f68591605e59da47523c631416b18508779941" + integrity sha512-hzeyJyMA1YGdJTuWU0e/j4wKXrU4OMFvY2MSlaI9B7VQb0r5cxTE3EAIS2Q7Tn2RIcDkRvTA/v2JsAEhxe99uw== + dependencies: + regenerator-runtime "^0.13.4" + "@babel/runtime@^7.4.2", "@babel/runtime@^7.8.7": version "7.15.3" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.15.3.tgz#2e1c2880ca118e5b2f9988322bd8a7656a32502b" @@ -1762,6 +1769,11 @@ dependencies: "@babel/types" "^7.3.0" +"@types/classnames@2.2.9": + version "2.2.9" + resolved "https://registry.yarnpkg.com/@types/classnames/-/classnames-2.2.9.tgz#d868b6febb02666330410fe7f58f3c4b8258be7b" + integrity sha512-MNl+rT5UmZeilaPxAVs6YaPC2m6aA8rofviZbhbxpPpl61uKodfdQVsBtgJGTqGizEf02oW3tsVe7FYB8kK14A== + "@types/eslint@^7.2.6": version "7.2.10" resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-7.2.10.tgz#4b7a9368d46c0f8cd5408c23288a59aa2394d917" @@ -1800,6 +1812,11 @@ resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.8.tgz#49348387983075705fe8f4e02fb67f7daaec4934" integrity sha512-S78QIYirQcUoo6UJZx9CSP0O2ix9IaeAXwQi26Rhr/+mg7qqPy8TzaxHSUut7eGjL8WmLccT7/MXf304WjqHcA== +"@types/history@^4.7.11": + version "4.7.11" + resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.11.tgz#56588b17ae8f50c53983a524fc3cc47437969d64" + integrity sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA== + "@types/html-minifier-terser@^5.0.0": version "5.1.1" resolved "https://registry.yarnpkg.com/@types/html-minifier-terser/-/html-minifier-terser-5.1.1.tgz#3c9ee980f1a10d6021ae6632ca3e79ca2ec4fb50" @@ -1911,11 +1928,11 @@ "@types/react" "*" "@types/react-router-dom@^5.1.7": - version "5.1.7" - resolved "https://registry.yarnpkg.com/@types/react-router-dom/-/react-router-dom-5.1.7.tgz#a126d9ea76079ffbbdb0d9225073eb5797ab7271" - integrity sha512-D5mHD6TbdV/DNHYsnwBTv+y73ei+mMjrkGrla86HthE4/PVvL1J94Bu3qABU+COXzpL23T1EZapVVpwHuBXiUg== + version "5.3.3" + resolved "https://registry.yarnpkg.com/@types/react-router-dom/-/react-router-dom-5.3.3.tgz#e9d6b4a66fcdbd651a5f106c2656a30088cc1e83" + integrity sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw== dependencies: - "@types/history" "*" + "@types/history" "^4.7.11" "@types/react" "*" "@types/react-router" "*" @@ -3437,10 +3454,10 @@ class-utils@^0.3.5: isobject "^3.0.0" static-extend "^0.1.1" -classnames@^2.2.5, classnames@^2.2.6: - version "2.3.1" - resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.3.1.tgz#dfcfa3891e306ec1dad105d0e88f4417b8535e8e" - integrity sha512-OlQdbZ7gLfGarSqxesMesDa5uz7KFbID8Kpq/SxIoNGDqY8lSYs0D+hhtBXhcdB3rcbXArFr7vlHheLk1voeNA== 
+classnames@2.2.6, classnames@^2.2.5, classnames@^2.2.6: + version "2.2.6" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" + integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q== clean-css@^4.2.3: version "4.2.3" @@ -9574,19 +9591,7 @@ react-refresh@^0.8.3: resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.8.3.tgz#721d4657672d400c5e3c75d063c4a85fb2d5d68f" integrity sha512-X8jZHc7nCMjaCqoU+V2I0cOhNW+QMBwSUkeXnTi8IPe6zaRWfn60ZzvFDZqWPfmSJfjub7dDW1SP0jaHWLu/hg== -react-router-dom@^4.2.2: - version "4.3.1" - resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-4.3.1.tgz#4c2619fc24c4fa87c9fd18f4fb4a43fe63fbd5c6" - integrity sha512-c/MlywfxDdCp7EnB7YfPMOfMD3tOtIjrQlj/CKfNMBxdmpJP8xcz5P/UAFn3JbnQCNUxsHyVVqllF9LhgVyFCA== - dependencies: - history "^4.7.2" - invariant "^2.2.4" - loose-envify "^1.3.1" - prop-types "^15.6.1" - react-router "^4.3.1" - warning "^4.0.1" - -react-router-dom@^5.2.0: +react-router-dom@5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.2.0.tgz#9e65a4d0c45e13289e66c7b17c7e175d0ea15662" integrity sha512-gxAmfylo2QUjcwxI63RhQ5G85Qqt4voZpUXSEqCwykV0baaOTQDR1f0PmY8AELqIyVc0NEZUj0Gov5lNGcXgsA== @@ -9599,6 +9604,18 @@ react-router-dom@^5.2.0: tiny-invariant "^1.0.2" tiny-warning "^1.0.0" +react-router-dom@^4.2.2: + version "4.3.1" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-4.3.1.tgz#4c2619fc24c4fa87c9fd18f4fb4a43fe63fbd5c6" + integrity sha512-c/MlywfxDdCp7EnB7YfPMOfMD3tOtIjrQlj/CKfNMBxdmpJP8xcz5P/UAFn3JbnQCNUxsHyVVqllF9LhgVyFCA== + dependencies: + history "^4.7.2" + invariant "^2.2.4" + loose-envify "^1.3.1" + prop-types "^15.6.1" + react-router "^4.3.1" + warning "^4.0.1" + react-router@5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.2.0.tgz#424e75641ca8747fbf76e5ecca69781aa37ea293" @@ -11464,16 +11481,11 @@ typedarray@^0.0.6: resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= -typescript@^4.0.3: +typescript@4.3.5, typescript@^4.0.3: version "4.3.5" resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.3.5.tgz#4d1c37cc16e893973c45a06886b7113234f119f4" integrity sha512-DqQgihaQ9cUrskJo9kIyW/+g0Vxsk8cDtZ52a3NGh0YNTfpUSArXSohyUGnvbPazEPLu398C0UxmKSOrPumUzA== -typescript@^4.1.2: - version "4.2.4" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.2.4.tgz#8610b59747de028fda898a8aef0e103f156d0961" - integrity sha512-V+evlYHZnQkaz8TRBuxTA92yZBPotr5H+WhQ7bD3hZUndx5tGOa1fuCgeSjxAzM1RiN5IzvadIXTVefuuwZCRg== - unbox-primitive@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.1.tgz#085e215625ec3162574dc8859abee78a59b14471" @@ -12369,4 +12381,4 @@ yargs@^15.4.1: yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" - integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== \ No newline at end of file diff --git a/utils/analysis/factory.go b/utils/analysis/factory.go index 90b1a7de1f..0fb57030f5 100644 --- a/utils/analysis/factory.go +++ b/utils/analysis/factory.go @@ -3,17 
+3,20 @@ package analysis import ( "encoding/json" "fmt" + "regexp" "strconv" + "strings" + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" templateutil "github.com/argoproj/argo-rollouts/utils/template" - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" appsv1 "k8s.io/api/apps/v1" "k8s.io/kubernetes/pkg/fieldpath" ) // BuildArgumentsForRolloutAnalysisRun builds the arguments for a analysis base created by a rollout -func BuildArgumentsForRolloutAnalysisRun(args []v1alpha1.AnalysisRunArgument, stableRS, newRS *appsv1.ReplicaSet, r *v1alpha1.Rollout) []v1alpha1.Argument { +func BuildArgumentsForRolloutAnalysisRun(args []v1alpha1.AnalysisRunArgument, stableRS, newRS *appsv1.ReplicaSet, r *v1alpha1.Rollout) ([]v1alpha1.Argument, error) { + var err error arguments := []v1alpha1.Argument{} for i := range args { arg := args[i] @@ -26,21 +29,28 @@ func BuildArgumentsForRolloutAnalysisRun(args []v1alpha1.AnalysisRunArgument, st case v1alpha1.Stable: value = stableRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] } - } else { - if arg.ValueFrom.FieldRef != nil { - value, _ = fieldpath.ExtractFieldPathAsString(r, arg.ValueFrom.FieldRef.FieldPath) + } else if arg.ValueFrom.FieldRef != nil { + if strings.HasPrefix(arg.ValueFrom.FieldRef.FieldPath, "metadata") { + value, err = fieldpath.ExtractFieldPathAsString(r, arg.ValueFrom.FieldRef.FieldPath) + if err != nil { + return nil, err + } + } else { + // in case of error - return empty value for Validation stage, so it will pass validation + // returned error will only be used in Analysis stage + value, err = extractValueFromRollout(r, arg.ValueFrom.FieldRef.FieldPath) } } - } + analysisArg := v1alpha1.Argument{ Name: arg.Name, Value: &value, } arguments = append(arguments, analysisArg) - } - return arguments + + return arguments, err } // PostPromotionLabels returns a map[string]string of common labels for the post promotion analysis @@ -125,7 +135,7 @@ func ValidateMetrics(metrics []v1alpha1.Metric) error { duplicateNames := make(map[string]bool) for i, metric := range metrics { if _, ok := duplicateNames[metric.Name]; ok { - return fmt.Errorf("metrics[%d]: duplicate name '%s", i, metric.Name) + return fmt.Errorf("metrics[%d]: duplicate name '%s'", i, metric.Name) } duplicateNames[metric.Name] = true if err := ValidateMetric(metric); err != nil { @@ -209,6 +219,12 @@ func ValidateMetric(metric v1alpha1.Metric) error { if metric.Provider.CloudWatch != nil { numProviders++ } + if metric.Provider.Graphite != nil { + numProviders++ + } + if metric.Provider.Influxdb != nil { + numProviders++ + } if numProviders == 0 { return fmt.Errorf("no provider specified") } @@ -217,3 +233,42 @@ func ValidateMetric(metric v1alpha1.Metric) error { } return nil } + +func extractValueFromRollout(r *v1alpha1.Rollout, path string) (string, error) { + j, _ := json.Marshal(r) + m := interface{}(nil) + json.Unmarshal(j, &m) + sections := regexp.MustCompile("[\\.\\[\\]]+").Split(path, -1) + for _, section := range sections { + if section == "" { + continue // if path ends with a separator char, Split returns an empty last section + } + + if asArray, ok := m.([]interface{}); ok { + if i, err := strconv.Atoi(section); err != nil { + return "", fmt.Errorf("invalid index '%s'", section) + } else if i >= len(asArray) { + return "", fmt.Errorf("index %d out of range", i) + } else { + m = asArray[i] + } + } else if asMap, ok := m.(map[string]interface{}); ok { + m = asMap[section] + } else { + return "", fmt.Errorf("invalid path %s in rollout", path) + } + } 
+ + if m == nil { + return "", fmt.Errorf("invalid path %s in rollout", path) + } + + var isArray, isMap bool + _, isArray = m.([]interface{}) + _, isMap = m.(map[string]interface{}) + if isArray || isMap { + return "", fmt.Errorf("path %s in rollout must terminate in a primitive value", path) + } + + return fmt.Sprintf("%v", m), nil +} diff --git a/utils/analysis/factory_test.go b/utils/analysis/factory_test.go index 12ec2db117..c485081999 100644 --- a/utils/analysis/factory_test.go +++ b/utils/analysis/factory_test.go @@ -4,22 +4,21 @@ import ( "fmt" "testing" - "k8s.io/apimachinery/pkg/util/intstr" - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/utils/pointer" ) func TestBuildArgumentsForRolloutAnalysisRun(t *testing.T) { - new := v1alpha1.Latest stable := v1alpha1.Stable + annotationPath := fmt.Sprintf("metadata.annotations['%s']", annotations.RevisionAnnotation) rolloutAnalysis := &v1alpha1.RolloutAnalysis{ Args: []v1alpha1.AnalysisRunArgument{ { @@ -50,6 +49,18 @@ func TestBuildArgumentsForRolloutAnalysisRun(t *testing.T) { FieldRef: &v1alpha1.FieldRef{FieldPath: "metadata.labels['env']"}, }, }, + { + Name: annotationPath, + ValueFrom: &v1alpha1.ArgumentValueFrom{ + FieldRef: &v1alpha1.FieldRef{FieldPath: annotationPath}, + }, + }, + { + Name: "status.pauseConditions[0].reason", + ValueFrom: &v1alpha1.ArgumentValueFrom{ + FieldRef: &v1alpha1.FieldRef{FieldPath: "status.pauseConditions[0].reason"}, + }, + }, }, } stableRS := &appsv1.ReplicaSet{ @@ -64,7 +75,6 @@ func TestBuildArgumentsForRolloutAnalysisRun(t *testing.T) { Labels: map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "123456"}, }, } - ro := &v1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), @@ -102,16 +112,24 @@ func TestBuildArgumentsForRolloutAnalysisRun(t *testing.T) { "env": "test", }}, }, - Status: v1alpha1.RolloutStatus{}, + Status: v1alpha1.RolloutStatus{ + PauseConditions: []v1alpha1.PauseCondition{ + { + Reason: "test-reason", + }, + }, + }, } - args := BuildArgumentsForRolloutAnalysisRun(rolloutAnalysis.Args, stableRS, newRS, ro) + args, err := BuildArgumentsForRolloutAnalysisRun(rolloutAnalysis.Args, stableRS, newRS, ro) + assert.NoError(t, err) assert.Contains(t, args, v1alpha1.Argument{Name: "hard-coded-value-key", Value: pointer.StringPtr("hard-coded-value")}) assert.Contains(t, args, v1alpha1.Argument{Name: "stable-key", Value: pointer.StringPtr("abcdef")}) assert.Contains(t, args, v1alpha1.Argument{Name: "new-key", Value: pointer.StringPtr("123456")}) assert.Contains(t, args, v1alpha1.Argument{Name: "metadata.labels['app']", Value: pointer.StringPtr("app")}) assert.Contains(t, args, v1alpha1.Argument{Name: "metadata.labels['env']", Value: pointer.StringPtr("test")}) - + assert.Contains(t, args, v1alpha1.Argument{Name: annotationPath, Value: pointer.StringPtr("1")}) + assert.Contains(t, args, v1alpha1.Argument{Name: "status.pauseConditions[0].reason", Value: pointer.StringPtr("test-reason")}) } func TestPrePromotionLabels(t *testing.T) { @@ -303,7 +321,7 @@ func TestValidateMetrics(t *testing.T) { }, } err := ValidateMetrics(spec.Metrics) - assert.EqualError(t, err, "metrics[1]: duplicate name 'success-rate") + assert.EqualError(t, err, "metrics[1]: duplicate name 'success-rate'") }) 
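
The `extractValueFromRollout` helper added earlier in this patch resolves dotted/bracketed field paths (for example `status.pauseConditions[0].reason`) by marshalling the Rollout to JSON and walking the decoded maps and slices until it reaches a primitive leaf. The snippet below is a minimal, self-contained sketch of that same path-walking idea, offered as editorial illustration only and not part of the patch; the `lookup` function, the sample object, and the path are illustrative assumptions rather than code from the repository.

```go
package main

import (
	"encoding/json"
	"fmt"
	"regexp"
	"strconv"
)

// lookup marshals obj to JSON, splits path on '.', '[' and ']', and walks the
// decoded maps/slices until it reaches a primitive leaf, which it returns as a string.
func lookup(obj interface{}, path string) (string, error) {
	raw, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	var cur interface{}
	if err := json.Unmarshal(raw, &cur); err != nil {
		return "", err
	}
	for _, part := range regexp.MustCompile(`[.\[\]]+`).Split(path, -1) {
		if part == "" {
			continue // a trailing separator yields an empty section
		}
		switch v := cur.(type) {
		case []interface{}:
			i, err := strconv.Atoi(part)
			if err != nil || i < 0 || i >= len(v) {
				return "", fmt.Errorf("invalid index %q in path %s", part, path)
			}
			cur = v[i]
		case map[string]interface{}:
			cur = v[part]
		default:
			return "", fmt.Errorf("invalid path %s", path)
		}
	}
	// Reject paths that do not end in a primitive value, as the patched helper does.
	switch cur.(type) {
	case nil, []interface{}, map[string]interface{}:
		return "", fmt.Errorf("path %s must terminate in a primitive value", path)
	}
	return fmt.Sprintf("%v", cur), nil
}

func main() {
	// Illustrative stand-in for a Rollout object; the field names mirror the test fixtures above.
	obj := map[string]interface{}{
		"status": map[string]interface{}{
			"pauseConditions": []interface{}{
				map[string]interface{}{"reason": "test-reason"},
			},
		},
	}
	val, err := lookup(obj, "status.pauseConditions[0].reason")
	fmt.Println(val, err) // prints: test-reason <nil>
}
```

As in the patched helper, the walk deliberately fails when the path terminates in a map or slice, since analysis-run arguments must resolve to a single primitive value.
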
t.Run("Ensure failureLimit >= 0", func(t *testing.T) { failureLimit := intstr.FromInt(-1) @@ -380,6 +398,8 @@ func TestValidateMetrics(t *testing.T) { Datadog: &v1alpha1.DatadogMetric{}, NewRelic: &v1alpha1.NewRelicMetric{}, CloudWatch: &v1alpha1.CloudWatchMetric{}, + Graphite: &v1alpha1.GraphiteMetric{}, + Influxdb: &v1alpha1.InfluxdbMetric{}, }, }, }, @@ -426,3 +446,81 @@ func TestResolveMetricArgsWithQuotes(t *testing.T) { assert.NoError(t, err) assert.Equal(t, fmt.Sprintf(arg), newMetric.SuccessCondition) } + +func Test_extractValueFromRollout(t *testing.T) { + ro := &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + "app": "app", + }, + }, + Status: v1alpha1.RolloutStatus{ + PauseConditions: []v1alpha1.PauseCondition{ + { + Reason: "test-reason", + }, + }, + }, + } + tests := map[string]struct { + path string + want string + wantErr string + }{ + "should return a simple metadata value": { + path: "metadata.name", + want: "test", + }, + "should return a label using dot notation": { + path: "metadata.labels.app", + want: "app", + }, + "should fail returning a label using accessor notation": { + path: "metadata.labels['app']", + wantErr: "invalid path metadata.labels['app'] in rollout", + }, + "should return a status value": { + path: "status.pauseConditions[0].reason", + want: "test-reason", + }, + "should fail when array indexer is not an int": { + path: "status.pauseConditions[blah].reason", + wantErr: "invalid index 'blah'", + }, + "should fail when array indexer is out of range": { + path: "status.pauseConditions[12].reason", + wantErr: "index 12 out of range", + }, + "should fail when path references an empty field": { + path: "status.pauseConditions[0].startTime", + wantErr: "invalid path status.pauseConditions[0].startTime in rollout", + }, + "should fail when path is inavlid": { + path: "some.invalid[2].non.existing.path", + wantErr: "invalid path some.invalid[2].non.existing.path in rollout", + }, + "should fail when path references a non-primitive value": { + path: "status.pauseConditions[0]", + wantErr: "path status.pauseConditions[0] in rollout must terminate in a primitive value", + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got, err := extractValueFromRollout(ro, tt.path) + if err != nil { + if tt.wantErr != "" { + assert.EqualError(t, err, tt.wantErr) + } else { + t.Errorf("extractValueFromRollout() error = %v", err) + } + + return + } + + if got != tt.want { + t.Errorf("extractValueFromRollout() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/utils/analysis/helpers.go b/utils/analysis/helpers.go index 90d6fcabea..77583136af 100644 --- a/utils/analysis/helpers.go +++ b/utils/analysis/helpers.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "regexp" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" log "github.com/sirupsen/logrus" @@ -86,12 +87,58 @@ func IsTerminating(run *v1alpha1.AnalysisRun) bool { for _, res := range run.Status.MetricResults { switch res.Phase { case v1alpha1.AnalysisPhaseFailed, v1alpha1.AnalysisPhaseError, v1alpha1.AnalysisPhaseInconclusive: - return true + // If this metric is running in the dryRun mode then we don't care about the failures and hence the terminal + // decision shouldn't be affected. + return !res.DryRun } } return false } +// GetMeasurementRetentionMetrics returns an array of metric names matching the RegEx rules from the MeasurementRetention rules. 
+func GetMeasurementRetentionMetrics(measurementRetentionMetrics []v1alpha1.MeasurementRetention, metrics []v1alpha1.Metric) (map[string]*v1alpha1.MeasurementRetention, error) { + metricsMap := make(map[string]*v1alpha1.MeasurementRetention) + if len(measurementRetentionMetrics) == 0 { + return metricsMap, nil + } + // Iterate all the rules in `measurementRetentionMetrics` and try to match the `metrics` one by one + for index, measurementRetentionObject := range measurementRetentionMetrics { + matchCount := 0 + for _, metric := range metrics { + if matched, _ := regexp.MatchString(measurementRetentionObject.MetricName, metric.Name); matched { + metricsMap[metric.Name] = &measurementRetentionObject + matchCount++ + } + } + if matchCount < 1 { + return metricsMap, fmt.Errorf("measurementRetention[%d]: Rule didn't match any metric name(s)", index) + } + } + return metricsMap, nil +} + +// GetDryRunMetrics returns an array of metric names matching the RegEx rules from the Dry-Run metrics. +func GetDryRunMetrics(dryRunMetrics []v1alpha1.DryRun, metrics []v1alpha1.Metric) (map[string]bool, error) { + metricsMap := make(map[string]bool) + if len(dryRunMetrics) == 0 { + return metricsMap, nil + } + // Iterate all the rules in `dryRunMetrics` and try to match the `metrics` one by one + for index, dryRunObject := range dryRunMetrics { + matchCount := 0 + for _, metric := range metrics { + if matched, _ := regexp.MatchString(dryRunObject.MetricName, metric.Name); matched { + metricsMap[metric.Name] = true + matchCount++ + } + } + if matchCount < 1 { + return metricsMap, fmt.Errorf("dryRun[%d]: Rule didn't match any metric name(s)", index) + } + } + return metricsMap, nil +} + // GetResult returns the metric result by name func GetResult(run *v1alpha1.AnalysisRun, metricName string) *v1alpha1.MetricResult { for _, result := range run.Status.MetricResults { @@ -133,6 +180,14 @@ func LastMeasurement(run *v1alpha1.AnalysisRun, metricName string) *v1alpha1.Mea return nil } +func ArrayMeasurement(run *v1alpha1.AnalysisRun, metricName string) []v1alpha1.Measurement { + if result := GetResult(run, metricName); result != nil && len(result.Measurements) > 0 { + return result.Measurements + } + + return nil +} + // TerminateRun terminates an analysis run func TerminateRun(analysisRunIf argoprojclient.AnalysisRunInterface, name string) error { _, err := analysisRunIf.Patch(context.TODO(), name, patchtypes.MergePatchType, []byte(`{"spec":{"terminate":true}}`), metav1.PatchOptions{}) @@ -142,11 +197,11 @@ func TerminateRun(analysisRunIf argoprojclient.AnalysisRunInterface, name string // IsSemanticallyEqual checks to see if two analysis runs are semantically equal func IsSemanticallyEqual(left, right v1alpha1.AnalysisRunSpec) bool { // NOTE: only consider metrics & args when comparing for semantic equality - leftBytes, err := json.Marshal(v1alpha1.AnalysisRunSpec{Metrics: left.Metrics, Args: left.Args}) + leftBytes, err := json.Marshal(v1alpha1.AnalysisRunSpec{Metrics: left.Metrics, DryRun: left.DryRun, MeasurementRetention: left.MeasurementRetention, Args: left.Args}) if err != nil { panic(err) } - rightBytes, err := json.Marshal(v1alpha1.AnalysisRunSpec{Metrics: right.Metrics, Args: right.Args}) + rightBytes, err := json.Marshal(v1alpha1.AnalysisRunSpec{Metrics: right.Metrics, DryRun: right.DryRun, MeasurementRetention: right.MeasurementRetention, Args: right.Args}) if err != nil { panic(err) } @@ -229,7 +284,7 @@ func CreateWithCollisionCounter(logCtx *log.Entry, analysisRunIf argoprojclient. 
} } -func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate, args []v1alpha1.Argument, name, generateName, namespace string) (*v1alpha1.AnalysisRun, error) { +func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, name, generateName, namespace string) (*v1alpha1.AnalysisRun, error) { template, err := FlattenTemplates(templates, clusterTemplates) if err != nil { return nil, err @@ -238,6 +293,14 @@ func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, cluster if err != nil { return nil, err } + dryRun, err := mergeDryRunMetrics(dryRunMetrics, template.Spec.DryRun) + if err != nil { + return nil, err + } + measurementRetention, err := mergeMeasurementRetentionMetrics(measurementRetentionMetrics, template.Spec.MeasurementRetention) + if err != nil { + return nil, err + } ar := v1alpha1.AnalysisRun{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -245,8 +308,10 @@ func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, cluster Namespace: namespace, }, Spec: v1alpha1.AnalysisRunSpec{ - Metrics: template.Spec.Metrics, - Args: newArgs, + Metrics: template.Spec.Metrics, + DryRun: dryRun, + MeasurementRetention: measurementRetention, + Args: newArgs, }, } return &ar, nil @@ -257,14 +322,24 @@ func FlattenTemplates(templates []*v1alpha1.AnalysisTemplate, clusterTemplates [ if err != nil { return nil, err } + dryRunMetrics, err := flattenDryRunMetrics(templates, clusterTemplates) + if err != nil { + return nil, err + } + measurementRetentionMetrics, err := flattenMeasurementRetentionMetrics(templates, clusterTemplates) + if err != nil { + return nil, err + } args, err := flattenArgs(templates, clusterTemplates) if err != nil { return nil, err } return &v1alpha1.AnalysisTemplate{ Spec: v1alpha1.AnalysisTemplateSpec{ - Metrics: metrics, - Args: args, + Metrics: metrics, + DryRun: dryRunMetrics, + MeasurementRetention: measurementRetentionMetrics, + Args: args, }, }, nil } @@ -328,6 +403,86 @@ func flattenMetrics(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []* return combinedMetrics, nil } +func mergeDryRunMetrics(leftDryRunMetrics []v1alpha1.DryRun, rightDryRunMetrics []v1alpha1.DryRun) ([]v1alpha1.DryRun, error) { + var combinedDryRunMetrics []v1alpha1.DryRun + combinedDryRunMetrics = append(combinedDryRunMetrics, leftDryRunMetrics...) + combinedDryRunMetrics = append(combinedDryRunMetrics, rightDryRunMetrics...) + + err := validateDryRunMetrics(combinedDryRunMetrics) + if err != nil { + return nil, err + } + return combinedDryRunMetrics, nil +} + +func mergeMeasurementRetentionMetrics(leftMeasurementRetentionMetrics []v1alpha1.MeasurementRetention, rightMeasurementRetentionMetrics []v1alpha1.MeasurementRetention) ([]v1alpha1.MeasurementRetention, error) { + var combinedMeasurementRetentionMetrics []v1alpha1.MeasurementRetention + combinedMeasurementRetentionMetrics = append(combinedMeasurementRetentionMetrics, leftMeasurementRetentionMetrics...) + combinedMeasurementRetentionMetrics = append(combinedMeasurementRetentionMetrics, rightMeasurementRetentionMetrics...) 
+ + err := validateMeasurementRetentionMetrics(combinedMeasurementRetentionMetrics) + if err != nil { + return nil, err + } + return combinedMeasurementRetentionMetrics, nil +} + +func flattenDryRunMetrics(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate) ([]v1alpha1.DryRun, error) { + var combinedDryRunMetrics []v1alpha1.DryRun + for _, template := range templates { + combinedDryRunMetrics = append(combinedDryRunMetrics, template.Spec.DryRun...) + } + + for _, template := range clusterTemplates { + combinedDryRunMetrics = append(combinedDryRunMetrics, template.Spec.DryRun...) + } + + err := validateDryRunMetrics(combinedDryRunMetrics) + if err != nil { + return nil, err + } + return combinedDryRunMetrics, nil +} + +func flattenMeasurementRetentionMetrics(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate) ([]v1alpha1.MeasurementRetention, error) { + var combinedMeasurementRetentionMetrics []v1alpha1.MeasurementRetention + for _, template := range templates { + combinedMeasurementRetentionMetrics = append(combinedMeasurementRetentionMetrics, template.Spec.MeasurementRetention...) + } + + for _, template := range clusterTemplates { + combinedMeasurementRetentionMetrics = append(combinedMeasurementRetentionMetrics, template.Spec.MeasurementRetention...) + } + + err := validateMeasurementRetentionMetrics(combinedMeasurementRetentionMetrics) + if err != nil { + return nil, err + } + return combinedMeasurementRetentionMetrics, nil +} + +func validateDryRunMetrics(dryRunMetrics []v1alpha1.DryRun) error { + metricMap := map[string]bool{} + for _, dryRun := range dryRunMetrics { + if _, ok := metricMap[dryRun.MetricName]; ok { + return fmt.Errorf("two Dry-Run metric rules have the same name '%s'", dryRun.MetricName) + } + metricMap[dryRun.MetricName] = true + } + return nil +} + +func validateMeasurementRetentionMetrics(measurementRetentionMetrics []v1alpha1.MeasurementRetention) error { + metricMap := map[string]bool{} + for _, measurementRetention := range measurementRetentionMetrics { + if _, ok := metricMap[measurementRetention.MetricName]; ok { + return fmt.Errorf("two Measurement Retention metric rules have the same name '%s'", measurementRetention.MetricName) + } + metricMap[measurementRetention.MetricName] = true + } + return nil +} + func NewAnalysisRunFromUnstructured(obj *unstructured.Unstructured, templateArgs []v1alpha1.Argument, name, generateName, namespace string) (*unstructured.Unstructured, error) { var newArgs []v1alpha1.Argument @@ -405,46 +560,6 @@ func NewAnalysisRunFromUnstructured(obj *unstructured.Unstructured, templateArgs return obj, nil } -//TODO(dthomson) remove v0.9.0 -func NewAnalysisRunFromClusterTemplate(template *v1alpha1.ClusterAnalysisTemplate, args []v1alpha1.Argument, name, generateName, namespace string) (*v1alpha1.AnalysisRun, error) { - newArgs, err := MergeArgs(args, template.Spec.Args) - if err != nil { - return nil, err - } - ar := v1alpha1.AnalysisRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - GenerateName: generateName, - Namespace: namespace, - }, - Spec: v1alpha1.AnalysisRunSpec{ - Metrics: template.Spec.Metrics, - Args: newArgs, - }, - } - return &ar, nil -} - -//TODO(dthomson) remove v0.9.0 -func NewAnalysisRunFromTemplate(template *v1alpha1.AnalysisTemplate, args []v1alpha1.Argument, name, generateName, namespace string) (*v1alpha1.AnalysisRun, error) { - newArgs, err := MergeArgs(args, template.Spec.Args) - if err != nil { - return nil, err - } - ar := 
v1alpha1.AnalysisRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - GenerateName: generateName, - Namespace: namespace, - }, - Spec: v1alpha1.AnalysisRunSpec{ - Metrics: template.Spec.Metrics, - Args: newArgs, - }, - } - return &ar, nil -} - // GetInstanceID takes an object and returns the controller instance id if it has one func GetInstanceID(obj runtime.Object) string { objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) diff --git a/utils/analysis/helpers_test.go b/utils/analysis/helpers_test.go index 0a5171f23d..f9baf89f14 100644 --- a/utils/analysis/helpers_test.go +++ b/utils/analysis/helpers_test.go @@ -6,6 +6,8 @@ import ( "fmt" "testing" + "k8s.io/apimachinery/pkg/util/intstr" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,20 +62,34 @@ func TestIsFastFailTerminating(t *testing.T) { Name: "success-rate", Phase: v1alpha1.AnalysisPhaseRunning, }, + { + Name: "dry-run-metric", + Phase: v1alpha1.AnalysisPhaseRunning, + DryRun: true, + }, }, }, } + // Verify that when the metric is not failing or in the error state then we don't terminate. successRate := run.Status.MetricResults[1] assert.False(t, IsTerminating(run)) + // Metric failing in the dryRun mode shouldn't impact the terminal decision. + dryRunMetricResult := run.Status.MetricResults[2] + dryRunMetricResult.Phase = v1alpha1.AnalysisPhaseError + run.Status.MetricResults[2] = dryRunMetricResult + assert.False(t, IsTerminating(run)) + // Verify that a wet run metric failure/error results in terminal decision. successRate.Phase = v1alpha1.AnalysisPhaseError run.Status.MetricResults[1] = successRate assert.True(t, IsTerminating(run)) successRate.Phase = v1alpha1.AnalysisPhaseFailed run.Status.MetricResults[1] = successRate assert.True(t, IsTerminating(run)) + // Verify that an inconclusive wet run metric results in terminal decision. successRate.Phase = v1alpha1.AnalysisPhaseInconclusive run.Status.MetricResults[1] = successRate assert.True(t, IsTerminating(run)) + // Verify that we don't terminate when there are no metric results or when the status is empty. 
run.Status.MetricResults = nil assert.False(t, IsTerminating(run)) run.Status = v1alpha1.AnalysisRunStatus{} @@ -163,6 +179,31 @@ func TestLastMeasurement(t *testing.T) { assert.Nil(t, LastMeasurement(run, "success-rate")) } +func TestArrayMeasurement(t *testing.T) { + m1 := v1alpha1.Measurement{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + Value: "99", + } + m2 := v1alpha1.Measurement{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + Value: "98", + } + run := &v1alpha1.AnalysisRun{ + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseRunning, + MetricResults: []v1alpha1.MetricResult{ + { + Name: "success-rate", + Phase: v1alpha1.AnalysisPhaseRunning, + Measurements: []v1alpha1.Measurement{m1, m2}, + }, + }, + }, + } + assert.Nil(t, ArrayMeasurement(run, "non-existent")) + assert.Equal(t, run.Status.MetricResults[0].Measurements, ArrayMeasurement(run, "success-rate")) +} + func TestIsTerminating(t *testing.T) { run := &v1alpha1.AnalysisRun{ Status: v1alpha1.AnalysisRunStatus{ @@ -347,12 +388,24 @@ func TestFlattenTemplates(t *testing.T) { { Spec: v1alpha1.AnalysisTemplateSpec{ Metrics: []v1alpha1.Metric{fooMetric}, - Args: nil, + DryRun: []v1alpha1.DryRun{{ + MetricName: "foo", + }}, + MeasurementRetention: []v1alpha1.MeasurementRetention{{ + MetricName: "foo", + }}, + Args: nil, }, }, { Spec: v1alpha1.AnalysisTemplateSpec{ Metrics: []v1alpha1.Metric{barMetric}, - Args: nil, + DryRun: []v1alpha1.DryRun{{ + MetricName: "bar", + }}, + MeasurementRetention: []v1alpha1.MeasurementRetention{{ + MetricName: "bar", + }}, + Args: nil, }, }, }, []*v1alpha1.ClusterAnalysisTemplate{}) @@ -369,14 +422,34 @@ func TestFlattenTemplates(t *testing.T) { { Spec: v1alpha1.AnalysisTemplateSpec{ Metrics: []v1alpha1.Metric{fooMetric}, - Args: nil, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "foo", + }, + }, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "foo", + }, + }, + Args: nil, }, }, }, []*v1alpha1.ClusterAnalysisTemplate{ { Spec: v1alpha1.AnalysisTemplateSpec{ Metrics: []v1alpha1.Metric{barMetric}, - Args: nil, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "bar", + }, + }, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "bar", + }, + }, + Args: nil, }, }, }) @@ -386,7 +459,7 @@ func TestFlattenTemplates(t *testing.T) { assert.Equal(t, fooMetric, template.Spec.Metrics[0]) assert.Equal(t, barMetric, template.Spec.Metrics[1]) }) - t.Run(" Merge fail with name collision", func(t *testing.T) { + t.Run("Merge fail with name collision", func(t *testing.T) { fooMetric := metric("foo", "true") template, err := FlattenTemplates([]*v1alpha1.AnalysisTemplate{ { @@ -404,6 +477,64 @@ func TestFlattenTemplates(t *testing.T) { assert.Nil(t, template) assert.Equal(t, err, fmt.Errorf("two metrics have the same name 'foo'")) }) + t.Run("Merge fail with dry-run name collision", func(t *testing.T) { + fooMetric := metric("foo", "true") + barMetric := metric("bar", "true") + template, err := FlattenTemplates([]*v1alpha1.AnalysisTemplate{ + { + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{fooMetric}, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "foo", + }, + }, + Args: nil, + }, + }, { + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{barMetric}, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "foo", + }, + }, + Args: nil, + }, + }, + }, []*v1alpha1.ClusterAnalysisTemplate{}) + assert.Nil(t, template) + assert.Equal(t, err, fmt.Errorf("two Dry-Run metric rules have the same name 'foo'")) + }) + t.Run("Merge fail with 
measurement retention metrics name collision", func(t *testing.T) { + fooMetric := metric("foo", "true") + barMetric := metric("bar", "true") + template, err := FlattenTemplates([]*v1alpha1.AnalysisTemplate{ + { + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{fooMetric}, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "foo", + }, + }, + Args: nil, + }, + }, { + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{barMetric}, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "foo", + }, + }, + Args: nil, + }, + }, + }, []*v1alpha1.ClusterAnalysisTemplate{}) + assert.Nil(t, template) + assert.Equal(t, err, fmt.Errorf("two Measurement Retention metric rules have the same name 'foo'")) + }) t.Run("Merge multiple args successfully", func(t *testing.T) { fooArgs := arg("foo", pointer.StringPtr("true")) barArgs := arg("bar", pointer.StringPtr("true")) @@ -489,7 +620,7 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { }, }} - clustertemplates := []*v1alpha1.ClusterAnalysisTemplate{} + var clusterTemplates []*v1alpha1.ClusterAnalysisTemplate arg := v1alpha1.Argument{ Name: "my-arg", @@ -506,7 +637,7 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { } args := []v1alpha1.Argument{arg, secretArg} - run, err := NewAnalysisRunFromTemplates(templates, clustertemplates, args, "foo-run", "foo-run-generate-", "my-ns") + run, err := NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "foo-run", "foo-run-generate-", "my-ns") assert.NoError(t, err) assert.Equal(t, "foo-run", run.Name) assert.Equal(t, "foo-run-generate-", run.GenerateName) @@ -519,7 +650,7 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { // Fail Merge Args unresolvedArg := v1alpha1.Argument{Name: "unresolved"} templates[0].Spec.Args = append(templates[0].Spec.Args, unresolvedArg) - run, err = NewAnalysisRunFromTemplates(templates, clustertemplates, args, "foo-run", "foo-run-generate-", "my-ns") + run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "foo-run", "foo-run-generate-", "my-ns") assert.Nil(t, run) assert.Equal(t, fmt.Errorf("args.unresolved was not resolved"), err) // Fail flatten metric @@ -532,7 +663,7 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { } // Fail Flatten Templates templates = append(templates, matchingMetric) - run, err = NewAnalysisRunFromTemplates(templates, clustertemplates, args, "foo-run", "foo-run-generate-", "my-ns") + run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "foo-run", "foo-run-generate-", "my-ns") assert.Nil(t, run) assert.Equal(t, fmt.Errorf("two metrics have the same name 'success-rate'"), err) } @@ -687,8 +818,7 @@ func TestNewAnalysisRunFromUnstructured(t *testing.T) { } } -//TODO(dthomson) remove this test in v0.9.0 -func TestNewAnalysisRunFromTemplate(t *testing.T) { +func TestCompatibilityNewAnalysisRunFromTemplate(t *testing.T) { template := v1alpha1.AnalysisTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", @@ -713,7 +843,8 @@ func TestNewAnalysisRunFromTemplate(t *testing.T) { Value: pointer.StringPtr("my-val"), }, } - run, err := NewAnalysisRunFromTemplate(&template, args, "foo-run", "foo-run-generate-", "my-ns") + analysisTemplates := []*v1alpha1.AnalysisTemplate{&template} + run, err := NewAnalysisRunFromTemplates(analysisTemplates, nil, args, nil, nil, 
"foo-run", "foo-run-generate-", "my-ns") assert.NoError(t, err) assert.Equal(t, "foo-run", run.Name) assert.Equal(t, "foo-run-generate-", run.GenerateName) @@ -722,9 +853,8 @@ func TestNewAnalysisRunFromTemplate(t *testing.T) { assert.Equal(t, "my-val", *run.Spec.Args[0].Value) } -//TODO(dthomson) remove this test in v0.9.0 -func TestNewAnalysisRunFromClusterTemplate(t *testing.T) { - template := v1alpha1.ClusterAnalysisTemplate{ +func TestCompatibilityNewAnalysisRunFromClusterTemplate(t *testing.T) { + clusterTemplate := v1alpha1.ClusterAnalysisTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -748,7 +878,8 @@ func TestNewAnalysisRunFromClusterTemplate(t *testing.T) { Value: pointer.StringPtr("my-val"), }, } - run, err := NewAnalysisRunFromClusterTemplate(&template, args, "foo-run", "foo-run-generate-", "my-ns") + clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{&clusterTemplate} + run, err := NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, nil, nil, "foo-run", "foo-run-generate-", "my-ns") assert.NoError(t, err) assert.Equal(t, "foo-run", run.Name) assert.Equal(t, "foo-run-generate-", run.GenerateName) @@ -773,3 +904,172 @@ func TestGetInstanceID(t *testing.T) { assert.Panics(t, func() { GetInstanceID(nilRun) }) } + +func TestGetDryRunMetrics(t *testing.T) { + t.Run("GetDryRunMetrics returns the metric names map", func(t *testing.T) { + failureLimit := intstr.FromInt(2) + count := intstr.FromInt(1) + spec := v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "success-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + }, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "success-rate", + }, + }, + } + dryRunMetricNamesMap, err := GetDryRunMetrics(spec.DryRun, spec.Metrics) + assert.Nil(t, err) + assert.True(t, dryRunMetricNamesMap["success-rate"]) + }) + t.Run("GetDryRunMetrics handles the RegEx rules", func(t *testing.T) { + failureLimit := intstr.FromInt(2) + count := intstr.FromInt(1) + spec := v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "success-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + { + Name: "error-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + }, + DryRun: []v1alpha1.DryRun{ + { + MetricName: ".*", + }, + }, + } + dryRunMetricNamesMap, err := GetDryRunMetrics(spec.DryRun, spec.Metrics) + assert.Nil(t, err) + assert.Equal(t, len(dryRunMetricNamesMap), 2) + }) + t.Run("GetDryRunMetrics throw error when a rule doesn't get matched", func(t *testing.T) { + failureLimit := intstr.FromInt(2) + count := intstr.FromInt(1) + spec := v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "success-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + }, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "error-rate", + }, + }, + } + dryRunMetricNamesMap, err := GetDryRunMetrics(spec.DryRun, spec.Metrics) + assert.EqualError(t, err, "dryRun[0]: Rule didn't match any metric name(s)") + assert.Equal(t, len(dryRunMetricNamesMap), 0) + }) +} + +func TestGetMeasurementRetentionMetrics(t *testing.T) { + t.Run("GetMeasurementRetentionMetrics returns the metric names map", 
func(t *testing.T) { + failureLimit := intstr.FromInt(2) + count := intstr.FromInt(1) + spec := v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "success-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + }, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "success-rate", + Limit: 10, + }, + }, + } + measurementRetentionMetricNamesMap, err := GetMeasurementRetentionMetrics(spec.MeasurementRetention, spec.Metrics) + assert.Nil(t, err) + assert.NotNil(t, measurementRetentionMetricNamesMap["success-rate"]) + }) + t.Run("GetMeasurementRetentionMetrics handles the RegEx rules", func(t *testing.T) { + failureLimit := intstr.FromInt(2) + count := intstr.FromInt(1) + spec := v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "success-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + { + Name: "error-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + }, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: ".*", + Limit: 15, + }, + }, + } + measurementRetentionMetricNamesMap, err := GetMeasurementRetentionMetrics(spec.MeasurementRetention, spec.Metrics) + assert.Nil(t, err) + assert.Equal(t, len(measurementRetentionMetricNamesMap), 2) + }) + t.Run("GetMeasurementRetentionMetrics throw error when a rule doesn't get matched", func(t *testing.T) { + failureLimit := intstr.FromInt(2) + count := intstr.FromInt(1) + spec := v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "success-rate", + Count: &count, + FailureLimit: &failureLimit, + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{}, + }, + }, + }, + MeasurementRetention: []v1alpha1.MeasurementRetention{ + { + MetricName: "error-rate", + Limit: 11, + }, + }, + } + measurementRetentionMetricNamesMap, err := GetMeasurementRetentionMetrics(spec.MeasurementRetention, spec.Metrics) + assert.EqualError(t, err, "measurementRetention[0]: Rule didn't match any metric name(s)") + assert.Equal(t, len(measurementRetentionMetricNamesMap), 0) + }) +} diff --git a/utils/annotations/annotations.go b/utils/annotations/annotations.go index a99da3fcbb..e3fead8e8f 100644 --- a/utils/annotations/annotations.go +++ b/utils/annotations/annotations.go @@ -79,7 +79,7 @@ func SetRolloutRevision(rollout *v1alpha1.Rollout, revision string) bool { return false } -// SetRolloutRevision updates the revision for a rollout. +// SetRolloutWorkloadRefGeneration updates the workflow generation annotation for a rollout. 
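+// (The value recorded here is, by inference from the signature, the generation
+// observed on the workload referenced via the rollout's spec.workloadRef.)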
func SetRolloutWorkloadRefGeneration(rollout *v1alpha1.Rollout, workloadGeneration string) bool { if rollout.Annotations == nil { rollout.Annotations = make(map[string]string) diff --git a/utils/appmesh/appmesh.go b/utils/appmesh/appmesh.go new file mode 100644 index 0000000000..83e77572ff --- /dev/null +++ b/utils/appmesh/appmesh.go @@ -0,0 +1,44 @@ +package appmesh + +import ( + "context" + + "github.com/argoproj/argo-rollouts/utils/defaults" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +const AppMeshCRDGroup = "appmesh.k8s.aws" + +func DoesAppMeshExist(dynamicClient dynamic.Interface, namespace string) bool { + _, err := dynamicClient.Resource(GetAppMeshVirtualServiceGVR()).Namespace(namespace).List(context.TODO(), metav1.ListOptions{Limit: 1}) + if err != nil { + return false + } + return true +} + +func GetAppMeshVirtualServiceGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: AppMeshCRDGroup, + Version: defaults.GetAppMeshCRDVersion(), + Resource: "virtualservices", + } +} + +func GetAppMeshVirtualRouterGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: AppMeshCRDGroup, + Version: defaults.GetAppMeshCRDVersion(), + Resource: "virtualrouters", + } +} + +func GetAppMeshVirtualNodeGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: AppMeshCRDGroup, + Version: defaults.GetAppMeshCRDVersion(), + Resource: "virtualnodes", + } +} diff --git a/utils/aws/aws.go b/utils/aws/aws.go index eba35e4f48..501e17b3ac 100644 --- a/utils/aws/aws.go +++ b/utils/aws/aws.go @@ -6,6 +6,8 @@ import ( "fmt" "strings" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/aws/aws-sdk-go-v2/config" elbv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" @@ -128,13 +130,18 @@ func FakeNewClientFunc(elbClient ELBv2APIClient) func() (Client, error) { } func (c *ClientAdapter) FindLoadBalancerByDNSName(ctx context.Context, dnsName string) (*elbv2types.LoadBalancer, error) { - lbOutput, err := c.ELBV2.DescribeLoadBalancers(ctx, &elbv2.DescribeLoadBalancersInput{}) - if err != nil { - return nil, err - } - for _, lb := range lbOutput.LoadBalancers { - if lb.DNSName != nil && *lb.DNSName == dnsName { - return &lb, nil + paginator := elbv2.NewDescribeLoadBalancersPaginator(c.ELBV2, &elbv2.DescribeLoadBalancersInput{ + PageSize: aws.Int32(defaults.DefaultAwsLoadBalancerPageSize), + }) + for paginator.HasMorePages() { + output, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + for _, lb := range output.LoadBalancers { + if lb.DNSName != nil && *lb.DNSName == dnsName { + return &lb, nil + } } } return nil, nil diff --git a/utils/aws/mocks/ELBv2APIClient.go b/utils/aws/mocks/ELBv2APIClient.go index 7f621b0563..cfc8872f3b 100644 --- a/utils/aws/mocks/ELBv2APIClient.go +++ b/utils/aws/mocks/ELBv2APIClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -193,3 +193,18 @@ func (_m *ELBv2APIClient) DescribeTargetHealth(ctx context.Context, params *elas return r0, r1 } + +type mockConstructorTestingTNewELBv2APIClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewELBv2APIClient creates a new instance of ELBv2APIClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
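+// Illustrative usage in a test (a sketch, not part of the generated code; the
+// `mocks`, `mock`, and `elbv2` aliases are assumed from the test's imports, and
+// variadic option arguments, if the caller passes any, need matching as well):
+//
+//	m := mocks.NewELBv2APIClient(t)
+//	m.On("DescribeTargetHealth", mock.Anything, mock.Anything).
+//		Return(&elbv2.DescribeTargetHealthOutput{}, nil)
+//	// ...exercise the code under test; expectations are asserted via t.Cleanup.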
+func NewELBv2APIClient(t mockConstructorTestingTNewELBv2APIClient) *ELBv2APIClient { + mock := &ELBv2APIClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/utils/conditions/conditions.go b/utils/conditions/conditions.go index 8ebaf5412c..c2aaece787 100644 --- a/utils/conditions/conditions.go +++ b/utils/conditions/conditions.go @@ -9,12 +9,12 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" logutil "github.com/argoproj/argo-rollouts/utils/log" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -68,6 +68,17 @@ const ( RolloutCompletedReason = "RolloutCompleted" // RolloutCompletedMessage is added when the rollout is completed RolloutCompletedMessage = "Rollout completed update to revision %d (%s): %s" + // RolloutNotCompletedReason is added in a rollout when it is completed. + RolloutNotCompletedReason = "RolloutNotCompleted" + // RolloutNotCompletedMessage is added when the rollout is completed + RolloutNotCompletedMessage = "Rollout not completed, started update to revision %d (%s)" + + // RolloutHealthyReason is added in a rollout when it is healthy. + RolloutHealthyReason = "RolloutHealthy" + // RolloutHealthyMessage is added when the rollout is completed and is healthy or not. + RolloutHealthyMessage = "Rollout is healthy" + // RolloutNotHealthyMessage is added when the rollout is completed and is healthy or not. + RolloutNotHealthyMessage = "Rollout is not healthy" // RolloutAbortedReason indicates that the rollout was aborted RolloutAbortedReason = "RolloutAborted" @@ -86,6 +97,12 @@ const ( // estimated once a rollout is paused. RolloutPausedMessage = "Rollout is paused" + // ReplicaSetNotAvailableReason is added when the replicaset of an rollout is not available. + // This could happen when a fully promoted rollout becomes incomplete, e.g., + // due to pod restarts, evicted -> recreated. In this case, we'll need to reset the rollout's + // condition to `PROGRESSING` to avoid any timeouts. + ReplicaSetNotAvailableReason = "ReplicaSetNotAvailable" + // RolloutResumedReason is added in a rollout when it is resumed. Useful for not failing accidentally // rollout that paused amidst a rollout and are bounded by a deadline. RolloutResumedReason = "RolloutResumed" @@ -93,13 +110,14 @@ const ( // rollout that paused amidst a rollout and are bounded by a deadline. RolloutResumedMessage = "Rollout is resumed" - // ResumedRolloutReason is added in a rollout when it is resumed. Useful for not failing accidentally - // rollout that paused amidst a rollout and are bounded by a deadline. - RolloutStepCompletedReason = "RolloutStepCompleted" - // ResumeRolloutMessage is added in a rollout when it is resumed. Useful for not failing accidentally - // rollout that paused amidst a rollout and are bounded by a deadline. + // RolloutStepCompleted indicates when a canary step has completed + RolloutStepCompletedReason = "RolloutStepCompleted" RolloutStepCompletedMessage = "Rollout step %d/%d completed (%s)" + // TrafficWeightUpdated is emitted any time traffic weight is modified + TrafficWeightUpdatedReason = "TrafficWeightUpdated" + TrafficWeightUpdatedMessage = "Traffic weight updated %s" + // NewRSAvailableReason is added in a rollout when its newest replica set is made available // ie. 
the number of new pods that have passed readiness checks and run for at least minReadySeconds // is at least the minimum available pods that need to run for the rollout. @@ -118,10 +136,13 @@ const ( // TimedOutReason is added in a rollout when its newest replica set fails to show any progress // within the given deadline (progressDeadlineSeconds). TimedOutReason = "ProgressDeadlineExceeded" - // RolloutTimeOutMessage is is added in a rollout when the rollout fails to show any progress + // RolloutTimeOutMessage is added in a rollout when the rollout fails to show any progress // within the given deadline (progressDeadlineSeconds). RolloutTimeOutMessage = "Rollout %q has timed out progressing." + RolloutDeletedReason = "RolloutDeleted" + RolloutDeletedMessage = "Rollout %s/%s is deleted." + ScalingReplicaSetReason = "ScalingReplicaSet" ScalingReplicaSetMessage = "Scaled %s ReplicaSet %s (revision %d) from %d to %d" @@ -150,6 +171,9 @@ const ( // WeightVerifyErrorReason is emitted when there is an error verifying the set weight WeightVerifyErrorReason = "WeightVerifyError" WeightVerifyErrorMessage = "Failed to verify weight: %s" + // LoadBalancerNotFoundReason is emitted when load balancer can not be found + LoadBalancerNotFoundReason = "LoadBalancerNotFound" + LoadBalancerNotFoundMessage = "Failed to find load balancer: %s" ) // NewRolloutCondition creates a new rollout condition. @@ -157,8 +181,8 @@ func NewRolloutCondition(condType v1alpha1.RolloutConditionType, status corev1.C return &v1alpha1.RolloutCondition{ Type: condType, Status: status, - LastUpdateTime: metav1.Now(), - LastTransitionTime: metav1.Now(), + LastUpdateTime: timeutil.MetaNow(), + LastTransitionTime: timeutil.MetaNow(), Reason: reason, Message: message, } @@ -244,9 +268,9 @@ func RolloutProgressing(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutSt strategySpecificProgress } -// RolloutComplete considers a rollout to be complete once all of its desired replicas +// RolloutHealthy considers a rollout to be healthy once all of its desired replicas // are updated, available, and receiving traffic from the active service, and no old pods are running. -func RolloutComplete(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus) bool { +func RolloutHealthy(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus) bool { completedStrategy := true replicas := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) @@ -275,6 +299,11 @@ func RolloutComplete(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatu completedStrategy } +// RolloutCompleted considers a rollout to be complete once StableRS == CurrentPodHash +func RolloutCompleted(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatus) bool { + return newStatus.StableRS != "" && newStatus.StableRS == newStatus.CurrentPodHash +} + // ComputeStepHash returns a hash value calculated from the Rollout's steps. The hash will // be safe encoded to avoid bad words. func ComputeStepHash(rollout *v1alpha1.Rollout) string { @@ -316,7 +345,7 @@ func RolloutTimedOut(rollout *v1alpha1.Rollout, newStatus *v1alpha1.RolloutStatu // progress or tried to create a replica set, or resumed a paused rollout and // compare against progressDeadlineSeconds. 
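	// In other words: the rollout is considered timed out once more than
	// progressDeadlineSeconds have elapsed since the condition's LastUpdateTime.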
from := condition.LastUpdateTime - now := time.Now() + now := timeutil.Now() progressDeadlineSeconds := defaults.GetProgressDeadlineSecondsOrDefault(rollout) delta := time.Duration(progressDeadlineSeconds) * time.Second diff --git a/utils/conditions/experiments.go b/utils/conditions/experiments.go index 4acb6b1bbd..e424c5df15 100644 --- a/utils/conditions/experiments.go +++ b/utils/conditions/experiments.go @@ -10,6 +10,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" experimentutil "github.com/argoproj/argo-rollouts/utils/experiment" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) const ( @@ -39,8 +40,8 @@ func NewExperimentConditions(condType v1alpha1.ExperimentConditionType, status c return &v1alpha1.ExperimentCondition{ Type: condType, Status: status, - LastUpdateTime: metav1.Now(), - LastTransitionTime: metav1.Now(), + LastUpdateTime: timeutil.MetaNow(), + LastTransitionTime: timeutil.MetaNow(), Reason: reason, Message: message, } @@ -127,7 +128,7 @@ func ExperimentRunning(experiment *v1alpha1.Experiment) bool { func newInvalidSpecExperimentCondition(prevCond *v1alpha1.ExperimentCondition, reason string, message string) *v1alpha1.ExperimentCondition { if prevCond != nil && prevCond.Message == message { - prevCond.LastUpdateTime = metav1.Now() + prevCond.LastUpdateTime = timeutil.MetaNow() return prevCond } return NewExperimentConditions(v1alpha1.InvalidExperimentSpec, corev1.ConditionTrue, reason, message) diff --git a/utils/conditions/rollouts_test.go b/utils/conditions/rollouts_test.go index c633a4e40c..842f01f594 100644 --- a/utils/conditions/rollouts_test.go +++ b/utils/conditions/rollouts_test.go @@ -8,10 +8,10 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/controller" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/utils/hash" ) var ( @@ -350,7 +350,7 @@ func TestRolloutProgressing(t *testing.T) { } -func TestRolloutComplete(t *testing.T) { +func TestRolloutHealthy(t *testing.T) { rollout := func(desired, current, updated, available int32, correctObservedGeneration bool) *v1alpha1.Rollout { r := &v1alpha1.Rollout{ Spec: v1alpha1.RolloutSpec{ @@ -363,7 +363,7 @@ func TestRolloutComplete(t *testing.T) { }, } r.Generation = 123 - podHash := controller.ComputeHash(&r.Spec.Template, r.Status.CollisionCount) + podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) r.Status.CurrentPodHash = podHash return r } @@ -411,13 +411,13 @@ func TestRolloutComplete(t *testing.T) { { name: "BlueGreen complete", // update hash to status.CurrentPodHash after k8s library update - r: blueGreenRollout(5, 5, 5, 5, true, "85f7cf5fc7", "85f7cf5fc7"), + r: blueGreenRollout(5, 5, 5, 5, true, "76bbb58f74", "76bbb58f74"), expected: true, }, { name: "BlueGreen complete with extra old replicas", // update hash to status.CurrentPodHash after k8s library update - r: blueGreenRollout(5, 6, 5, 5, true, "85f7cf5fc7", "85f7cf5fc7"), + r: blueGreenRollout(5, 6, 5, 5, true, "76bbb58f74", "76bbb58f74"), expected: true, }, { @@ -475,12 +475,36 @@ func TestRolloutComplete(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - assert.Equal(t, test.expected, RolloutComplete(test.r, &test.r.Status)) + assert.Equal(t, test.expected, RolloutHealthy(test.r, &test.r.Status)) }) } } +func TestRolloutComplete(t 
*testing.T) { + rollout := func(desired, current, updated, available int32) *v1alpha1.Rollout { + r := &v1alpha1.Rollout{ + Spec: v1alpha1.RolloutSpec{ + Replicas: &desired, + }, + Status: v1alpha1.RolloutStatus{ + Replicas: current, + UpdatedReplicas: updated, + AvailableReplicas: available, + }, + } + podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) + r.Status.CurrentPodHash = podHash + r.Status.StableRS = podHash + return r + } + r := rollout(5, 5, 5, 5) + assert.Equal(t, true, RolloutCompleted(r, &r.Status)) + + r.Status.StableRS = "not-current-pod-hash" + assert.Equal(t, false, RolloutCompleted(r, &r.Status)) +} + func TestRolloutTimedOut(t *testing.T) { before := metav1.Time{ diff --git a/utils/defaults/defaults.go b/utils/defaults/defaults.go index 5395552532..9b18b16db1 100644 --- a/utils/defaults/defaults.go +++ b/utils/defaults/defaults.go @@ -36,6 +36,15 @@ const ( // DefaultConsecutiveErrorLimit is the default number times a metric can error in sequence before // erroring the entire metric. DefaultConsecutiveErrorLimit int32 = 4 + // DefaultQPS is the default Queries Per Second (QPS) for client side throttling to the K8s API server + DefaultQPS float32 = 40.0 + // DefaultBurst is the default value for Burst for client side throttling to the K8s API server + DefaultBurst int = 80 + // DefaultAwsLoadBalancerPageSize is the default page size used when calling aws to get load balancers by DNS name + DefaultAwsLoadBalancerPageSize = int32(300) + // DefaultMetricCleanupDelay is the default time to delay metrics removal upon object removal, gives time for metrics + // to be collected + DefaultMetricCleanupDelay = int32(65) ) const ( @@ -44,6 +53,9 @@ const ( DefaultIstioVersion = "v1alpha3" DefaultSMITrafficSplitVersion = "v1alpha1" DefaultTargetGroupBindingAPIVersion = "elbv2.k8s.aws/v1beta1" + DefaultAppMeshCRDVersion = "v1beta2" + DefaultTraefikAPIGroup = "traefik.containo.us" + DefaultTraefikVersion = "traefik.containo.us/v1alpha1" ) var ( @@ -52,6 +64,8 @@ var ( ambassadorAPIVersion = DefaultAmbassadorVersion smiAPIVersion = DefaultSMITrafficSplitVersion targetGroupBindingAPIVersion = DefaultTargetGroupBindingAPIVersion + appmeshCRDVersion = DefaultAppMeshCRDVersion + defaultMetricCleanupDelay = DefaultMetricCleanupDelay ) const ( @@ -71,6 +85,14 @@ func init() { } } +func GetStringOrDefault(value, defaultValue string) string { + if value == "" { + return defaultValue + } else { + return value + } +} + // GetReplicasOrDefault returns the deferenced number of replicas or the default number func GetReplicasOrDefault(replicas *int32) int32 { if replicas == nil { @@ -259,6 +281,14 @@ func GetAmbassadorAPIVersion() string { return ambassadorAPIVersion } +func SetAppMeshCRDVersion(apiVersion string) { + appmeshCRDVersion = apiVersion +} + +func GetAppMeshCRDVersion() string { + return appmeshCRDVersion +} + func SetSMIAPIVersion(apiVersion string) { smiAPIVersion = apiVersion } @@ -278,3 +308,13 @@ func GetTargetGroupBindingAPIVersion() string { func GetRolloutVerifyRetryInterval() time.Duration { return rolloutVerifyRetryInterval } + +// GetMetricCleanupDelaySeconds returns the duration to delay the cleanup of metrics +func GetMetricCleanupDelaySeconds() time.Duration { + return time.Duration(defaultMetricCleanupDelay) * time.Second +} + +// SetMetricCleanupDelaySeconds sets the metric cleanup delay in seconds +func SetMetricCleanupDelaySeconds(seconds int32) { + defaultMetricCleanupDelay = seconds +} diff --git a/utils/defaults/defaults_test.go 
b/utils/defaults/defaults_test.go index d3df2554bc..2162f1be82 100644 --- a/utils/defaults/defaults_test.go +++ b/utils/defaults/defaults_test.go @@ -12,6 +12,11 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" ) +func TestGetStringOrDefault(t *testing.T) { + assert.Equal(t, "some value", GetStringOrDefault("some value", "default value")) + assert.Equal(t, "default value", GetStringOrDefault("", "default value")) +} + func TestGetReplicasOrDefault(t *testing.T) { replicas := int32(2) assert.Equal(t, replicas, GetReplicasOrDefault(&replicas)) @@ -397,4 +402,13 @@ func TestSetDefaults(t *testing.T) { assert.Equal(t, "v1alpha9", GetTargetGroupBindingAPIVersion()) SetTargetGroupBindingAPIVersion(DefaultTargetGroupBindingAPIVersion) assert.Equal(t, DefaultTargetGroupBindingAPIVersion, GetTargetGroupBindingAPIVersion()) + + assert.Equal(t, DefaultAppMeshCRDVersion, GetAppMeshCRDVersion()) + SetAppMeshCRDVersion("v1beta3") + assert.Equal(t, "v1beta3", GetAppMeshCRDVersion()) + SetAppMeshCRDVersion(DefaultAmbassadorVersion) + + assert.Equal(t, DefaultMetricCleanupDelay, int32(GetMetricCleanupDelaySeconds().Seconds())) + SetMetricCleanupDelaySeconds(24) + assert.Equal(t, time.Duration(24)*time.Second, GetMetricCleanupDelaySeconds()) } diff --git a/utils/experiment/experiment.go b/utils/experiment/experiment.go index cf87d7e263..3bd3d07a96 100644 --- a/utils/experiment/experiment.go +++ b/utils/experiment/experiment.go @@ -8,11 +8,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" patchtypes "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/controller" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" rolloutsclient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/typed/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/hash" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) var terminateExperimentPatch = []byte(`{"spec":{"terminate":true}}`) @@ -88,7 +89,7 @@ func PassedDurations(experiment *v1alpha1.Experiment) (bool, time.Duration) { if experiment.Status.AvailableAt == nil { return false, 0 } - now := metav1.Now() + now := timeutil.MetaNow() dur, err := experiment.Spec.Duration.Duration() if err != nil { return false, 0 @@ -130,8 +131,9 @@ func GetCollisionCountForTemplate(experiment *v1alpha1.Experiment, template v1al // ReplicasetNameFromExperiment gets the replicaset name based off of the experiment and the template func ReplicasetNameFromExperiment(experiment *v1alpha1.Experiment, template v1alpha1.TemplateSpec) string { + // todo: review this method for deletion as it's not using collisionCount := GetCollisionCountForTemplate(experiment, template) - podTemplateSpecHash := controller.ComputeHash(&template.Template, collisionCount) + podTemplateSpecHash := hash.ComputePodTemplateHash(&template.Template, collisionCount) return fmt.Sprintf("%s-%s-%s", experiment.Name, template.Name, podTemplateSpecHash) } diff --git a/utils/experiment/experiment_test.go b/utils/experiment/experiment_test.go index 194065cbf7..2c87bda888 100644 --- a/utils/experiment/experiment_test.go +++ b/utils/experiment/experiment_test.go @@ -5,13 +5,14 @@ import ( "testing" "time" - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" kubetesting "k8s.io/client-go/testing" 
"k8s.io/utils/pointer" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" ) func TestHasFinished(t *testing.T) { @@ -90,6 +91,7 @@ func TestGetTemplateStatusMapping(t *testing.T) { assert.Equal(t, int32(1), mapping["test"].Replicas) assert.Equal(t, int32(2), mapping["test2"].Replicas) } + func TestReplicaSetNameFromExperiment(t *testing.T) { templateName := "template" template := v1alpha1.TemplateSpec{ @@ -100,14 +102,14 @@ func TestReplicaSetNameFromExperiment(t *testing.T) { Name: "foo", }, } - assert.Equal(t, "foo-template-85f7cf5fc7", ReplicasetNameFromExperiment(e, template)) + assert.Equal(t, "foo-template-76bbb58f74", ReplicasetNameFromExperiment(e, template)) newTemplateStatus := v1alpha1.TemplateStatus{ Name: templateName, CollisionCount: pointer.Int32Ptr(1), } e.Status.TemplateStatuses = append(e.Status.TemplateStatuses, newTemplateStatus) - assert.Equal(t, "foo-template-56ccbc9b64", ReplicasetNameFromExperiment(e, template)) + assert.Equal(t, "foo-template-688c48b575", ReplicasetNameFromExperiment(e, template)) } func TestExperimentByCreationTimestamp(t *testing.T) { diff --git a/utils/hash/hash.go b/utils/hash/hash.go new file mode 100644 index 0000000000..52d82d3cc7 --- /dev/null +++ b/utils/hash/hash.go @@ -0,0 +1,34 @@ +package hash + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "hash/fnv" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/rand" +) + +// ComputePodTemplateHash returns a hash value calculated from pod template. +// The hash will be safe encoded to avoid bad words. +func ComputePodTemplateHash(template *corev1.PodTemplateSpec, collisionCount *int32) string { + podTemplateSpecHasher := fnv.New32a() + stepsBytes, err := json.Marshal(template) + if err != nil { + panic(err) + } + _, err = podTemplateSpecHasher.Write(stepsBytes) + if err != nil { + panic(err) + } + if collisionCount != nil { + collisionCountBytes := make([]byte, 8) + binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount)) + _, err = podTemplateSpecHasher.Write(collisionCountBytes) + if err != nil { + panic(err) + } + } + return rand.SafeEncodeString(fmt.Sprint(podTemplateSpecHasher.Sum32())) +} diff --git a/utils/hash/hash_test.go b/utils/hash/hash_test.go new file mode 100644 index 0000000000..8a0edf11f4 --- /dev/null +++ b/utils/hash/hash_test.go @@ -0,0 +1,48 @@ +package hash + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func TestHashUtils(t *testing.T) { + templateRed := generatePodTemplate("red") + hashRed := ComputePodTemplateHash(&templateRed, nil) + template := generatePodTemplate("red") + + t.Run("HashForSameTemplates", func(t *testing.T) { + podHash := ComputePodTemplateHash(&template, nil) + assert.Equal(t, hashRed, podHash) + }) + t.Run("HashForDifferentTemplates", func(t *testing.T) { + podHash := ComputePodTemplateHash(&template, pointer.Int32(1)) + assert.NotEqual(t, hashRed, podHash) + }) +} + +func generatePodTemplate(image string) corev1.PodTemplateSpec { + podLabels := map[string]string{"name": image} + + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: image, + Image: image, + ImagePullPolicy: corev1.PullAlways, + TerminationMessagePath: corev1.TerminationMessagePathDefault, + }, + }, + DNSPolicy: 
corev1.DNSClusterFirst, + RestartPolicy: corev1.RestartPolicyAlways, + SecurityContext: &corev1.PodSecurityContext{}, + }, + } +} diff --git a/utils/ingress/ingress.go b/utils/ingress/ingress.go index ad9c9ee316..83827507ec 100644 --- a/utils/ingress/ingress.go +++ b/utils/ingress/ingress.go @@ -1,6 +1,7 @@ package ingress import ( + json2 "encoding/json" "errors" "fmt" "strconv" @@ -11,18 +12,25 @@ import ( "k8s.io/client-go/discovery" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/utils/defaults" "github.com/argoproj/argo-rollouts/utils/diff" + "github.com/argoproj/argo-rollouts/utils/json" ) const ( // CanaryIngressSuffix is the name suffix all canary ingresses created by the rollouts controller will have CanaryIngressSuffix = "-canary" // ManagedActionsAnnotation holds list of ALB actions that are managed by rollouts + // DEPRECATED in favor of ManagedAnnotations ManagedActionsAnnotation = "rollouts.argoproj.io/managed-alb-actions" + // ManagedAnnotations holds list of ALB annotations that are managed by rollouts supports multiple annotations + ManagedAnnotations = "rollouts.argoproj.io/managed-alb-annotations" //ALBIngressAnnotation is the prefix annotation that is used by the ALB Ingress controller to configure an ALB ALBIngressAnnotation = "alb.ingress.kubernetes.io" // ALBActionPrefix the prefix to specific actions within an ALB ingress. ALBActionPrefix = "/actions." + // ALBConditionPrefix the prefix to specific conditions within an ALB ingress. + ALBConditionPrefix = "/conditions." ) // ALBAction describes an ALB action that configure the behavior of an ALB. This struct is marshaled into a string @@ -32,9 +40,29 @@ type ALBAction struct { ForwardConfig ALBForwardConfig `json:"ForwardConfig"` } +// ALBCondition describes an ALB action condition that configure the behavior of an ALB. This struct is marshaled into a string +// that is added to the Ingress's annotations. 
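+// For illustration (a sketch; the header name and values are hypothetical), a
+// header-based routing condition marshals to JSON along the lines of:
+//
+//	{"field":"http-header","httpHeaderConfig":{"httpHeaderName":"X-Canary","values":["always"]}}
+//
+// and ends up under an annotation key such as
+// "alb.ingress.kubernetes.io/conditions.<route-name>" (see ALBConditionPrefix).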
+type ALBCondition struct { + Field string `json:"field"` + HttpHeaderConfig HttpHeaderConfig `json:"httpHeaderConfig"` +} + +// HttpHeaderConfig describes header config for the ALB action condition +type HttpHeaderConfig struct { + HttpHeaderName string `json:"httpHeaderName"` + Values []string `json:"values"` +} + // ALBForwardConfig describes a list of target groups that the ALB should route traffic towards type ALBForwardConfig struct { - TargetGroups []ALBTargetGroup `json:"TargetGroups"` + TargetGroups []ALBTargetGroup `json:"TargetGroups"` + TargetGroupStickinessConfig *ALBTargetGroupStickinessConfig `json:"TargetGroupStickinessConfig,omitempty"` +} + +// ALBTargetGroupStickinessConfig describes settings for the listener to apply to all forwards +type ALBTargetGroupStickinessConfig struct { + Enabled bool `json:"Enabled"` + DurationSeconds int64 `json:"DurationSeconds"` } // ALBTargetGroup holds the weight to send to a specific destination consisting of a K8s service and port or ARN @@ -55,13 +83,31 @@ func GetRolloutIngressKeys(rollout *v1alpha1.Rollout) []string { rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil && rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress != "" { + stableIngress := rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress // Also start watcher for `-canary` ingress which is created by the trafficmanagement controller ingresses = append( ingresses, - fmt.Sprintf("%s/%s", rollout.Namespace, rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress), - fmt.Sprintf("%s/%s", rollout.Namespace, GetCanaryIngressName(rollout)), + fmt.Sprintf("%s/%s", rollout.Namespace, stableIngress), + fmt.Sprintf("%s/%s", rollout.Namespace, GetCanaryIngressName(rollout.GetName(), stableIngress)), ) } + + // Scenario where one rollout is managing multiple Ngnix ingresses. 
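+	// For example (sketch), with
+	//   nginx:
+	//     stableIngress: stable-ingress
+	//     additionalStableIngresses: [stable-ingress-additional]
+	// keys are added for every stable ingress as well as for each generated
+	// "<rollout>-<stableIngress>-canary" counterpart.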
+ if rollout.Spec.Strategy.Canary != nil && + rollout.Spec.Strategy.Canary.TrafficRouting != nil && + rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil && + len(rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses) > 0 { + + for _, stableIngress := range rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses { + // Also start watcher for `-canary` ingress which is created by the trafficmanagement controller + ingresses = append( + ingresses, + fmt.Sprintf("%s/%s", rollout.Namespace, stableIngress), + fmt.Sprintf("%s/%s", rollout.Namespace, GetCanaryIngressName(rollout.GetName(), stableIngress)), + ) + } + } + if rollout.Spec.Strategy.Canary != nil && rollout.Spec.Strategy.Canary.TrafficRouting != nil && rollout.Spec.Strategy.Canary.TrafficRouting.ALB != nil && @@ -76,14 +122,10 @@ func GetRolloutIngressKeys(rollout *v1alpha1.Rollout) []string { } // GetCanaryIngressName constructs the name to use for the canary ingress resource from a given Rollout -func GetCanaryIngressName(rollout *v1alpha1.Rollout) string { +func GetCanaryIngressName(rolloutName, stableIngressName string) string { // names limited to 253 characters - if rollout.Spec.Strategy.Canary != nil && - rollout.Spec.Strategy.Canary.TrafficRouting != nil && - rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil && - rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress != "" { - - prefix := fmt.Sprintf("%s-%s", rollout.GetName(), rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress) + if stableIngressName != "" { + prefix := fmt.Sprintf("%s-%s", rolloutName, stableIngressName) if len(prefix) > 253-len(CanaryIngressSuffix) { // trim prefix prefix = prefix[0 : 253-len(CanaryIngressSuffix)] @@ -135,6 +177,26 @@ func hasLegacyIngressRuleWithService(ingress *extensionsv1beta1.Ingress, svc str // ManagedALBActions a mapping of Rollout names to the ALB action that the Rollout manages type ManagedALBActions map[string]string +type ManagedALBAnnotations map[string]ManagedALBAnnotation + +type ManagedALBAnnotation []string + +// String outputs a string of all the managed ALB annotations that is stored in the Ingress's annotations +func (m ManagedALBAnnotations) String() string { + return string(json.MustMarshal(m)) +} + +func NewManagedALBAnnotations(json string) (ManagedALBAnnotations, error) { + res := ManagedALBAnnotations{} + if json == "" { + return res, nil + } + if err := json2.Unmarshal([]byte(json), &res); err != nil { + return nil, err + } + return res, nil +} + // String outputs a string of all the managed ALB actions that is stored in the Ingress's annotations func (m ManagedALBActions) String() string { str := "" @@ -166,15 +228,23 @@ func NewManagedALBActions(annotation string) (ManagedALBActions, error) { // ALBActionAnnotationKey returns the annotation key for a specific action func ALBActionAnnotationKey(r *v1alpha1.Rollout) string { - prefix := ALBIngressAnnotation - if r.Spec.Strategy.Canary.TrafficRouting.ALB.AnnotationPrefix != "" { - prefix = r.Spec.Strategy.Canary.TrafficRouting.ALB.AnnotationPrefix - } - actionService := r.Spec.Strategy.Canary.StableService - if r.Spec.Strategy.Canary.TrafficRouting.ALB.RootService != "" { - actionService = r.Spec.Strategy.Canary.TrafficRouting.ALB.RootService - } - return fmt.Sprintf("%s%s%s", prefix, ALBActionPrefix, actionService) + actionService := defaults.GetStringOrDefault(r.Spec.Strategy.Canary.TrafficRouting.ALB.RootService, r.Spec.Strategy.Canary.StableService) + return albIngressKubernetesIoKey(r, 
ALBActionPrefix, actionService) +} + +// ALBHeaderBasedActionAnnotationKey returns the annotation key for a specific action +func ALBHeaderBasedActionAnnotationKey(r *v1alpha1.Rollout, action string) string { + return albIngressKubernetesIoKey(r, ALBActionPrefix, action) +} + +// ALBHeaderBasedConditionAnnotationKey returns the annotation key for a specific condition +func ALBHeaderBasedConditionAnnotationKey(r *v1alpha1.Rollout, action string) string { + return albIngressKubernetesIoKey(r, ALBConditionPrefix, action) +} + +func albIngressKubernetesIoKey(r *v1alpha1.Rollout, action, service string) string { + prefix := defaults.GetStringOrDefault(r.Spec.Strategy.Canary.TrafficRouting.ALB.AnnotationPrefix, ALBIngressAnnotation) + return fmt.Sprintf("%s%s%s", prefix, action, service) } type patchConfig struct { diff --git a/utils/ingress/ingress_test.go b/utils/ingress/ingress_test.go index eaca7e6a86..5170219ea1 100644 --- a/utils/ingress/ingress_test.go +++ b/utils/ingress/ingress_test.go @@ -61,6 +61,33 @@ func TestGetRolloutIngressKeysForCanaryWithTrafficRouting(t *testing.T) { assert.ElementsMatch(t, keys, []string{"default/stable-ingress", "default/myrollout-stable-ingress-canary", "default/alb-ingress"}) } +func TestGetRolloutIngressKeysForCanaryWithTrafficRoutingMultiIngress(t *testing.T) { + keys := GetRolloutIngressKeys(&v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrollout", + Namespace: "default", + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + CanaryService: "canary-service", + StableService: "stable-service", + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{ + StableIngress: "stable-ingress", + AdditionalStableIngresses: []string{"stable-ingress-additional"}, + }, + ALB: &v1alpha1.ALBTrafficRouting{ + Ingress: "alb-ingress", + }, + }, + }, + }, + }, + }) + assert.ElementsMatch(t, keys, []string{"default/stable-ingress", "default/myrollout-stable-ingress-canary", "default/stable-ingress-additional", "default/myrollout-stable-ingress-additional-canary", "default/alb-ingress"}) +} + func TestGetCanaryIngressName(t *testing.T) { rollout := &v1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{ @@ -74,7 +101,8 @@ func TestGetCanaryIngressName(t *testing.T) { StableService: "stable-service", TrafficRouting: &v1alpha1.RolloutTrafficRouting{ Nginx: &v1alpha1.NginxTrafficRouting{ - StableIngress: "stable-ingress", + StableIngress: "stable-ingress", + AdditionalStableIngresses: []string{"stable-ingress-additional"}, }, }, }, @@ -82,20 +110,34 @@ func TestGetCanaryIngressName(t *testing.T) { }, } - t.Run("NoTrim", func(t *testing.T) { + t.Run("StableIngress - NoTrim", func(t *testing.T) { rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress = "stable-ingress" - canaryIngress := GetCanaryIngressName(rollout) + canaryIngress := GetCanaryIngressName(rollout.GetName(), rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress) assert.Equal(t, "myrollout-stable-ingress-canary", canaryIngress) }) - t.Run("Trim", func(t *testing.T) { + t.Run("StableIngress - Trim", func(t *testing.T) { rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress = fmt.Sprintf("stable-ingress%s", strings.Repeat("a", 260)) - canaryIngress := GetCanaryIngressName(rollout) + canaryIngress := GetCanaryIngressName(rollout.GetName(), rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.StableIngress) assert.Equal(t, 253, len(canaryIngress), "canary ingress truncated to 253") 
assert.Equal(t, true, strings.HasSuffix(canaryIngress, "-canary"), "canary ingress has -canary suffix") }) + t.Run("AdditionalStableIngresses - NoTrim", func(t *testing.T) { + for _, ing := range rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses { + canaryIngress := GetCanaryIngressName(rollout.GetName(), ing) + assert.Equal(t, "myrollout-stable-ingress-additional-canary", canaryIngress) + } + }) + t.Run("AdditionalStableIngresses - Trim", func(t *testing.T) { + rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses = []string{fmt.Sprintf("stable-ingress%s", strings.Repeat("a", 260))} + for _, ing := range rollout.Spec.Strategy.Canary.TrafficRouting.Nginx.AdditionalStableIngresses { + canaryIngress := GetCanaryIngressName(rollout.GetName(), ing) + assert.Equal(t, 253, len(canaryIngress), "canary ingress truncated to 253") + assert.Equal(t, true, strings.HasSuffix(canaryIngress, "-canary"), "canary ingress has -canary suffix") + } + }) t.Run("NoStableIngress", func(t *testing.T) { rollout.Spec.Strategy.Canary.TrafficRouting.Nginx = nil - canaryIngress := GetCanaryIngressName(rollout) + canaryIngress := GetCanaryIngressName(rollout.GetName(), "") assert.Equal(t, "", canaryIngress, "canary ingress is empty") }) } @@ -514,3 +556,55 @@ func getExtensionsIngress() *extensionsv1beta1.Ingress { }, } } + +func TestManagedALBAnnotations(t *testing.T) { + emptyJson, _ := NewManagedALBAnnotations("") + assert.NotNil(t, emptyJson) + assert.Equal(t, 0, len(emptyJson)) + assert.Equal(t, "{}", emptyJson.String()) + + _, err := NewManagedALBAnnotations("invalid json") + assert.Error(t, err) + + json := "{\"rollouts-demo\":[\"alb.ingress.kubernetes.io/actions.action1\", \"alb.ingress.kubernetes.io/actions.header-action\", \"alb.ingress.kubernetes.io/conditions.header-action\"]}" + actual, err := NewManagedALBAnnotations(json) + assert.NoError(t, err) + + rolloutsDemoAnnotation := actual["rollouts-demo"] + assert.NotNil(t, rolloutsDemoAnnotation) + assert.Equal(t, 3, len(rolloutsDemoAnnotation)) +} + +func TestALBHeaderBasedActionAnnotationKey(t *testing.T) { + r := &v1alpha1.Rollout{ + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{ + AnnotationPrefix: "alb.ingress.kubernetes.io", + }, + }, + }, + }, + }, + } + assert.Equal(t, "alb.ingress.kubernetes.io/actions.route", ALBHeaderBasedActionAnnotationKey(r, "route")) +} + +func TestALBHeaderBasedConditionAnnotationKey(t *testing.T) { + r := &v1alpha1.Rollout{ + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{ + AnnotationPrefix: "alb.ingress.kubernetes.io", + }, + }, + }, + }, + }, + } + assert.Equal(t, "alb.ingress.kubernetes.io/conditions.route", ALBHeaderBasedConditionAnnotationKey(r, "route")) +} diff --git a/utils/ingress/wrapper.go b/utils/ingress/wrapper.go index 1d28c8021d..d70b6a96ed 100644 --- a/utils/ingress/wrapper.go +++ b/utils/ingress/wrapper.go @@ -3,6 +3,7 @@ package ingress import ( "context" "errors" + "sort" "sync" corev1 "k8s.io/api/core/v1" @@ -10,11 +11,14 @@ import ( v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/informers" extensionsv1beta1 "k8s.io/client-go/informers/extensions/v1beta1" 
networkingv1 "k8s.io/client-go/informers/networking/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" ) // Ingress defines an Ingress resource abstraction used to allow Rollouts to @@ -68,6 +72,29 @@ func NewIngressWithAnnotations(mode IngressMode, annotations map[string]string) } } +func NewIngressWithSpecAndAnnotations(ingress *Ingress, annotations map[string]string) *Ingress { + switch ingress.mode { + case IngressModeNetworking: + i := &v1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + }, + Spec: *ingress.ingress.Spec.DeepCopy(), + } + return NewIngress(i) + case IngressModeExtensions: + i := &v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + }, + Spec: *ingress.legacyIngress.Spec.DeepCopy(), + } + return NewLegacyIngress(i) + default: + return nil + } +} + func (i *Ingress) GetExtensionsIngress() (*v1beta1.Ingress, error) { if i.legacyIngress == nil { return nil, errors.New("extensions Ingress is nil in this wrapper") @@ -95,6 +122,27 @@ func (i *Ingress) GetAnnotations() map[string]string { } } +// GetClass returns the ingress class. +// For backwards compatibility `kubernetes.io/ingress.class` annotation will be used if set, +// otherwise `spec.ingressClassName` is used. +func (i *Ingress) GetClass() string { + annotations := i.GetAnnotations() + class := annotations["kubernetes.io/ingress.class"] + if class == "" { + switch i.mode { + case IngressModeNetworking: + if c := i.ingress.Spec.IngressClassName; c != nil { + class = *c + } + case IngressModeExtensions: + if c := i.legacyIngress.Spec.IngressClassName; c != nil { + class = *c + } + } + } + return class +} + func (i *Ingress) GetLabels() map[string]string { switch i.mode { case IngressModeNetworking: @@ -128,6 +176,117 @@ func (i *Ingress) SetAnnotations(annotations map[string]string) { } } +func (i *Ingress) CreateAnnotationBasedPath(actionName string) { + i.mux.Lock() + defer i.mux.Unlock() + if HasRuleWithService(i, actionName) { + return + } + switch i.mode { + case IngressModeNetworking: + t := v1.PathTypeImplementationSpecific + p := v1.HTTPIngressPath{ + Path: "/*", + PathType: &t, + Backend: v1.IngressBackend{ + Service: &v1.IngressServiceBackend{ + Name: actionName, + Port: v1.ServiceBackendPort{ + Name: "use-annotation", + }, + }, + }, + } + for _, rule := range i.ingress.Spec.Rules { + rule.HTTP.Paths = append(rule.HTTP.Paths[:1], rule.HTTP.Paths[0:]...) + rule.HTTP.Paths[0] = p + } + case IngressModeExtensions: + t := v1beta1.PathTypeImplementationSpecific + p := v1beta1.HTTPIngressPath{ + Path: "/*", + PathType: &t, + Backend: v1beta1.IngressBackend{ + ServiceName: actionName, + ServicePort: intstr.FromString("use-annotation"), + }, + } + for _, rule := range i.legacyIngress.Spec.Rules { + rule.HTTP.Paths = append(rule.HTTP.Paths[:1], rule.HTTP.Paths[0:]...) + rule.HTTP.Paths[0] = p + } + } +} + +func (i *Ingress) RemovePathByServiceName(actionName string) { + i.mux.Lock() + defer i.mux.Unlock() + switch i.mode { + case IngressModeNetworking: + for _, rule := range i.ingress.Spec.Rules { + if j := indexPathByService(rule, actionName); j != -1 { + rule.HTTP.Paths = append(rule.HTTP.Paths[:j], rule.HTTP.Paths[j+1:]...) + } + } + case IngressModeExtensions: + for _, rule := range i.legacyIngress.Spec.Rules { + if j := indexLegacyPathByService(rule, actionName); j != -1 { + rule.HTTP.Paths = append(rule.HTTP.Paths[:j], rule.HTTP.Paths[j+1:]...) 
+ } + } + } +} + +func (i *Ingress) SortHttpPaths(routes []v1alpha1.MangedRoutes) { + var routeWeight = make(map[string]int) // map of route name for ordering + for j, route := range routes { + routeWeight[route.Name] = j + } + + i.mux.Lock() + defer i.mux.Unlock() + switch i.mode { + case IngressModeNetworking: + for _, rule := range i.ingress.Spec.Rules { + sort.SliceStable(rule.HTTP.Paths, func(i, j int) bool { + return getKeyWeight(routeWeight, rule.HTTP.Paths[i].Backend.Service.Name) < getKeyWeight(routeWeight, rule.HTTP.Paths[j].Backend.Service.Name) + }) + } + case IngressModeExtensions: + for _, rule := range i.legacyIngress.Spec.Rules { + sort.SliceStable(rule.HTTP.Paths, func(i, j int) bool { + return getKeyWeight(routeWeight, rule.HTTP.Paths[i].Backend.ServiceName) < getKeyWeight(routeWeight, rule.HTTP.Paths[j].Backend.ServiceName) + }) + } + } +} + +func getKeyWeight(weight map[string]int, key string) int { + if val, ok := weight[key]; ok { + return val + } else { + return len(weight) + } +} + +func indexPathByService(rule v1.IngressRule, name string) int { + for i, path := range rule.HTTP.Paths { + if path.Backend.Service.Name == name { + return i + } + } + return -1 +} + +func indexLegacyPathByService(rule v1beta1.IngressRule, name string) int { + for i, path := range rule.HTTP.Paths { + if path.Backend.ServiceName == name { + return i + } + } + return -1 +} + func (i *Ingress) DeepCopy() *Ingress { switch i.mode { case IngressModeNetworking: diff --git a/utils/ingress/wrapper_test.go b/utils/ingress/wrapper_test.go index eae08b4828..9e8f511494 100644 --- a/utils/ingress/wrapper_test.go +++ b/utils/ingress/wrapper_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/argoproj/argo-rollouts/utils/ingress" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/api/extensions/v1beta1" @@ -14,6 +13,10 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" kubeinformers "k8s.io/client-go/informers" k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/pointer" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/utils/ingress" ) func TestNewIngressWithAnnotations(t *testing.T) { @@ -68,6 +71,57 @@ func TestNewIngressWithAnnotations(t *testing.T) { }) } +func TestNewIngressWithSpecAndAnnotations(t *testing.T) { + annotations := make(map[string]string) + annotations["some.annotation.key1"] = "some.annotation.value1" + annotations["some.annotation.key2"] = "some.annotation.value2" + getAnnotations := func() map[string]string { + annotations := make(map[string]string) + annotations["some.annotation.key1"] = "some.annotation.value1" + annotations["some.annotation.key2"] = "some.annotation.value2" + return annotations + } + t.Run("will instantiate an Ingress wrapped with an annotated networkingv1.Ingress", func(t *testing.T) { + ing := networkingIngress() + + // given + t.Parallel() + + // when + i := ingress.NewIngressWithSpecAndAnnotations(ing, getAnnotations()) + + // then + assert.NotNil(t, i) + a := i.GetAnnotations() + assert.Equal(t, 2, len(a)) + a["extra-annotation-key"] = "extra-annotation-value" + i.SetAnnotations(a) + assert.Equal(t, 3, len(a)) + actualIngress, _ := i.GetNetworkingIngress() + expectedIngress, _ := ing.GetNetworkingIngress() + assert.Equal(t, expectedIngress.Spec, actualIngress.Spec) + }) + t.Run("will instantiate an Ingress wrapped with an annotated extensions/v1beta1.Ingress", func(t *testing.T) { + ing := extensionsIngress() + // given + t.Parallel() + + // when + i := 
ingress.NewIngressWithSpecAndAnnotations(ing, getAnnotations()) + + // then + assert.NotNil(t, i) + a := i.GetAnnotations() + assert.Equal(t, 2, len(a)) + a["extra-annotation-key"] = "extra-annotation-value" + i.SetAnnotations(a) + assert.Equal(t, 3, len(a)) + actualIngress, _ := i.GetExtensionsIngress() + expectedIngress, _ := ing.GetExtensionsIngress() + assert.Equal(t, expectedIngress.Spec, actualIngress.Spec) + }) +} + func TestGetExtensionsIngress(t *testing.T) { extensionsIngress := &v1beta1.Ingress{} t.Run("will get extensions ingress successfully", func(t *testing.T) { @@ -124,6 +178,93 @@ func TestGetNetworkingIngress(t *testing.T) { }) } +func TestGetClass(t *testing.T) { + t.Run("will get the class from network Ingress annotation", func(t *testing.T) { + // given + t.Parallel() + i := getNetworkingIngress() + annotations := map[string]string{"kubernetes.io/ingress.class": "ingress-name-annotation"} + i.SetAnnotations(annotations) + emptyClass := "" + i.Spec.IngressClassName = &emptyClass + w := ingress.NewIngress(i) + + // when + class := w.GetClass() + + // then + assert.Equal(t, "ingress-name-annotation", class) + }) + t.Run("will get the class from network Ingress annotation with priority", func(t *testing.T) { + // given + t.Parallel() + i := getNetworkingIngress() + annotations := map[string]string{"kubernetes.io/ingress.class": "ingress-name-annotation"} + i.SetAnnotations(annotations) + w := ingress.NewIngress(i) + + // when + class := w.GetClass() + + // then + assert.Equal(t, "ingress-name-annotation", class) + }) + t.Run("will get the class from network Ingress spec", func(t *testing.T) { + // given + t.Parallel() + i := getNetworkingIngress() + w := ingress.NewIngress(i) + + // when + class := w.GetClass() + + // then + assert.Equal(t, "ingress-name", class) + }) + t.Run("will get the class from extensions Ingress annotation", func(t *testing.T) { + // given + t.Parallel() + i := getExtensionsIngress() + annotations := map[string]string{"kubernetes.io/ingress.class": "ingress-name-annotation"} + i.SetAnnotations(annotations) + emptyClass := "" + i.Spec.IngressClassName = &emptyClass + w := ingress.NewLegacyIngress(i) + + // when + class := w.GetClass() + + // then + assert.Equal(t, "ingress-name-annotation", class) + }) + t.Run("will get the class from extensions Ingress annotation with priority", func(t *testing.T) { + // given + t.Parallel() + i := getExtensionsIngress() + annotations := map[string]string{"kubernetes.io/ingress.class": "ingress-name-annotation"} + i.SetAnnotations(annotations) + w := ingress.NewLegacyIngress(i) + + // when + class := w.GetClass() + + // then + assert.Equal(t, "ingress-name-annotation", class) + }) + t.Run("will get the class from extensions Ingress spec", func(t *testing.T) { + // given + t.Parallel() + i := getExtensionsIngress() + w := ingress.NewLegacyIngress(i) + + // when + class := w.GetClass() + + // then + assert.Equal(t, "ingress-name", class) + }) +} + func TestGetLabels(t *testing.T) { t.Run("will get the labels from network Ingress successfully", func(t *testing.T) { // given @@ -188,6 +329,100 @@ func TestGetObjectMeta(t *testing.T) { }) } +func TestCreateAnnotationBasedPath(t *testing.T) { + t.Run("v1 ingress, create path", func(t *testing.T) { + ing := networkingIngress() + ni, _ := ing.GetNetworkingIngress() + + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + ing.CreateAnnotationBasedPath("test-route") + assert.Equal(t, 2, len(ni.Spec.Rules[0].HTTP.Paths)) + }) + t.Run("v1 ingress, create existing path", 
func(t *testing.T) { + ing := networkingIngress() + ni, _ := ing.GetNetworkingIngress() + + ing.CreateAnnotationBasedPath("v1backend") + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + }) + t.Run("v1beta1 ingress, create path", func(t *testing.T) { + ing := extensionsIngress() + ni, _ := ing.GetExtensionsIngress() + + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + ing.CreateAnnotationBasedPath("test-route") + assert.Equal(t, 2, len(ni.Spec.Rules[0].HTTP.Paths)) + }) + t.Run("v1beta1 ingress, create existing path", func(t *testing.T) { + ing := extensionsIngress() + ni, _ := ing.GetExtensionsIngress() + + ing.CreateAnnotationBasedPath("v1beta1backend") + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + }) +} + +func TestRemoveAnnotationBasedPath(t *testing.T) { + t.Run("v1 ingress, remove path", func(t *testing.T) { + ing := networkingIngress() + ni, _ := ing.GetNetworkingIngress() + + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + ing.RemovePathByServiceName("v1backend") + assert.Equal(t, 0, len(ni.Spec.Rules[0].HTTP.Paths)) + }) + t.Run("v1 ingress, remove non existing path", func(t *testing.T) { + ing := networkingIngress() + ni, _ := ing.GetNetworkingIngress() + + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + ing.RemovePathByServiceName("non-exsisting-route") + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + }) + t.Run("v1beta1 ingress, remove path", func(t *testing.T) { + ing := extensionsIngress() + ni, _ := ing.GetExtensionsIngress() + + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + ing.RemovePathByServiceName("v1beta1backend") + assert.Equal(t, 0, len(ni.Spec.Rules[0].HTTP.Paths)) + }) + t.Run("v1beta1 ingress, remove non existing path", func(t *testing.T) { + ing := extensionsIngress() + ni, _ := ing.GetExtensionsIngress() + + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + ing.RemovePathByServiceName("non-exsisting-route") + assert.Equal(t, 1, len(ni.Spec.Rules[0].HTTP.Paths)) + }) +} + +func TestSortHttpPaths(t *testing.T) { + managedRoutes := []v1alpha1.MangedRoutes{{Name: "route1"}, {Name: "route2"}, {Name: "route3"}} + t.Run("v1 ingress, sort path", func(t *testing.T) { + ing := networkingIngressWithPath("action1", "route3", "route1", "route2") + ing.SortHttpPaths(managedRoutes) + ni, _ := ing.GetNetworkingIngress() + + assert.Equal(t, 4, len(ni.Spec.Rules[0].HTTP.Paths)) + assert.Equal(t, "route1", ni.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name) + assert.Equal(t, "route2", ni.Spec.Rules[0].HTTP.Paths[1].Backend.Service.Name) + assert.Equal(t, "route3", ni.Spec.Rules[0].HTTP.Paths[2].Backend.Service.Name) + assert.Equal(t, "action1", ni.Spec.Rules[0].HTTP.Paths[3].Backend.Service.Name) + }) + t.Run("v1beta1 ingress, sort path", func(t *testing.T) { + ing := extensionsIngressWithPath("action1", "route3", "route1", "route2") + ing.SortHttpPaths(managedRoutes) + ni, _ := ing.GetExtensionsIngress() + + assert.Equal(t, 4, len(ni.Spec.Rules[0].HTTP.Paths)) + assert.Equal(t, "route1", ni.Spec.Rules[0].HTTP.Paths[0].Backend.ServiceName) + assert.Equal(t, "route2", ni.Spec.Rules[0].HTTP.Paths[1].Backend.ServiceName) + assert.Equal(t, "route3", ni.Spec.Rules[0].HTTP.Paths[2].Backend.ServiceName) + assert.Equal(t, "action1", ni.Spec.Rules[0].HTTP.Paths[3].Backend.ServiceName) + }) +} + func TestDeepCopy(t *testing.T) { t.Run("will deepcopy ingress wrapped with networking.Ingress", func(t *testing.T) { // given @@ -736,3 +971,135 @@ func getExtensionsIngress() *v1beta1.Ingress { }, } } + +func networkingIngress() 
*ingress.Ingress { + pathType := v1.PathTypeImplementationSpecific + res := v1.Ingress{ + Spec: v1.IngressSpec{ + IngressClassName: pointer.String("v1ingress"), + Rules: []v1.IngressRule{ + { + Host: "v1host", + IngressRuleValue: v1.IngressRuleValue{ + HTTP: &v1.HTTPIngressRuleValue{ + Paths: []v1.HTTPIngressPath{ + { + Backend: v1.IngressBackend{ + Service: &v1.IngressServiceBackend{ + Name: "v1backend", + Port: v1.ServiceBackendPort{Name: "use-annotation"}, + }, + }, + Path: "/*", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + }, + } + return ingress.NewIngress(&res) +} + +func networkingIngressWithPath(paths ...string) *ingress.Ingress { + var ingressPaths []v1.HTTPIngressPath + for _, path := range paths { + ingressPaths = append(ingressPaths, v1IngressPath(path)) + } + res := v1.Ingress{ + Spec: v1.IngressSpec{ + IngressClassName: pointer.String("v1ingress"), + Rules: []v1.IngressRule{ + { + Host: "v1host", + IngressRuleValue: v1.IngressRuleValue{ + HTTP: &v1.HTTPIngressRuleValue{ + Paths: ingressPaths, + }, + }, + }, + }, + }, + } + return ingress.NewIngress(&res) +} + +func v1IngressPath(serviceName string) v1.HTTPIngressPath { + pathType := v1.PathTypeImplementationSpecific + return v1.HTTPIngressPath{ + Backend: v1.IngressBackend{ + Service: &v1.IngressServiceBackend{ + Name: serviceName, + Port: v1.ServiceBackendPort{Name: "use-annotation"}, + }, + }, + Path: "/*", + PathType: &pathType, + } +} + +func extensionsIngress() *ingress.Ingress { + pathType := v1beta1.PathTypeImplementationSpecific + res := v1beta1.Ingress{ + Spec: v1beta1.IngressSpec{ + IngressClassName: pointer.String("v1beta1ingress"), + Rules: []v1beta1.IngressRule{ + { + Host: "v1beta1host", + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + Backend: v1beta1.IngressBackend{ + ServiceName: "v1beta1backend", + ServicePort: intstr.FromString("use-annotation"), + }, + Path: "/*", + PathType: &pathType, + }, + }, + }, + }, + }, + }, + }, + } + return ingress.NewLegacyIngress(&res) +} + +func extensionsIngressWithPath(paths ...string) *ingress.Ingress { + var ingressPaths []v1beta1.HTTPIngressPath + for _, path := range paths { + ingressPaths = append(ingressPaths, extensionIngressPath(path)) + } + res := v1beta1.Ingress{ + Spec: v1beta1.IngressSpec{ + IngressClassName: pointer.String("v1beta1ingress"), + Rules: []v1beta1.IngressRule{ + { + Host: "v1beta1host", + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: ingressPaths, + }, + }, + }, + }, + }, + } + return ingress.NewLegacyIngress(&res) +} + +func extensionIngressPath(serviceName string) v1beta1.HTTPIngressPath { + pathType := v1beta1.PathTypeImplementationSpecific + return v1beta1.HTTPIngressPath{ + Backend: v1beta1.IngressBackend{ + ServiceName: serviceName, + ServicePort: intstr.FromString("use-annotation"), + }, + Path: "/*", + PathType: &pathType, + } +} diff --git a/utils/metric/metric.go b/utils/metric/metric.go index 4d9003164f..cab54161d3 100644 --- a/utils/metric/metric.go +++ b/utils/metric/metric.go @@ -1,9 +1,8 @@ package metric import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) // MarkMeasurementError sets an error message on a measurement along with finish time @@ -11,7 +10,7 @@ func MarkMeasurementError(m v1alpha1.Measurement, err error) v1alpha1.Measuremen m.Phase = v1alpha1.AnalysisPhaseError 
m.Message = err.Error() if m.FinishedAt == nil { - finishedTime := metav1.Now() + finishedTime := timeutil.MetaNow() m.FinishedAt = &finishedTime } return m diff --git a/utils/record/record.go b/utils/record/record.go index 23c19a0f5d..f255b02c24 100644 --- a/utils/record/record.go +++ b/utils/record/record.go @@ -1,26 +1,36 @@ package record import ( + "context" + "crypto/sha1" + "encoding/base64" "encoding/json" "regexp" "strings" + "time" - "github.com/argoproj/notifications-engine/pkg/services" + timeutil "github.com/argoproj/argo-rollouts/utils/time" "github.com/argoproj/notifications-engine/pkg/api" + "github.com/argoproj/notifications-engine/pkg/services" "github.com/argoproj/notifications-engine/pkg/subscriptions" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + k8sinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" k8sfake "k8s.io/client-go/kubernetes/fake" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/kubectl/pkg/scheme" + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" rolloutscheme "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/scheme" logutil "github.com/argoproj/argo-rollouts/utils/log" ) @@ -59,13 +69,18 @@ type EventRecorderAdapter struct { Recorder record.EventRecorder // RolloutEventCounter is a counter to increment on events RolloutEventCounter *prometheus.CounterVec + // NotificationFailCounter is a counter to increment on failing to send notifications + NotificationFailedCounter *prometheus.CounterVec + // NotificationSuccessCounter is a counter to increment on successful send notifications + NotificationSuccessCounter *prometheus.CounterVec + NotificationSendPerformance *prometheus.HistogramVec eventf func(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...interface{}) // apiFactory is a notifications engine API factory apiFactory api.Factory } -func NewEventRecorder(kubeclientset kubernetes.Interface, rolloutEventCounter *prometheus.CounterVec, apiFactory api.Factory) EventRecorder { +func NewEventRecorder(kubeclientset kubernetes.Interface, rolloutEventCounter *prometheus.CounterVec, notificationFailedCounter *prometheus.CounterVec, notificationSuccessCounter *prometheus.CounterVec, notificationSendPerformance *prometheus.HistogramVec, apiFactory api.Factory) EventRecorder { // Create event broadcaster // Add argo-rollouts custom resources to the default Kubernetes Scheme so Events can be // logged for argo-rollouts types. 
@@ -74,9 +89,12 @@ func NewEventRecorder(kubeclientset kubernetes.Interface, rolloutEventCounter *p eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) k8srecorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) recorder := &EventRecorderAdapter{ - Recorder: k8srecorder, - RolloutEventCounter: rolloutEventCounter, - apiFactory: apiFactory, + Recorder: k8srecorder, + RolloutEventCounter: rolloutEventCounter, + NotificationFailedCounter: notificationFailedCounter, + NotificationSuccessCounter: notificationSuccessCounter, + NotificationSendPerformance: notificationSendPerformance, + apiFactory: apiFactory, } recorder.eventf = recorder.defaultEventf return recorder @@ -89,6 +107,39 @@ type FakeEventRecorder struct { Events []string } +func NewFakeApiFactory() api.Factory { + var ( + settings = api.Settings{ConfigMapName: "my-config-map", SecretName: "my-secret", InitGetVars: func(cfg *api.Config, configMap *corev1.ConfigMap, secret *corev1.Secret) (api.GetVars, error) { + return func(obj map[string]interface{}, dest services.Destination) map[string]interface{} { + return map[string]interface{}{"obj": obj} + }, nil + }} + ) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-config-map", Namespace: "default"}, + Data: map[string]string{ + "service.slack": `{"token": "abc"}`, + }, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + } + + clientset := k8sfake.NewSimpleClientset(cm, secret) + informerFactory := k8sinformers.NewSharedInformerFactory(clientset, time.Minute) + + secrets := informerFactory.Core().V1().Secrets().Informer() + configMaps := informerFactory.Core().V1().ConfigMaps().Informer() + apiFactory := api.NewFactory(settings, "default", secrets, configMaps) + go informerFactory.Start(context.Background().Done()) + if !cache.WaitForCacheSync(context.Background().Done(), configMaps.HasSynced, secrets.HasSynced) { + log.Info("failed to sync informers") + } + + return apiFactory +} + func NewFakeEventRecorder() *FakeEventRecorder { recorder := NewEventRecorder( k8sfake.NewSimpleClientset(), @@ -98,7 +149,27 @@ func NewFakeEventRecorder() *FakeEventRecorder { }, []string{"name", "namespace", "type", "reason"}, ), - nil, + prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "notification_send_error", + }, + []string{"name", "namespace", "type", "reason"}, + ), + prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "notification_send_success", + }, + []string{"name", "namespace", "type", "reason"}, + ), + prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "notification_send_performance", + Help: "Notification send performance.", + Buckets: []float64{0.01, 0.15, .25, .5, 1}, + }, + []string{"namespace", "name"}, + ), + NewFakeApiFactory(), ).(*EventRecorderAdapter) recorder.Recorder = record.NewFakeRecorder(1000) fakeRecorder := &FakeEventRecorder{} @@ -139,7 +210,9 @@ func (e *EventRecorderAdapter) defaultEventf(object runtime.Object, warn bool, o err := e.sendNotifications(object, opts) if err != nil { logCtx.Errorf("Notifications failed to send for eventReason %s with error: %s", opts.EventReason, err) + e.NotificationFailedCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc() } + e.NotificationSuccessCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc() } logFn := logCtx.Infof @@ -168,46 +241,124 @@ func 
NewAPIFactorySettings() api.Settings { // Send notifications for triggered event if user is subscribed func (e *EventRecorderAdapter) sendNotifications(object runtime.Object, opts EventOptions) error { logCtx := logutil.WithObject(object) - subsFromAnnotations := subscriptions.Annotations(object.(metav1.Object).GetAnnotations()) - destByTrigger := subsFromAnnotations.GetDestinations(nil, map[string][]string{}) - + _, namespace, name := logutil.KindNamespaceName(logCtx) + startTime := timeutil.Now() + defer func() { + duration := time.Since(startTime) + e.NotificationSendPerformance.WithLabelValues(namespace, name).Observe(duration.Seconds()) + logCtx.WithField("time_ms", duration.Seconds()*1e3).Debug("Notification sent") + }() + notificationsAPI, err := e.apiFactory.GetAPI() + if err != nil { + // don't return error if notifications are not configured and rollout has no subscribers + subsFromAnnotations := subscriptions.Annotations(object.(metav1.Object).GetAnnotations()) + logCtx.Infof("subsFromAnnotations: %s", subsFromAnnotations) + if errors.IsNotFound(err) && len(subsFromAnnotations.GetDestinations(nil, map[string][]string{})) == 0 { + return nil + } + return err + } + cfg := notificationsAPI.GetConfig() + destByTrigger := cfg.GetGlobalDestinations(object.(metav1.Object).GetLabels()) + destByTrigger.Merge(subscriptions.NewAnnotations(object.(metav1.Object).GetAnnotations()).GetDestinations(cfg.DefaultTriggers, cfg.ServiceDefaultTriggers)) trigger := translateReasonToTrigger(opts.EventReason) - destinations := destByTrigger[trigger] if len(destinations) == 0 { logCtx.Debugf("No configured destinations for trigger: %s", trigger) return nil } - notificationsAPI, err := e.apiFactory.GetAPI() + objMap, err := toObjectMap(object) if err != nil { return err } - // Creates config for notifications for built-in triggers - triggerActions, ok := notificationsAPI.GetConfig().Triggers[trigger] - if !ok { - logCtx.Debugf("No configured template for trigger: %s", trigger) - return nil + emptyCondition := hash("") + + for _, destination := range destinations { + res, err := notificationsAPI.RunTrigger(trigger, objMap) + if err != nil { + log.Errorf("Failed to execute condition of trigger %s: %v", trigger, err) + return err + } + log.Infof("Trigger %s result: %v", trigger, res) + + for _, c := range res { + log.Infof("Res When Condition hash: %s, Templates: %s", c.Key, c.Templates) + s := strings.Split(c.Key, ".")[1] + if s != emptyCondition && c.Triggered == true { + err = notificationsAPI.Send(objMap, c.Templates, destination) + if err != nil { + log.Errorf("notification error: %s", err.Error()) + return err + } + } else if s == emptyCondition { + err = notificationsAPI.Send(objMap, c.Templates, destination) + if err != nil { + log.Errorf("notification error: %s", err.Error()) + return err + } + } + } } + return nil +} + +// This function is copied over from notification engine to make sure we honour emptyCondition +// emptyConditions today are not handled well in notification engine. 
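Editor's note: the hash helper copied in above produces the key segment that identifies an empty `when` expression. As the tests in this change construct them, trigger result keys take the form `<index>.<hash-of-when-condition>`, so comparing the second segment against `hash("")` is how the recorder decides that a condition is unconditional and should always be sent, while conditional entries are sent only when they actually triggered. A small standalone illustration of that comparison follows; the sample `when` expression is made up for the example, everything else mirrors the diff.

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"strings"
)

// hash mirrors the helper in this diff: SHA-1 of the condition string,
// base64 URL-encoded without padding.
func hash(input string) string {
	h := sha1.New()
	_, _ = h.Write([]byte(input))
	return base64.RawURLEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	emptyCondition := hash("")

	// Example trigger-result keys in the "<index>.<hash-of-when>" shape used
	// by the tests in this diff. The second key's expression is hypothetical.
	keys := []string{
		"0." + emptyCondition,
		"1." + hash("rollout.status.phase == 'Degraded'"),
	}
	for _, key := range keys {
		unconditional := strings.Split(key, ".")[1] == emptyCondition
		fmt.Printf("key=%s unconditional=%v\n", key, unconditional)
	}
}
```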
+// TODO: update notification engine to handle emptyConditions and remove this function and its usage +func hash(input string) string { + h := sha1.New() + _, _ = h.Write([]byte(input)) + return base64.RawURLEncoding.EncodeToString(h.Sum(nil)) +} + +// toObjectMap converts an object to a map for the purposes of sending to the notification engine +func toObjectMap(object interface{}) (map[string]interface{}, error) { objBytes, err := json.Marshal(object) if err != nil { - return err + return nil, err } var objMap map[string]interface{} err = json.Unmarshal(objBytes, &objMap) if err != nil { - return err + return nil, err } - for _, dest := range destinations { - err = notificationsAPI.Send(objMap, triggerActions[0].Send, dest) + + // The JSON marshalling above drops the `spec.template` and `spec.selectors` fields if the rollout + // is using workload referencing. The following restores those fields in the returned object map + // so that notification templates can refer to them (as if workload ref was not used). + if ro, ok := object.(*v1alpha1.Rollout); ok && ro.Spec.WorkloadRef != nil { + templateBytes, err := json.Marshal(ro.Spec.Template) if err != nil { - log.Errorf("notification error: %s", err.Error()) - return err + return nil, err + } + var templateMap map[string]interface{} + err = json.Unmarshal(templateBytes, &templateMap) + if err != nil { + return nil, err + } + err = unstructured.SetNestedMap(objMap, templateMap, "spec", "template") + if err != nil { + return nil, err + } + + selectorBytes, err := json.Marshal(ro.Spec.Selector) + if err != nil { + return nil, err + } + var selectorMap map[string]interface{} + err = json.Unmarshal(selectorBytes, &selectorMap) + if err != nil { + return nil, err + } + err = unstructured.SetNestedMap(objMap, selectorMap, "spec", "selector") + if err != nil { + return nil, err } } - return nil + return objMap, nil } func translateReasonToTrigger(reason string) string { diff --git a/utils/record/record_test.go b/utils/record/record_test.go index 15b89f83dd..c99f55985b 100644 --- a/utils/record/record_test.go +++ b/utils/record/record_test.go @@ -13,10 +13,13 @@ import ( "github.com/argoproj/notifications-engine/pkg/triggers" "github.com/golang/mock/gomock" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" ) @@ -92,17 +95,126 @@ func TestSendNotifications(t *testing.T) { } mockCtrl := gomock.NewController(t) mockAPI := mocks.NewMockAPI(mockCtrl) + cr := []triggers.ConditionResult{{ + Key: "1." 
+ hash(""), + Triggered: true, + Templates: []string{"my-template"}, + }} + mockAPI.EXPECT().RunTrigger(gomock.Any(), gomock.Any()).Return(cr, nil).AnyTimes() mockAPI.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() mockAPI.EXPECT().GetConfig().Return(api.Config{ Triggers: map[string][]triggers.Condition{"on-foo-reason": {triggers.Condition{Send: []string{"my-template"}}}}}).AnyTimes() apiFactory := &mocks.FakeFactory{Api: mockAPI} rec := NewFakeEventRecorder() rec.EventRecorderAdapter.apiFactory = apiFactory + //ch := make(chan prometheus.HistogramVec, 1) + err := rec.sendNotifications(&r, EventOptions{EventReason: "FooReason"}) + assert.NoError(t, err) +} +func TestSendNotificationsWhenCondition(t *testing.T) { + r := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "default", + Annotations: map[string]string{"notifications.argoproj.io/subscribe.on-foo-reason.console": "console"}, + }, + } + mockCtrl := gomock.NewController(t) + mockAPI := mocks.NewMockAPI(mockCtrl) + cr := []triggers.ConditionResult{{ + Key: "1." + hash(""), + Triggered: true, + Templates: []string{"my-template"}, + }} + mockAPI.EXPECT().RunTrigger(gomock.Any(), gomock.Any()).Return(cr, nil).AnyTimes() + mockAPI.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockAPI.EXPECT().GetConfig().Return(api.Config{ + Triggers: map[string][]triggers.Condition{"on-foo-reason": {triggers.Condition{When: "rollout.spec.template.spec.containers[0].image == test:blue", Send: []string{"my-template"}}}}}).AnyTimes() + apiFactory := &mocks.FakeFactory{Api: mockAPI} + rec := NewFakeEventRecorder() + rec.EventRecorderAdapter.apiFactory = apiFactory + //ch := make(chan prometheus.HistogramVec, 1) err := rec.sendNotifications(&r, EventOptions{EventReason: "FooReason"}) assert.NoError(t, err) } +func TestNotificationFailedCounter(t *testing.T) { + r := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "default", + Annotations: map[string]string{"notifications.argoproj.io/subscribe.on-foo-reason.console": "console"}, + }, + } + rec := NewFakeEventRecorder() + opts := EventOptions{EventType: corev1.EventTypeWarning, EventReason: "FooReason"} + rec.NotificationFailedCounter.WithLabelValues(r.Name, r.Namespace, opts.EventType, opts.EventReason).Inc() + + res := testutil.ToFloat64(rec.NotificationFailedCounter) + assert.Equal(t, float64(1), res) +} + +func TestNotificationSuccessCounter(t *testing.T) { + r := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "default", + Annotations: map[string]string{"notifications.argoproj.io/subscribe.on-foo-reason.console": "console"}, + }, + } + rec := NewFakeEventRecorder() + opts := EventOptions{EventType: corev1.EventTypeNormal, EventReason: "FooReason"} + rec.NotificationSuccessCounter.WithLabelValues(r.Name, r.Namespace, opts.EventType, opts.EventReason).Inc() + + res := testutil.ToFloat64(rec.NotificationSuccessCounter) + assert.Equal(t, float64(1), res) +} + +func TestNotificationSendPerformance(t *testing.T) { + r := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "default", + Annotations: map[string]string{"notifications.argoproj.io/subscribe.on-foo-reason.console": "console"}, + }, + } + rec := NewFakeEventRecorder() + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.4)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, 
r.Name).Observe(float64(1.3)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.5)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(1.4)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.6)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.1)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(1.3)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.25)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.9)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.17)) + rec.NotificationSendPerformance.WithLabelValues(r.Namespace, r.Name).Observe(float64(0.35)) + + reg := prometheus.NewRegistry() + reg.MustRegister(rec.NotificationSendPerformance) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + log.Infof("mfs: %v, %v, %v, %v", *mfs[0], *mfs[0].Metric[0].Histogram.SampleCount, *mfs[0].Metric[0].Histogram.SampleSum, *mfs[0].Metric[0].Histogram.Bucket[0].CumulativeCount) + want := `# HELP notification_send_performance Notification send performance. + # TYPE notification_send_performance histogram + notification_send_performance_bucket{name="guestbook",namespace="default",le="0.01"} 0 + notification_send_performance_bucket{name="guestbook",namespace="default",le="0.15"} 1 + notification_send_performance_bucket{name="guestbook",namespace="default",le="0.25"} 3 + notification_send_performance_bucket{name="guestbook",namespace="default",le="0.5"} 6 + notification_send_performance_bucket{name="guestbook",namespace="default",le="1"} 8 + notification_send_performance_bucket{name="guestbook",namespace="default",le="+Inf"} 11 + notification_send_performance_sum{name="guestbook",namespace="default"} 7.27 + notification_send_performance_count{name="guestbook",namespace="default"} 11 + ` + err = testutil.CollectAndCompare(rec.NotificationSendPerformance, strings.NewReader(want)) + assert.Nil(t, err) +} + func TestSendNotificationsFails(t *testing.T) { r := v1alpha1.Rollout{ ObjectMeta: metav1.ObjectMeta{ @@ -115,6 +227,52 @@ func TestSendNotificationsFails(t *testing.T) { t.Run("SendError", func(t *testing.T) { mockCtrl := gomock.NewController(t) mockAPI := mocks.NewMockAPI(mockCtrl) + cr := []triggers.ConditionResult{{ + Key: "1." 
+ hash(""), + Triggered: true, + Templates: []string{"my-template"}, + }} + mockAPI.EXPECT().RunTrigger(gomock.Any(), gomock.Any()).Return(cr, nil).AnyTimes() + mockAPI.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("failed to send")).AnyTimes() + mockAPI.EXPECT().GetConfig().Return(api.Config{ + Triggers: map[string][]triggers.Condition{"on-foo-reason": {triggers.Condition{Send: []string{"my-template"}}}}}).AnyTimes() + apiFactory := &mocks.FakeFactory{Api: mockAPI} + rec := NewFakeEventRecorder() + rec.EventRecorderAdapter.apiFactory = apiFactory + + err := rec.sendNotifications(&r, EventOptions{EventReason: "FooReason"}) + assert.Error(t, err) + }) + + t.Run("GetAPIError", func(t *testing.T) { + apiFactory := &mocks.FakeFactory{Err: errors.New("failed to get API")} + rec := NewFakeEventRecorder() + rec.EventRecorderAdapter.apiFactory = apiFactory + + err := rec.sendNotifications(&r, EventOptions{EventReason: "FooReason"}) + assert.Error(t, err) + }) + +} + +func TestSendNotificationsFailsWithRunTriggerError(t *testing.T) { + r := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "default", + Annotations: map[string]string{"notifications.argoproj.io/subscribe.on-foo-reason.console": "console"}, + }, + } + + t.Run("SendError", func(t *testing.T) { + mockCtrl := gomock.NewController(t) + mockAPI := mocks.NewMockAPI(mockCtrl) + cr := []triggers.ConditionResult{{ + Key: "1." + hash(""), + Triggered: true, + Templates: []string{"my-template"}, + }} + mockAPI.EXPECT().RunTrigger(gomock.Any(), gomock.Any()).Return(cr, errors.New("fail")).AnyTimes() mockAPI.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("failed to send")).AnyTimes() mockAPI.EXPECT().GetConfig().Return(api.Config{ Triggers: map[string][]triggers.Condition{"on-foo-reason": {triggers.Condition{Send: []string{"my-template"}}}}}).AnyTimes() @@ -148,6 +306,12 @@ func TestSendNotificationsNoTrigger(t *testing.T) { mockCtrl := gomock.NewController(t) mockAPI := mocks.NewMockAPI(mockCtrl) + cr := []triggers.ConditionResult{{ + Key: "1." 
+ hash(""), + Triggered: false, + Templates: []string{"my-template"}, + }} + mockAPI.EXPECT().RunTrigger(gomock.Any(), gomock.Any()).Return(cr, errors.New("trigger 'on-missing-reason' is not configured")).AnyTimes() mockAPI.EXPECT().GetConfig().Return(api.Config{ Triggers: map[string][]triggers.Condition{"on-foo-reason": {triggers.Condition{Send: []string{"my-template"}}}}}).AnyTimes() mockAPI.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("failed to send")).Times(0) @@ -156,7 +320,7 @@ func TestSendNotificationsNoTrigger(t *testing.T) { rec.EventRecorderAdapter.apiFactory = apiFactory err := rec.sendNotifications(&r, EventOptions{EventReason: "MissingReason"}) - assert.NoError(t, err) + assert.Error(t, err) } func TestNewAPIFactorySettings(t *testing.T) { @@ -171,3 +335,48 @@ func TestNewAPIFactorySettings(t *testing.T) { assert.Equal(t, map[string]interface{}{"rollout": rollout}, vars) } + +func TestWorkloadRefObjectMap(t *testing.T) { + ro := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "default", + Annotations: map[string]string{"notifications.argoproj.io/subscribe.on-missing-reason.console": "console"}, + }, + Spec: v1alpha1.RolloutSpec{ + TemplateResolvedFromRef: true, + SelectorResolvedFromRef: true, + WorkloadRef: &v1alpha1.ObjectRef{ + Kind: "Deployment", + Name: "foo", + APIVersion: "apps/v1", + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "foo", + }, + }, + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "foo": "bar", + }, + }, + }, + } + objMap, err := toObjectMap(&ro) + assert.NoError(t, err) + + templateMap, ok, err := unstructured.NestedMap(objMap, "spec", "template") + assert.NoError(t, err) + assert.True(t, ok) + assert.NotNil(t, templateMap) + + selectorMap, ok, err := unstructured.NestedMap(objMap, "spec", "selector") + assert.NoError(t, err) + assert.True(t, ok) + assert.NotNil(t, selectorMap) +} diff --git a/utils/replicaset/canary.go b/utils/replicaset/canary.go index 01055d1931..a93bd0e659 100644 --- a/utils/replicaset/canary.go +++ b/utils/replicaset/canary.go @@ -4,12 +4,12 @@ import ( "encoding/json" "math" + log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" - log "github.com/sirupsen/logrus" ) const ( @@ -21,6 +21,10 @@ func allDesiredAreAvailable(rs *appsv1.ReplicaSet, desired int32) bool { return rs != nil && desired == *rs.Spec.Replicas && desired == rs.Status.AvailableReplicas } +func allDesiredAreCreated(rs *appsv1.ReplicaSet, desired int32) bool { + return rs != nil && desired == *rs.Spec.Replicas && desired == rs.Status.Replicas +} + func AtDesiredReplicaCountsForCanary(ro *v1alpha1.Rollout, newRS, stableRS *appsv1.ReplicaSet, olderRSs []*appsv1.ReplicaSet, weights *v1alpha1.TrafficWeights) bool { var desiredNewRSReplicaCount, desiredStableRSReplicaCount int32 if ro.Spec.Strategy.Canary.TrafficRouting == nil { @@ -32,7 +36,7 @@ func AtDesiredReplicaCountsForCanary(ro *v1alpha1.Rollout, newRS, stableRS *apps return false } if ro.Spec.Strategy.Canary.TrafficRouting == nil || !ro.Spec.Strategy.Canary.DynamicStableScale { - if !allDesiredAreAvailable(stableRS, desiredStableRSReplicaCount) { + if !allDesiredAreCreated(stableRS, desiredStableRSReplicaCount) { // only check stable RS if we are not using dynamic stable scaling return 
false } @@ -43,55 +47,17 @@ func AtDesiredReplicaCountsForCanary(ro *v1alpha1.Rollout, newRS, stableRS *apps return true } -/* -// AtDesiredReplicaCountsForCanary indicates if the rollout is at the desired state for the current step -func AtDesiredReplicaCountsForCanary(rollout *v1alpha1.Rollout, newRS, stableRS *appsv1.ReplicaSet, olderRSs []*appsv1.ReplicaSet) bool { - desiredNewRSReplicaCount, desiredStableRSReplicaCount := DesiredReplicaCountsForCanary(rollout, newRS, stableRS) - if newRS == nil || desiredNewRSReplicaCount != *newRS.Spec.Replicas || desiredNewRSReplicaCount != newRS.Status.AvailableReplicas { - return false - } - if stableRS == nil || desiredStableRSReplicaCount != *stableRS.Spec.Replicas || desiredStableRSReplicaCount != stableRS.Status.AvailableReplicas { - return false - } - if GetAvailableReplicaCountForReplicaSets(olderRSs) != int32(0) { - return false - } - return true -} -*/ - -/* -//DesiredReplicaCountsForCanary calculates the desired endstate replica count for the new and stable replicasets -func DesiredReplicaCountsForCanary(rollout *v1alpha1.Rollout, newRS, stableRS *appsv1.ReplicaSet) (int32, int32) { - rolloutSpecReplica := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) - replicas, weight := GetCanaryReplicasOrWeight(rollout) - - desiredNewRSReplicaCount := int32(0) - desiredStableRSReplicaCount := int32(0) - if replicas != nil { - desiredNewRSReplicaCount = *replicas - desiredStableRSReplicaCount = rolloutSpecReplica - } else { - desiredNewRSReplicaCount = int32(math.Ceil(float64(rolloutSpecReplica) * (float64(weight) / 100))) - desiredStableRSReplicaCount = int32(math.Ceil(float64(rolloutSpecReplica) * (1 - (float64(weight) / 100)))) - } - - if !CheckStableRSExists(newRS, stableRS) { - // If there is no stableRS or it is the same as the newRS, then the rollout does not follow the canary steps. - // Instead the controller tries to get the newRS to 100% traffic. - desiredNewRSReplicaCount = rolloutSpecReplica - desiredStableRSReplicaCount = 0 - } - // Unlike the ReplicaSet based weighted canary, a service mesh/ingress - // based canary leaves the stable as 100% scaled until the rollout completes. - if rollout.Spec.Strategy.Canary.TrafficRouting != nil { - desiredStableRSReplicaCount = rolloutSpecReplica - } - - return desiredNewRSReplicaCount, desiredStableRSReplicaCount - +// CheckMinPodsPerRS ensures that if the desired number of pods in a stable or canary ReplicaSet is not zero, +// then it is at least MinPodsPerRS for High Availability. Only applicable if using TrafficRouting +func CheckMinPodsPerRS (rollout *v1alpha1.Rollout, count int32) (int32) { + if count == 0 { + return count + } + if rollout.Spec.Strategy.Canary == nil || rollout.Spec.Strategy.Canary.MinPodsPerRS == nil || rollout.Spec.Strategy.Canary.TrafficRouting == nil { + return count + } + return max (count, *rollout.Spec.Strategy.Canary.MinPodsPerRS) } -*/ // CalculateReplicaCountsForBasicCanary calculates the number of replicas for the newRS and the stableRS // when using the basic canary strategy. 
The function calculates the desired number of replicas for @@ -131,9 +97,9 @@ func DesiredReplicaCountsForCanary(rollout *v1alpha1.Rollout, newRS, stableRS *a func CalculateReplicaCountsForBasicCanary(rollout *v1alpha1.Rollout, newRS *appsv1.ReplicaSet, stableRS *appsv1.ReplicaSet, oldRSs []*appsv1.ReplicaSet) (int32, int32) { rolloutSpecReplica := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) _, desiredWeight := GetCanaryReplicasOrWeight(rollout) + maxSurge := MaxSurge(rollout) - desiredStableRSReplicaCount := int32(math.Ceil(float64(rolloutSpecReplica) * (1 - (float64(desiredWeight) / 100)))) - desiredNewRSReplicaCount := int32(math.Ceil(float64(rolloutSpecReplica) * (float64(desiredWeight) / 100))) + desiredNewRSReplicaCount, desiredStableRSReplicaCount := approximateWeightedCanaryStableReplicaCounts(rolloutSpecReplica, desiredWeight, maxSurge) stableRSReplicaCount := int32(0) newRSReplicaCount := int32(0) @@ -151,13 +117,6 @@ func CalculateReplicaCountsForBasicCanary(rollout *v1alpha1.Rollout, newRS *apps desiredStableRSReplicaCount = 0 } - maxSurge := MaxSurge(rollout) - - if extraReplicaAdded(rolloutSpecReplica, desiredWeight) { - // In the case where the weight of the stable and canary replica counts cannot be divided evenly, - // the controller needs to surges by one to account for both replica counts being rounded up. - maxSurge = maxSurge + 1 - } maxReplicaCountAllowed := rolloutSpecReplica + maxSurge allRSs := append(oldRSs, newRS) @@ -223,6 +182,84 @@ func CalculateReplicaCountsForBasicCanary(rollout *v1alpha1.Rollout, newRS *apps return newRSReplicaCount, stableRSReplicaCount } +// approximateWeightedCanaryStableReplicaCounts approximates the desired canary weight and returns +// the closest replica count values for the canary and stable to reach the desired weight. The +// canary/stable replica counts might sum to either spec.replicas or spec.replicas + 1 but will not +// exceed spec.replicas if maxSurge is 0. If the canary weight is between 1-99, and spec.replicas is > 1, +// we will always return a minimum of 1 for stable and canary as to not return 0. +func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, maxSurge int32) (int32, int32) { + if specReplicas == 0 { + return 0, 0 + } + // canaryOption is one potential return value of this function. We will evaluate multiple options + // for the canary count in order to best approximate the desired weight. + type canaryOption struct { + canary int32 + total int32 + } + var options []canaryOption + + ceilWeightedCanaryCount := int32(math.Ceil(float64(specReplicas*desiredWeight) / 100.0)) + floorWeightedCanaryCount := int32(math.Floor(float64(specReplicas*desiredWeight) / 100.0)) + + tied := floorCeilingTied(desiredWeight, specReplicas) + + // zeroAllowed indicates if are allowed to return the floored value if it is zero. We don't allow + // the value to be zero if when user has a weight from 1-99, and they run 2+ replicas (surge included) + zeroAllowed := desiredWeight == 100 || desiredWeight == 0 || (specReplicas == 1 && maxSurge == 0) + + if ceilWeightedCanaryCount < specReplicas || zeroAllowed { + options = append(options, canaryOption{ceilWeightedCanaryCount, specReplicas}) + } + + if !tied && (floorWeightedCanaryCount != 0 || zeroAllowed) { + options = append(options, canaryOption{floorWeightedCanaryCount, specReplicas}) + } + + // check if we are allowed to surge. 
if we are, we can also consider rounding up to spec.replicas + 1 + // in order to achieve a closer canary weight + if maxSurge > 0 { + options = append(options, canaryOption{ceilWeightedCanaryCount, specReplicas + 1}) + surgeIsTied := floorCeilingTied(desiredWeight, specReplicas+1) + if !surgeIsTied && (floorWeightedCanaryCount != 0 || zeroAllowed) { + options = append(options, canaryOption{floorWeightedCanaryCount, specReplicas + 1}) + } + } + + if len(options) == 0 { + // should not get here + return 0, specReplicas + } + + bestOption := options[0] + bestDelta := weightDelta(desiredWeight, bestOption.canary, bestOption.total) + for i := 1; i < len(options); i++ { + currOption := options[i] + currDelta := weightDelta(desiredWeight, currOption.canary, currOption.total) + if currDelta < bestDelta { + bestOption = currOption + bestDelta = currDelta + } + } + return bestOption.canary, bestOption.total - bestOption.canary +} + +// floorCeilingTied indicates if the ceiling and floor values are equidistant from the desired weight +// For example: replicas: 3, desiredWeight: 50% +// A canary count of 1 (33.33%) or 2 (66.66%) are both equidistant from desired weight of 50%. +// When this happens, we will pick the larger canary count +func floorCeilingTied(desiredWeight, totalReplicas int32) bool { + _, frac := math.Modf(float64(totalReplicas) * (float64(desiredWeight) / 100)) + return frac == 0.5 +} + +// weightDelta calculates the difference that the canary replicas will be from the desired weight +// This is used to pick the closest approximation of canary counts. +func weightDelta(desiredWeight, canaryReplicas, totalReplicas int32) float64 { + actualWeight := float64(canaryReplicas*100) / float64(totalReplicas) + return math.Abs(actualWeight - float64(desiredWeight)) +} + // calculateScaleDownReplicaCount calculates drainRSReplicaCount // drainRSReplicaCount can be either stableRS count or canaryRS count // drainRSReplicaCount corresponds to RS whose availability is not considered in calculating replicasToScaleDown @@ -290,6 +327,7 @@ func maxValue(countA int32, countB int32) int32 { // CalculateReplicaCountsForTrafficRoutedCanary calculates the canary and stable replica counts // when using canary with traffic routing. If current traffic weights are supplied, we factor the // those weights into the and return the higher of current traffic scale vs. 
desired traffic scale +// If MinPodsPerRS is defined and the number of replicas in either RS is not 0, then return at least MinPodsPerRS func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, weights *v1alpha1.TrafficWeights) (int32, int32) { var canaryCount, stableCount int32 rolloutSpecReplica := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) @@ -298,7 +336,7 @@ func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, wei // a canary count was explicitly set canaryCount = *setCanaryScaleReplicas } else { - canaryCount = trafficWeightToReplicas(rolloutSpecReplica, desiredWeight) + canaryCount = CheckMinPodsPerRS(rollout, trafficWeightToReplicas(rolloutSpecReplica, desiredWeight)) } if !rollout.Spec.Strategy.Canary.DynamicStableScale { @@ -329,7 +367,7 @@ func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, wei canaryCount = max(trafficWeightReplicaCount, canaryCount) } } - return canaryCount, stableCount + return CheckMinPodsPerRS(rollout, canaryCount), CheckMinPodsPerRS(rollout, stableCount) } // trafficWeightToReplicas returns the appropriate replicas given the full spec.replicas and a weight @@ -397,14 +435,6 @@ func GetReplicasForScaleDown(rs *appsv1.ReplicaSet, ignoreAvailability bool) int return rs.Status.AvailableReplicas } -// extraReplicaAdded checks if an extra replica is added because the stable and canary replicas count are both -// rounded up. The controller rounds both of the replica counts when the setWeight does not distribute evenly -// in order to prevent either from having a 0 replica count. -func extraReplicaAdded(replicas int32, setWeight int32) bool { - _, frac := math.Modf(float64(replicas) * (float64(setWeight) / 100)) - return frac != 0.0 -} - // GetCurrentCanaryStep returns the current canary step. 
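Editor's note: a short worked example of the candidate selection performed by approximateWeightedCanaryStableReplicaCounts above, for a 10-replica rollout with maxSurge 0. The outcomes restate rows from TestApproximateWeightedNewStableReplicaCounts later in this diff; nothing here is new behavior.

```go
package main

import "fmt"

// Worked example for spec.replicas = 10 and maxSurge = 0; each case matches a
// row in the test table added by this diff.
func main() {
	// weight 14%: ceil(10*0.14) = 2 (actual 20%, delta 6), floor = 1 (actual 10%, delta 4).
	// The floor is closer to the requested weight, so canary=1, stable=9.
	fmt.Println("weight 14% -> canary 1, stable 9")

	// weight 15%: 10*0.15 = 1.5, so floor and ceiling are equidistant (a tie).
	// Ties resolve to the larger canary count, so canary=2, stable=8.
	fmt.Println("weight 15% -> canary 2, stable 8")

	// weight 1%: the floor would be 0, which is not allowed for a 1-99% weight
	// with more than one replica and no surge, so the ceiling wins: canary=1, stable=9.
	fmt.Println("weight 1% -> canary 1, stable 9")
}
```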
If there are no steps or the rollout // has already executed the last step, the func returns nil func GetCurrentCanaryStep(rollout *v1alpha1.Rollout) (*v1alpha1.CanaryStep, *int32) { @@ -423,7 +453,7 @@ func GetCurrentCanaryStep(rollout *v1alpha1.Rollout) (*v1alpha1.CanaryStep, *int // GetCanaryReplicasOrWeight either returns a static set of replicas or a weight percentage func GetCanaryReplicasOrWeight(rollout *v1alpha1.Rollout) (*int32, int32) { - if rollout.Status.PromoteFull || rollout.Status.CurrentPodHash == rollout.Status.StableRS { + if rollout.Status.PromoteFull || rollout.Status.StableRS == "" || rollout.Status.CurrentPodHash == rollout.Status.StableRS { return nil, 100 } if scs := UseSetCanaryScale(rollout); scs != nil { diff --git a/utils/replicaset/canary_test.go b/utils/replicaset/canary_test.go index 3b817ad96b..4b2d301b96 100644 --- a/utils/replicaset/canary_test.go +++ b/utils/replicaset/canary_test.go @@ -1,6 +1,7 @@ package replicaset import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -96,6 +97,7 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { abortScaleDownDelaySeconds *int32 statusAbort bool + minPodsPerRS *int32 }{ { name: "Do not add extra RSs in scaleDownCount when .Spec.Replica < AvailableReplicas", @@ -374,7 +376,7 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { olderRS: newRS("older", 3, 3), }, { - name: "Add an extra replica to surge when the setWeight rounding adds another instance", + name: "Do not round past maxSurge with uneven setWeight divisor", rolloutSpecReplicas: 10, setWeight: 5, maxSurge: intstr.FromInt(0), @@ -386,7 +388,23 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { canarySpecReplica: 0, canaryAvailableReplica: 0, - expectedStableReplicaCount: 10, + expectedStableReplicaCount: 9, + expectedCanaryReplicaCount: 0, + }, + { + name: "Do not round past maxSurge with uneven setWeight divisor (part 2)", + rolloutSpecReplicas: 10, + setWeight: 5, + maxSurge: intstr.FromInt(0), + maxUnavailable: intstr.FromInt(1), + + stableSpecReplica: 9, + stableAvailableReplica: 9, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + expectedStableReplicaCount: 9, expectedCanaryReplicaCount: 1, }, { @@ -487,6 +505,37 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { expectedCanaryReplicaCount: 1, // should only surge by 1 to honor maxSurge: 1 }, { + name: "scale down to maxunavailable without exceeding maxSurge", + rolloutSpecReplicas: 3, + setWeight: 99, + maxSurge: intstr.FromInt(0), + maxUnavailable: intstr.FromInt(2), + + stableSpecReplica: 3, + stableAvailableReplica: 3, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + expectedStableReplicaCount: 1, + expectedCanaryReplicaCount: 0, + }, + { + name: "scale down to maxunavailable without exceeding maxSurge (part 2)", + rolloutSpecReplicas: 3, + setWeight: 99, + maxSurge: intstr.FromInt(0), + maxUnavailable: intstr.FromInt(2), + + stableSpecReplica: 1, + stableAvailableReplica: 1, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + expectedStableReplicaCount: 1, + expectedCanaryReplicaCount: 2, + }, { // verify we scale down stableRS while honoring maxUnavailable even when stableRS unavailable name: "honor maxUnavailable during scale down stableRS unavailable", rolloutSpecReplicas: 4, @@ -626,6 +675,23 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { expectedStableReplicaCount: 1, expectedCanaryReplicaCount: 0, }, + { + name: "Honor MinPodsPerRS when using trafficRouting and starting canary", + rolloutSpecReplicas: 10, + 
setWeight: 5, + + stableSpecReplica: 10, + stableAvailableReplica: 10, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + trafficRouting: &v1alpha1.RolloutTrafficRouting{}, + minPodsPerRS: intPnt(2), + + expectedStableReplicaCount: 10, + expectedCanaryReplicaCount: 2, + }, } for i := range tests { test := tests[i] @@ -636,6 +702,9 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { stableRS := newRS("stable", test.stableSpecReplica, test.stableAvailableReplica) canaryRS := newRS("canary", test.canarySpecReplica, test.canaryAvailableReplica) rollout.Spec.Strategy.Canary.AbortScaleDownDelaySeconds = test.abortScaleDownDelaySeconds + if test.minPodsPerRS != nil { + rollout.Spec.Strategy.Canary.MinPodsPerRS = test.minPodsPerRS + } var newRSReplicaCount, stableRSReplicaCount int32 if test.trafficRouting != nil { newRSReplicaCount, stableRSReplicaCount = CalculateReplicaCountsForTrafficRoutedCanary(rollout, nil) @@ -648,6 +717,91 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { } } +func TestApproximateWeightedNewStableReplicaCounts(t *testing.T) { + tests := []struct { + replicas int32 + weight int32 + maxSurge int32 + expCanary int32 + expStable int32 + }{ + {replicas: 0, weight: 0, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 50, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 100, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% + + {replicas: 0, weight: 0, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 50, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 100, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% + + {replicas: 1, weight: 0, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 1, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 49, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 50, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% + {replicas: 1, weight: 99, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% + {replicas: 1, weight: 100, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% + + {replicas: 1, weight: 0, maxSurge: 1, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 1, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 49, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 50, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 99, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 100, maxSurge: 1, expCanary: 1, expStable: 0}, // 100% + + {replicas: 2, weight: 0, maxSurge: 0, expCanary: 0, expStable: 2}, // 0% + {replicas: 2, weight: 1, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 50, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 99, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 100, maxSurge: 0, expCanary: 2, expStable: 0}, // 100% + + {replicas: 2, weight: 0, maxSurge: 1, expCanary: 0, expStable: 2}, // 0% + {replicas: 2, weight: 1, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 2, weight: 50, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 99, maxSurge: 1, expCanary: 2, expStable: 1}, // 66.6% + {replicas: 2, weight: 100, maxSurge: 1, expCanary: 2, expStable: 0}, // 100% + + {replicas: 3, weight: 10, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 25, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 33, 
maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 34, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 49, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 50, maxSurge: 0, expCanary: 2, expStable: 1}, // 66.6% + + {replicas: 3, weight: 10, maxSurge: 1, expCanary: 1, expStable: 3}, // 25% + {replicas: 3, weight: 25, maxSurge: 1, expCanary: 1, expStable: 3}, // 25% + {replicas: 3, weight: 33, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 34, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 49, maxSurge: 1, expCanary: 2, expStable: 2}, // 50% + {replicas: 3, weight: 50, maxSurge: 1, expCanary: 2, expStable: 2}, // 50% + + {replicas: 10, weight: 0, maxSurge: 1, expCanary: 0, expStable: 10}, // 0% + {replicas: 10, weight: 1, maxSurge: 0, expCanary: 1, expStable: 9}, // 10% + {replicas: 10, weight: 14, maxSurge: 0, expCanary: 1, expStable: 9}, // 10% + {replicas: 10, weight: 15, maxSurge: 0, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 16, maxSurge: 0, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 99, maxSurge: 0, expCanary: 9, expStable: 1}, // 90% + {replicas: 10, weight: 100, maxSurge: 1, expCanary: 10, expStable: 0}, // 100% + + {replicas: 10, weight: 0, maxSurge: 1, expCanary: 0, expStable: 10}, // 0% + {replicas: 10, weight: 1, maxSurge: 1, expCanary: 1, expStable: 10}, // 9.1% + {replicas: 10, weight: 18, maxSurge: 1, expCanary: 2, expStable: 9}, // 18.1% + {replicas: 10, weight: 19, maxSurge: 1, expCanary: 2, expStable: 9}, // 18.1% + {replicas: 10, weight: 20, maxSurge: 1, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 23, maxSurge: 1, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 24, maxSurge: 1, expCanary: 3, expStable: 8}, // 27.2% + {replicas: 10, weight: 25, maxSurge: 1, expCanary: 3, expStable: 8}, // 27.2% + {replicas: 10, weight: 99, maxSurge: 1, expCanary: 10, expStable: 1}, // 90.9% + {replicas: 10, weight: 100, maxSurge: 1, expCanary: 10, expStable: 0}, // 100% + + } + for i := range tests { + test := tests[i] + t.Run(fmt.Sprintf("%s_replicas:%d_weight:%d_surge:%d", t.Name(), test.replicas, test.weight, test.maxSurge), func(t *testing.T) { + newRSReplicaCount, stableRSReplicaCount := approximateWeightedCanaryStableReplicaCounts(test.replicas, test.weight, test.maxSurge) + assert.Equal(t, test.expCanary, newRSReplicaCount, "check canary replica count") + assert.Equal(t, test.expStable, stableRSReplicaCount, "check stable replica count") + }) + } +} func TestCalculateReplicaCountsForNewDeployment(t *testing.T) { rollout := newRollout(10, 10, intstr.FromInt(0), intstr.FromInt(1), "canary", "stable", nil, nil) stableRS := newRS("stable", 10, 0) @@ -881,6 +1035,130 @@ func TestGetCurrentSetWeight(t *testing.T) { } +func TestAtDesiredReplicaCountsForCanary(t *testing.T) { + + t.Run("we are at desired replica counts and availability", func(t *testing.T) { + rollout := newRollout(4, 50, intstr.FromInt(1), intstr.FromInt(1), "current", "stable", &v1alpha1.SetCanaryScale{ + Weight: pointer.Int32Ptr(2), + Replicas: pointer.Int32Ptr(2), + MatchTrafficWeight: false, + }, nil) + + newReplicaSet := newRS("", 2, 2) + newReplicaSet.Name = "newRS" + newReplicaSet.Status.Replicas = 2 + + stableReplicaSet := newRS("", 2, 2) + stableReplicaSet.Name = "stableRS" + stableReplicaSet.Status.Replicas = 2 + + atDesiredReplicaCounts := AtDesiredReplicaCountsForCanary(rollout, newReplicaSet, stableReplicaSet, nil, 
&v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 50, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 50, + }, + }) + assert.Equal(t, true, atDesiredReplicaCounts) + }) + + t.Run("new replicaset is not at desired counts or availability", func(t *testing.T) { + rollout := newRollout(4, 50, intstr.FromInt(1), intstr.FromInt(1), "current", "stable", &v1alpha1.SetCanaryScale{ + Weight: pointer.Int32Ptr(2), + Replicas: pointer.Int32Ptr(2), + MatchTrafficWeight: false, + }, nil) + + newReplicaSet := newRS("", 2, 1) + newReplicaSet.Name = "newRS" + newReplicaSet.Status.Replicas = 2 + + stableReplicaSet := newRS("", 2, 2) + stableReplicaSet.Name = "stableRS" + stableReplicaSet.Status.Replicas = 2 + + atDesiredReplicaCounts := AtDesiredReplicaCountsForCanary(rollout, newReplicaSet, stableReplicaSet, nil, &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 50, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 50, + }, + }) + assert.Equal(t, false, atDesiredReplicaCounts) + }) + + t.Run("stable replicaset is not at desired counts or availability", func(t *testing.T) { + rollout := newRollout(4, 75, intstr.FromInt(1), intstr.FromInt(1), "current", "stable", &v1alpha1.SetCanaryScale{}, nil) + newReplicaSet := newRS("", 3, 3) + newReplicaSet.Name = "newRS" + newReplicaSet.Status.Replicas = 3 + + stableReplicaSet := newRS("", 2, 2) + stableReplicaSet.Name = "stableRS" + stableReplicaSet.Status.Replicas = 2 + + atDesiredReplicaCounts := AtDesiredReplicaCountsForCanary(rollout, newReplicaSet, stableReplicaSet, nil, &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 75, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 25, + }, + }) + assert.Equal(t, false, atDesiredReplicaCounts) + }) + + t.Run("stable replicaset is not at desired availability but is at correct count", func(t *testing.T) { + // This test returns true because for stable replicasets we only check the count of the pods but not availability + rollout := newRollout(4, 75, intstr.FromInt(1), intstr.FromInt(1), "current", "stable", &v1alpha1.SetCanaryScale{}, nil) + newReplicaSet := newRS("", 3, 3) + newReplicaSet.Name = "newRS" + newReplicaSet.Status.Replicas = 1 + + stableReplicaSet := newRS("", 1, 0) + stableReplicaSet.Name = "stableRS" + stableReplicaSet.Status.Replicas = 1 + + atDesiredReplicaCounts := AtDesiredReplicaCountsForCanary(rollout, newReplicaSet, stableReplicaSet, nil, &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 75, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 25, + }, + }) + assert.Equal(t, true, atDesiredReplicaCounts) + }) + + t.Run("test that when status field lags behind spec.replicas we fail", func(t *testing.T) { + rollout := newRollout(4, 50, intstr.FromInt(1), intstr.FromInt(1), "current", "stable", &v1alpha1.SetCanaryScale{ + Weight: pointer.Int32Ptr(2), + Replicas: pointer.Int32Ptr(2), + MatchTrafficWeight: false, + }, nil) + + newReplicaSet := newRS("", 2, 2) + newReplicaSet.Name = "newRS" + newReplicaSet.Status.Replicas = 2 + + stableReplicaSet := newRS("", 2, 2) + stableReplicaSet.Name = "stableRS" + stableReplicaSet.Status.Replicas = 3 + + atDesiredReplicaCounts := AtDesiredReplicaCountsForCanary(rollout, newReplicaSet, stableReplicaSet, nil, &v1alpha1.TrafficWeights{ + Canary: v1alpha1.WeightDestination{ + Weight: 50, + }, + Stable: v1alpha1.WeightDestination{ + Weight: 50, + }, + }) + assert.Equal(t, false, atDesiredReplicaCounts) + }) +} + func TestGetCurrentExperiment(t *testing.T) { rollout := 
&v1alpha1.Rollout{ Spec: v1alpha1.RolloutSpec{ diff --git a/utils/replicaset/replicaset.go b/utils/replicaset/replicaset.go index deb1af1da0..333b5117fd 100644 --- a/utils/replicaset/replicaset.go +++ b/utils/replicaset/replicaset.go @@ -24,7 +24,9 @@ import ( "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/hash" logutil "github.com/argoproj/argo-rollouts/utils/log" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) // FindNewReplicaSet returns the new RS this given rollout targets from the given list. @@ -38,12 +40,17 @@ func FindNewReplicaSet(rollout *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet) * } rsList = newRSList sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList)) - // First, attempt to find the replicaset by the replicaset naming formula - replicaSetName := fmt.Sprintf("%s-%s", rollout.Name, controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount)) - for _, rs := range rsList { - if rs.Name == replicaSetName { - return rs - } + // First, attempt to find the replicaset using our own hashing + podHash := hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + if rs := searchRsByHash(rsList, podHash); rs != nil { + return rs + } + // Second, attempt to find the replicaset with old hash implementation + oldHash := controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + if rs := searchRsByHash(rsList, oldHash); rs != nil { + logCtx := logutil.WithRollout(rollout) + logCtx.Infof("ComputePodTemplateHash hash changed (new hash: %s, old hash: %s)", podHash, oldHash) + return rs } // Iterate the ReplicaSet list again, this time doing a deep equal against the template specs. 
// This covers the corner case in which the reason we did not find the replicaset, was because @@ -60,7 +67,7 @@ func FindNewReplicaSet(rollout *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet) * desired := rollout.Spec.Template.DeepCopy() if PodTemplateEqualIgnoreHash(live, desired) { logCtx := logutil.WithRollout(rollout) - logCtx.Infof("ComputeHash change detected (expected: %s, actual: %s)", replicaSetName, rs.Name) + logCtx.Infof("ComputePodTemplateHash hash changed (expected: %s, actual: %s)", podHash, rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]) return rs } } @@ -68,6 +75,15 @@ func FindNewReplicaSet(rollout *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet) * return nil } +func searchRsByHash(rsList []*appsv1.ReplicaSet, hash string) *appsv1.ReplicaSet { + for _, rs := range rsList { + if rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] == hash { + return rs + } + } + return nil +} + func GetRolloutAffinity(rollout v1alpha1.Rollout) *v1alpha1.AntiAffinity { var antiAffinityStrategy *v1alpha1.AntiAffinity if rollout.Spec.Strategy.BlueGreen != nil && rollout.Spec.Strategy.BlueGreen.AntiAffinity != nil { @@ -86,7 +102,7 @@ func GetRolloutAffinity(rollout v1alpha1.Rollout) *v1alpha1.AntiAffinity { func GenerateReplicaSetAffinity(rollout v1alpha1.Rollout) *corev1.Affinity { antiAffinityStrategy := GetRolloutAffinity(rollout) - currentPodHash := controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + currentPodHash := hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount) affinitySpec := rollout.Spec.Template.Spec.Affinity.DeepCopy() if antiAffinityStrategy != nil && rollout.Status.StableRS != "" && rollout.Status.StableRS != currentPodHash { antiAffinityRule := CreateInjectedAntiAffinityRule(rollout) @@ -192,7 +208,7 @@ func RemoveInjectedAntiAffinityRule(affinity *corev1.Affinity, rollout v1alpha1. func IfInjectedAntiAffinityRuleNeedsUpdate(affinity *corev1.Affinity, rollout v1alpha1.Rollout) bool { _, podAffinityTerm := HasInjectedAntiAffinityRule(affinity, rollout) - currentPodHash := controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + currentPodHash := hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount) if podAffinityTerm != nil && rollout.Status.StableRS != currentPodHash { for _, labelSelectorRequirement := range podAffinityTerm.LabelSelector.MatchExpressions { if labelSelectorRequirement.Key == v1alpha1.DefaultRolloutUniqueLabelKey && labelSelectorRequirement.Values[0] != rollout.Status.StableRS { @@ -204,7 +220,7 @@ func IfInjectedAntiAffinityRuleNeedsUpdate(affinity *corev1.Affinity, rollout v1 } func NeedsRestart(rollout *v1alpha1.Rollout) bool { - now := metav1.Now().UTC() + now := timeutil.MetaNow().UTC() if rollout.Spec.RestartAt == nil { return false } @@ -215,9 +231,8 @@ func NeedsRestart(rollout *v1alpha1.Rollout) bool { } // FindOldReplicaSets returns the old replica sets targeted by the given Rollout, with the given slice of RSes. 
-func FindOldReplicaSets(rollout *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet) []*appsv1.ReplicaSet { +func FindOldReplicaSets(rollout *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) []*appsv1.ReplicaSet { var allRSs []*appsv1.ReplicaSet - newRS := FindNewReplicaSet(rollout, rsList) for _, rs := range rsList { // Filter out new replica set if newRS != nil && rs.UID == newRS.UID { @@ -455,7 +470,7 @@ func CheckPodSpecChange(rollout *v1alpha1.Rollout, newRS *appsv1.ReplicaSet) boo if rollout.Status.CurrentPodHash == "" { return false } - podHash := controller.ComputeHash(&rollout.Spec.Template, rollout.Status.CollisionCount) + podHash := hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount) if newRS != nil { podHash = GetPodTemplateHash(newRS) } @@ -523,7 +538,7 @@ func PodTemplateEqualIgnoreHash(live, desired *corev1.PodTemplateSpec) bool { // GetPodTemplateHash returns the rollouts-pod-template-hash value from a ReplicaSet's labels func GetPodTemplateHash(rs *appsv1.ReplicaSet) string { - if rs.Labels == nil { + if rs == nil || rs.Labels == nil { return "" } return rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] @@ -594,7 +609,7 @@ func GetTimeRemainingBeforeScaleDownDeadline(rs *appsv1.ReplicaSet) (*time.Durat if err != nil { return nil, fmt.Errorf("unable to read scaleDownAt label on rs '%s'", rs.Name) } - now := metav1.Now() + now := timeutil.MetaNow() scaleDownAt := metav1.NewTime(scaleDownAtTime) if scaleDownAt.After(now.Time) { remainingTime := scaleDownAt.Sub(now.Time) @@ -622,12 +637,12 @@ func GetPodsOwnedByReplicaSet(ctx context.Context, client kubernetes.Interface, return podOwnedByRS, nil } -// IsReplicaSetReady returns if a ReplicaSet is scaled up and its ready count is >= desired count -func IsReplicaSetReady(rs *appsv1.ReplicaSet) bool { +// IsReplicaSetAvailable returns if a ReplicaSet is scaled up and its ready count is >= desired count +func IsReplicaSetAvailable(rs *appsv1.ReplicaSet) bool { if rs == nil { return false } replicas := rs.Spec.Replicas - readyReplicas := rs.Status.ReadyReplicas - return replicas != nil && *replicas != 0 && readyReplicas != 0 && *replicas <= readyReplicas + availableReplicas := rs.Status.AvailableReplicas + return replicas != nil && *replicas != 0 && availableReplicas != 0 && *replicas <= availableReplicas } diff --git a/utils/replicaset/replicaset_test.go b/utils/replicaset/replicaset_test.go index 4e3f3809e4..f1a2a80fd7 100644 --- a/utils/replicaset/replicaset_test.go +++ b/utils/replicaset/replicaset_test.go @@ -23,6 +23,8 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" + "github.com/argoproj/argo-rollouts/utils/hash" + timeutil "github.com/argoproj/argo-rollouts/utils/time" ) // generateRollout creates a rollout, with the input image as its template @@ -59,14 +61,14 @@ func generateRollout(image string) v1alpha1.Rollout { // generateRS creates a replica set, with the input rollout's template as its template func generateRS(rollout v1alpha1.Rollout) appsv1.ReplicaSet { template := rollout.Spec.Template.DeepCopy() - podTemplateHash := controller.ComputeHash(&rollout.Spec.Template, nil) + podTemplateHash := hash.ComputePodTemplateHash(&rollout.Spec.Template, nil) template.Labels = map[string]string{ v1alpha1.DefaultRolloutUniqueLabelKey: podTemplateHash, } return appsv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), - Name: 
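A minimal sketch of the intended call pattern after these hunks — the caller resolves the new ReplicaSet once (new `hash.ComputePodTemplateHash` label first, legacy `controller.ComputeHash` as a fallback) and passes it into `FindOldReplicaSets` instead of having it recomputed internally. The `splitReplicaSets` helper below is hypothetical and only illustrates the usage, it is not part of the patch:

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"

	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
)

// splitReplicaSets resolves the new ReplicaSet once and derives the old ones from it.
// FindNewReplicaSet tries the new pod-template hash label first and falls back to the
// legacy hash, so ReplicaSets created before the hash change are still matched.
func splitReplicaSets(ro *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet) (*appsv1.ReplicaSet, []*appsv1.ReplicaSet) {
	newRS := replicasetutil.FindNewReplicaSet(ro, rsList)
	oldRSs := replicasetutil.FindOldReplicaSets(ro, rsList, newRS)
	return newRS, oldRSs
}
```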
diff --git a/utils/replicaset/replicaset_test.go b/utils/replicaset/replicaset_test.go
index 4e3f3809e4..f1a2a80fd7 100644
--- a/utils/replicaset/replicaset_test.go
+++ b/utils/replicaset/replicaset_test.go
@@ -23,6 +23,8 @@ import (
     "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
     "github.com/argoproj/argo-rollouts/utils/annotations"
     "github.com/argoproj/argo-rollouts/utils/conditions"
+    "github.com/argoproj/argo-rollouts/utils/hash"
+    timeutil "github.com/argoproj/argo-rollouts/utils/time"
 )
 
 // generateRollout creates a rollout, with the input image as its template
@@ -59,14 +61,14 @@ func generateRollout(image string) v1alpha1.Rollout {
 // generateRS creates a replica set, with the input rollout's template as its template
 func generateRS(rollout v1alpha1.Rollout) appsv1.ReplicaSet {
     template := rollout.Spec.Template.DeepCopy()
-    podTemplateHash := controller.ComputeHash(&rollout.Spec.Template, nil)
+    podTemplateHash := hash.ComputePodTemplateHash(&rollout.Spec.Template, nil)
     template.Labels = map[string]string{
         v1alpha1.DefaultRolloutUniqueLabelKey: podTemplateHash,
     }
     return appsv1.ReplicaSet{
         ObjectMeta: metav1.ObjectMeta{
             UID:    uuid.NewUUID(),
-            Name:   fmt.Sprintf("%s-%s", rollout.Name, controller.ComputeHash(&rollout.Spec.Template, nil)),
+            Name:   fmt.Sprintf("%s-%s", rollout.Name, podTemplateHash),
             Labels: template.Labels,
         },
         Spec: appsv1.ReplicaSetSpec{
@@ -77,6 +79,26 @@
     }
 }
 
+func TestFindNewReplicaSet(t *testing.T) {
+    ro := generateRollout("red")
+    rs1 := generateRS(ro)
+    rs1.Labels["name"] = "red"
+    *(rs1.Spec.Replicas) = 1
+
+    t.Run("FindNewReplicaSet by hash", func(t *testing.T) {
+        // rs has the current hash
+        rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] = hash.ComputePodTemplateHash(&ro.Spec.Template, ro.Status.CollisionCount)
+        actual := FindNewReplicaSet(&ro, []*appsv1.ReplicaSet{&rs1})
+        assert.Equal(t, &rs1, actual)
+    })
+    t.Run("FindNewReplicaSet by deprecated hash", func(t *testing.T) {
+        // rs has the deprecated hash
+        rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] = controller.ComputeHash(&ro.Spec.Template, ro.Status.CollisionCount)
+        actual := FindNewReplicaSet(&ro, []*appsv1.ReplicaSet{&rs1})
+        assert.Equal(t, &rs1, actual)
+    })
+}
+
 func TestFindOldReplicaSets(t *testing.T) {
     now := metav1.Now()
     before := metav1.Time{Time: now.Add(-time.Minute)}
@@ -84,7 +106,7 @@
     rollout := generateRollout("nginx")
     newRS := generateRS(rollout)
     *(newRS.Spec.Replicas) = 1
-    newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] = "hash"
+    newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] = hash.ComputePodTemplateHash(&rollout.Spec.Template, rollout.Status.CollisionCount)
     newRS.CreationTimestamp = now
 
     oldRollout := generateRollout("nginx")
@@ -121,7 +143,7 @@
     for _, test := range tests {
         t.Run(test.Name, func(t *testing.T) {
-            allRS := FindOldReplicaSets(&test.rollout, test.rsList)
+            allRS := FindOldReplicaSets(&test.rollout, test.rsList, &newRS)
             sort.Sort(controller.ReplicaSetsByCreationTimestamp(allRS))
             sort.Sort(controller.ReplicaSetsByCreationTimestamp(test.expected))
             if !reflect.DeepEqual(allRS, test.expected) {
@@ -636,7 +658,7 @@ func TestCheckPodSpecChange(t *testing.T) {
     ro := generateRollout("nginx")
     rs := generateRS(ro)
     assert.False(t, CheckPodSpecChange(&ro, &rs))
-    ro.Status.CurrentPodHash = controller.ComputeHash(&ro.Spec.Template, ro.Status.CollisionCount)
+    ro.Status.CurrentPodHash = hash.ComputePodTemplateHash(&ro.Spec.Template, ro.Status.CollisionCount)
     assert.False(t, CheckPodSpecChange(&ro, &rs))
 
     ro.Status.CurrentPodHash = "different-hash"
@@ -827,7 +849,7 @@ func TestGenerateReplicaSetAffinity(t *testing.T) {
     assert.Equal(t, "", ro.Status.StableRS)
     assert.Nil(t, GenerateReplicaSetAffinity(ro))
     // StableRS is equal to CurrentPodHash
-    ro.Status.StableRS = controller.ComputeHash(&ro.Spec.Template, nil)
+    ro.Status.StableRS = hash.ComputePodTemplateHash(&ro.Spec.Template, nil)
     assert.Nil(t, GenerateReplicaSetAffinity(ro))
 
     // Injects anti-affinity rule with RequiredDuringSchedulingIgnoredDuringExecution into empty RS Affinity object
@@ -1184,13 +1206,13 @@ func TestGetTimeRemainingBeforeScaleDownDeadline(t *testing.T) {
         assert.Nil(t, remainingTime)
     }
     {
-        rs.ObjectMeta.Annotations = map[string]string{v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey: metav1.Now().Add(-600 * time.Second).UTC().Format(time.RFC3339)}
+        rs.ObjectMeta.Annotations = map[string]string{v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey: timeutil.Now().Add(-600 * time.Second).UTC().Format(time.RFC3339)}
         remainingTime, err := GetTimeRemainingBeforeScaleDownDeadline(&rs)
         assert.Nil(t, err)
         assert.Nil(t, remainingTime)
     }
     {
-        rs.ObjectMeta.Annotations = map[string]string{v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey: metav1.Now().Add(600 * time.Second).UTC().Format(time.RFC3339)}
+        rs.ObjectMeta.Annotations = map[string]string{v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey: timeutil.Now().Add(600 * time.Second).UTC().Format(time.RFC3339)}
         remainingTime, err := GetTimeRemainingBeforeScaleDownDeadline(&rs)
         assert.Nil(t, err)
         assert.NotNil(t, remainingTime)
@@ -1248,9 +1270,9 @@ spec:
     assert.True(t, PodTemplateEqualIgnoreHash(&live, &desired))
 }
 
-func TestIsReplicaSetReady(t *testing.T) {
+func TestIsReplicaSetAvailable(t *testing.T) {
     {
-        assert.False(t, IsReplicaSetReady(nil))
+        assert.False(t, IsReplicaSetAvailable(nil))
     }
     {
         rs := appsv1.ReplicaSet{
@@ -1258,10 +1280,11 @@
             Replicas: pointer.Int32Ptr(1),
         },
         Status: appsv1.ReplicaSetStatus{
-            ReadyReplicas: 0,
+            ReadyReplicas:     0,
+            AvailableReplicas: 0,
         },
     }
-        assert.False(t, IsReplicaSetReady(&rs))
+        assert.False(t, IsReplicaSetAvailable(&rs))
     }
     {
         rs := appsv1.ReplicaSet{
@@ -1269,10 +1292,11 @@
             Replicas: pointer.Int32Ptr(1),
         },
         Status: appsv1.ReplicaSetStatus{
-            ReadyReplicas: 1,
+            ReadyReplicas:     1,
+            AvailableReplicas: 1,
         },
     }
-        assert.True(t, IsReplicaSetReady(&rs))
+        assert.True(t, IsReplicaSetAvailable(&rs))
     }
     {
         rs := appsv1.ReplicaSet{
@@ -1280,10 +1304,24 @@
             Replicas: pointer.Int32Ptr(1),
         },
         Status: appsv1.ReplicaSetStatus{
-            ReadyReplicas: 2,
+            ReadyReplicas:     2,
+            AvailableReplicas: 2,
+        },
+    }
+        assert.True(t, IsReplicaSetAvailable(&rs))
+    }
+    {
+        rs := appsv1.ReplicaSet{
+            Spec: appsv1.ReplicaSetSpec{
+                Replicas: pointer.Int32Ptr(0),
+            },
+            Status: appsv1.ReplicaSetStatus{
+                ReadyReplicas:     0,
+                AvailableReplicas: 0,
         },
     }
-        assert.True(t, IsReplicaSetReady(&rs))
+        // NOTE: we currently consider scaled-down replicas as not available
+        assert.False(t, IsReplicaSetAvailable(&rs))
     }
     {
         rs := appsv1.ReplicaSet{
@@ -1291,10 +1329,10 @@
             Replicas: pointer.Int32Ptr(0),
         },
         Status: appsv1.ReplicaSetStatus{
-            ReadyReplicas: 0,
+            ReadyReplicas:     1,
+            AvailableReplicas: 0,
         },
     }
-        // NOTE: currently consider scaled down replicas as not ready
-        assert.False(t, IsReplicaSetReady(&rs))
+        assert.False(t, IsReplicaSetAvailable(&rs))
     }
 }
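The rename from IsReplicaSetReady to IsReplicaSetAvailable is more than cosmetic: a pod can be Ready while not yet Available (for example, while minReadySeconds has not elapsed), so gating on status.availableReplicas is the stricter signal. A small sketch of the distinction, with hypothetical replica values (not part of the patch):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/utils/pointer"

	replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
)

// readyButNotAvailable shows why the helper now gates on AvailableReplicas:
// both pods report Ready, but one has not satisfied minReadySeconds yet,
// so the ReplicaSet as a whole is not considered available.
func readyButNotAvailable() bool {
	rs := &appsv1.ReplicaSet{
		Spec: appsv1.ReplicaSetSpec{Replicas: pointer.Int32Ptr(2)},
		Status: appsv1.ReplicaSetStatus{
			ReadyReplicas:     2,
			AvailableReplicas: 1, // one pod still inside minReadySeconds
		},
	}
	return replicasetutil.IsReplicaSetAvailable(rs) // returns false
}
```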
diff --git a/utils/rollout/rolloututil.go b/utils/rollout/rolloututil.go
index c2e997aa00..0b7df7ff38 100644
--- a/utils/rollout/rolloututil.go
+++ b/utils/rollout/rolloututil.go
@@ -4,6 +4,8 @@ import (
     "fmt"
     "strconv"
 
+    replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
+
     "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
     "github.com/argoproj/argo-rollouts/utils/annotations"
     "github.com/argoproj/argo-rollouts/utils/conditions"
@@ -23,7 +25,9 @@ func GetRolloutPhase(ro *v1alpha1.Rollout) (v1alpha1.RolloutPhase, string) {
     if !isGenerationObserved(ro) {
         return v1alpha1.RolloutPhaseProgressing, "waiting for rollout spec update to be observed"
     }
-
+    if IsUnpausing(ro) {
+        return v1alpha1.RolloutPhaseProgressing, "waiting for rollout to unpause"
+    }
     if ro.Spec.TemplateResolvedFromRef && !isWorkloadGenerationObserved(ro) {
         return v1alpha1.RolloutPhaseProgressing, "waiting for rollout spec update to be observed for the reference workload"
     }
@@ -51,6 +55,17 @@ func isGenerationObserved(ro *v1alpha1.Rollout) bool {
     return int64(observedGen) == ro.Generation
 }
 
+// IsUnpausing detects if we are in the process of unpausing a rollout. This is determined by seeing
+// if status.controllerPause is true, but the list of pause conditions (status.pauseConditions)
+// is empty. This implies that a user cleared the pause conditions but the controller has not yet
+// observed or reacted to it.
+// NOTE: this function is necessary because, unlike metadata.generation & status.observedGeneration,
+// status.controllerPause & status.pauseConditions are both status fields and do not benefit from
+// the auto-incrementing behavior of metadata.generation.
+func IsUnpausing(ro *v1alpha1.Rollout) bool {
+    return ro.Status.ControllerPause && len(ro.Status.PauseConditions) == 0
+}
+
 func isWorkloadGenerationObserved(ro *v1alpha1.Rollout) bool {
     if _, ok := annotations.GetWorkloadGenerationAnnotation(ro); !ok {
         return true
@@ -166,3 +181,16 @@ func CanaryStepString(c v1alpha1.CanaryStep) string {
     }
     return "invalid"
 }
+
+// ShouldVerifyWeight returns whether weight verification should be performed. We do not verify on
+// every reconciliation because weight verification may involve API calls to the cloud provider,
+// which could incur rate limiting.
+func ShouldVerifyWeight(ro *v1alpha1.Rollout) bool {
+    currentStep, _ := replicasetutil.GetCurrentCanaryStep(ro)
+    // If we are in the middle of an update at a setWeight step, also perform weight verification.
+    // Note that we don't do this every reconciliation because weight verification typically involves
+    // API calls to the cloud provider which could incur rate limiting.
+    shouldVerifyWeight := ro.Status.StableRS != "" &&
+        !IsFullyPromoted(ro) &&
+        currentStep != nil && currentStep.SetWeight != nil
+    return shouldVerifyWeight
+}
diff --git a/utils/rollout/rolloututil_test.go b/utils/rollout/rolloututil_test.go
index 4fc15a4c63..572bd1cb51 100644
--- a/utils/rollout/rolloututil_test.go
+++ b/utils/rollout/rolloututil_test.go
@@ -394,3 +394,41 @@ func TestCanaryStepString(t *testing.T) {
         assert.Equal(t, test.expectedString, CanaryStepString(test.step))
     }
 }
+
+func TestIsUnpausing(t *testing.T) {
+    ro := newCanaryRollout()
+    ro.Status.Phase = v1alpha1.RolloutPhasePaused
+    ro.Status.Message = "canary pause"
+    ro.Status.PauseConditions = []v1alpha1.PauseCondition{
+        {
+            Reason: v1alpha1.PauseReasonCanaryPauseStep,
+        },
+    }
+    ro.Status.ControllerPause = true
+    status, message := GetRolloutPhase(ro)
+    assert.Equal(t, v1alpha1.RolloutPhasePaused, status)
+    assert.Equal(t, "canary pause", message)
+
+    ro.Status.PauseConditions = nil
+    status, message = GetRolloutPhase(ro)
+    assert.Equal(t, v1alpha1.RolloutPhaseProgressing, status)
+    assert.Equal(t, "waiting for rollout to unpause", message)
+}
+
+func TestShouldVerifyWeight(t *testing.T) {
+    ro := newCanaryRollout()
+    ro.Status.StableRS = "34feab23f"
+    ro.Status.CurrentStepIndex = pointer.Int32Ptr(0)
+    ro.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{
+        SetWeight: pointer.Int32Ptr(20),
+    }}
+    assert.Equal(t, true, ShouldVerifyWeight(ro))
+
+    ro.Status.StableRS = ""
+    assert.Equal(t, false, ShouldVerifyWeight(ro))
+
+    ro.Status.StableRS = "34feab23f"
+    ro.Status.CurrentStepIndex = nil
+    ro.Spec.Strategy.Canary.Steps = nil
+    assert.Equal(t, false, ShouldVerifyWeight(ro))
+}
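Both new helpers are cheap, local checks that callers can consult before doing more expensive work. A hypothetical fragment of how a caller might combine them — the describeAndMaybeVerify function and its verify callback are illustrative only, not part of the patch:

```go
package example

import (
	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	rolloututil "github.com/argoproj/argo-rollouts/utils/rollout"
)

// describeAndMaybeVerify reports the rollout phase (which is now Progressing while the
// controller catches up to a cleared pause) and only attempts weight verification
// mid-update at a setWeight step, avoiding unnecessary cloud-provider API calls.
func describeAndMaybeVerify(ro *v1alpha1.Rollout, verify func(*v1alpha1.Rollout) error) (v1alpha1.RolloutPhase, string, error) {
	phase, message := rolloututil.GetRolloutPhase(ro)
	if rolloututil.ShouldVerifyWeight(ro) {
		if err := verify(ro); err != nil {
			return phase, message, err
		}
	}
	return phase, message, nil
}
```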
diff --git a/utils/service/service.go b/utils/service/service.go
index eb3be4139c..fbcfb63ad2 100644
--- a/utils/service/service.go
+++ b/utils/service/service.go
@@ -35,6 +35,12 @@ func GetRolloutServiceKeys(rollout *v1alpha1.Rollout) []string {
         if rollout.Spec.Strategy.Canary.StableService != "" {
             services = append(services, fmt.Sprintf("%s/%s", rollout.Namespace, rollout.Spec.Strategy.Canary.StableService))
         }
+        if rollout.Spec.Strategy.Canary.PingPong != nil && rollout.Spec.Strategy.Canary.PingPong.PingService != "" {
+            services = append(services, fmt.Sprintf("%s/%s", rollout.Namespace, rollout.Spec.Strategy.Canary.PingPong.PingService))
+        }
+        if rollout.Spec.Strategy.Canary.PingPong != nil && rollout.Spec.Strategy.Canary.PingPong.PongService != "" {
+            services = append(services, fmt.Sprintf("%s/%s", rollout.Namespace, rollout.Spec.Strategy.Canary.PingPong.PongService))
+        }
     }
     return services
 }
diff --git a/utils/service/service_test.go b/utils/service/service_test.go
index 82cab8dbab..1b1d615dd2 100644
--- a/utils/service/service_test.go
+++ b/utils/service/service_test.go
@@ -63,6 +63,25 @@ func TestGetRolloutServiceKeysForCanaryWithCanaryService(t *testing.T) {
     assert.Equal(t, keys, []string{"default/canary-service", "default/stable-service"})
 }
 
+func TestGetRolloutServiceKeysForPingPongCanaryService(t *testing.T) {
+    keys := GetRolloutServiceKeys(&v1alpha1.Rollout{
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: "default",
+        },
+        Spec: v1alpha1.RolloutSpec{
+            Strategy: v1alpha1.RolloutStrategy{
+                Canary: &v1alpha1.CanaryStrategy{
+                    PingPong: &v1alpha1.PingPongSpec{
+                        PingService: "ping-service",
+                        PongService: "pong-service",
+                    },
+                },
+            },
+        },
+    })
+    assert.Equal(t, keys, []string{"default/ping-service", "default/pong-service"})
+}
+
 func TestGetRolloutServiceKeysForBlueGreen(t *testing.T) {
     keys := GetRolloutServiceKeys(&v1alpha1.Rollout{
         ObjectMeta: metav1.ObjectMeta{
diff --git a/utils/time/now.go b/utils/time/now.go
new file mode 100644
index 0000000000..4103857811
--- /dev/null
+++ b/utils/time/now.go
@@ -0,0 +1,15 @@
+package time
+
+import (
+    "time"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Now is a wrapper around time.Now() and used to override behavior in tests.
+var Now = time.Now
+
+// MetaNow is a wrapper around metav1.Now() and used to override behavior in tests.
+var MetaNow = func() metav1.Time {
+    return metav1.Time{Time: Now()}
+}
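Because Now and MetaNow are package-level variables rather than direct calls to time.Now(), tests can pin the clock and restore it afterwards. A sketch of how that might look — the withFrozenClock helper is hypothetical, not part of the patch:

```go
package example

import (
	"testing"
	"time"

	timeutil "github.com/argoproj/argo-rollouts/utils/time"
)

// withFrozenClock pins timeutil.Now for the duration of fn and restores it afterwards.
// Since MetaNow is defined in terms of Now(), overriding Now freezes both wrappers.
func withFrozenClock(t *testing.T, frozen time.Time, fn func()) {
	t.Helper()
	orig := timeutil.Now
	timeutil.Now = func() time.Time { return frozen }
	defer func() { timeutil.Now = orig }()
	fn()
}
```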
diff --git a/utils/unstructured/unstructured.go b/utils/unstructured/unstructured.go
index e92fb00e81..12e1034755 100644
--- a/utils/unstructured/unstructured.go
+++ b/utils/unstructured/unstructured.go
@@ -67,6 +67,24 @@ func ObjectToAnalysisRun(obj interface{}) *v1alpha1.AnalysisRun {
     return ar
 }
 
+func ObjectToExperiment(obj interface{}) *v1alpha1.Experiment {
+    un, ok := obj.(*unstructured.Unstructured)
+    if ok {
+        var ex v1alpha1.Experiment
+        err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &ex)
+        if err != nil {
+            log.Warnf("Failed to convert Experiment from Unstructured object: %v", err)
+            return nil
+        }
+        return &ex
+    }
+    ex, ok := obj.(*v1alpha1.Experiment)
+    if !ok {
+        log.Warn("Object is neither an Experiment nor unstructured")
+    }
+    return ex
+}
+
 var diffSeparator = regexp.MustCompile(`\n---`)
 
 // SplitYAML splits a YAML file into unstructured objects. Returns list of all unstructured objects
diff --git a/utils/unstructured/unstructured_test.go b/utils/unstructured/unstructured_test.go
index 5472646130..1dd9562c2c 100644
--- a/utils/unstructured/unstructured_test.go
+++ b/utils/unstructured/unstructured_test.go
@@ -155,11 +155,63 @@ spec:
     obj, err := StrToUnstructured(arYAML)
     assert.NotNil(t, obj)
     assert.NoError(t, err)
-    ar := ObjectToRollout(obj)
+    ar := ObjectToAnalysisRun(obj)
     assert.NotNil(t, ar)
-    ar2 := ObjectToRollout(ar)
+    ar2 := ObjectToAnalysisRun(ar)
     assert.Equal(t, ar, ar2)
     var invalid struct{}
-    ar3 := ObjectToRollout(&invalid)
+    ar3 := ObjectToAnalysisRun(&invalid)
     assert.Nil(t, ar3)
 }
+
+func TestObjectToExperiment(t *testing.T) {
+    exYAML := `
+apiVersion: argoproj.io/v1alpha1
+kind: Experiment
+metadata:
+  name: experiment-with-analysis
+spec:
+  templates:
+  - name: purple
+    selector:
+      matchLabels:
+        app: rollouts-demo
+    template:
+      metadata:
+        labels:
+          app: rollouts-demo
+      spec:
+        containers:
+        - name: rollouts-demo
+          image: argoproj/rollouts-demo:purple
+          imagePullPolicy: Always
+  - name: orange
+    selector:
+      matchLabels:
+        app: rollouts-demo
+    template:
+      metadata:
+        labels:
+          app: rollouts-demo
+      spec:
+        containers:
+        - name: rollouts-demo
+          image: argoproj/rollouts-demo:orange
+          imagePullPolicy: Always
+  analyses:
+  - name: random-fail
+    templateName: random-fail
+  - name: pass
+    templateName: pass
+`
+    obj, err := StrToUnstructured(exYAML)
+    assert.NotNil(t, obj)
+    assert.NoError(t, err)
+    ex := ObjectToExperiment(obj)
+    assert.NotNil(t, ex)
+    ex2 := ObjectToExperiment(ex)
+    assert.Equal(t, ex, ex2)
+    var invalid struct{}
+    ex3 := ObjectToExperiment(&invalid)
+    assert.Nil(t, ex3)
+}
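ObjectToExperiment mirrors the existing ObjectToRollout/ObjectToAnalysisRun converters, so a handler fed by a dynamic informer can accept either a typed Experiment or an unstructured object. A hypothetical wiring sketch (the experimentHandler function is illustrative only, not part of the patch):

```go
package example

import (
	log "github.com/sirupsen/logrus"
	"k8s.io/client-go/tools/cache"

	unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured"
)

// experimentHandler converts whatever the informer delivers into a typed Experiment
// before acting on it; a nil result means the object could not be converted and is skipped.
func experimentHandler() cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			ex := unstructuredutil.ObjectToExperiment(obj)
			if ex == nil {
				return
			}
			log.Infof("experiment added: %s/%s", ex.Namespace, ex.Name)
		},
	}
}
```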