From 44a033d7df89b6c69f5829c57021add331ece9ae Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Thu, 18 Jul 2024 17:09:56 +0800 Subject: [PATCH] Update release workflow Signed-off-by: Yi Chen --- .../workflows/{main.yaml => integration.yaml} | 33 ++-- .github/workflows/release-charts.yaml | 36 ++++ .github/workflows/release-docker.yaml | 113 +++++++++++ .github/workflows/release.yaml | 183 ++++-------------- Makefile | 20 +- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 129 ++++++------ 7 files changed, 278 insertions(+), 240 deletions(-) rename .github/workflows/{main.yaml => integration.yaml} (90%) create mode 100644 .github/workflows/release-charts.yaml create mode 100644 .github/workflows/release-docker.yaml diff --git a/.github/workflows/main.yaml b/.github/workflows/integration.yaml similarity index 90% rename from .github/workflows/main.yaml rename to .github/workflows/integration.yaml index 40af1cc3b..30cb6e3b7 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/integration.yaml @@ -1,13 +1,19 @@ -name: Pre-commit checks +name: Integration Test on: pull_request: branches: - master + - release-* push: branches: - master + - release-* + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.actor }} + cancel-in-progress: true jobs: build-api-docs: @@ -15,8 +21,6 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - with: - fetch-depth: "0" - name: The API documentation hasn't changed run: | @@ -32,36 +36,31 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - with: - fetch-depth: "0" - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: "go.mod" + go-version-file: go.mod - name: build sparkctl - run: | - make all + run: make build-sparkctl build-spark-operator: runs-on: ubuntu-latest steps: - name: Checkout source code uses: actions/checkout@v4 - with: - fetch-depth: "0" - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: "go.mod" + go-version-file: go.mod - - name: Run gofmt check + - name: Run go fmt check run: make fmt-check - - name: Run static analysis - run: make static-analysis + - name: Run go vet check + run: make go-vet - name: Run unit tests run: make unit-test @@ -90,8 +89,6 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - with: - fetch-depth: "0" - name: Install Helm uses: azure/setup-helm@v4 @@ -118,9 +115,9 @@ jobs: - name: Run chart-testing (list-changed) id: list-changed run: | - changed=$(ct list-changed) + changed=$(ct list-changed --target-branch ${{ github.ref }}) if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" + echo "changed=true" >> "$GITHUB_OUTPUT" fi - name: Detect CRDs drift between chart and manifest diff --git a/.github/workflows/release-charts.yaml b/.github/workflows/release-charts.yaml new file mode 100644 index 000000000..82caef448 --- /dev/null +++ b/.github/workflows/release-charts.yaml @@ -0,0 +1,36 @@ +name: Release Helm charts + +on: + release: + types: [prereleased, released] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + release: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Set up Helm + uses: azure/setup-helm@v4.2.0 + with: + version: v3.14.4 + + - name: 
Release Spark operator Helm chart + uses: helm/chart-releaser-action@v1.6.0 + env: + CR_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CR_RELEASE_NAME_TEMPLATE: spark-operator-chart-{{ .Version }} diff --git a/.github/workflows/release-docker.yaml b/.github/workflows/release-docker.yaml new file mode 100644 index 000000000..a1ebae893 --- /dev/null +++ b/.github/workflows/release-docker.yaml @@ -0,0 +1,113 @@ +name: Release Docker images + +on: + release: + types: [prereleased, released] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + IMAGE_REGISTRY: docker.io + IMAGE_REPOSITORY: chenyi015/spark-operator + +# Ref: https://docs.docker.com/build/ci/github-actions/multi-platform/#distribute-build-across-multiple-runners. +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + platform: + - linux/amd64 + - linux/arm64 + + steps: + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.IMAGE_REGISTRY }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + outputs: type=image,name=${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }},push-by-digest=true,name-canonical=true,push=true + + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + runs-on: ubuntu-latest + needs: + - build + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + + - name: Set up Docker buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }} + + - name: Login to container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.IMAGE_REGISTRY }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }}@sha256:%s ' *) + + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }}:${{ steps.meta.outputs.version }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 417ffb267..d3735579a 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,168 +1,55 @@ -name: Release Charts +name: Create release on: push: - branches: - - master -env: - REGISTRY_IMAGE: docker.io/kubeflow/spark-operator + tags: + - v*.*.* jobs: - build-skip-check: - runs-on: ubuntu-latest - outputs: - image_changed: ${{ steps.skip-check.outputs.image_changed }} - chart_changed: ${{ steps.skip-check.outputs.chart_changed }} - app_version_tag: ${{ steps.skip-check.outputs.app_version_tag }} - chart_version_tag: ${{ steps.skip-check.outputs.chart_version_tag }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Check if build should be skipped - id: skip-check - run: | - app_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) - chart_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "version: .*" | cut -c10-) - - # Initialize flags - image_changed=false - chart_changed=false - - if ! git rev-parse -q --verify "refs/tags/$app_version_tag"; then - image_changed=true - git tag $app_version_tag - git push origin $app_version_tag - echo "Spark-Operator Docker Image new tag: $app_version_tag released" - fi - - if ! git rev-parse -q --verify "refs/tags/spark-operator-chart-$chart_version_tag"; then - chart_changed=true - git tag spark-operator-chart-$chart_version_tag - git push origin spark-operator-chart-$chart_version_tag - echo "Spark-Operator Helm Chart new tag: spark-operator-chart-$chart_version_tag released" - fi - - echo "image_changed=${image_changed}" >> "$GITHUB_OUTPUT" - echo "chart_changed=${chart_changed}" >> "$GITHUB_OUTPUT" - echo "app_version_tag=${app_version_tag}" >> "$GITHUB_OUTPUT" - echo "chart_version_tag=${chart_version_tag}" >> "$GITHUB_OUTPUT" release: + permissions: + contents: write runs-on: ubuntu-latest - needs: - - build-skip-check - if: needs.build-skip-check.outputs.image_changed == 'true' - strategy: - fail-fast: false - matrix: - platform: - - linux/amd64 - - linux/arm64 steps: - name: Checkout uses: actions/checkout@v4 - with: - fetch-depth: 1 + - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - platform=${{ matrix.platform }} - echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV - echo "SCOPE=${platform//\//-}" >> $GITHUB_ENV - - name: Set up QEMU - timeout-minutes: 1 - uses: docker/setup-qemu-action@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Install Helm - uses: azure/setup-helm@v4 - with: - version: v3.14.3 - - name: Login to Packages Container registry - uses: docker/login-action@v3 - with: - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push Spark-Operator Docker Image to Docker Hub - id: build - uses: docker/build-push-action@v5 - with: - context: . 
-          platforms: ${{ matrix.platform }}
-          cache-to: type=gha,mode=max,scope=${{ env.SCOPE }}
-          cache-from: type=gha,scope=${{ env.SCOPE }}
-          push: true
-          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
-      - name: Export digest
-        run: |
-          mkdir -p /tmp/digests
-          digest="${{ steps.build.outputs.digest }}"
-          touch "/tmp/digests/${digest#sha256:}"
-      - name: Upload digest
-        uses: actions/upload-artifact@v4
-        with:
-          name: digests-${{ env.PLATFORM_PAIR }}
-          path: /tmp/digests/*
-          if-no-files-found: error
-          retention-days: 1
-  publish-image:
-    runs-on: ubuntu-latest
-    needs:
-      - release
-      - build-skip-check
-    if: needs.build-skip-check.outputs.image_changed == 'true'
-    steps:
-      - name: Download digests
-        uses: actions/download-artifact@v4
-        with:
-          pattern: digests-*
-          path: /tmp/digests
-          merge-multiple: true
-      - name: Setup Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Docker meta
-        id: meta
-        uses: docker/metadata-action@v5
-        with:
-          images: ${{ env.REGISTRY_IMAGE }}
-          tags: ${{ needs.build-skip-check.outputs.app_version_tag }}
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v4.2.0
         with:
-          registry: docker.io
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Create manifest list and push
-        working-directory: /tmp/digests
-        run: |
-          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
-            $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
-      - name: Inspect image
+          version: v3.14.4
+
+      - name: Package Helm charts
         run: |
-          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
-  publish-chart:
-    runs-on: ubuntu-latest
-    if: needs.build-skip-check.outputs.chart_changed == 'true'
-    needs:
-      - build-skip-check
-    steps:
-      - name: Checkout
+          for chart in $(ls charts); do
+            helm package charts/$chart
+          done
+
+      - name: Release
+        id: release
+        uses: softprops/action-gh-release@v2
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          prerelease: ${{ contains(github.ref, 'rc') }}
+          target_commitish: ${{ github.sha }}
+          files: |
+            spark-operator-v*.*.*.tgz
+          generate_release_notes: true
+
+      - name: Checkout to branch gh-pages
         uses: actions/checkout@v4
         with:
+          ref: gh-pages
           fetch-depth: 0
-      - name: Install Helm
-        uses: azure/setup-helm@v4
-        with:
-          version: v3.14.3
-      - name: Configure Git
+
+      - name: Update Helm charts repo index
         run: |
-          git config user.name "$GITHUB_ACTOR"
-          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
-      - name: Release Spark-Operator Helm Chart
-        uses: helm/chart-releaser-action@v1.6.0
-        env:
-          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-          CR_RELEASE_NAME_TEMPLATE: "spark-operator-chart-{{ .Version }}"
+          helm repo index --merge index.yaml --url https://github.com/${{ github.repository }}/releases/download/${{ github.ref_name }} .
+          git add index.yaml
+          git commit -s -m "Update index.yaml" || exit 0
+          git push
diff --git a/Makefile b/Makefile
index b7ae2b378..deefe965d 100644
--- a/Makefile
+++ b/Makefile
@@ -10,6 +10,10 @@ REPO=github.com/kubeflow/spark-operator
 
 all: clean-sparkctl build-sparkctl install-sparkctl
 
+.PHONY: build-operator
+build-operator: ## Build Spark operator
+	go build -o bin/spark-operator main.go
+
 build-sparkctl:
	[ ! -f "sparkctl/sparkctl-darwin-amd64" ] || [ !
-f "sparkctl/sparkctl-linux-amd64" ] && \ echo building using $(BUILDER) && \ @@ -71,16 +75,16 @@ clean: go clean -cache -testcache -r -x 2>&1 >/dev/null -rm -rf _output -unit-test: clean - @echo "running unit tests" - go test -v ./... -covermode=atomic +.PHONY: unit-test +unit-test: clean ## Run unit tests. + @echo "Running unit tests..." + go test $(shell go list ./... | grep -v /e2e) -coverprofile cover.out integration-test: clean @echo "running integration tests" go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:local -static-analysis: - @echo "running go vet" - # echo "Building using $(BUILDER)" - # go vet ./... - go vet $(REPO)... +.PHONY: go-vet +go-vet: ## Run go vet against code. + @echo "Running go vet..." + go vet ./... diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index b735a995c..0211c54bd 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.4.3 -appVersion: v1beta2-1.6.1-3.5.0 +version: v2.0.0-rc.0 +appVersion: v2.0.0-rc.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index a5a9c1bdc..33521daa9 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square) +![Version: v2.0.0-rc.0](https://img.shields.io/badge/Version-v2.0.0--rc.0-informational?style=flat-square) ![AppVersion: v2.0.0-rc.0](https://img.shields.io/badge/AppVersion-v2.0.0--rc.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -77,69 +77,70 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum ## Values -| Key | Type | Default | Description | -|-------------------------------------------|--------|------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| affinity | object | `{}` | Affinity for pod assignment | -| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. 
If enabled, users can specify batch scheduler name in spark application | -| commonLabels | object | `{}` | Common labels to add to the resources | -| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | -| envFrom | list | `[]` | Pod environment variable sources | -| fullnameOverride | string | `""` | String to override release name | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository | -| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. | -| imagePullSecrets | list | `[]` | Image pull secrets | -| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. | -| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate | -| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. | -| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | -| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace | -| logLevel | int | `2` | Set higher levels for more verbose logging | -| metrics.enable | bool | `true` | Enable prometheus metric scraping | -| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint | -| metrics.port | int | `10254` | Metrics port | -| metrics.portName | string | `"metrics"` | Metrics port name | -| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics | -| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) | -| nodeSelector | object | `{}` | Node labels for pod assignment | -| podDisruptionBudget.enabled | bool | `false` | Whether to deploy a PodDisruptionBudget | -| podDisruptionBudget.minAvailable | int | `1` | An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction | -| podAnnotations | object | `{}` | Additional annotations to add to the pod | -| podLabels | object | `{}` | Additional labels to add to the pod | -| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. | -| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. | -| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from | -| podMonitor.labels | object | `{}` | Pod monitor labels | -| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | -| podSecurityContext | object | `{}` | Pod security context | -| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. 
| -| rbac.annotations | object | `{}` | Optional annotations for rbac | -| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | -| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | -| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | -| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | -| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | -| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | -| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting | -| securityContext | object | `{}` | Operator container security context | -| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account | -| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps | -| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account | -| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account | -| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | -| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | -| sidecars | list | `[]` | Sidecar containers | -| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs | -| tolerations | list | `[]` | List of node taints to tolerate | -| uiService.enable | bool | `true` | Enable UI service creation for Spark application | -| volumeMounts | list | `[]` | | -| volumes | list | `[]` | | -| webhook.enable | bool | `false` | Enable webhook server | -| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | -| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects | -| webhook.port | int | `8080` | Webhook service port | -| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | -| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment | +| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. 
If enabled, users can specify the batch scheduler name in the SparkApplication |
+| commonLabels | object | `{}` | Common labels to add to the resources |
+| controllerThreads | int | `10` | Operator concurrency; higher values might increase memory usage |
+| envFrom | list | `[]` | Pod environment variable sources |
+| fullnameOverride | string | `""` | String to override release name |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
+| image.tag | string | `""` | If set, overrides the image tag; the default is the chart appVersion |
+| imagePullSecrets | list | `[]` | Image pull secrets |
+| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
+| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
+| labelSelectorFilter | string | `""` | A comma-separated list of key=value or key labels used to filter resources during watch and list, based on the specified labels. |
+| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
+| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
+| logLevel | int | `2` | Set higher levels for more verbose logging |
+| metrics.enable | bool | `true` | Enable prometheus metric scraping |
+| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
+| metrics.port | int | `10254` | Metrics port |
+| metrics.portName | string | `"metrics"` | Metrics port name |
+| metrics.prefix | string | `""` | Metric prefix; will be added to all exported metrics |
+| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
+| nodeSelector | object | `{}` | Node labels for pod assignment |
+| podAnnotations | object | `{}` | Additional annotations to add to the pod |
+| podDisruptionBudget | object | `{"enable":false,"minAvailable":1}` | Pod disruption budget settings to avoid service degradation |
+| podDisruptionBudget.enable | bool | `false` | Specifies whether to enable pod disruption budget. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
+| podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Requires `replicaCount` to be greater than 1 |
+| podLabels | object | `{}` | Additional labels to add to the pod |
+| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
+| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
+| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
+| podMonitor.labels | object | `{}` | Pod monitor labels |
+| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
+| podSecurityContext | object | `{}` | Pod security context |
+| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
+| rbac.annotations | object | `{}` | Optional annotations for rbac |
+| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
+| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
+| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
+| replicaCount | int | `1` | Desired number of pods; leader election will be enabled if this is greater than 1 |
+| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
+| resources | object | `{}` | Pod resource requests and limits. Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed'; if this happens, you may want to increase the memory limits. |
+| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) regardless of this setting |
+| securityContext | object | `{}` | Operator container security context |
+| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
+| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
+| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
+| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
+| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
+| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
+| sidecars | list | `[]` | Sidecar containers |
+| sparkJobNamespaces | list | `[""]` | List of namespaces in which to run spark jobs |
+| tolerations | list | `[]` | List of node taints to tolerate |
+| uiService.enable | bool | `true` | Enable UI service creation for Spark applications |
+| volumeMounts | list | `[]` | Operator container volume mounts |
+| volumes | list | `[]` | Operator pod volumes |
+| webhook.enable | bool | `false` | Enable webhook server |
+| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
+| webhook.objectSelector | string | `""` | The webhook will only operate on resources with the specified label(s), in the form key1=value1,key2=value2, or key in (value1,value2). Empty string (default) will operate on all objects |
+| webhook.port | int | `8080` | Webhook service port |
+| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
+| webhook.timeout | int | `30` | Webhook timeout in seconds |

## Maintainers
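The release-docker.yaml workflow above builds each platform image separately, pushes it by digest, and then merges the digests into a single multi-arch manifest list. Below is a minimal sketch of that merge step run by hand; the tag and the two digests are illustrative placeholders, and it assumes you are already logged in to the registry:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Image coordinates from the workflow's env block; TAG and the digests
# are illustrative placeholders, not values taken from the patch.
IMAGE="docker.io/chenyi015/spark-operator"
TAG="v2.0.0-rc.0"
AMD64_DIGEST="sha256:<digest-from-amd64-build>"
ARM64_DIGEST="sha256:<digest-from-arm64-build>"

# Merge the per-platform images (pushed by digest) into one manifest
# list, mirroring the "Create manifest list and push" step.
docker buildx imagetools create \
  -t "${IMAGE}:${TAG}" \
  "${IMAGE}@${AMD64_DIGEST}" \
  "${IMAGE}@${ARM64_DIGEST}"

# Confirm both platforms appear in the resulting manifest list.
docker buildx imagetools inspect "${IMAGE}:${TAG}"
```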
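The "Update Helm charts repo index" step in release.yaml regenerates `index.yaml` on the gh-pages branch so that new chart entries point at the GitHub release assets. A sketch of the same flow performed manually, assuming the packaged chart has already been uploaded to the release for the tag (the clone path is illustrative):

```bash
#!/usr/bin/env bash
set -euo pipefail

TAG="v2.0.0-rc.0"
# Release assets are served from releases/download/<tag>/, so the index
# must reference that URL rather than the release API's upload URL.
URL="https://github.com/kubeflow/spark-operator/releases/download/${TAG}"

git clone --branch gh-pages https://github.com/kubeflow/spark-operator.git gh-pages
cp "spark-operator-${TAG}.tgz" gh-pages/
cd gh-pages

# Merge the new chart entry into the existing index; the entry's URL
# becomes ${URL}/spark-operator-${TAG}.tgz.
helm repo index --merge index.yaml --url "${URL}" .

git add index.yaml
git commit -s -m "Update index.yaml"
git push
```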
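The integration workflow now drives all checks through Makefile targets, so the same checks can be run locally before pushing. Assuming a Go toolchain and a checkout of the repository:

```bash
make build-operator   # go build -o bin/spark-operator main.go
make go-vet           # go vet ./...
make fmt-check        # gofmt check, as run by the integration workflow
make unit-test        # unit tests over all packages except ./test/e2e
```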
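Finally, an illustrative install of the chart at the new version, overriding a few of the values documented in the table above; the release name and namespace are arbitrary:

```bash
helm repo add spark-operator https://kubeflow.github.io/spark-operator
helm repo update

helm install spark-operator spark-operator/spark-operator \
  --version v2.0.0-rc.0 \
  --namespace spark-operator \
  --create-namespace \
  --set webhook.enable=true \
  --set replicaCount=2 \
  --set 'sparkJobNamespaces={default}'
```

With `replicaCount=2`, leader election is enabled automatically, as noted in the `replicaCount` row of the values table.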