Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding support for setting spark job namespaces to all namespaces #2123

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,12 @@ spec:
- --zap-log-level={{ . }}
{{- end }}
{{- with .Values.spark.jobNamespaces }}
{{- if has "" . }}
- --namespaces=""
{{- else }}
- --namespaces={{ . | join "," }}
{{- end }}
{{- end }}
- --controller-threads={{ .Values.controller.workers }}
{{- with .Values.controller.uiService.enable }}
- --enable-ui-service=true
Expand Down
2 changes: 1 addition & 1 deletion charts/spark-operator-chart/templates/spark/rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ limitations under the License.

{{- if .Values.spark.rbac.create -}}
{{- range $jobNamespace := .Values.spark.jobNamespaces | default list }}
{{- if $jobNamespace }}
{{- if ne $jobNamespace "" }}

---
apiVersion: rbac.authorization.k8s.io/v1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,19 @@ limitations under the License.
*/}}

{{- if .Values.spark.serviceAccount.create }}
{{- range $sparkJobNamespace := .Values.spark.jobNamespaces | default list }}
{{- range $jobNamespace := .Values.spark.jobNamespaces | default list }}
{{- if ne $jobNamespace "" }}

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "spark-operator.spark.serviceAccountName" $ }}
namespace: {{ $sparkJobNamespace }}
namespace: {{ $jobNamespace }}
labels: {{ include "spark-operator.labels" $ | nindent 4 }}
{{- with $.Values.spark.serviceAccount.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
4 changes: 4 additions & 0 deletions charts/spark-operator-chart/templates/webhook/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,12 @@ spec:
- --zap-log-level={{ . }}
{{- end }}
{{- with .Values.spark.jobNamespaces }}
{{- if has "" . }}
- --namespaces=""
{{- else }}
- --namespaces={{ . | join "," }}
{{- end }}
{{- end }}
- --webhook-secret-name={{ include "spark-operator.webhook.secretName" . }}
- --webhook-secret-namespace={{ .Release.Namespace }}
- --webhook-svc-name={{ include "spark-operator.webhook.serviceName" . }}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,16 +33,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
objectSelector:
matchLabels:
sparkoperator.k8s.io/launched-by-spark-operator: "true"
Expand All @@ -66,16 +68,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]
Expand All @@ -96,16 +100,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,16 +33,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]
Expand All @@ -63,16 +65,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]
Expand Down
18 changes: 15 additions & 3 deletions charts/spark-operator-chart/tests/controller/deployment_test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -110,14 +110,26 @@ tests:

- it: Should contain `--namespaces` arg if `spark.jobNamespaces` is set
set:
spark.jobNamespaces:
- ns1
- ns2
spark:
jobNamespaces:
- ns1
- ns2
asserts:
- contains:
path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
content: --namespaces=ns1,ns2

- it: Should set namespaces to all namespaces (`""`) if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- default
asserts:
- contains:
path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
content: --namespaces=""

- it: Should contain `--controller-threads` arg if `controller.workers` is set
set:
controller:
Expand Down
39 changes: 8 additions & 31 deletions charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -66,59 +66,36 @@ tests:
path: metadata.annotations.key2
value: value2

- it: Should create multiple service accounts if `spark.jobNamespaces` is set
- it: Should create service account for every non-empty spark job namespace if `spark.jobNamespaces` is set with multiple values
set:
spark:
serviceAccount:
name: spark
jobNamespaces:
- ""
- ns1
- ns2
- ns3
documentIndex: 0
asserts:
- hasDocuments:
count: 3
count: 2
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
name: spark-operator-spark
namespace: ns1

- it: Should create multiple service accounts if `spark.jobNamespaces` is set
- it: Should create service account for every non-empty spark job namespace if `spark.jobNamespaces` is set with multiple values
set:
spark:
serviceAccount:
name: spark
jobNamespaces:
- ""
- ns1
- ns2
- ns3
documentIndex: 1
asserts:
- hasDocuments:
count: 3
count: 2
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
name: spark-operator-spark
namespace: ns2

- it: Should create multiple service accounts if `spark.jobNamespaces` is set
set:
spark:
serviceAccount:
name: spark
jobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 2
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns3
11 changes: 11 additions & 0 deletions charts/spark-operator-chart/tests/webhook/deployment_test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,17 @@ tests:
path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args
content: --namespaces=ns1,ns2

- it: Should set namespaces to all namespaces (`""`) if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- default
asserts:
- contains:
path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args
content: --namespaces=""

- it: Should contain `--enable-metrics` arg if `prometheus.metrics.enable` is set to `true`
set:
prometheus:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ tests:
path: webhooks[*].failurePolicy
value: Fail

- it: Should set namespaceSelector if sparkJobNamespaces is not empty
- it: Should set namespaceSelector if `spark.jobNamespaces` is set with non-empty strings
set:
spark:
jobNamespaces:
Expand All @@ -68,6 +68,19 @@ tests:
- ns2
- ns3

- it: Should not set namespaceSelector if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- ns1
- ns2
- ns3
asserts:
- notExists:
path: webhooks[*].namespaceSelector


- it: Should use the specified timeoutSeconds
set:
webhook:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ tests:
path: webhooks[*].failurePolicy
value: Fail

- it: Should set namespaceSelector if `spark.jobNamespaces` is not empty
- it: Should set namespaceSelector if `spark.jobNamespaces` is set with non-empty strings
set:
spark.jobNamespaces:
- ns1
Expand All @@ -67,6 +67,18 @@ tests:
- ns2
- ns3

- it: Should not set namespaceSelector if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- ns1
- ns2
- ns3
asserts:
- notExists:
path: webhooks[*].namespaceSelector

- it: Should use the specified timeoutSeconds
set:
webhook:
Expand Down
2 changes: 1 addition & 1 deletion cmd/operator/controller/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ func NewStartCommand() *cobra.Command {
}

command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{""}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")

command.Flags().BoolVar(&enableBatchScheduler, "enable-batch-scheduler", false, "Enable batch schedulers.")
Expand Down
2 changes: 1 addition & 1 deletion cmd/operator/webhook/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ func NewStartCommand() *cobra.Command {
}

command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{""}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
command.Flags().StringVar(&labelSelectorFilter, "label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.")
command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")

Expand Down
9 changes: 7 additions & 2 deletions internal/controller/scheduledsparkapplication/event_filter.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,14 @@ var _ predicate.Predicate = &EventFilter{}
// NewEventFilter creates a new EventFilter instance.
func NewEventFilter(namespaces []string) *EventFilter {
nsMap := make(map[string]bool)
for _, ns := range namespaces {
nsMap[ns] = true
if len(namespaces) == 0 {
nsMap[metav1.NamespaceAll] = true
} else {
for _, ns := range namespaces {
nsMap[ns] = true
}
}

return &EventFilter{
namespaces: nsMap,
}
Expand Down
Loading