From fa94e96f4dc825e1dbb0205b480fb193c10174ee Mon Sep 17 00:00:00 2001 From: Kevin Hogeland Date: Wed, 21 Aug 2019 16:38:22 -0700 Subject: [PATCH 1/6] Generate CRD specs, bump to v1beta2 --- docs/developer-guide.md | 7 + examples/spark-pi-configmap.yaml | 6 +- examples/spark-pi-prometheus.yaml | 6 +- examples/spark-pi-schedule.yaml | 6 +- examples/spark-pi.yaml | 6 +- examples/spark-py-pi.yaml | 6 +- hack/update-codegen.sh | 2 +- main.go | 9 - ...tor.k8s.io_scheduledsparkapplications.yaml | 2562 ++++++++++++++++ ...parkoperator.k8s.io_sparkapplications.yaml | 2571 +++++++++++++++++ manifest/spark-operator-crds.yaml | 266 -- .../sparkoperator.k8s.io/v1beta2/defaults.go | 74 + .../sparkoperator.k8s.io/v1beta2}/doc.go | 10 +- .../sparkoperator.k8s.io/v1beta2/register.go | 52 + .../sparkoperator.k8s.io/v1beta2/types.go | 563 ++++ .../v1beta2/zz_generated.deepcopy.go | 796 +++++ pkg/client/clientset/versioned/clientset.go | 20 +- .../versioned/fake/clientset_generated.go | 13 +- .../clientset/versioned/fake/register.go | 2 + .../clientset/versioned/scheme/register.go | 2 + .../typed/sparkoperator.k8s.io/v1beta2/doc.go | 22 + .../sparkoperator.k8s.io/v1beta2/fake/doc.go | 22 + .../fake/fake_scheduledsparkapplication.go | 130 + .../v1beta2/fake/fake_sparkapplication.go | 130 + .../fake/fake_sparkoperator.k8s.io_client.go | 46 + .../v1beta2/generated_expansion.go | 25 + .../v1beta2/scheduledsparkapplication.go | 176 ++ .../v1beta2/sparkapplication.go | 176 ++ .../v1beta2/sparkoperator.k8s.io_client.go | 97 + .../informers/externalversions/generic.go | 7 + .../sparkoperator.k8s.io/interface.go | 8 + .../sparkoperator.k8s.io/v1beta2/interface.go | 54 + .../v1beta2/scheduledsparkapplication.go | 91 + .../v1beta2/sparkapplication.go | 91 + .../v1beta2/expansion_generated.go | 37 + .../v1beta2/scheduledsparkapplication.go | 96 + .../v1beta2/sparkapplication.go | 96 + pkg/config/config.go | 8 +- pkg/config/config_test.go | 18 +- pkg/config/secret.go | 14 +- pkg/config/secret_test.go | 30 +- .../scheduledsparkapplication/controller.go | 66 +- .../controller_test.go | 190 +- .../controller_util.go | 4 +- pkg/controller/sparkapplication/controller.go | 170 +- .../sparkapplication/controller_test.go | 468 +-- .../sparkapplication/monitoring_config.go | 6 +- .../monitoring_config_test.go | 24 +- .../spark_pod_eventhandler.go | 2 +- .../sparkapplication/sparkapp_metrics.go | 20 +- .../sparkapplication/sparkapp_util.go | 38 +- pkg/controller/sparkapplication/sparkui.go | 8 +- .../sparkapplication/sparkui_test.go | 26 +- pkg/controller/sparkapplication/submission.go | 20 +- pkg/crd/crd.go | 123 - pkg/crd/scheduledsparkapplication/crd.go | 193 -- pkg/crd/sparkapplication/crd.go | 184 -- pkg/util/util.go | 8 +- pkg/webhook/patch.go | 38 +- pkg/webhook/patch_test.go | 158 +- pkg/webhook/webhook.go | 4 +- pkg/webhook/webhook_test.go | 18 +- 62 files changed, 8651 insertions(+), 1470 deletions(-) create mode 100644 manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml create mode 100644 manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml delete mode 100644 manifest/spark-operator-crds.yaml create mode 100644 pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go rename pkg/{crd => apis/sparkoperator.k8s.io/v1beta2}/doc.go (67%) create mode 100644 pkg/apis/sparkoperator.k8s.io/v1beta2/register.go create mode 100644 pkg/apis/sparkoperator.k8s.io/v1beta2/types.go create mode 100644 pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go create mode 100644 
pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go create mode 100644 pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go create mode 100644 pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go create mode 100644 pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go create mode 100644 pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go create mode 100644 pkg/client/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go create mode 100644 pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go create mode 100644 pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go delete mode 100644 pkg/crd/crd.go delete mode 100644 pkg/crd/scheduledsparkapplication/crd.go delete mode 100644 pkg/crd/sparkapplication/crd.go diff --git a/docs/developer-guide.md index c5214cf4d..f0326518d 100644 --- a/docs/developer-guide.md +++ b/docs/developer-guide.md @@ -51,6 +51,7 @@ Before building the operator the first time, run the following commands to get t $ go get -u k8s.io/code-generator/cmd/client-gen $ go get -u k8s.io/code-generator/cmd/deepcopy-gen $ go get -u k8s.io/code-generator/cmd/defaulter-gen +$ go get -u sigs.k8s.io/controller-tools/cmd/controller-gen ``` To update the auto-generated code, run the following command. (This step is only required if the CRD types have been changed): @@ -59,6 +60,12 @@ To update the auto-generated code, run the following command. (This step is only $ go generate ``` +To update the auto-generated CRD definitions, run the following command: + +```bash +$ controller-gen crd:trivialVersions=true,maxDescLen=0 paths="./pkg/apis/sparkoperator.k8s.io/v1beta2" output:crd:artifacts:config=./manifest/crds/ +``` + You can verify the current auto-generated code is up to date with: ```bash diff --git a/examples/spark-pi-configmap.yaml index 72cb5a016..c62cd6855 100644 --- a/examples/spark-pi-configmap.yaml +++ b/examples/spark-pi-configmap.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: spark-pi @@ -33,8 +33,8 @@ spec: configMap: name: dummy-cm driver: - cores: 0.1 - coreLimit: "200m" + cores: 1 + coreLimit: "1200m" memory: "512m" labels: version: 2.4.0 diff --git a/examples/spark-pi-prometheus.yaml b/examples/spark-pi-prometheus.yaml index bf6d0c536..2dba9510d 100644 --- a/examples/spark-pi-prometheus.yaml +++ b/examples/spark-pi-prometheus.yaml @@ -14,7 +14,7 @@ # limitations under the License. # -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: spark-pi @@ -32,8 +32,8 @@ spec: restartPolicy: type: Never driver: - cores: 0.1 - coreLimit: "200m" + cores: 1 + coreLimit: "1200m" memory: "512m" labels: version: 2.4.0 diff --git a/examples/spark-pi-schedule.yaml b/examples/spark-pi-schedule.yaml index 793a5afa7..6f4b49b04 100644 --- a/examples/spark-pi-schedule.yaml +++ b/examples/spark-pi-schedule.yaml @@ -14,7 +14,7 @@ # limitations under the License. # -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: ScheduledSparkApplication metadata: name: spark-pi-scheduled @@ -32,8 +32,8 @@ spec: restartPolicy: type: Never driver: - cores: 0.1 - coreLimit: "200m" + cores: 1 + coreLimit: "1200m" memory: "512m" labels: version: 2.4.0 diff --git a/examples/spark-pi.yaml b/examples/spark-pi.yaml index 607ac1393..ca0660d8f 100644 --- a/examples/spark-pi.yaml +++ b/examples/spark-pi.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: spark-pi @@ -34,8 +34,8 @@ spec: path: "/tmp" type: Directory driver: - cores: 0.1 - coreLimit: "200m" + cores: 1 + coreLimit: "1200m" memory: "512m" labels: version: 2.4.0 diff --git a/examples/spark-py-pi.yaml b/examples/spark-py-pi.yaml index 7e169771e..ab8dbf5d1 100644 --- a/examples/spark-py-pi.yaml +++ b/examples/spark-py-pi.yaml @@ -16,7 +16,7 @@ # Support for Python is experimental, and requires building SNAPSHOT image of Apache Spark, # with `imagePullPolicy` set to Always -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: pyspark-pi @@ -36,8 +36,8 @@ spec: onSubmissionFailureRetries: 5 onSubmissionFailureRetryInterval: 20 driver: - cores: 0.1 - coreLimit: "200m" + cores: 1 + coreLimit: "1200m" memory: "512m" labels: version: 2.4.0 diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index a3906faaf..1eda6f9a1 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -27,7 +27,7 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge # instead of the $GOPATH directly. For normal projects this can be dropped. ${CODEGEN_PKG}/generate-groups.sh "all" \ github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis \ - sparkoperator.k8s.io:v1alpha1,v1beta1 \ + sparkoperator.k8s.io:v1alpha1,v1beta1,v1beta2 \ --go-header-file "$(dirname ${BASH_SOURCE})/custom-boilerplate.go.txt" \ --output-base "$(dirname ${BASH_SOURCE})/../../../.." 
diff --git a/main.go b/main.go index e32d6faaf..0ce04ae19 100644 --- a/main.go +++ b/main.go @@ -46,7 +46,6 @@ import ( operatorConfig "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/controller/scheduledsparkapplication" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/controller/sparkapplication" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/crd" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/webhook" ) @@ -54,7 +53,6 @@ import ( var ( master = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") kubeConfig = flag.String("kubeConfig", "", "Path to a kube config. Only required if out-of-cluster.") - installCRDs = flag.Bool("install-crds", true, "Whether to install CRDs") controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds.") namespace = flag.String("namespace", apiv1.NamespaceAll, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") @@ -141,13 +139,6 @@ func main() { glog.Fatal(err) } - if *installCRDs { - err = crd.CreateOrUpdateCRDs(apiExtensionsClient) - if err != nil { - glog.Fatal(err) - } - } - crInformerFactory := buildCustomResourceInformerFactory(crClient) podInformerFactory := buildPodInformerFactory(kubeClient) diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml new file mode 100644 index 000000000..534888004 --- /dev/null +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -0,0 +1,2562 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: scheduledsparkapplications.sparkoperator.k8s.io +spec: + group: sparkoperator.k8s.io + names: + kind: ScheduledSparkApplication + plural: scheduledsparkapplications + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + concurrencyPolicy: + type: string + failedRunHistoryLimit: + format: int32 + type: integer + schedule: + type: string + successfulRunHistoryLimit: + format: int32 + type: integer + suspend: + type: boolean + template: + properties: + arguments: + items: + type: string + type: array + deps: + properties: + downloadTimeout: + format: int32 + minimum: 1 + type: integer + files: + items: + type: string + type: array + filesDownloadDir: + type: string + jars: + items: + type: string + type: array + jarsDownloadDir: + type: string + maxSimultaneousDownloads: + format: int32 + minimum: 1 + type: integer + pyFiles: + items: + type: string + type: array + type: object + driver: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: 
string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + configMaps: + items: + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + 
coreLimit: + type: string + cores: + format: int32 + minimum: 0 + type: integer + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + envSecretKeyRefs: + additionalProperties: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + type: object + envVars: + additionalProperties: + type: string + type: object + gpu: + properties: + name: + type: string + quantity: + format: int64 + type: integer + required: + - name + - quantity + type: object + hostNetwork: + type: boolean + image: + type: string + javaOptions: + type: string + labels: + additionalProperties: + type: string + type: object + memory: + type: string + memoryOverhead: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + podName: + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + schedulerName: + type: string + secrets: + items: + properties: + name: + type: string + path: + type: string + secretType: + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + serviceAccount: + type: string + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + 
type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + type: object + stdin: + type: 
boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + executor: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + configMaps: + items: + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + type: string + coreRequest: + type: string + cores: + format: int32 + minimum: 0 + type: integer + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + envSecretKeyRefs: + additionalProperties: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + type: object + envVars: + additionalProperties: + type: string + type: object + gpu: + properties: + name: + type: string + quantity: + format: int64 + type: integer + required: + - name + - quantity + type: object + hostNetwork: + type: boolean + image: + type: string + instances: + format: int32 + minimum: 1 + type: integer + javaOptions: + type: string + labels: + additionalProperties: + type: string + type: object + memory: + type: string + memoryOverhead: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + schedulerName: + type: string + secrets: + items: + properties: + name: + type: string + path: + type: string + secretType: + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + 
properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: 
+ format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + type: object + hadoopConfigMap: + type: string + image: + type: string + imagePullPolicy: + type: string + imagePullSecrets: + items: + type: string + type: array + initContainerImage: + type: string + mainApplicationFile: + type: string + mainClass: + type: string + memoryOverheadFactor: + type: string + mode: + enum: + - cluster + - client + type: string + monitoring: + properties: + exposeDriverMetrics: + type: boolean + exposeExecutorMetrics: + type: boolean + metricsProperties: + type: string + prometheus: + properties: + configFile: + type: string + configuration: + type: string + jmxExporterJar: + type: string + port: + format: int32 + minimum: 49151 + type: integer + required: + - jmxExporterJar + - port + type: object + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + 
additionalProperties: + type: string + type: object + pythonVersion: + enum: + - 2 + - 3 + type: string + restartPolicy: + properties: + onFailureRetries: + format: int32 + minimum: 0 + type: integer + onFailureRetryInterval: + format: int64 + minimum: 1 + type: integer + onSubmissionFailureRetries: + format: int32 + minimum: 0 + type: integer + onSubmissionFailureRetryInterval: + format: int64 + minimum: 1 + type: integer + type: + enum: + - Never + - Always + - OnFailure + type: string + type: object + retryInterval: + format: int64 + type: integer + sparkConf: + additionalProperties: + type: string + type: object + sparkConfigMap: + type: string + sparkVersion: + type: string + type: + enum: + - Java + - Python + - Scala + - R + type: string + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + 
readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + 
type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - mainApplicationFile + - sparkVersion + - type + type: object + required: + - schedule + - template + type: object + status: + properties: + lastRun: + format: date-time + type: string + lastRunName: + type: string + nextRun: + format: date-time + type: string + pastFailedRunNames: + items: + type: string + type: array + pastSuccessfulRunNames: + items: + type: string + type: array + reason: + type: string + scheduleState: + type: string + type: object + required: + - metadata + - spec + type: object + version: v1beta2 + versions: + - name: v1beta2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml new file mode 100644 index 000000000..09dbc1ce7 --- /dev/null +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -0,0 +1,2571 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: sparkapplications.sparkoperator.k8s.io +spec: + group: sparkoperator.k8s.io + names: + kind: SparkApplication + plural: sparkapplications + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + arguments: + items: + type: string + type: array + deps: + properties: + downloadTimeout: + format: int32 + minimum: 1 + type: integer + files: + items: + type: string + type: array + filesDownloadDir: + type: string + jars: + items: + type: string + type: array + jarsDownloadDir: + type: string + maxSimultaneousDownloads: + format: int32 + minimum: 1 + type: integer + pyFiles: + items: + type: string + type: array + type: object + driver: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + 
weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + configMaps: + items: + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + type: string + cores: + format: int32 + minimum: 0 + type: integer + dnsConfig: + properties: + nameservers: + items: + type: 
string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + envSecretKeyRefs: + additionalProperties: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + type: object + envVars: + additionalProperties: + type: string + type: object + gpu: + properties: + name: + type: string + quantity: + format: int64 + type: integer + required: + - name + - quantity + type: object + hostNetwork: + type: boolean + image: + type: string + javaOptions: + type: string + labels: + additionalProperties: + type: string + type: object + memory: + type: string + memoryOverhead: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + podName: + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + schedulerName: + type: string + secrets: + items: + properties: + name: + type: string + path: + type: string + secretType: + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + serviceAccount: + type: string + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + 
type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: 
boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + executor: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + 
preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + configMaps: + items: + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + type: string + coreRequest: + type: string + cores: + format: int32 + minimum: 0 + type: integer + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + envSecretKeyRefs: + additionalProperties: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + type: object + envVars: + additionalProperties: + type: string + type: object + gpu: + properties: + name: + type: string + quantity: + format: int64 + type: integer + required: + - name + - quantity + type: object + hostNetwork: + type: boolean + image: + type: string + instances: + format: int32 + minimum: 1 + type: integer + javaOptions: + type: string + labels: + additionalProperties: + type: string + type: object + memory: + type: string + memoryOverhead: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + schedulerName: + type: string + secrets: + items: + properties: + name: + type: string + path: + type: string + secretType: + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + properties: + fsGroup: + format: int64 + type: integer + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + 
key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + 
name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: string + - type: integer + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: string + - type: integer + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + type: object + hadoopConfigMap: + type: string + image: + type: string + imagePullPolicy: + type: string + imagePullSecrets: + items: + type: string + type: array + initContainerImage: + type: string + mainApplicationFile: + type: string + mainClass: + type: string + memoryOverheadFactor: + type: string + mode: + enum: + - cluster + - client + type: string + monitoring: + properties: + exposeDriverMetrics: + type: boolean + exposeExecutorMetrics: + type: boolean + metricsProperties: + type: string + prometheus: + properties: + configFile: + type: string + configuration: + type: string + jmxExporterJar: + type: string + port: + format: int32 + minimum: 49151 + type: integer + required: + - jmxExporterJar + - port + type: object + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + additionalProperties: + type: string + type: object + pythonVersion: + enum: + - 2 + - 3 + type: string + restartPolicy: + properties: + 
onFailureRetries: + format: int32 + minimum: 0 + type: integer + onFailureRetryInterval: + format: int64 + minimum: 1 + type: integer + onSubmissionFailureRetries: + format: int32 + minimum: 0 + type: integer + onSubmissionFailureRetryInterval: + format: int64 + minimum: 1 + type: integer + type: + enum: + - Never + - Always + - OnFailure + type: string + type: object + retryInterval: + format: int64 + type: integer + sparkConf: + additionalProperties: + type: string + type: object + sparkConfigMap: + type: string + sparkVersion: + type: string + type: + enum: + - Java + - Python + - Scala + - R + type: string + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: 
string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string 
+ storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - mainApplicationFile + - sparkVersion + - type + type: object + status: + properties: + applicationState: + properties: + errorMessage: + type: string + state: + type: string + required: + - state + type: object + driverInfo: + properties: + podName: + type: string + webUIAddress: + type: string + webUIIngressAddress: + type: string + webUIIngressName: + type: string + webUIPort: + format: int32 + type: integer + webUIServiceName: + type: string + type: object + executionAttempts: + format: int32 + type: integer + executorState: + additionalProperties: + type: string + type: object + lastSubmissionAttemptTime: + format: date-time + type: string + sparkApplicationId: + type: string + submissionAttempts: + format: int32 + type: integer + submissionID: + type: string + terminationTime: + format: date-time + type: string + required: + - driverInfo + type: object + required: + - metadata + - spec + type: object + version: v1beta2 + versions: + - name: v1beta2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/manifest/spark-operator-crds.yaml b/manifest/spark-operator-crds.yaml deleted file mode 100644 index 1eb2918a7..000000000 --- a/manifest/spark-operator-crds.yaml +++ /dev/null @@ -1,266 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: sparkapplications.sparkoperator.k8s.io -spec: - group: sparkoperator.k8s.io - names: - kind: SparkApplication - listKind: SparkApplicationList - plural: sparkapplications - shortNames: - - sparkapp - singular: sparkapplication - scope: Namespaced - validation: - openAPIV3Schema: - properties: - metadata: - properties: - name: - type: string - minLength: 1 - maxLength: 63 - spec: - properties: - image: - type: string - initContainerImage: - type: string - imagePullPolicy: - enum: - - Always - - Never - - IfNotPresent - imagePullSecrets: - type: array - items: - type: string - mainClass: - type: string - mainApplicationFile: - type: string - arguments: - type: array - items: - type: string - sparkConf: - type: object - sparkConfigMap: - type: string - hadoopConf: - type: object - hadoopConfigMap: - type: string - volumes: - type: array - items: - type: object - properties: - name: - type: string - deps: - properties: - downloadTimeout: - minimum: 1 - type: integer - maxSimultaneousDownloads: - minimum: 1 - type: integer - driver: - properties: - cores: - exclusiveMinimum: true - minimum: 0 - type: number - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - executor: - properties: - cores: - exclusiveMinimum: true - minimum: 0 - type: number - instances: - minimum: 1 - type: integer - nodeSelector: - type: object - failureRetries: - type: integer - retryInterval: - type: integer - mode: - enum: - - cluster - - client - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - prometheus: - properties: - port: - maximum: 49151 - minimum: 1024 - type: integer - pythonVersion: - enum: - - "2" - - "3" - restartPolicy: - properties: - onFailureRetries: - minimum: 0 - type: integer - onFailureRetryInterval: - minimum: 1 - type: integer - onSubmissionFailureRetries: - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - minimum: 1 - type: integer - type: - enum: - - Never - - OnFailure - - Always - type: - enum: - - Java - - Scala - - Python - - R - sparkVersion: - type: string - memoryOverheadFactor: - type: string - required: - - type - - sparkVersion - version: v1beta1 ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: scheduledsparkapplications.sparkoperator.k8s.io -spec: - group: sparkoperator.k8s.io - names: - kind: ScheduledSparkApplication - listKind: ScheduledSparkApplicationList - plural: scheduledsparkapplications - shortNames: - - scheduledsparkapp - singular: scheduledsparkapplication - scope: Namespaced - validation: - openAPIV3Schema: - properties: - spec: - properties: - concurrencyPolicy: - enum: - - Allow - - Forbid - - Replace - failedRunHistoryLimit: - minimum: 1 - type: integer - schedule: - type: string - successfulRunHistoryLimit: - minimum: 1 - type: integer - template: - properties: - deps: - properties: - downloadTimeout: - minimum: 1 - type: integer - maxSimultaneousDownloads: - minimum: 1 - type: integer - driver: - properties: - cores: - exclusiveMinimum: true - minimum: 0 - type: number - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - executor: - properties: - cores: - exclusiveMinimum: true - minimum: 0 - type: number - instances: - minimum: 1 - type: integer - mode: - enum: - - cluster - - client - monitoring: - properties: - prometheus: - 
properties: - port: - maximum: 49151 - minimum: 1024 - type: integer - pythonVersion: - enum: - - "2" - - "3" - restartPolicy: - properties: - onFailureRetries: - minimum: 0 - type: integer - onFailureRetryInterval: - minimum: 1 - type: integer - onSubmissionFailureRetries: - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - minimum: 1 - type: integer - type: - enum: - - Never - - OnFailure - - Always - type: - enum: - - Java - - Scala - - Python - - R - version: v1beta1 diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go new file mode 100644 index 000000000..e724c0399 --- /dev/null +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go @@ -0,0 +1,74 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +// SetSparkApplicationDefaults sets default values for certain fields of a SparkApplication. +func SetSparkApplicationDefaults(app *SparkApplication) { + if app == nil { + return + } + + if app.Spec.Mode == "" { + app.Spec.Mode = ClusterMode + } + + if app.Spec.RestartPolicy.Type == "" { + app.Spec.RestartPolicy.Type = Never + } + + if app.Spec.RestartPolicy.Type != Never { + // Default to 5 sec if the RestartPolicy is OnFailure or Always and these values aren't specified. + if app.Spec.RestartPolicy.OnFailureRetryInterval == nil { + app.Spec.RestartPolicy.OnFailureRetryInterval = new(int64) + *app.Spec.RestartPolicy.OnFailureRetryInterval = 5 + } + + if app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval == nil { + app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = new(int64) + *app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = 5 + } + } + + setDriverSpecDefaults(app.Spec.Driver) + setExecutorSpecDefaults(app.Spec.Executor) +} + +func setDriverSpecDefaults(spec DriverSpec) { + if spec.Cores == nil { + spec.Cores = new(int32) + *spec.Cores = 1 + } + if spec.Memory == nil { + spec.Memory = new(string) + *spec.Memory = "1g" + } +} + +func setExecutorSpecDefaults(spec ExecutorSpec) { + if spec.Cores == nil { + spec.Cores = new(int32) + *spec.Cores = 1 + } + if spec.Memory == nil { + spec.Memory = new(string) + *spec.Memory = "1g" + } + if spec.Instances == nil { + spec.Instances = new(int32) + *spec.Instances = 1 + } +} diff --git a/pkg/crd/doc.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go similarity index 67% rename from pkg/crd/doc.go rename to pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go index a9e0e57e9..93bd58c52 100644 --- a/pkg/crd/doc.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go @@ -14,8 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package crd +// +k8s:deepcopy-gen=package,register +// go:generate controller-gen crd:trivialVersions=true paths=. output:dir=. 
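[Editor's note, not part of the patch] The defaulting helper introduced in `defaults.go` above fills in mode and restart-policy values before submission. A minimal sketch of how it behaves on an otherwise empty application, assuming the repository's import path:

```go
package main

import (
	"fmt"

	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

func main() {
	app := &v1beta2.SparkApplication{}
	v1beta2.SetSparkApplicationDefaults(app)

	// With nothing set, Mode becomes "cluster" and the restart policy type becomes "Never".
	fmt.Println(app.Spec.Mode, app.Spec.RestartPolicy.Type)
}
```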
-// Package crd contains the definition of and code to work with the CustomResourceDefinition (CRD) -// for SparkApplication and ScheduledSparkApplication and a client to perform CRUD operations on -// instances of SparkApplication and ScheduledSparkApplication. +// Package v1beta2 is the v1beta2 version of the API. +// +groupName=sparkoperator.k8s.io +// +versionName=v1beta2 +package v1beta2 diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go new file mode 100644 index 000000000..f70432e43 --- /dev/null +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" +) + +const Version = "v1beta2" + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// SchemeGroupVersion is the group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: sparkoperator.GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group-qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SparkApplication{}, + &SparkApplicationList{}, + &ScheduledSparkApplication{}, + &ScheduledSparkApplicationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go new file mode 100644 index 000000000..a5463cf4c --- /dev/null +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -0,0 +1,563 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SparkApplicationType describes the type of a Spark application. +type SparkApplicationType string + +// Different types of Spark applications. 
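[Editor's note, not part of the patch] `register.go` above wires the new v1beta2 types into the client-go scheme machinery. A minimal sketch, again assuming the repository's import path, of registering the group/version into a scheme:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Prints "sparkoperator.k8s.io/v1beta2".
	fmt.Println(v1beta2.SchemeGroupVersion.String())
}
```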
+const ( + JavaApplicationType SparkApplicationType = "Java" + ScalaApplicationType SparkApplicationType = "Scala" + PythonApplicationType SparkApplicationType = "Python" + RApplicationType SparkApplicationType = "R" +) + +// DeployMode describes the type of deployment of a Spark application. +type DeployMode string + +// Different types of deployments. +const ( + ClusterMode DeployMode = "cluster" + ClientMode DeployMode = "client" + InClusterClientMode DeployMode = "in-cluster-client" +) + +// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application. +// This completely defines actions to be taken on any kind of Failures during an application run. +type RestartPolicy struct { + // +kubebuilder:validation:Enum={Never,Always,OnFailure} + Type RestartPolicyType `json:"type,omitempty"` + + // FailureRetries are the number of times to retry a failed application before giving up in a particular case. + // This is best effort and actual retry attempts can be >= the value specified due to caching. + // These are required if RestartPolicy is OnFailure. + // +kubebuilder:validation:Minimum=0 + OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` + // +kubebuilder:validation:Minimum=0 + OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` + + // Interval to wait between successive retries of a failed application. + // +kubebuilder:validation:Minimum=1 + OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` + // +kubebuilder:validation:Minimum=1 + OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` +} + +type RestartPolicyType string + +const ( + Never RestartPolicyType = "Never" + OnFailure RestartPolicyType = "OnFailure" + Always RestartPolicyType = "Always" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:defaulter-gen=true +// +kubebuilder:subresource:status + +type ScheduledSparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ScheduledSparkApplicationSpec `json:"spec"` + Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +} + +type ConcurrencyPolicy string + +const ( + // ConcurrencyAllow allows SparkApplications to run concurrently. + ConcurrencyAllow ConcurrencyPolicy = "Allow" + // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous + // one hasn't finished yet. + ConcurrencyForbid ConcurrencyPolicy = "Forbid" + // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. + ConcurrencyReplace ConcurrencyPolicy = "Replace" +) + +type ScheduledSparkApplicationSpec struct { + // Schedule is a cron schedule on which the application should run. + Schedule string `json:"schedule"` + // Template is a template from which SparkApplication instances can be created. + Template SparkApplicationSpec `json:"template"` + // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + // Optional. + // Defaults to false. + Suspend *bool `json:"suspend,omitempty"` + // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + // Optional. + // Defaults to 1. 
+ SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` + // FailedRunHistoryLimit is the number of past failed runs of the application to keep. + // Optional. + // Defaults to 1. + FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` +} + +type ScheduleState string + +const ( + FailedValidationState ScheduleState = "FailedValidation" + ScheduledState ScheduleState = "Scheduled" +) + +type ScheduledSparkApplicationStatus struct { + // LastRun is the time when the last run of the application started. + LastRun metav1.Time `json:"lastRun,omitempty"` + // NextRun is the time when the next run of the application will start. + NextRun metav1.Time `json:"nextRun,omitempty"` + // LastRunName is the name of the SparkApplication for the most recent run of the application. + LastRunName string `json:"lastRunName,omitempty"` + // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. + PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` + // PastFailedRunNames keeps the names of SparkApplications for past failed runs. + PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` + // ScheduleState is the current scheduling state of the application. + ScheduleState ScheduleState `json:"scheduleState,omitempty"` + // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. + Reason string `json:"reason,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ScheduledSparkApplicationList carries a list of ScheduledSparkApplication objects. +type ScheduledSparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledSparkApplication `json:"items,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:defaulter-gen=true +// +kubebuilder:subresource:status + +// SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager. +type SparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec SparkApplicationSpec `json:"spec"` + Status SparkApplicationStatus `json:"status,omitempty"` +} + +// SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. +// It carries every pieces of information a spark-submit command takes and recognizes. +type SparkApplicationSpec struct { + // Type tells the type of the Spark application. + // +kubebuilder:validation:Enum={Java,Python,Scala,R} + Type SparkApplicationType `json:"type"` + // SparkVersion is the version of Spark the application uses. + SparkVersion string `json:"sparkVersion"` + // Mode is the deployment mode of the Spark application. + // +kubebuilder:validation:Enum={cluster,client} + Mode DeployMode `json:"mode,omitempty"` + // Image is the container image for the driver, executor, and init-container. Any custom container images for the + // driver, executor, or init-container takes precedence over this. + // Optional. + Image *string `json:"image,omitempty"` + // InitContainerImage is the image of the init-container to use. Overrides Spec.Image if set. + // Optional. + InitContainerImage *string `json:"initContainerImage,omitempty"` + // ImagePullPolicy is the image pull policy for the driver, executor, and init-container. + // Optional. 
+ ImagePullPolicy *string `json:"imagePullPolicy,omitempty"` + // ImagePullSecrets is the list of image-pull secrets. + // Optional. + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + // MainClass is the fully-qualified main class of the Spark application. + // This only applies to Java/Scala Spark applications. + // Optional. + MainClass *string `json:"mainClass,omitempty"` + // MainFile is the path to a bundled JAR, Python, or R file of the application. + // Optional. + MainApplicationFile *string `json:"mainApplicationFile"` + // Arguments is a list of arguments to be passed to the application. + // Optional. + Arguments []string `json:"arguments,omitempty"` + // SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in + // spark-submit. + // Optional. + SparkConf map[string]string `json:"sparkConf,omitempty"` + // HadoopConf carries user-specified Hadoop configuration properties as they would use the the "--conf" option + // in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + // configuration properties. + // Optional. + HadoopConf map[string]string `json:"hadoopConf,omitempty"` + // SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. + // The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to. + // Optional. + SparkConfigMap *string `json:"sparkConfigMap,omitempty"` + // HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + // The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + // Optional. + HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"` + // Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors. + // Optional. + Volumes []apiv1.Volume `json:"volumes,omitempty"` + // Driver is the driver specification. + Driver DriverSpec `json:"driver"` + // Executor is the executor specification. + Executor ExecutorSpec `json:"executor"` + // Deps captures all possible types of dependencies of a Spark application. + // Optional. + Deps Dependencies `json:"deps,omitempty"` + // RestartPolicy defines the policy on if and in which conditions the controller should restart an application. + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` + // NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + // This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + // This field will be deprecated in future versions (at SparkApplicationSpec level). + // Optional. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // FailureRetries is the number of times to retry a failed application before giving up. + // This is best effort and actual retry attempts can be >= the value specified. + // Optional. + FailureRetries *int32 `json:"failureRetries,omitempty"` + // RetryInterval is the unit of intervals in seconds between submission retries. + // Optional. + RetryInterval *int64 `json:"retryInterval,omitempty"` + // This sets the major Python version of the docker + // image used to run the driver and executor containers. Can either be 2 or 3, default 2. + // Optional. 
+ // +kubebuilder:validation:Enum={2,3} + PythonVersion *string `json:"pythonVersion,omitempty"` + // This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + // For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + // be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + // Optional. + MemoryOverheadFactor *string `json:"memoryOverheadFactor,omitempty"` + // Monitoring configures how monitoring is handled. + // Optional. + Monitoring *MonitoringSpec `json:"monitoring,omitempty"` +} + +// ApplicationStateType represents the type of the current state of an application. +type ApplicationStateType string + +// Different states an application may have. +const ( + NewState ApplicationStateType = "" + SubmittedState ApplicationStateType = "SUBMITTED" + RunningState ApplicationStateType = "RUNNING" + CompletedState ApplicationStateType = "COMPLETED" + FailedState ApplicationStateType = "FAILED" + FailedSubmissionState ApplicationStateType = "SUBMISSION_FAILED" + PendingRerunState ApplicationStateType = "PENDING_RERUN" + InvalidatingState ApplicationStateType = "INVALIDATING" + SucceedingState ApplicationStateType = "SUCCEEDING" + FailingState ApplicationStateType = "FAILING" + UnknownState ApplicationStateType = "UNKNOWN" +) + +// ApplicationState tells the current state of the application and an error message in case of failures. +type ApplicationState struct { + State ApplicationStateType `json:"state"` + ErrorMessage string `json:"errorMessage,omitempty"` +} + +// ExecutorState tells the current state of an executor. +type ExecutorState string + +// Different states an executor may have. +const ( + ExecutorPendingState ExecutorState = "PENDING" + ExecutorRunningState ExecutorState = "RUNNING" + ExecutorCompletedState ExecutorState = "COMPLETED" + ExecutorFailedState ExecutorState = "FAILED" + ExecutorUnknownState ExecutorState = "UNKNOWN" +) + +// SparkApplicationStatus describes the current status of a Spark application. +type SparkApplicationStatus struct { + // SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods + SparkApplicationID string `json:"sparkApplicationId,omitempty"` + // SubmissionID is a unique ID of the current submission of the application. + SubmissionID string `json:"submissionID,omitempty"` + // LastSubmissionAttemptTime is the time for the last application submission attempt. + LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` + // CompletionTime is the time when the application runs to completion if it does. + TerminationTime metav1.Time `json:"terminationTime,omitempty"` + // DriverInfo has information about the driver. + DriverInfo DriverInfo `json:"driverInfo"` + // AppState tells the overall application state. + AppState ApplicationState `json:"applicationState,omitempty"` + // ExecutorState records the state of executors by executor Pod names. + ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` + // ExecutionAttempts is the total number of attempts to run a submitted application to completion. + // Incremented upon each attempted run of the application and reset upon invalidation. + ExecutionAttempts int32 `json:"executionAttempts,omitempty"` + // SubmissionAttempts is the total number of attempts to submit an application to run. + // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. 
+ SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SparkApplicationList carries a list of SparkApplication objects. +type SparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SparkApplication `json:"items,omitempty"` +} + +// Dependencies specifies all possible types of dependencies of a Spark application. +type Dependencies struct { + // Jars is a list of JAR files the Spark application depends on. + // Optional. + Jars []string `json:"jars,omitempty"` + // Files is a list of files the Spark application depends on. + // Optional. + Files []string `json:"files,omitempty"` + // PyFiles is a list of Python files the Spark application depends on. + // Optional. + PyFiles []string `json:"pyFiles,omitempty"` + // JarsDownloadDir is the location to download jars to in the driver and executors. + JarsDownloadDir *string `json:"jarsDownloadDir,omitempty"` + // FilesDownloadDir is the location to download files to in the driver and executors. + FilesDownloadDir *string `json:"filesDownloadDir,omitempty"` + // DownloadTimeout specifies the timeout in seconds before aborting the attempt to download + // and unpack dependencies from remote locations into the driver and executor pods. + // +kubebuilder:validation:Minimum=1 + DownloadTimeout *int32 `json:"downloadTimeout,omitempty"` + // MaxSimultaneousDownloads specifies the maximum number of remote dependencies to download + // simultaneously in a driver or executor pod. + // +kubebuilder:validation:Minimum=1 + MaxSimultaneousDownloads *int32 `json:"maxSimultaneousDownloads,omitempty"` +} + +// SparkPodSpec defines common things that can be customized for a Spark driver or executor pod. +// TODO: investigate if we should use v1.PodSpec and limit what can be set instead. +type SparkPodSpec struct { + // Cores is the number of CPU cores to request for the pod. + // Optional. + // +kubebuilder:validation:Minimum=0 + Cores *int32 `json:"cores,omitempty"` + // CoreLimit specifies a hard limit on CPU cores for the pod. + // Optional + CoreLimit *string `json:"coreLimit,omitempty"` + // Memory is the amount of memory to request for the pod. + // Optional. + Memory *string `json:"memory,omitempty"` + // MemoryOverhead is the amount of off-heap memory to allocate in cluster mode, in MiB unless otherwise specified. + // Optional. + MemoryOverhead *string `json:"memoryOverhead,omitempty"` + // GPU specifies GPU requirement for the pod. + // Optional. + GPU *GPUSpec `json:"gpu,omitempty"` + // Image is the container image to use. Overrides Spec.Image if set. + // Optional. + Image *string `json:"image,omitempty"` + // ConfigMaps carries information of other ConfigMaps to add to the pod. + // Optional. + ConfigMaps []NamePath `json:"configMaps,omitempty"` + // Secrets carries information of secrets to add to the pod. + // Optional. + Secrets []SecretInfo `json:"secrets,omitempty"` + // EnvVars carries the environment variables to add to the pod. + // Optional. + EnvVars map[string]string `json:"envVars,omitempty"` + // EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + // Optional. + EnvSecretKeyRefs map[string]NameKey `json:"envSecretKeyRefs,omitempty"` + // Labels are the Kubernetes labels to be added to the pod. + // Optional. + Labels map[string]string `json:"labels,omitempty"` + // Annotations are the Kubernetes annotations to be added to the pod. + // Optional. 
+ Annotations map[string]string `json:"annotations,omitempty"` + // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem. + // Optional. + VolumeMounts []apiv1.VolumeMount `json:"volumeMounts,omitempty"` + // Affinity specifies the affinity/anti-affinity settings for the pod. + // Optional. + Affinity *apiv1.Affinity `json:"affinity,omitempty"` + // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod. + // Optional. + Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + // SecurityContenxt specifies the PodSecurityContext to apply. + // Optional. + SecurityContenxt *apiv1.PodSecurityContext `json:"securityContext,omitempty"` + // SchedulerName specifies the scheduler that will be used for scheduling + // Optional. + SchedulerName *string `json:"schedulerName,omitempty"` + // Sidecars is a list of sidecar containers that run along side the main Spark container. + // Optional. + Sidecars []apiv1.Container `json:"sidecars,omitempty"` + // HostNetwork indicates whether to request host networking for the pod or not. + // Optional. + HostNetwork *bool `json:"hostNetwork,omitempty"` + // NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + // This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + // Optional. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // DnsConfig dns settings for the pod, following the Kubernetes specifications. + // Optional. + DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"` +} + +// DriverSpec is specification of the driver. +type DriverSpec struct { + SparkPodSpec `json:",inline"` + // PodName is the name of the driver pod that the user creates. This is used for the + // in-cluster client mode in which the user creates a client pod where the driver of + // the user application runs. It's an error to set this field if Mode is not + // in-cluster-client. + // Optional. + // +kubebuilder:validation:Pattern=[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* + PodName *string `json:"podName,omitempty"` + // ServiceAccount is the name of the Kubernetes service account used by the driver pod + // when requesting executor pods from the API server. + ServiceAccount *string `json:"serviceAccount,omitempty"` + // JavaOptions is a string of extra JVM options to pass to the driver. For instance, + // GC settings or other logging. + JavaOptions *string `json:"javaOptions,omitempty"` +} + +// ExecutorSpec is specification of the executor. +type ExecutorSpec struct { + SparkPodSpec `json:",inline"` + // Instances is the number of executor instances. + // Optional. + // +kubebuilder:validation:Minimum=1 + Instances *int32 `json:"instances,omitempty"` + // CoreRequest is the physical CPU core request for the executors. + // Optional. + CoreRequest *string `json:"coreRequest,omitempty"` + // JavaOptions is a string of extra JVM options to pass to the executors. For instance, + // GC settings or other logging. + JavaOptions *string `json:"javaOptions,omitempty"` +} + +// NamePath is a pair of a name and a path to which the named objects should be mounted to. +type NamePath struct { + Name string `json:"name"` + Path string `json:"path"` +} + +// SecretType tells the type of a secret. +type SecretType string + +// An enumeration of secret types supported. 
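[Editor's note, not part of the patch] Taken together, SparkApplicationSpec, SparkPodSpec, DriverSpec, and ExecutorSpec above describe what a v1beta2 application looks like in Go. A minimal sketch of constructing one; the name, image, and jar path are illustrative only:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

func int32Ptr(i int32) *int32 { return &i }
func strPtr(s string) *string { return &s }

// newPiApp builds a minimal Scala application using the new v1beta2 types.
func newPiApp() *v1beta2.SparkApplication {
	return &v1beta2.SparkApplication{
		ObjectMeta: metav1.ObjectMeta{Name: "spark-pi", Namespace: "default"},
		Spec: v1beta2.SparkApplicationSpec{
			Type:                v1beta2.ScalaApplicationType,
			Mode:                v1beta2.ClusterMode,
			SparkVersion:        "2.4.0",
			Image:               strPtr("gcr.io/spark-operator/spark:v2.4.0"),
			MainClass:           strPtr("org.apache.spark.examples.SparkPi"),
			MainApplicationFile: strPtr("local:///opt/spark/examples/jars/spark-examples.jar"),
			RestartPolicy:       v1beta2.RestartPolicy{Type: v1beta2.Never},
			Driver: v1beta2.DriverSpec{
				SparkPodSpec: v1beta2.SparkPodSpec{Cores: int32Ptr(1), Memory: strPtr("512m")},
			},
			Executor: v1beta2.ExecutorSpec{
				Instances:    int32Ptr(1),
				SparkPodSpec: v1beta2.SparkPodSpec{Cores: int32Ptr(1), Memory: strPtr("512m")},
			},
		},
	}
}

func main() { _ = newPiApp() }
```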
+const ( + // GCPServiceAccountSecret is for secrets from a GCP service account Json key file that needs + // the environment variable GOOGLE_APPLICATION_CREDENTIALS. + GCPServiceAccountSecret SecretType = "GCPServiceAccount" + // HadoopDelegationTokenSecret is for secrets from an Hadoop delegation token that needs the + // environment variable HADOOP_TOKEN_FILE_LOCATION. + HadoopDelegationTokenSecret SecretType = "HadoopDelegationToken" + // GenericType is for secrets that needs no special handling. + GenericType SecretType = "Generic" +) + +// DriverInfo captures information about the driver. +type DriverInfo struct { + WebUIServiceName string `json:"webUIServiceName,omitempty"` + // UI Details for the UI created via ClusterIP service accessible from within the cluster. + WebUIPort int32 `json:"webUIPort,omitempty"` + WebUIAddress string `json:"webUIAddress,omitempty"` + // Ingress Details if an ingress for the UI was created. + WebUIIngressName string `json:"webUIIngressName,omitempty"` + WebUIIngressAddress string `json:"webUIIngressAddress,omitempty"` + PodName string `json:"podName,omitempty"` +} + +// SecretInfo captures information of a secret. +type SecretInfo struct { + Name string `json:"name"` + Path string `json:"path"` + Type SecretType `json:"secretType"` +} + +// NameKey represents the name and key of a SecretKeyRef. +type NameKey struct { + Name string `json:"name"` + Key string `json:"key"` +} + +// MonitoringSpec defines the monitoring specification. +type MonitoringSpec struct { + // ExposeDriverMetrics specifies whether to expose metrics on the driver. + ExposeDriverMetrics bool `json:"exposeDriverMetrics"` + // ExposeExecutorMetrics specifies whether to expose metrics on the executors. + ExposeExecutorMetrics bool `json:"exposeExecutorMetrics"` + // MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + // Optional. + // If not specified, the content in spark-docker/conf/metrics.properties will be used. + MetricsProperties *string `json:"metricsProperties,omitempty"` + // Prometheus is for configuring the Prometheus JMX exporter. + // Optional. + Prometheus *PrometheusSpec `json:"prometheus,omitempty"` +} + +// PrometheusSpec defines the Prometheus specification when Prometheus is to be used for +// collecting and exposing metrics. +type PrometheusSpec struct { + // JmxExporterJar is the path to the Prometheus JMX exporter jar in the container. + JmxExporterJar string `json:"jmxExporterJar"` + // Port is the port of the HTTP server run by the Prometheus JMX exporter. + // Optional. + // If not specified, 8090 will be used as the default. + // +kubebuilder:validation:Minimum=1024 + // +kubebuilder:validation:Minimum=49151 + Port *int32 `json:"port"` + // ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + // ConfigFile takes precedence over Configuration, which is shown below. + ConfigFile *string `json:"configFile,omitempty"` + // Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + // Optional. + // If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + // Configuration has no effect if ConfigFile is set. + Configuration *string `json:"configuration,omitempty"` +} + +type GPUSpec struct { + // Name is GPU resource name, such as: nvidia.com/gpu or amd.com/gpu + Name string `json:"name"` + // Quantity is the number of GPUs to request for driver or executor. 
+ Quantity int64 `json:"quantity"`
+}
+
+// PrometheusMonitoringEnabled returns whether Prometheus monitoring is enabled.
+func (s *SparkApplication) PrometheusMonitoringEnabled() bool {
+ return s.Spec.Monitoring != nil && s.Spec.Monitoring.Prometheus != nil
+}
+
+// HasPrometheusConfigFile returns whether Prometheus monitoring uses a configuration file in the container.
+func (s *SparkApplication) HasPrometheusConfigFile() bool {
+ return s.PrometheusMonitoringEnabled() &&
+  s.Spec.Monitoring.Prometheus.ConfigFile != nil &&
+  *s.Spec.Monitoring.Prometheus.ConfigFile != ""
+}
+
+// ExposeDriverMetrics returns whether driver metrics should be exposed.
+func (s *SparkApplication) ExposeDriverMetrics() bool {
+ return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeDriverMetrics
+}
+
+// ExposeExecutorMetrics returns whether executor metrics should be exposed.
+func (s *SparkApplication) ExposeExecutorMetrics() bool {
+ return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeExecutorMetrics
+}
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..7945151e2
--- /dev/null
+++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,796 @@
+// +build !ignore_autogenerated
+
+// Code generated by k8s code-generator DO NOT EDIT.
+
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApplicationState) DeepCopyInto(out *ApplicationState) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState.
+func (in *ApplicationState) DeepCopy() *ApplicationState {
+ if in == nil {
+  return nil
+ }
+ out := new(ApplicationState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Dependencies) DeepCopyInto(out *Dependencies) { + *out = *in + if in.Jars != nil { + in, out := &in.Jars, &out.Jars + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PyFiles != nil { + in, out := &in.PyFiles, &out.PyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.JarsDownloadDir != nil { + in, out := &in.JarsDownloadDir, &out.JarsDownloadDir + *out = new(string) + **out = **in + } + if in.FilesDownloadDir != nil { + in, out := &in.FilesDownloadDir, &out.FilesDownloadDir + *out = new(string) + **out = **in + } + if in.DownloadTimeout != nil { + in, out := &in.DownloadTimeout, &out.DownloadTimeout + *out = new(int32) + **out = **in + } + if in.MaxSimultaneousDownloads != nil { + in, out := &in.MaxSimultaneousDownloads, &out.MaxSimultaneousDownloads + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies. +func (in *Dependencies) DeepCopy() *Dependencies { + if in == nil { + return nil + } + out := new(Dependencies) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverInfo) DeepCopyInto(out *DriverInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo. +func (in *DriverInfo) DeepCopy() *DriverInfo { + if in == nil { + return nil + } + out := new(DriverInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { + *out = *in + in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec) + if in.PodName != nil { + in, out := &in.PodName, &out.PodName + *out = new(string) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(string) + **out = **in + } + if in.JavaOptions != nil { + in, out := &in.JavaOptions, &out.JavaOptions + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. +func (in *DriverSpec) DeepCopy() *DriverSpec { + if in == nil { + return nil + } + out := new(DriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { + *out = *in + in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec) + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.CoreRequest != nil { + in, out := &in.CoreRequest, &out.CoreRequest + *out = new(string) + **out = **in + } + if in.JavaOptions != nil { + in, out := &in.JavaOptions, &out.JavaOptions + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. +func (in *ExecutorSpec) DeepCopy() *ExecutorSpec { + if in == nil { + return nil + } + out := new(ExecutorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GPUSpec) DeepCopyInto(out *GPUSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec. +func (in *GPUSpec) DeepCopy() *GPUSpec { + if in == nil { + return nil + } + out := new(GPUSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { + *out = *in + if in.MetricsProperties != nil { + in, out := &in.MetricsProperties, &out.MetricsProperties + *out = new(string) + **out = **in + } + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. +func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { + if in == nil { + return nil + } + out := new(MonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameKey) DeepCopyInto(out *NameKey) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey. +func (in *NameKey) DeepCopy() *NameKey { + if in == nil { + return nil + } + out := new(NameKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamePath) DeepCopyInto(out *NamePath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath. +func (in *NamePath) DeepCopy() *NamePath { + if in == nil { + return nil + } + out := new(NamePath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.ConfigFile != nil { + in, out := &in.ConfigFile, &out.ConfigFile + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. +func (in *PrometheusSpec) DeepCopy() *PrometheusSpec { + if in == nil { + return nil + } + out := new(PrometheusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) { + *out = *in + if in.OnSubmissionFailureRetries != nil { + in, out := &in.OnSubmissionFailureRetries, &out.OnSubmissionFailureRetries + *out = new(int32) + **out = **in + } + if in.OnFailureRetries != nil { + in, out := &in.OnFailureRetries, &out.OnFailureRetries + *out = new(int32) + **out = **in + } + if in.OnSubmissionFailureRetryInterval != nil { + in, out := &in.OnSubmissionFailureRetryInterval, &out.OnSubmissionFailureRetryInterval + *out = new(int64) + **out = **in + } + if in.OnFailureRetryInterval != nil { + in, out := &in.OnFailureRetryInterval, &out.OnFailureRetryInterval + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy. +func (in *RestartPolicy) DeepCopy() *RestartPolicy { + if in == nil { + return nil + } + out := new(RestartPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication. +func (in *ScheduledSparkApplication) DeepCopy() *ScheduledSparkApplication { + if in == nil { + return nil + } + out := new(ScheduledSparkApplication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledSparkApplication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScheduledSparkApplication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList. +func (in *ScheduledSparkApplicationList) DeepCopy() *ScheduledSparkApplicationList { + if in == nil { + return nil + } + out := new(ScheduledSparkApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledSparkApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplicationSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.SuccessfulRunHistoryLimit != nil { + in, out := &in.SuccessfulRunHistoryLimit, &out.SuccessfulRunHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedRunHistoryLimit != nil { + in, out := &in.FailedRunHistoryLimit, &out.FailedRunHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec. +func (in *ScheduledSparkApplicationSpec) DeepCopy() *ScheduledSparkApplicationSpec { + if in == nil { + return nil + } + out := new(ScheduledSparkApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkApplicationStatus) { + *out = *in + in.LastRun.DeepCopyInto(&out.LastRun) + in.NextRun.DeepCopyInto(&out.NextRun) + if in.PastSuccessfulRunNames != nil { + in, out := &in.PastSuccessfulRunNames, &out.PastSuccessfulRunNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PastFailedRunNames != nil { + in, out := &in.PastFailedRunNames, &out.PastFailedRunNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus. +func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplicationStatus { + if in == nil { + return nil + } + out := new(ScheduledSparkApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretInfo) DeepCopyInto(out *SecretInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo. +func (in *SecretInfo) DeepCopy() *SecretInfo { + if in == nil { + return nil + } + out := new(SecretInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkApplication) DeepCopyInto(out *SparkApplication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication. +func (in *SparkApplication) DeepCopy() *SparkApplication { + if in == nil { + return nil + } + out := new(SparkApplication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkApplication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SparkApplication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList. +func (in *SparkApplicationList) DeepCopy() *SparkApplicationList { + if in == nil { + return nil + } + out := new(SparkApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.InitContainerImage != nil { + in, out := &in.InitContainerImage, &out.InitContainerImage + *out = new(string) + **out = **in + } + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MainClass != nil { + in, out := &in.MainClass, &out.MainClass + *out = new(string) + **out = **in + } + if in.MainApplicationFile != nil { + in, out := &in.MainApplicationFile, &out.MainApplicationFile + *out = new(string) + **out = **in + } + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SparkConf != nil { + in, out := &in.SparkConf, &out.SparkConf + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.HadoopConf != nil { + in, out := &in.HadoopConf, &out.HadoopConf + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SparkConfigMap != nil { + in, out := &in.SparkConfigMap, &out.SparkConfigMap + *out = new(string) + **out = **in + } + if in.HadoopConfigMap != nil { + in, out := &in.HadoopConfigMap, &out.HadoopConfigMap + *out = new(string) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Driver.DeepCopyInto(&out.Driver) + in.Executor.DeepCopyInto(&out.Executor) + in.Deps.DeepCopyInto(&out.Deps) + in.RestartPolicy.DeepCopyInto(&out.RestartPolicy) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FailureRetries != nil { + in, out := &in.FailureRetries, &out.FailureRetries + *out = new(int32) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(int64) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.MemoryOverheadFactor != nil { + in, out := &in.MemoryOverheadFactor, &out.MemoryOverheadFactor + *out = 
new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec. +func (in *SparkApplicationSpec) DeepCopy() *SparkApplicationSpec { + if in == nil { + return nil + } + out := new(SparkApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) { + *out = *in + in.LastSubmissionAttemptTime.DeepCopyInto(&out.LastSubmissionAttemptTime) + in.TerminationTime.DeepCopyInto(&out.TerminationTime) + out.DriverInfo = in.DriverInfo + out.AppState = in.AppState + if in.ExecutorState != nil { + in, out := &in.ExecutorState, &out.ExecutorState + *out = make(map[string]ExecutorState, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus. +func (in *SparkApplicationStatus) DeepCopy() *SparkApplicationStatus { + if in == nil { + return nil + } + out := new(SparkApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) { + *out = *in + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(int32) + **out = **in + } + if in.CoreLimit != nil { + in, out := &in.CoreLimit, &out.CoreLimit + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.MemoryOverhead != nil { + in, out := &in.MemoryOverhead, &out.MemoryOverhead + *out = new(string) + **out = **in + } + if in.GPU != nil { + in, out := &in.GPU, &out.GPU + *out = new(GPUSpec) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]NamePath, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretInfo, len(*in)) + copy(*out, *in) + } + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EnvSecretKeyRefs != nil { + in, out := &in.EnvSecretKeyRefs, &out.EnvSecretKeyRefs + *out = make(map[string]NameKey, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContenxt != nil { + in, out := &in.SecurityContenxt, &out.SecurityContenxt + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(v1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec. +func (in *SparkPodSpec) DeepCopy() *SparkPodSpec { + if in == nil { + return nil + } + out := new(SparkPodSpec) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 4662f28b2..a4449e0d9 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -23,6 +23,7 @@ package versioned import ( sparkoperatorv1alpha1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1alpha1" sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" + sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -32,8 +33,9 @@ type Interface interface { Discovery() discovery.DiscoveryInterface SparkoperatorV1alpha1() sparkoperatorv1alpha1.SparkoperatorV1alpha1Interface SparkoperatorV1beta1() sparkoperatorv1beta1.SparkoperatorV1beta1Interface + SparkoperatorV1beta2() sparkoperatorv1beta2.SparkoperatorV1beta2Interface // Deprecated: please explicitly pick a version if possible. - Sparkoperator() sparkoperatorv1beta1.SparkoperatorV1beta1Interface + Sparkoperator() sparkoperatorv1beta2.SparkoperatorV1beta2Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -42,6 +44,7 @@ type Clientset struct { *discovery.DiscoveryClient sparkoperatorV1alpha1 *sparkoperatorv1alpha1.SparkoperatorV1alpha1Client sparkoperatorV1beta1 *sparkoperatorv1beta1.SparkoperatorV1beta1Client + sparkoperatorV1beta2 *sparkoperatorv1beta2.SparkoperatorV1beta2Client } // SparkoperatorV1alpha1 retrieves the SparkoperatorV1alpha1Client @@ -54,10 +57,15 @@ func (c *Clientset) SparkoperatorV1beta1() sparkoperatorv1beta1.SparkoperatorV1b return c.sparkoperatorV1beta1 } +// SparkoperatorV1beta2 retrieves the SparkoperatorV1beta2Client +func (c *Clientset) SparkoperatorV1beta2() sparkoperatorv1beta2.SparkoperatorV1beta2Interface { + return c.sparkoperatorV1beta2 +} + // Deprecated: Sparkoperator retrieves the default version of SparkoperatorClient. // Please explicitly pick a version. 
-func (c *Clientset) Sparkoperator() sparkoperatorv1beta1.SparkoperatorV1beta1Interface { - return c.sparkoperatorV1beta1 +func (c *Clientset) Sparkoperator() sparkoperatorv1beta2.SparkoperatorV1beta2Interface { + return c.sparkoperatorV1beta2 } // Discovery retrieves the DiscoveryClient @@ -84,6 +92,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.sparkoperatorV1beta2, err = sparkoperatorv1beta2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -98,6 +110,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.sparkoperatorV1alpha1 = sparkoperatorv1alpha1.NewForConfigOrDie(c) cs.sparkoperatorV1beta1 = sparkoperatorv1beta1.NewForConfigOrDie(c) + cs.sparkoperatorV1beta2 = sparkoperatorv1beta2.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -108,6 +121,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.sparkoperatorV1alpha1 = sparkoperatorv1alpha1.New(c) cs.sparkoperatorV1beta1 = sparkoperatorv1beta1.New(c) + cs.sparkoperatorV1beta2 = sparkoperatorv1beta2.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 4a325266b..80a836431 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -26,6 +26,8 @@ import ( fakesparkoperatorv1alpha1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1alpha1/fake" sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" fakesparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake" + sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" + fakesparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -85,7 +87,12 @@ func (c *Clientset) SparkoperatorV1beta1() sparkoperatorv1beta1.SparkoperatorV1b return &fakesparkoperatorv1beta1.FakeSparkoperatorV1beta1{Fake: &c.Fake} } -// Sparkoperator retrieves the SparkoperatorV1beta1Client -func (c *Clientset) Sparkoperator() sparkoperatorv1beta1.SparkoperatorV1beta1Interface { - return &fakesparkoperatorv1beta1.FakeSparkoperatorV1beta1{Fake: &c.Fake} +// SparkoperatorV1beta2 retrieves the SparkoperatorV1beta2Client +func (c *Clientset) SparkoperatorV1beta2() sparkoperatorv1beta2.SparkoperatorV1beta2Interface { + return &fakesparkoperatorv1beta2.FakeSparkoperatorV1beta2{Fake: &c.Fake} +} + +// Sparkoperator retrieves the SparkoperatorV1beta2Client +func (c *Clientset) Sparkoperator() sparkoperatorv1beta2.SparkoperatorV1beta2Interface { + return &fakesparkoperatorv1beta2.FakeSparkoperatorV1beta2{Fake: &c.Fake} } diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index a37a8f01b..d2139dccf 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ 
b/pkg/client/clientset/versioned/fake/register.go @@ -23,6 +23,7 @@ package fake import ( sparkoperatorv1alpha1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1" sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -36,6 +37,7 @@ var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ sparkoperatorv1alpha1.AddToScheme, sparkoperatorv1beta1.AddToScheme, + sparkoperatorv1beta2.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index e0ceeab7f..039d155be 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -23,6 +23,7 @@ package scheme import ( sparkoperatorv1alpha1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1" sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -36,6 +37,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ sparkoperatorv1alpha1.AddToScheme, sparkoperatorv1beta1.AddToScheme, + sparkoperatorv1beta2.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go new file mode 100644 index 000000000..a148085db --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go @@ -0,0 +1,22 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta2 diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go new file mode 100644 index 000000000..4179d57b2 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go @@ -0,0 +1,22 @@ +// Code generated by k8s code-generator DO NOT EDIT. 
+ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go new file mode 100644 index 000000000..cbe57616d --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go @@ -0,0 +1,130 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeScheduledSparkApplications implements ScheduledSparkApplicationInterface +type FakeScheduledSparkApplications struct { + Fake *FakeSparkoperatorV1beta2 + ns string +} + +var scheduledsparkapplicationsResource = schema.GroupVersionResource{Group: "sparkoperator.k8s.io", Version: "v1beta2", Resource: "scheduledsparkapplications"} + +var scheduledsparkapplicationsKind = schema.GroupVersionKind{Group: "sparkoperator.k8s.io", Version: "v1beta2", Kind: "ScheduledSparkApplication"} + +// Get takes name of the scheduledSparkApplication, and returns the corresponding scheduledSparkApplication object, and an error if there is any. +func (c *FakeScheduledSparkApplications) Get(name string, options v1.GetOptions) (result *v1beta2.ScheduledSparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(scheduledsparkapplicationsResource, c.ns, name), &v1beta2.ScheduledSparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ScheduledSparkApplication), err +} + +// List takes label and field selectors, and returns the list of ScheduledSparkApplications that match those selectors. +func (c *FakeScheduledSparkApplications) List(opts v1.ListOptions) (result *v1beta2.ScheduledSparkApplicationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(scheduledsparkapplicationsResource, scheduledsparkapplicationsKind, c.ns, opts), &v1beta2.ScheduledSparkApplicationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.ScheduledSparkApplicationList{ListMeta: obj.(*v1beta2.ScheduledSparkApplicationList).ListMeta} + for _, item := range obj.(*v1beta2.ScheduledSparkApplicationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested scheduledSparkApplications. +func (c *FakeScheduledSparkApplications) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(scheduledsparkapplicationsResource, c.ns, opts)) + +} + +// Create takes the representation of a scheduledSparkApplication and creates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any. +func (c *FakeScheduledSparkApplications) Create(scheduledSparkApplication *v1beta2.ScheduledSparkApplication) (result *v1beta2.ScheduledSparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(scheduledsparkapplicationsResource, c.ns, scheduledSparkApplication), &v1beta2.ScheduledSparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ScheduledSparkApplication), err +} + +// Update takes the representation of a scheduledSparkApplication and updates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any. +func (c *FakeScheduledSparkApplications) Update(scheduledSparkApplication *v1beta2.ScheduledSparkApplication) (result *v1beta2.ScheduledSparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(scheduledsparkapplicationsResource, c.ns, scheduledSparkApplication), &v1beta2.ScheduledSparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ScheduledSparkApplication), err +} + +// Delete takes name of the scheduledSparkApplication and deletes it. Returns an error if one occurs. +func (c *FakeScheduledSparkApplications) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(scheduledsparkapplicationsResource, c.ns, name), &v1beta2.ScheduledSparkApplication{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeScheduledSparkApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(scheduledsparkapplicationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.ScheduledSparkApplicationList{}) + return err +} + +// Patch applies the patch and returns the patched scheduledSparkApplication. +func (c *FakeScheduledSparkApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ScheduledSparkApplication, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(scheduledsparkapplicationsResource, c.ns, name, pt, data, subresources...), &v1beta2.ScheduledSparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ScheduledSparkApplication), err +} diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go new file mode 100644 index 000000000..f5bdf71ee --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go @@ -0,0 +1,130 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeSparkApplications implements SparkApplicationInterface +type FakeSparkApplications struct { + Fake *FakeSparkoperatorV1beta2 + ns string +} + +var sparkapplicationsResource = schema.GroupVersionResource{Group: "sparkoperator.k8s.io", Version: "v1beta2", Resource: "sparkapplications"} + +var sparkapplicationsKind = schema.GroupVersionKind{Group: "sparkoperator.k8s.io", Version: "v1beta2", Kind: "SparkApplication"} + +// Get takes name of the sparkApplication, and returns the corresponding sparkApplication object, and an error if there is any. +func (c *FakeSparkApplications) Get(name string, options v1.GetOptions) (result *v1beta2.SparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(sparkapplicationsResource, c.ns, name), &v1beta2.SparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.SparkApplication), err +} + +// List takes label and field selectors, and returns the list of SparkApplications that match those selectors. +func (c *FakeSparkApplications) List(opts v1.ListOptions) (result *v1beta2.SparkApplicationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(sparkapplicationsResource, sparkapplicationsKind, c.ns, opts), &v1beta2.SparkApplicationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.SparkApplicationList{ListMeta: obj.(*v1beta2.SparkApplicationList).ListMeta} + for _, item := range obj.(*v1beta2.SparkApplicationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested sparkApplications. 
+func (c *FakeSparkApplications) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(sparkapplicationsResource, c.ns, opts)) + +} + +// Create takes the representation of a sparkApplication and creates it. Returns the server's representation of the sparkApplication, and an error, if there is any. +func (c *FakeSparkApplications) Create(sparkApplication *v1beta2.SparkApplication) (result *v1beta2.SparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(sparkapplicationsResource, c.ns, sparkApplication), &v1beta2.SparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.SparkApplication), err +} + +// Update takes the representation of a sparkApplication and updates it. Returns the server's representation of the sparkApplication, and an error, if there is any. +func (c *FakeSparkApplications) Update(sparkApplication *v1beta2.SparkApplication) (result *v1beta2.SparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(sparkapplicationsResource, c.ns, sparkApplication), &v1beta2.SparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.SparkApplication), err +} + +// Delete takes name of the sparkApplication and deletes it. Returns an error if one occurs. +func (c *FakeSparkApplications) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(sparkapplicationsResource, c.ns, name), &v1beta2.SparkApplication{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeSparkApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(sparkapplicationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.SparkApplicationList{}) + return err +} + +// Patch applies the patch and returns the patched sparkApplication. +func (c *FakeSparkApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.SparkApplication, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(sparkapplicationsResource, c.ns, name, pt, data, subresources...), &v1beta2.SparkApplication{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.SparkApplication), err +} diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go new file mode 100644 index 000000000..7dfc2ed98 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go @@ -0,0 +1,46 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeSparkoperatorV1beta2 struct { + *testing.Fake +} + +func (c *FakeSparkoperatorV1beta2) ScheduledSparkApplications(namespace string) v1beta2.ScheduledSparkApplicationInterface { + return &FakeScheduledSparkApplications{c, namespace} +} + +func (c *FakeSparkoperatorV1beta2) SparkApplications(namespace string) v1beta2.SparkApplicationInterface { + return &FakeSparkApplications{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSparkoperatorV1beta2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go new file mode 100644 index 000000000..5983e5a6c --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go @@ -0,0 +1,25 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +type ScheduledSparkApplicationExpansion interface{} + +type SparkApplicationExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go new file mode 100644 index 000000000..3b81b4db0 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -0,0 +1,176 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "time" + + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + scheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ScheduledSparkApplicationsGetter has a method to return a ScheduledSparkApplicationInterface. +// A group's client should implement this interface. +type ScheduledSparkApplicationsGetter interface { + ScheduledSparkApplications(namespace string) ScheduledSparkApplicationInterface +} + +// ScheduledSparkApplicationInterface has methods to work with ScheduledSparkApplication resources. +type ScheduledSparkApplicationInterface interface { + Create(*v1beta2.ScheduledSparkApplication) (*v1beta2.ScheduledSparkApplication, error) + Update(*v1beta2.ScheduledSparkApplication) (*v1beta2.ScheduledSparkApplication, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.ScheduledSparkApplication, error) + List(opts v1.ListOptions) (*v1beta2.ScheduledSparkApplicationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ScheduledSparkApplication, err error) + ScheduledSparkApplicationExpansion +} + +// scheduledSparkApplications implements ScheduledSparkApplicationInterface +type scheduledSparkApplications struct { + client rest.Interface + ns string +} + +// newScheduledSparkApplications returns a ScheduledSparkApplications +func newScheduledSparkApplications(c *SparkoperatorV1beta2Client, namespace string) *scheduledSparkApplications { + return &scheduledSparkApplications{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the scheduledSparkApplication, and returns the corresponding scheduledSparkApplication object, and an error if there is any. +func (c *scheduledSparkApplications) Get(name string, options v1.GetOptions) (result *v1beta2.ScheduledSparkApplication, err error) { + result = &v1beta2.ScheduledSparkApplication{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ScheduledSparkApplications that match those selectors. +func (c *scheduledSparkApplications) List(opts v1.ListOptions) (result *v1beta2.ScheduledSparkApplicationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.ScheduledSparkApplicationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested scheduledSparkApplications. +func (c *scheduledSparkApplications) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a scheduledSparkApplication and creates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any. +func (c *scheduledSparkApplications) Create(scheduledSparkApplication *v1beta2.ScheduledSparkApplication) (result *v1beta2.ScheduledSparkApplication, err error) { + result = &v1beta2.ScheduledSparkApplication{} + err = c.client.Post(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + Body(scheduledSparkApplication). + Do(). + Into(result) + return +} + +// Update takes the representation of a scheduledSparkApplication and updates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any. +func (c *scheduledSparkApplications) Update(scheduledSparkApplication *v1beta2.ScheduledSparkApplication) (result *v1beta2.ScheduledSparkApplication, err error) { + result = &v1beta2.ScheduledSparkApplication{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + Name(scheduledSparkApplication.Name). + Body(scheduledSparkApplication). + Do(). + Into(result) + return +} + +// Delete takes name of the scheduledSparkApplication and deletes it. Returns an error if one occurs. +func (c *scheduledSparkApplications) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *scheduledSparkApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched scheduledSparkApplication. +func (c *scheduledSparkApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ScheduledSparkApplication, err error) { + result = &v1beta2.ScheduledSparkApplication{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("scheduledsparkapplications"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go new file mode 100644 index 000000000..0645e5410 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -0,0 +1,176 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "time" + + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + scheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SparkApplicationsGetter has a method to return a SparkApplicationInterface. +// A group's client should implement this interface. +type SparkApplicationsGetter interface { + SparkApplications(namespace string) SparkApplicationInterface +} + +// SparkApplicationInterface has methods to work with SparkApplication resources. +type SparkApplicationInterface interface { + Create(*v1beta2.SparkApplication) (*v1beta2.SparkApplication, error) + Update(*v1beta2.SparkApplication) (*v1beta2.SparkApplication, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.SparkApplication, error) + List(opts v1.ListOptions) (*v1beta2.SparkApplicationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.SparkApplication, err error) + SparkApplicationExpansion +} + +// sparkApplications implements SparkApplicationInterface +type sparkApplications struct { + client rest.Interface + ns string +} + +// newSparkApplications returns a SparkApplications +func newSparkApplications(c *SparkoperatorV1beta2Client, namespace string) *sparkApplications { + return &sparkApplications{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the sparkApplication, and returns the corresponding sparkApplication object, and an error if there is any. +func (c *sparkApplications) Get(name string, options v1.GetOptions) (result *v1beta2.SparkApplication, err error) { + result = &v1beta2.SparkApplication{} + err = c.client.Get(). + Namespace(c.ns). + Resource("sparkapplications"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of SparkApplications that match those selectors. +func (c *sparkApplications) List(opts v1.ListOptions) (result *v1beta2.SparkApplicationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.SparkApplicationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("sparkapplications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested sparkApplications. +func (c *sparkApplications) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("sparkapplications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a sparkApplication and creates it. Returns the server's representation of the sparkApplication, and an error, if there is any. 
+func (c *sparkApplications) Create(sparkApplication *v1beta2.SparkApplication) (result *v1beta2.SparkApplication, err error) { + result = &v1beta2.SparkApplication{} + err = c.client.Post(). + Namespace(c.ns). + Resource("sparkapplications"). + Body(sparkApplication). + Do(). + Into(result) + return +} + +// Update takes the representation of a sparkApplication and updates it. Returns the server's representation of the sparkApplication, and an error, if there is any. +func (c *sparkApplications) Update(sparkApplication *v1beta2.SparkApplication) (result *v1beta2.SparkApplication, err error) { + result = &v1beta2.SparkApplication{} + err = c.client.Put(). + Namespace(c.ns). + Resource("sparkapplications"). + Name(sparkApplication.Name). + Body(sparkApplication). + Do(). + Into(result) + return +} + +// Delete takes name of the sparkApplication and deletes it. Returns an error if one occurs. +func (c *sparkApplications) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("sparkapplications"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *sparkApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("sparkapplications"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched sparkApplication. +func (c *sparkApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.SparkApplication, err error) { + result = &v1beta2.SparkApplication{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("sparkapplications"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go new file mode 100644 index 000000000..d28528bb3 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go @@ -0,0 +1,97 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type SparkoperatorV1beta2Interface interface { + RESTClient() rest.Interface + ScheduledSparkApplicationsGetter + SparkApplicationsGetter +} + +// SparkoperatorV1beta2Client is used to interact with features provided by the sparkoperator.k8s.io group. +type SparkoperatorV1beta2Client struct { + restClient rest.Interface +} + +func (c *SparkoperatorV1beta2Client) ScheduledSparkApplications(namespace string) ScheduledSparkApplicationInterface { + return newScheduledSparkApplications(c, namespace) +} + +func (c *SparkoperatorV1beta2Client) SparkApplications(namespace string) SparkApplicationInterface { + return newSparkApplications(c, namespace) +} + +// NewForConfig creates a new SparkoperatorV1beta2Client for the given config. +func NewForConfig(c *rest.Config) (*SparkoperatorV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SparkoperatorV1beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new SparkoperatorV1beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SparkoperatorV1beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SparkoperatorV1beta2Client for the given RESTClient. +func New(c rest.Interface) *SparkoperatorV1beta2Client { + return &SparkoperatorV1beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SparkoperatorV1beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index baac753e1..4d86b22e8 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -25,6 +25,7 @@ import ( v1alpha1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1" v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -67,6 +68,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta1.SchemeGroupVersion.WithResource("sparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta1().SparkApplications().Informer()}, nil + // Group=sparkoperator.k8s.io, Version=v1beta2 + case v1beta2.SchemeGroupVersion.WithResource("scheduledsparkapplications"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("sparkapplications"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta2().SparkApplications().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go index 13ed8144f..2aa95594b 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go @@ -24,6 +24,7 @@ import ( internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1alpha1" v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1" + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2" ) // Interface provides access to each of this group's versions. @@ -32,6 +33,8 @@ type Interface interface { V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface + // V1beta2 provides access to shared informers for resources in V1beta2. + V1beta2() v1beta2.Interface } type group struct { @@ -54,3 +57,8 @@ func (g *group) V1alpha1() v1alpha1.Interface { func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta2 returns a new v1beta2.Interface. 
+func (g *group) V1beta2() v1beta2.Interface { + return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go new file mode 100644 index 000000000..78b8f5f6d --- /dev/null +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go @@ -0,0 +1,54 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta2 + +import ( + internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ScheduledSparkApplications returns a ScheduledSparkApplicationInformer. + ScheduledSparkApplications() ScheduledSparkApplicationInformer + // SparkApplications returns a SparkApplicationInformer. + SparkApplications() SparkApplicationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ScheduledSparkApplications returns a ScheduledSparkApplicationInformer. +func (v *version) ScheduledSparkApplications() ScheduledSparkApplicationInformer { + return &scheduledSparkApplicationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// SparkApplications returns a SparkApplicationInformer. +func (v *version) SparkApplications() SparkApplicationInformer { + return &sparkApplicationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go new file mode 100644 index 000000000..2fd0fcf28 --- /dev/null +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -0,0 +1,91 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta2 + +import ( + time "time" + + sparkoperatork8siov1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" + internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces" + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ScheduledSparkApplicationInformer provides access to a shared informer and lister for +// ScheduledSparkApplications. +type ScheduledSparkApplicationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.ScheduledSparkApplicationLister +} + +type scheduledSparkApplicationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewScheduledSparkApplicationInformer constructs a new informer for ScheduledSparkApplication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewScheduledSparkApplicationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredScheduledSparkApplicationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredScheduledSparkApplicationInformer constructs a new informer for ScheduledSparkApplication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredScheduledSparkApplicationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SparkoperatorV1beta2().ScheduledSparkApplications(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SparkoperatorV1beta2().ScheduledSparkApplications(namespace).Watch(options) + }, + }, + &sparkoperatork8siov1beta2.ScheduledSparkApplication{}, + resyncPeriod, + indexers, + ) +} + +func (f *scheduledSparkApplicationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredScheduledSparkApplicationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *scheduledSparkApplicationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&sparkoperatork8siov1beta2.ScheduledSparkApplication{}, f.defaultInformer) +} + +func (f *scheduledSparkApplicationInformer) Lister() v1beta2.ScheduledSparkApplicationLister { + return v1beta2.NewScheduledSparkApplicationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go new file mode 100644 index 000000000..8f3939122 --- /dev/null +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -0,0 +1,91 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta2 + +import ( + time "time" + + sparkoperatork8siov1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" + internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces" + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// SparkApplicationInformer provides access to a shared informer and lister for +// SparkApplications. 
+type SparkApplicationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.SparkApplicationLister +} + +type sparkApplicationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewSparkApplicationInformer constructs a new informer for SparkApplication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSparkApplicationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSparkApplicationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSparkApplicationInformer constructs a new informer for SparkApplication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredSparkApplicationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SparkoperatorV1beta2().SparkApplications(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SparkoperatorV1beta2().SparkApplications(namespace).Watch(options) + }, + }, + &sparkoperatork8siov1beta2.SparkApplication{}, + resyncPeriod, + indexers, + ) +} + +func (f *sparkApplicationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSparkApplicationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *sparkApplicationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&sparkoperatork8siov1beta2.SparkApplication{}, f.defaultInformer) +} + +func (f *sparkApplicationInformer) Lister() v1beta2.SparkApplicationLister { + return v1beta2.NewSparkApplicationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go new file mode 100644 index 000000000..beba3a094 --- /dev/null +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go @@ -0,0 +1,37 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta2 + +// ScheduledSparkApplicationListerExpansion allows custom methods to be added to +// ScheduledSparkApplicationLister. +type ScheduledSparkApplicationListerExpansion interface{} + +// ScheduledSparkApplicationNamespaceListerExpansion allows custom methods to be added to +// ScheduledSparkApplicationNamespaceLister. +type ScheduledSparkApplicationNamespaceListerExpansion interface{} + +// SparkApplicationListerExpansion allows custom methods to be added to +// SparkApplicationLister. +type SparkApplicationListerExpansion interface{} + +// SparkApplicationNamespaceListerExpansion allows custom methods to be added to +// SparkApplicationNamespaceLister. +type SparkApplicationNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go new file mode 100644 index 000000000..54ff0ea75 --- /dev/null +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -0,0 +1,96 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScheduledSparkApplicationLister helps list ScheduledSparkApplications. +type ScheduledSparkApplicationLister interface { + // List lists all ScheduledSparkApplications in the indexer. + List(selector labels.Selector) (ret []*v1beta2.ScheduledSparkApplication, err error) + // ScheduledSparkApplications returns an object that can list and get ScheduledSparkApplications. + ScheduledSparkApplications(namespace string) ScheduledSparkApplicationNamespaceLister + ScheduledSparkApplicationListerExpansion +} + +// scheduledSparkApplicationLister implements the ScheduledSparkApplicationLister interface. +type scheduledSparkApplicationLister struct { + indexer cache.Indexer +} + +// NewScheduledSparkApplicationLister returns a new ScheduledSparkApplicationLister. +func NewScheduledSparkApplicationLister(indexer cache.Indexer) ScheduledSparkApplicationLister { + return &scheduledSparkApplicationLister{indexer: indexer} +} + +// List lists all ScheduledSparkApplications in the indexer. +func (s *scheduledSparkApplicationLister) List(selector labels.Selector) (ret []*v1beta2.ScheduledSparkApplication, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ScheduledSparkApplication)) + }) + return ret, err +} + +// ScheduledSparkApplications returns an object that can list and get ScheduledSparkApplications. 
+func (s *scheduledSparkApplicationLister) ScheduledSparkApplications(namespace string) ScheduledSparkApplicationNamespaceLister { + return scheduledSparkApplicationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScheduledSparkApplicationNamespaceLister helps list and get ScheduledSparkApplications. +type ScheduledSparkApplicationNamespaceLister interface { + // List lists all ScheduledSparkApplications in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.ScheduledSparkApplication, err error) + // Get retrieves the ScheduledSparkApplication from the indexer for a given namespace and name. + Get(name string) (*v1beta2.ScheduledSparkApplication, error) + ScheduledSparkApplicationNamespaceListerExpansion +} + +// scheduledSparkApplicationNamespaceLister implements the ScheduledSparkApplicationNamespaceLister +// interface. +type scheduledSparkApplicationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ScheduledSparkApplications in the indexer for a given namespace. +func (s scheduledSparkApplicationNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ScheduledSparkApplication, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ScheduledSparkApplication)) + }) + return ret, err +} + +// Get retrieves the ScheduledSparkApplication from the indexer for a given namespace and name. +func (s scheduledSparkApplicationNamespaceLister) Get(name string) (*v1beta2.ScheduledSparkApplication, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("scheduledsparkapplication"), name) + } + return obj.(*v1beta2.ScheduledSparkApplication), nil +} diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go new file mode 100644 index 000000000..603791327 --- /dev/null +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -0,0 +1,96 @@ +// Code generated by k8s code-generator DO NOT EDIT. + +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// SparkApplicationLister helps list SparkApplications. +type SparkApplicationLister interface { + // List lists all SparkApplications in the indexer. + List(selector labels.Selector) (ret []*v1beta2.SparkApplication, err error) + // SparkApplications returns an object that can list and get SparkApplications. 
+ SparkApplications(namespace string) SparkApplicationNamespaceLister + SparkApplicationListerExpansion +} + +// sparkApplicationLister implements the SparkApplicationLister interface. +type sparkApplicationLister struct { + indexer cache.Indexer +} + +// NewSparkApplicationLister returns a new SparkApplicationLister. +func NewSparkApplicationLister(indexer cache.Indexer) SparkApplicationLister { + return &sparkApplicationLister{indexer: indexer} +} + +// List lists all SparkApplications in the indexer. +func (s *sparkApplicationLister) List(selector labels.Selector) (ret []*v1beta2.SparkApplication, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.SparkApplication)) + }) + return ret, err +} + +// SparkApplications returns an object that can list and get SparkApplications. +func (s *sparkApplicationLister) SparkApplications(namespace string) SparkApplicationNamespaceLister { + return sparkApplicationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// SparkApplicationNamespaceLister helps list and get SparkApplications. +type SparkApplicationNamespaceLister interface { + // List lists all SparkApplications in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.SparkApplication, err error) + // Get retrieves the SparkApplication from the indexer for a given namespace and name. + Get(name string) (*v1beta2.SparkApplication, error) + SparkApplicationNamespaceListerExpansion +} + +// sparkApplicationNamespaceLister implements the SparkApplicationNamespaceLister +// interface. +type sparkApplicationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all SparkApplications in the indexer for a given namespace. +func (s sparkApplicationNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.SparkApplication, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.SparkApplication)) + }) + return ret, err +} + +// Get retrieves the SparkApplication from the indexer for a given namespace and name. +func (s sparkApplicationNamespaceLister) Get(name string) (*v1beta2.SparkApplication, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("sparkapplication"), name) + } + return obj.(*v1beta2.SparkApplication), nil +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 781b0a8c4..89415597e 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -19,7 +19,7 @@ package config import ( "fmt" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) // GetDriverAnnotationOption returns a spark-submit option for a driver annotation of the given key and value. @@ -33,7 +33,7 @@ func GetExecutorAnnotationOption(key string, value string) string { } // GetDriverEnvVarConfOptions returns a list of spark-submit options for setting driver environment variables. 
-func GetDriverEnvVarConfOptions(app *v1beta1.SparkApplication) []string { +func GetDriverEnvVarConfOptions(app *v1beta2.SparkApplication) []string { var envVarConfOptions []string for key, value := range app.Spec.Driver.EnvVars { envVar := fmt.Sprintf("%s%s=%s", SparkDriverEnvVarConfigKeyPrefix, key, value) @@ -43,7 +43,7 @@ func GetDriverEnvVarConfOptions(app *v1beta1.SparkApplication) []string { } // GetExecutorEnvVarConfOptions returns a list of spark-submit options for setting executor environment variables. -func GetExecutorEnvVarConfOptions(app *v1beta1.SparkApplication) []string { +func GetExecutorEnvVarConfOptions(app *v1beta2.SparkApplication) []string { var envVarConfOptions []string for key, value := range app.Spec.Executor.EnvVars { envVar := fmt.Sprintf("%s%s=%s", SparkExecutorEnvVarConfigKeyPrefix, key, value) @@ -53,6 +53,6 @@ func GetExecutorEnvVarConfOptions(app *v1beta1.SparkApplication) []string { } // GetPrometheusConfigMapName returns the name of the ConfigMap for Prometheus configuration. -func GetPrometheusConfigMapName(app *v1beta1.SparkApplication) string { +func GetPrometheusConfigMapName(app *v1beta2.SparkApplication) string { return fmt.Sprintf("%s-%s", app.Name, PrometheusConfigMapNameSuffix) } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index a373bb59f..d6f60af0b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -22,14 +22,14 @@ import ( "github.com/stretchr/testify/assert" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) func TestGetDriverEnvVarConfOptions(t *testing.T) { - app := &v1beta1.SparkApplication{ - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + app := &v1beta2.SparkApplication{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ EnvVars: map[string]string{ "ENV1": "VALUE1", "ENV2": "VALUE2", @@ -50,10 +50,10 @@ func TestGetDriverEnvVarConfOptions(t *testing.T) { } func TestGetExecutorEnvVarConfOptions(t *testing.T) { - app := &v1beta1.SparkApplication{ - Spec: v1beta1.SparkApplicationSpec{ - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + app := &v1beta2.SparkApplication{ + Spec: v1beta2.SparkApplicationSpec{ + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ EnvVars: map[string]string{ "ENV1": "VALUE1", "ENV2": "VALUE2", diff --git a/pkg/config/secret.go b/pkg/config/secret.go index c3fe28b82..b76b682c0 100644 --- a/pkg/config/secret.go +++ b/pkg/config/secret.go @@ -20,23 +20,23 @@ import ( "fmt" "path/filepath" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) // GetDriverSecretConfOptions returns a list of spark-submit options for mounting driver secrets. 
-func GetDriverSecretConfOptions(app *v1beta1.SparkApplication) []string { +func GetDriverSecretConfOptions(app *v1beta2.SparkApplication) []string { var secretConfOptions []string for _, s := range app.Spec.Driver.Secrets { conf := fmt.Sprintf("%s%s=%s", SparkDriverSecretKeyPrefix, s.Name, s.Path) secretConfOptions = append(secretConfOptions, conf) - if s.Type == v1beta1.GCPServiceAccountSecret { + if s.Type == v1beta2.GCPServiceAccountSecret { conf = fmt.Sprintf( "%s%s=%s", SparkDriverEnvVarConfigKeyPrefix, GoogleApplicationCredentialsEnvVar, filepath.Join(s.Path, ServiceAccountJSONKeyFileName)) secretConfOptions = append(secretConfOptions, conf) - } else if s.Type == v1beta1.HadoopDelegationTokenSecret { + } else if s.Type == v1beta2.HadoopDelegationTokenSecret { conf = fmt.Sprintf( "%s%s=%s", SparkDriverEnvVarConfigKeyPrefix, @@ -49,19 +49,19 @@ func GetDriverSecretConfOptions(app *v1beta1.SparkApplication) []string { } // GetExecutorSecretConfOptions returns a list of spark-submit options for mounting executor secrets. -func GetExecutorSecretConfOptions(app *v1beta1.SparkApplication) []string { +func GetExecutorSecretConfOptions(app *v1beta2.SparkApplication) []string { var secretConfOptions []string for _, s := range app.Spec.Executor.Secrets { conf := fmt.Sprintf("%s%s=%s", SparkExecutorSecretKeyPrefix, s.Name, s.Path) secretConfOptions = append(secretConfOptions, conf) - if s.Type == v1beta1.GCPServiceAccountSecret { + if s.Type == v1beta2.GCPServiceAccountSecret { conf = fmt.Sprintf( "%s%s=%s", SparkExecutorEnvVarConfigKeyPrefix, GoogleApplicationCredentialsEnvVar, filepath.Join(s.Path, ServiceAccountJSONKeyFileName)) secretConfOptions = append(secretConfOptions, conf) - } else if s.Type == v1beta1.HadoopDelegationTokenSecret { + } else if s.Type == v1beta2.HadoopDelegationTokenSecret { conf = fmt.Sprintf( "%s%s=%s", SparkExecutorEnvVarConfigKeyPrefix, diff --git a/pkg/config/secret_test.go b/pkg/config/secret_test.go index c360dcc2b..6157ecb5c 100644 --- a/pkg/config/secret_test.go +++ b/pkg/config/secret_test.go @@ -23,15 +23,15 @@ import ( "github.com/stretchr/testify/assert" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) func TestGetDriverSecretConfOptions(t *testing.T) { - app := &v1beta1.SparkApplication{ - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ - Secrets: []v1beta1.SecretInfo{ + app := &v1beta2.SparkApplication{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ + Secrets: []v1beta2.SecretInfo{ { Name: "db-credentials", Path: "/etc/secrets", @@ -39,12 +39,12 @@ func TestGetDriverSecretConfOptions(t *testing.T) { { Name: "gcp-service-account", Path: "/etc/secrets", - Type: v1beta1.GCPServiceAccountSecret, + Type: v1beta2.GCPServiceAccountSecret, }, { Name: "hadoop-token", Path: "/etc/secrets", - Type: v1beta1.HadoopDelegationTokenSecret, + Type: v1beta2.HadoopDelegationTokenSecret, }, }, }, @@ -67,11 +67,11 @@ func TestGetDriverSecretConfOptions(t *testing.T) { } func TestGetExecutorSecretConfOptions(t *testing.T) { - app := &v1beta1.SparkApplication{ - Spec: v1beta1.SparkApplicationSpec{ - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ - Secrets: []v1beta1.SecretInfo{ + app := &v1beta2.SparkApplication{ + Spec: v1beta2.SparkApplicationSpec{ + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: 
v1beta2.SparkPodSpec{ + Secrets: []v1beta2.SecretInfo{ { Name: "db-credentials", Path: "/etc/secrets", @@ -79,12 +79,12 @@ func TestGetExecutorSecretConfOptions(t *testing.T) { { Name: "gcp-service-account", Path: "/etc/secrets", - Type: v1beta1.GCPServiceAccountSecret, + Type: v1beta2.GCPServiceAccountSecret, }, { Name: "hadoop-token", Path: "/etc/secrets", - Type: v1beta1.HadoopDelegationTokenSecret, + Type: v1beta2.HadoopDelegationTokenSecret, }, }, }, diff --git a/pkg/controller/scheduledsparkapplication/controller.go b/pkg/controller/scheduledsparkapplication/controller.go index 05d3dca00..1e65ab2a0 100644 --- a/pkg/controller/scheduledsparkapplication/controller.go +++ b/pkg/controller/scheduledsparkapplication/controller.go @@ -37,11 +37,11 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" crdscheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" - crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" + crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) @@ -79,7 +79,7 @@ func NewController( clock: clock, } - informer := informerFactory.Sparkoperator().V1beta1().ScheduledSparkApplications() + informer := informerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications() informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.onAdd, UpdateFunc: controller.onUpdate, @@ -87,7 +87,7 @@ func NewController( }) controller.cacheSynced = informer.Informer().HasSynced controller.ssaLister = informer.Lister() - controller.saLister = informerFactory.Sparkoperator().V1beta1().SparkApplications().Lister() + controller.saLister = informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister() return controller } @@ -166,10 +166,10 @@ func (c *Controller) syncScheduledSparkApplication(key string) error { schedule, err := cron.ParseStandard(app.Spec.Schedule) if err != nil { glog.Errorf("failed to parse schedule %s of ScheduledSparkApplication %s/%s: %v", app.Spec.Schedule, app.Namespace, app.Name, err) - status.ScheduleState = v1beta1.FailedValidationState + status.ScheduleState = v1beta2.FailedValidationState status.Reason = err.Error() } else { - status.ScheduleState = v1beta1.ScheduledState + status.ScheduleState = v1beta2.ScheduledState now := c.clock.Now() nextRunTime := status.NextRun.Time if nextRunTime.IsZero() { @@ -237,13 +237,13 @@ func (c *Controller) dequeue(obj interface{}) { } func (c *Controller) createSparkApplication( - scheduledApp *v1beta1.ScheduledSparkApplication, t time.Time) (string, error) { - app := &v1beta1.SparkApplication{} + scheduledApp *v1beta2.ScheduledSparkApplication, t time.Time) (string, error) { + app := &v1beta2.SparkApplication{} app.Spec = scheduledApp.Spec.Template app.Name = fmt.Sprintf("%s-%d", scheduledApp.Name, t.UnixNano()) app.OwnerReferences = append(app.OwnerReferences, metav1.OwnerReference{ - APIVersion: v1beta1.SchemeGroupVersion.String(), - Kind: 
reflect.TypeOf(v1beta1.ScheduledSparkApplication{}).Name(), + APIVersion: v1beta2.SchemeGroupVersion.String(), + Kind: reflect.TypeOf(v1beta2.ScheduledSparkApplication{}).Name(), Name: scheduledApp.Name, UID: scheduledApp.UID, }) @@ -252,14 +252,14 @@ func (c *Controller) createSparkApplication( app.ObjectMeta.Labels[key] = value } app.ObjectMeta.Labels[config.ScheduledSparkAppNameLabel] = scheduledApp.Name - _, err := c.crdClient.SparkoperatorV1beta1().SparkApplications(scheduledApp.Namespace).Create(app) + _, err := c.crdClient.SparkoperatorV1beta2().SparkApplications(scheduledApp.Namespace).Create(app) if err != nil { return "", err } return app.Name, nil } -func (c *Controller) shouldStartNextRun(app *v1beta1.ScheduledSparkApplication) (bool, error) { +func (c *Controller) shouldStartNextRun(app *v1beta2.ScheduledSparkApplication) (bool, error) { sortedApps, err := c.listSparkApplications(app) if err != nil { return false, err @@ -271,11 +271,11 @@ func (c *Controller) shouldStartNextRun(app *v1beta1.ScheduledSparkApplication) // The last run (most recently started) is the first one in the sorted slice. lastRun := sortedApps[0] switch app.Spec.ConcurrencyPolicy { - case v1beta1.ConcurrencyAllow: + case v1beta2.ConcurrencyAllow: return true, nil - case v1beta1.ConcurrencyForbid: + case v1beta2.ConcurrencyForbid: return c.hasLastRunFinished(lastRun), nil - case v1beta1.ConcurrencyReplace: + case v1beta2.ConcurrencyReplace: if err := c.killLastRunIfNotFinished(lastRun); err != nil { return false, err } @@ -284,7 +284,7 @@ func (c *Controller) shouldStartNextRun(app *v1beta1.ScheduledSparkApplication) return true, nil } -func (c *Controller) startNextRun(app *v1beta1.ScheduledSparkApplication, now time.Time) (string, error) { +func (c *Controller) startNextRun(app *v1beta2.ScheduledSparkApplication, now time.Time) (string, error) { name, err := c.createSparkApplication(app, now) if err != nil { glog.Errorf("failed to create a SparkApplication instance for ScheduledSparkApplication %s/%s: %v", app.Namespace, app.Name, err) @@ -293,19 +293,19 @@ func (c *Controller) startNextRun(app *v1beta1.ScheduledSparkApplication, now ti return name, nil } -func (c *Controller) hasLastRunFinished(app *v1beta1.SparkApplication) bool { - return app.Status.AppState.State == v1beta1.CompletedState || - app.Status.AppState.State == v1beta1.FailedState +func (c *Controller) hasLastRunFinished(app *v1beta2.SparkApplication) bool { + return app.Status.AppState.State == v1beta2.CompletedState || + app.Status.AppState.State == v1beta2.FailedState } -func (c *Controller) killLastRunIfNotFinished(app *v1beta1.SparkApplication) error { +func (c *Controller) killLastRunIfNotFinished(app *v1beta2.SparkApplication) error { finished := c.hasLastRunFinished(app) if finished { return nil } // Delete the SparkApplication object of the last run. 
- if err := c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Delete(app.Name, + if err := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(app.Name, metav1.NewDeleteOptions(0)); err != nil { return err } @@ -314,8 +314,8 @@ func (c *Controller) killLastRunIfNotFinished(app *v1beta1.SparkApplication) err } func (c *Controller) checkAndUpdatePastRuns( - app *v1beta1.ScheduledSparkApplication, - status *v1beta1.ScheduledSparkApplicationStatus) error { + app *v1beta2.ScheduledSparkApplication, + status *v1beta2.ScheduledSparkApplicationStatus) error { sortedApps, err := c.listSparkApplications(app) if err != nil { return err @@ -324,9 +324,9 @@ func (c *Controller) checkAndUpdatePastRuns( var completedRuns []string var failedRuns []string for _, a := range sortedApps { - if a.Status.AppState.State == v1beta1.CompletedState { + if a.Status.AppState.State == v1beta2.CompletedState { completedRuns = append(completedRuns, a.Name) - } else if a.Status.AppState.State == v1beta1.FailedState { + } else if a.Status.AppState.State == v1beta2.FailedState { failedRuns = append(failedRuns, a.Name) } } @@ -334,19 +334,19 @@ func (c *Controller) checkAndUpdatePastRuns( var toDelete []string status.PastSuccessfulRunNames, toDelete = bookkeepPastRuns(completedRuns, app.Spec.SuccessfulRunHistoryLimit) for _, name := range toDelete { - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Delete(name, metav1.NewDeleteOptions(0)) + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(name, metav1.NewDeleteOptions(0)) } status.PastFailedRunNames, toDelete = bookkeepPastRuns(failedRuns, app.Spec.FailedRunHistoryLimit) for _, name := range toDelete { - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Delete(name, metav1.NewDeleteOptions(0)) + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(name, metav1.NewDeleteOptions(0)) } return nil } func (c *Controller) updateScheduledSparkApplicationStatus( - app *v1beta1.ScheduledSparkApplication, - newStatus *v1beta1.ScheduledSparkApplicationStatus) error { + app *v1beta2.ScheduledSparkApplication, + newStatus *v1beta2.ScheduledSparkApplicationStatus) error { // If the status has not changed, do not perform an update. 
if isStatusEqual(newStatus, &app.Status) { return nil @@ -355,13 +355,13 @@ func (c *Controller) updateScheduledSparkApplicationStatus( toUpdate := app.DeepCopy() return retry.RetryOnConflict(retry.DefaultRetry, func() error { toUpdate.Status = *newStatus - _, updateErr := c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(toUpdate.Namespace).Update( + _, updateErr := c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(toUpdate.Namespace).Update( toUpdate) if updateErr == nil { return nil } - result, err := c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(toUpdate.Namespace).Get( + result, err := c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(toUpdate.Namespace).Get( toUpdate.Name, metav1.GetOptions{}) if err != nil { return err @@ -372,7 +372,7 @@ func (c *Controller) updateScheduledSparkApplicationStatus( }) } -func (c *Controller) listSparkApplications(app *v1beta1.ScheduledSparkApplication) (sparkApps, error) { +func (c *Controller) listSparkApplications(app *v1beta2.ScheduledSparkApplication) (sparkApps, error) { set := labels.Set{config.ScheduledSparkAppNameLabel: app.Name} apps, err := c.saLister.SparkApplications(app.Namespace).List(set.AsSelector()) if err != nil { @@ -397,7 +397,7 @@ func bookkeepPastRuns(names []string, runLimit *int32) (toKeep []string, toDelet return } -func isStatusEqual(newStatus, currentStatus *v1beta1.ScheduledSparkApplicationStatus) bool { +func isStatusEqual(newStatus, currentStatus *v1beta2.ScheduledSparkApplicationStatus) bool { return newStatus.ScheduleState == currentStatus.ScheduleState && newStatus.LastRun == currentStatus.LastRun && newStatus.NextRun == currentStatus.NextRun && diff --git a/pkg/controller/scheduledsparkapplication/controller_test.go b/pkg/controller/scheduledsparkapplication/controller_test.go index 6828313bd..a93109522 100644 --- a/pkg/controller/scheduledsparkapplication/controller_test.go +++ b/pkg/controller/scheduledsparkapplication/controller_test.go @@ -30,25 +30,25 @@ import ( kubetesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake" crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) func TestSyncScheduledSparkApplication_Allow(t *testing.T) { - app := &v1beta1.ScheduledSparkApplication{ + app := &v1beta2.ScheduledSparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test-app-allow", }, - Spec: v1beta1.ScheduledSparkApplicationSpec{ + Spec: v1beta2.ScheduledSparkApplicationSpec{ Schedule: "@every 1m", - ConcurrencyPolicy: v1beta1.ConcurrencyAllow, + ConcurrencyPolicy: v1beta2.ConcurrencyAllow, }, } c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Create(app) + c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(app) key, _ := cache.MetaNamespaceKeyFunc(app) options := metav1.GetOptions{} @@ -56,8 +56,8 @@ func TestSyncScheduledSparkApplication_Allow(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, 
options) - assert.Equal(t, v1beta1.ScheduledState, app.Status.ScheduleState) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) // The first run should not have been triggered. assert.True(t, app.Status.LastRunName == "") @@ -66,14 +66,14 @@ func TestSyncScheduledSparkApplication_Allow(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) firstRunName := app.Status.LastRunName // The first run should have been triggered. assert.True(t, firstRunName != "") assert.False(t, app.Status.LastRun.IsZero()) assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) // The first run exists. - run, _ := c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.NotNil(t, run) clk.Step(5 * time.Second) @@ -81,31 +81,31 @@ func TestSyncScheduledSparkApplication_Allow(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) // Next run is not due, so LastRunName should stay the same. assert.Equal(t, firstRunName, app.Status.LastRunName) // Simulate completion of the first run. - run.Status.AppState.State = v1beta1.CompletedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Update(run) + run.Status.AppState.State = v1beta2.CompletedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(run) // This sync should not start any new run, but update Status.PastSuccessfulRunNames. if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.NotNil(t, run) // This sync should not start any new run, nor update Status.PastSuccessfulRunNames. 
if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.NotNil(t, run) // Advance the clock to trigger the second run. @@ -114,36 +114,36 @@ func TestSyncScheduledSparkApplication_Allow(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) - assert.Equal(t, v1beta1.ScheduledState, app.Status.ScheduleState) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) // The second run should have a different name. secondRunName := app.Status.LastRunName assert.NotEqual(t, firstRunName, secondRunName) assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(secondRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(secondRunName, options) assert.NotNil(t, run) // Simulate completion of the second run. - run.Status.AppState.State = v1beta1.CompletedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Update(run) + run.Status.AppState.State = v1beta2.CompletedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(run) // This sync should not start any new run, but update Status.PastSuccessfulRunNames. if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) // The first run should have been deleted due to the completion of the second run. - firstRun, _ := c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + firstRun, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.Nil(t, firstRun) // This sync should not start any new run, nor update Status.PastSuccessfulRunNames. 
if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(secondRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(secondRunName, options) assert.NotNil(t, run) } @@ -151,18 +151,18 @@ func TestSyncScheduledSparkApplication_Forbid(t *testing.T) { // TODO: figure out why the test fails and remove this. t.Skip() - app := &v1beta1.ScheduledSparkApplication{ + app := &v1beta2.ScheduledSparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test-app-forbid", }, - Spec: v1beta1.ScheduledSparkApplicationSpec{ + Spec: v1beta2.ScheduledSparkApplicationSpec{ Schedule: "@every 1m", - ConcurrencyPolicy: v1beta1.ConcurrencyForbid, + ConcurrencyPolicy: v1beta2.ConcurrencyForbid, }, } c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Create(app) + c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(app) key, _ := cache.MetaNamespaceKeyFunc(app) options := metav1.GetOptions{} @@ -170,8 +170,8 @@ func TestSyncScheduledSparkApplication_Forbid(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) - assert.Equal(t, v1beta1.ScheduledState, app.Status.ScheduleState) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) // The first run should not have been triggered. assert.True(t, app.Status.LastRunName == "") @@ -180,15 +180,15 @@ func TestSyncScheduledSparkApplication_Forbid(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) - assert.Equal(t, v1beta1.ScheduledState, app.Status.ScheduleState) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) firstRunName := app.Status.LastRunName // The first run should have been triggered. assert.True(t, firstRunName != "") assert.False(t, app.Status.LastRun.IsZero()) assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) // The first run exists. 
- run, _ := c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.NotNil(t, run) clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) @@ -196,23 +196,23 @@ func TestSyncScheduledSparkApplication_Forbid(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) assert.Equal(t, firstRunName, app.Status.LastRunName) // Simulate completion of the first run. - run.Status.AppState.State = v1beta1.CompletedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Update(run) + run.Status.AppState.State = v1beta2.CompletedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(run) // This sync should start the next run because the first run has completed. if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) secondRunName := app.Status.LastRunName assert.NotEqual(t, firstRunName, secondRunName) assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(secondRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(secondRunName, options) assert.NotNil(t, run) } @@ -220,18 +220,18 @@ func TestSyncScheduledSparkApplication_Replace(t *testing.T) { // TODO: figure out why the test fails and remove this. t.Skip() - app := &v1beta1.ScheduledSparkApplication{ + app := &v1beta2.ScheduledSparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test-app-replace", }, - Spec: v1beta1.ScheduledSparkApplicationSpec{ + Spec: v1beta2.ScheduledSparkApplicationSpec{ Schedule: "@every 1m", - ConcurrencyPolicy: v1beta1.ConcurrencyReplace, + ConcurrencyPolicy: v1beta2.ConcurrencyReplace, }, } c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Create(app) + c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(app) key, _ := cache.MetaNamespaceKeyFunc(app) options := metav1.GetOptions{} @@ -239,8 +239,8 @@ func TestSyncScheduledSparkApplication_Replace(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) - assert.Equal(t, v1beta1.ScheduledState, app.Status.ScheduleState) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) // The first run should not have been triggered. 
assert.True(t, app.Status.LastRunName == "") @@ -249,15 +249,15 @@ func TestSyncScheduledSparkApplication_Replace(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) - assert.Equal(t, v1beta1.ScheduledState, app.Status.ScheduleState) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) firstRunName := app.Status.LastRunName // The first run should have been triggered. assert.True(t, firstRunName != "") assert.False(t, app.Status.LastRun.IsZero()) assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) // The first run exists. - run, _ := c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.NotNil(t, run) clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) @@ -265,82 +265,82 @@ func TestSyncScheduledSparkApplication_Replace(t *testing.T) { if err := c.syncScheduledSparkApplication(key); err != nil { t.Fatal(err) } - app, _ = c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) + app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(app.Name, options) secondRunName := app.Status.LastRunName assert.NotEqual(t, firstRunName, secondRunName) // The first run should have been deleted. - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(firstRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(firstRunName, options) assert.Nil(t, run) // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(secondRunName, options) + run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(secondRunName, options) assert.NotNil(t, run) } func TestShouldStartNextRun(t *testing.T) { - app := &v1beta1.ScheduledSparkApplication{ + app := &v1beta2.ScheduledSparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test-app", }, - Spec: v1beta1.ScheduledSparkApplicationSpec{ + Spec: v1beta2.ScheduledSparkApplicationSpec{ Schedule: "@every 1m", }, - Status: v1beta1.ScheduledSparkApplicationStatus{ + Status: v1beta2.ScheduledSparkApplicationStatus{ LastRunName: "run1", }, } c, _ := newFakeController() - c.crdClient.SparkoperatorV1beta1().ScheduledSparkApplications(app.Namespace).Create(app) + c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(app) - run1 := &v1beta1.SparkApplication{ + run1 := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: app.Namespace, Name: "run1", Labels: map[string]string{config.ScheduledSparkAppNameLabel: app.Name}, }, } - c.crdClient.SparkoperatorV1beta1().SparkApplications(run1.Namespace).Create(run1) + c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Create(run1) // ConcurrencyAllow with a running run. 
- run1.Status.AppState.State = v1beta1.RunningState - c.crdClient.SparkoperatorV1beta1().SparkApplications(run1.Namespace).Update(run1) - app.Spec.ConcurrencyPolicy = v1beta1.ConcurrencyAllow + run1.Status.AppState.State = v1beta2.RunningState + c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(run1) + app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyAllow ok, _ := c.shouldStartNextRun(app) assert.True(t, ok) // ConcurrencyForbid with a running run. - app.Spec.ConcurrencyPolicy = v1beta1.ConcurrencyForbid + app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyForbid ok, _ = c.shouldStartNextRun(app) assert.False(t, ok) // ConcurrencyForbid with a completed run. - run1.Status.AppState.State = v1beta1.CompletedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(run1.Namespace).Update(run1) + run1.Status.AppState.State = v1beta2.CompletedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(run1) ok, _ = c.shouldStartNextRun(app) assert.True(t, ok) // ConcurrencyReplace with a completed run. - app.Spec.ConcurrencyPolicy = v1beta1.ConcurrencyReplace + app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyReplace ok, _ = c.shouldStartNextRun(app) assert.True(t, ok) // ConcurrencyReplace with a running run. - run1.Status.AppState.State = v1beta1.RunningState - c.crdClient.SparkoperatorV1beta1().SparkApplications(run1.Namespace).Update(run1) + run1.Status.AppState.State = v1beta2.RunningState + c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(run1) ok, _ = c.shouldStartNextRun(app) assert.True(t, ok) // The previous running run should have been deleted. - existing, _ := c.crdClient.SparkoperatorV1beta1().SparkApplications(run1.Namespace).Get(run1.Name, + existing, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Get(run1.Name, metav1.GetOptions{}) assert.Nil(t, existing) } func TestCheckAndUpdatePastRuns(t *testing.T) { var two int32 = 2 - app := &v1beta1.ScheduledSparkApplication{ + app := &v1beta2.ScheduledSparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test-app", }, - Spec: v1beta1.ScheduledSparkApplicationSpec{ + Spec: v1beta2.ScheduledSparkApplicationSpec{ Schedule: "@every 1m", SuccessfulRunHistoryLimit: &two, FailedRunHistoryLimit: &two, @@ -348,19 +348,19 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { } c, _ := newFakeController() - run1 := &v1beta1.SparkApplication{ + run1 := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Namespace: app.Namespace, Name: "run1", Labels: map[string]string{config.ScheduledSparkAppNameLabel: app.Name}, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.CompletedState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.CompletedState, }, }, } - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(run1) + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(run1) // The first completed run should have been recorded. 
status := app.Status.DeepCopy() @@ -372,14 +372,14 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { run2 := run1.DeepCopy() run2.CreationTimestamp.Time = run1.CreationTimestamp.Add(10 * time.Second) run2.Name = "run2" - run2.Status.AppState.State = v1beta1.RunningState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(run2) + run2.Status.AppState.State = v1beta2.RunningState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(run2) c.checkAndUpdatePastRuns(app, status) assert.Equal(t, 1, len(status.PastSuccessfulRunNames)) assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[0]) // The second completed run should have been recorded. - run2.Status.AppState.State = v1beta1.CompletedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Update(run2) + run2.Status.AppState.State = v1beta2.CompletedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(run2) c.checkAndUpdatePastRuns(app, status) assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[0]) @@ -390,10 +390,10 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[0]) assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[1]) // SparkApplications of both of the first two completed runs should exist. - existing, _ := c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run2.Name, + existing, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run2.Name, metav1.GetOptions{}) assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run1.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run1.Name, metav1.GetOptions{}) assert.NotNil(t, existing) @@ -401,20 +401,20 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { run3 := run1.DeepCopy() run3.CreationTimestamp.Time = run2.CreationTimestamp.Add(10 * time.Second) run3.Name = "run3" - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(run3) + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(run3) c.checkAndUpdatePastRuns(app, status) assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) assert.Equal(t, run3.Name, status.PastSuccessfulRunNames[0]) assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[1]) // SparkApplications of the last two completed runs should still exist, // but the one of the first completed run should have been deleted. 
- existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run3.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run3.Name, metav1.GetOptions{}) assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run2.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run2.Name, metav1.GetOptions{}) assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run1.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run1.Name, metav1.GetOptions{}) assert.Nil(t, existing) @@ -422,8 +422,8 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { run4 := run1.DeepCopy() run4.CreationTimestamp.Time = run3.CreationTimestamp.Add(10 * time.Second) run4.Name = "run4" - run4.Status.AppState.State = v1beta1.FailedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(run4) + run4.Status.AppState.State = v1beta2.FailedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(run4) c.checkAndUpdatePastRuns(app, status) assert.Equal(t, 1, len(status.PastFailedRunNames)) assert.Equal(t, run4.Name, status.PastFailedRunNames[0]) @@ -432,8 +432,8 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { run5 := run1.DeepCopy() run5.CreationTimestamp.Time = run4.CreationTimestamp.Add(10 * time.Second) run5.Name = "run5" - run5.Status.AppState.State = v1beta1.FailedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(run5) + run5.Status.AppState.State = v1beta2.FailedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(run5) c.checkAndUpdatePastRuns(app, status) assert.Equal(t, 2, len(status.PastFailedRunNames)) assert.Equal(t, run5.Name, status.PastFailedRunNames[0]) @@ -443,21 +443,21 @@ func TestCheckAndUpdatePastRuns(t *testing.T) { run6 := run1.DeepCopy() run6.CreationTimestamp.Time = run5.CreationTimestamp.Add(10 * time.Second) run6.Name = "run6" - run6.Status.AppState.State = v1beta1.FailedState - c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(run6) + run6.Status.AppState.State = v1beta2.FailedState + c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(run6) c.checkAndUpdatePastRuns(app, status) assert.Equal(t, 2, len(status.PastFailedRunNames)) assert.Equal(t, run6.Name, status.PastFailedRunNames[0]) assert.Equal(t, run5.Name, status.PastFailedRunNames[1]) // SparkApplications of the last two failed runs should still exist, // but the one of the first failed run should have been deleted. 
- existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run6.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run6.Name, metav1.GetOptions{}) assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run5.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run5.Name, metav1.GetOptions{}) assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(run4.Name, + existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(run4.Name, metav1.GetOptions{}) assert.Nil(t, existing) } @@ -469,8 +469,8 @@ func newFakeController() (*Controller, *clock.FakeClock) { informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 1*time.Second) clk := clock.NewFakeClock(time.Now()) controller := NewController(crdClient, kubeClient, apiExtensionsClient, informerFactory, clk) - ssaInformer := informerFactory.Sparkoperator().V1beta1().ScheduledSparkApplications().Informer() - saInformer := informerFactory.Sparkoperator().V1beta1().SparkApplications().Informer() + ssaInformer := informerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer() + saInformer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer() crdClient.PrependReactor("create", "scheduledsparkapplications", func(action kubetesting.Action) (bool, runtime.Object, error) { obj := action.(kubetesting.CreateAction).GetObject() diff --git a/pkg/controller/scheduledsparkapplication/controller_util.go b/pkg/controller/scheduledsparkapplication/controller_util.go index 5f00f643f..563d3181d 100644 --- a/pkg/controller/scheduledsparkapplication/controller_util.go +++ b/pkg/controller/scheduledsparkapplication/controller_util.go @@ -17,10 +17,10 @@ limitations under the License. 
package scheduledsparkapplication import ( - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) -type sparkApps []*v1beta1.SparkApplication +type sparkApps []*v1beta2.SparkApplication func (s sparkApps) Len() int { return len(s) diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go index 2d95f2886..9a9970f97 100644 --- a/pkg/controller/sparkapplication/controller.go +++ b/pkg/controller/sparkapplication/controller.go @@ -40,11 +40,11 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" crdscheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" - crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" + crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" ) @@ -120,7 +120,7 @@ func newSparkApplicationController( controller.metrics.registerMetrics() } - crdInformer := crdInformerFactory.Sparkoperator().V1beta1().SparkApplications() + crdInformer := crdInformerFactory.Sparkoperator().V1beta2().SparkApplications() crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.onAdd, UpdateFunc: controller.onUpdate, @@ -168,15 +168,15 @@ func (c *Controller) Stop() { // Callback function called when a new SparkApplication object gets created. func (c *Controller) onAdd(obj interface{}) { - app := obj.(*v1beta1.SparkApplication) - v1beta1.SetSparkApplicationDefaults(app) + app := obj.(*v1beta2.SparkApplication) + v1beta2.SetSparkApplicationDefaults(app) glog.Infof("SparkApplication %s/%s was added, enqueueing it for submission", app.Namespace, app.Name) c.enqueue(app) } func (c *Controller) onUpdate(oldObj, newObj interface{}) { - oldApp := oldObj.(*v1beta1.SparkApplication) - newApp := newObj.(*v1beta1.SparkApplication) + oldApp := oldObj.(*v1beta2.SparkApplication) + newApp := newObj.(*v1beta2.SparkApplication) // The informer will call this function on non-updated resources during resync, avoid // processing unchanged applications, unless it is waiting to be retried. @@ -188,8 +188,8 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { // and end up in an inconsistent state. if !equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) { // Force-set the application status to Invalidating which handles clean-up and application re-run. 
- if _, err := c.updateApplicationStatusWithRetries(newApp, func(status *v1beta1.SparkApplicationStatus) { - status.AppState.State = v1beta1.InvalidatingState + if _, err := c.updateApplicationStatusWithRetries(newApp, func(status *v1beta2.SparkApplicationStatus) { + status.AppState.State = v1beta2.InvalidatingState }); err != nil { c.recorder.Eventf( newApp, @@ -214,13 +214,13 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { } func (c *Controller) onDelete(obj interface{}) { - var app *v1beta1.SparkApplication + var app *v1beta2.SparkApplication switch obj.(type) { - case *v1beta1.SparkApplication: - app = obj.(*v1beta1.SparkApplication) + case *v1beta2.SparkApplication: + app = obj.(*v1beta2.SparkApplication) case cache.DeletedFinalStateUnknown: deletedObj := obj.(cache.DeletedFinalStateUnknown).Obj - app = deletedObj.(*v1beta1.SparkApplication) + app = deletedObj.(*v1beta2.SparkApplication) } if app != nil { @@ -265,7 +265,7 @@ func (c *Controller) processNextItem() bool { return true } -func (c *Controller) getExecutorPods(app *v1beta1.SparkApplication) ([]*apiv1.Pod, error) { +func (c *Controller) getExecutorPods(app *v1beta2.SparkApplication) ([]*apiv1.Pod, error) { matchLabels := getResourceLabels(app) matchLabels[config.SparkRoleLabel] = config.SparkExecutorRole // Fetch all the executor pods for the current run of the application. @@ -277,7 +277,7 @@ func (c *Controller) getExecutorPods(app *v1beta1.SparkApplication) ([]*apiv1.Po return pods, nil } -func (c *Controller) getDriverPod(app *v1beta1.SparkApplication) (*apiv1.Pod, error) { +func (c *Controller) getDriverPod(app *v1beta2.SparkApplication) (*apiv1.Pod, error) { pod, err := c.podLister.Pods(app.Namespace).Get(app.Status.DriverInfo.PodName) if err == nil { return pod, nil @@ -300,7 +300,7 @@ func (c *Controller) getDriverPod(app *v1beta1.SparkApplication) (*apiv1.Pod, er // getAndUpdateDriverState finds the driver pod of the application // and updates the driver state based on the current phase of the pod. -func (c *Controller) getAndUpdateDriverState(app *v1beta1.SparkApplication) error { +func (c *Controller) getAndUpdateDriverState(app *v1beta2.SparkApplication) error { // Either the driver pod doesn't exist yet or its name has not been updated. if app.Status.DriverInfo.PodName == "" { return fmt.Errorf("empty driver pod name with application state %s", app.Status.AppState.State) @@ -313,7 +313,7 @@ func (c *Controller) getAndUpdateDriverState(app *v1beta1.SparkApplication) erro if driverPod == nil { app.Status.AppState.ErrorMessage = "Driver Pod not found" - app.Status.AppState.State = v1beta1.FailingState + app.Status.AppState.State = v1beta2.FailingState app.Status.TerminationTime = metav1.Now() return nil } @@ -348,13 +348,13 @@ func (c *Controller) getAndUpdateDriverState(app *v1beta1.SparkApplication) erro // getAndUpdateExecutorState lists the executor pods of the application // and updates the executor state based on the current phase of the pods. 
-func (c *Controller) getAndUpdateExecutorState(app *v1beta1.SparkApplication) error { +func (c *Controller) getAndUpdateExecutorState(app *v1beta2.SparkApplication) error { pods, err := c.getExecutorPods(app) if err != nil { return err } - executorStateMap := make(map[string]v1beta1.ExecutorState) + executorStateMap := make(map[string]v1beta2.ExecutorState) var executorApplicationID string for _, pod := range pods { if util.IsExecutorPod(pod) { @@ -379,7 +379,7 @@ func (c *Controller) getAndUpdateExecutorState(app *v1beta1.SparkApplication) er } if app.Status.ExecutorState == nil { - app.Status.ExecutorState = make(map[string]v1beta1.ExecutorState) + app.Status.ExecutorState = make(map[string]v1beta2.ExecutorState) } for name, execStatus := range executorStateMap { app.Status.ExecutorState[name] = execStatus @@ -390,14 +390,14 @@ func (c *Controller) getAndUpdateExecutorState(app *v1beta1.SparkApplication) er _, exists := executorStateMap[name] if !isExecutorTerminated(oldStatus) && !exists { glog.Infof("Executor pod %s not found, assuming it was deleted.", name) - app.Status.ExecutorState[name] = v1beta1.ExecutorFailedState + app.Status.ExecutorState[name] = v1beta2.ExecutorFailedState } } return nil } -func (c *Controller) getAndUpdateAppState(app *v1beta1.SparkApplication) error { +func (c *Controller) getAndUpdateAppState(app *v1beta2.SparkApplication) error { if err := c.getAndUpdateDriverState(app); err != nil { return err } @@ -407,7 +407,7 @@ func (c *Controller) getAndUpdateAppState(app *v1beta1.SparkApplication) error { return nil } -func (c *Controller) handleSparkApplicationDeletion(app *v1beta1.SparkApplication) { +func (c *Controller) handleSparkApplicationDeletion(app *v1beta2.SparkApplication) { // SparkApplication deletion requested, lets delete driver pod. if err := c.deleteSparkResources(app); err != nil { glog.Errorf("failed to delete resources associated with deleted SparkApplication %s/%s: %v", app.Namespace, app.Name, err) @@ -415,23 +415,23 @@ func (c *Controller) handleSparkApplicationDeletion(app *v1beta1.SparkApplicatio } // ShouldRetry determines if SparkApplication in a given state should be retried. -func shouldRetry(app *v1beta1.SparkApplication) bool { +func shouldRetry(app *v1beta2.SparkApplication) bool { switch app.Status.AppState.State { - case v1beta1.SucceedingState: - return app.Spec.RestartPolicy.Type == v1beta1.Always - case v1beta1.FailingState: - if app.Spec.RestartPolicy.Type == v1beta1.Always { + case v1beta2.SucceedingState: + return app.Spec.RestartPolicy.Type == v1beta2.Always + case v1beta2.FailingState: + if app.Spec.RestartPolicy.Type == v1beta2.Always { return true - } else if app.Spec.RestartPolicy.Type == v1beta1.OnFailure { + } else if app.Spec.RestartPolicy.Type == v1beta2.OnFailure { // We retry if we haven't hit the retry limit. if app.Spec.RestartPolicy.OnFailureRetries != nil && app.Status.ExecutionAttempts <= *app.Spec.RestartPolicy.OnFailureRetries { return true } } - case v1beta1.FailedSubmissionState: - if app.Spec.RestartPolicy.Type == v1beta1.Always { + case v1beta2.FailedSubmissionState: + if app.Spec.RestartPolicy.Type == v1beta2.Always { return true - } else if app.Spec.RestartPolicy.Type == v1beta1.OnFailure { + } else if app.Spec.RestartPolicy.Type == v1beta2.OnFailure { // We retry if we haven't hit the retry limit. 
if app.Spec.RestartPolicy.OnSubmissionFailureRetries != nil && app.Status.SubmissionAttempts <= *app.Spec.RestartPolicy.OnSubmissionFailureRetries { return true @@ -500,18 +500,18 @@ func (c *Controller) syncSparkApplication(key string) error { // Take action based on application state. switch appToUpdate.Status.AppState.State { - case v1beta1.NewState: + case v1beta2.NewState: c.recordSparkApplicationEvent(appToUpdate) if err := c.validateSparkApplication(appToUpdate); err != nil { - appToUpdate.Status.AppState.State = v1beta1.FailedState + appToUpdate.Status.AppState.State = v1beta2.FailedState appToUpdate.Status.AppState.ErrorMessage = err.Error() } else { appToUpdate = c.submitSparkApplication(appToUpdate) } - case v1beta1.SucceedingState: + case v1beta2.SucceedingState: if !shouldRetry(appToUpdate) { // Application is not subject to retry. Move to terminal CompletedState. - appToUpdate.Status.AppState.State = v1beta1.CompletedState + appToUpdate.Status.AppState.State = v1beta2.CompletedState c.recordSparkApplicationEvent(appToUpdate) } else { if err := c.deleteSparkResources(appToUpdate); err != nil { @@ -519,12 +519,12 @@ func (c *Controller) syncSparkApplication(key string) error { appToUpdate.Namespace, appToUpdate.Name, err) return err } - appToUpdate.Status.AppState.State = v1beta1.PendingRerunState + appToUpdate.Status.AppState.State = v1beta2.PendingRerunState } - case v1beta1.FailingState: + case v1beta2.FailingState: if !shouldRetry(appToUpdate) { // Application is not subject to retry. Move to terminal FailedState. - appToUpdate.Status.AppState.State = v1beta1.FailedState + appToUpdate.Status.AppState.State = v1beta2.FailedState c.recordSparkApplicationEvent(appToUpdate) } else if hasRetryIntervalPassed(appToUpdate.Spec.RestartPolicy.OnFailureRetryInterval, appToUpdate.Status.ExecutionAttempts, appToUpdate.Status.TerminationTime) { if err := c.deleteSparkResources(appToUpdate); err != nil { @@ -532,17 +532,17 @@ func (c *Controller) syncSparkApplication(key string) error { appToUpdate.Namespace, appToUpdate.Name, err) return err } - appToUpdate.Status.AppState.State = v1beta1.PendingRerunState + appToUpdate.Status.AppState.State = v1beta2.PendingRerunState } - case v1beta1.FailedSubmissionState: + case v1beta2.FailedSubmissionState: if !shouldRetry(appToUpdate) { // App will never be retried. Move to terminal FailedState. - appToUpdate.Status.AppState.State = v1beta1.FailedState + appToUpdate.Status.AppState.State = v1beta2.FailedState c.recordSparkApplicationEvent(appToUpdate) } else if hasRetryIntervalPassed(appToUpdate.Spec.RestartPolicy.OnSubmissionFailureRetryInterval, appToUpdate.Status.SubmissionAttempts, appToUpdate.Status.LastSubmissionAttemptTime) { appToUpdate = c.submitSparkApplication(appToUpdate) } - case v1beta1.InvalidatingState: + case v1beta2.InvalidatingState: // Invalidate the current run and enqueue the SparkApplication for re-execution. 
if err := c.deleteSparkResources(appToUpdate); err != nil { glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", @@ -550,8 +550,8 @@ func (c *Controller) syncSparkApplication(key string) error { return err } c.clearStatus(&appToUpdate.Status) - appToUpdate.Status.AppState.State = v1beta1.PendingRerunState - case v1beta1.PendingRerunState: + appToUpdate.Status.AppState.State = v1beta2.PendingRerunState + case v1beta2.PendingRerunState: glog.V(2).Infof("SparkApplication %s/%s pending rerun", appToUpdate.Namespace, appToUpdate.Name) if c.validateSparkResourceDeletion(appToUpdate) { glog.V(2).Infof("Resources for SparkApplication %s/%s successfully deleted", appToUpdate.Namespace, appToUpdate.Name) @@ -559,7 +559,7 @@ func (c *Controller) syncSparkApplication(key string) error { c.clearStatus(&appToUpdate.Status) appToUpdate = c.submitSparkApplication(appToUpdate) } - case v1beta1.SubmittedState, v1beta1.RunningState, v1beta1.UnknownState: + case v1beta2.SubmittedState, v1beta2.RunningState, v1beta2.UnknownState: if err := c.getAndUpdateAppState(appToUpdate); err != nil { return err } @@ -595,7 +595,7 @@ func hasRetryIntervalPassed(retryInterval *int64, attemptsDone int32, lastEventT } // submitSparkApplication creates a new submission for the given SparkApplication and submits it using spark-submit. -func (c *Controller) submitSparkApplication(app *v1beta1.SparkApplication) *v1beta1.SparkApplication { +func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1beta2.SparkApplication { if app.PrometheusMonitoringEnabled() { if err := configPrometheusMonitoring(app, c.kubeClient); err != nil { glog.Error(err) @@ -606,9 +606,9 @@ func (c *Controller) submitSparkApplication(app *v1beta1.SparkApplication) *v1be submissionID := uuid.New().String() submissionCmdArgs, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) if err != nil { - app.Status = v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + app.Status = v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, ErrorMessage: err.Error(), }, SubmissionAttempts: app.Status.SubmissionAttempts + 1, @@ -620,9 +620,9 @@ func (c *Controller) submitSparkApplication(app *v1beta1.SparkApplication) *v1be // Try submitting the application by running spark-submit. 
submitted, err := runSparkSubmit(newSubmission(submissionCmdArgs, app)) if err != nil { - app.Status = v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + app.Status = v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, ErrorMessage: err.Error(), }, SubmissionAttempts: app.Status.SubmissionAttempts + 1, @@ -640,12 +640,12 @@ func (c *Controller) submitSparkApplication(app *v1beta1.SparkApplication) *v1be } glog.Infof("SparkApplication %s/%s has been submitted", app.Namespace, app.Name) - app.Status = v1beta1.SparkApplicationStatus{ + app.Status = v1beta2.SparkApplicationStatus{ SubmissionID: submissionID, - AppState: v1beta1.ApplicationState{ - State: v1beta1.SubmittedState, + AppState: v1beta2.ApplicationState{ + State: v1beta2.SubmittedState, }, - DriverInfo: v1beta1.DriverInfo{ + DriverInfo: v1beta2.DriverInfo{ PodName: driverPodName, }, SubmissionAttempts: app.Status.SubmissionAttempts + 1, @@ -676,8 +676,8 @@ func (c *Controller) submitSparkApplication(app *v1beta1.SparkApplication) *v1be } func (c *Controller) updateApplicationStatusWithRetries( - original *v1beta1.SparkApplication, - updateFunc func(status *v1beta1.SparkApplicationStatus)) (*v1beta1.SparkApplication, error) { + original *v1beta2.SparkApplication, + updateFunc func(status *v1beta2.SparkApplicationStatus)) (*v1beta2.SparkApplication, error) { toUpdate := original.DeepCopy() var lastUpdateErr error @@ -686,7 +686,7 @@ func (c *Controller) updateApplicationStatusWithRetries( if equality.Semantic.DeepEqual(original.Status, toUpdate.Status) { return toUpdate, nil } - _, err := c.crdClient.SparkoperatorV1beta1().SparkApplications(toUpdate.Namespace).Update(toUpdate) + _, err := c.crdClient.SparkoperatorV1beta2().SparkApplications(toUpdate.Namespace).Update(toUpdate) if err == nil { return toUpdate, nil } @@ -696,7 +696,7 @@ func (c *Controller) updateApplicationStatusWithRetries( // Failed to update to the API server. // Get the latest version from the API server first and re-apply the update. name := toUpdate.Name - toUpdate, err = c.crdClient.SparkoperatorV1beta1().SparkApplications(toUpdate.Namespace).Get(name, + toUpdate, err = c.crdClient.SparkoperatorV1beta2().SparkApplications(toUpdate.Namespace).Get(name, metav1.GetOptions{}) if err != nil { glog.Errorf("failed to get SparkApplication %s/%s: %v", original.Namespace, name, err) @@ -713,13 +713,13 @@ func (c *Controller) updateApplicationStatusWithRetries( } // updateStatusAndExportMetrics updates the status of the SparkApplication and export the metrics. -func (c *Controller) updateStatusAndExportMetrics(oldApp, newApp *v1beta1.SparkApplication) error { +func (c *Controller) updateStatusAndExportMetrics(oldApp, newApp *v1beta2.SparkApplication) error { // Skip update if nothing changed. 
if equality.Semantic.DeepEqual(oldApp, newApp) { return nil } - updatedApp, err := c.updateApplicationStatusWithRetries(oldApp, func(status *v1beta1.SparkApplicationStatus) { + updatedApp, err := c.updateApplicationStatusWithRetries(oldApp, func(status *v1beta2.SparkApplicationStatus) { *status = newApp.Status }) @@ -731,7 +731,7 @@ func (c *Controller) updateStatusAndExportMetrics(oldApp, newApp *v1beta1.SparkA return err } -func (c *Controller) getSparkApplication(namespace string, name string) (*v1beta1.SparkApplication, error) { +func (c *Controller) getSparkApplication(namespace string, name string) (*v1beta2.SparkApplication, error) { app, err := c.applicationLister.SparkApplications(namespace).Get(name) if err != nil { if errors.IsNotFound(err) { @@ -743,7 +743,7 @@ func (c *Controller) getSparkApplication(namespace string, name string) (*v1beta } // Delete the driver pod and optional UI resources (Service/Ingress) created for the application. -func (c *Controller) deleteSparkResources(app *v1beta1.SparkApplication) error { +func (c *Controller) deleteSparkResources(app *v1beta2.SparkApplication) error { driverPodName := app.Status.DriverInfo.PodName if driverPodName != "" { glog.V(2).Infof("Deleting pod %s in namespace %s", driverPodName, app.Namespace) @@ -774,7 +774,7 @@ func (c *Controller) deleteSparkResources(app *v1beta1.SparkApplication) error { return nil } -func (c *Controller) validateSparkApplication(app *v1beta1.SparkApplication) error { +func (c *Controller) validateSparkApplication(app *v1beta2.SparkApplication) error { appSpec := app.Spec driverSpec := appSpec.Driver executorSpec := appSpec.Executor @@ -786,7 +786,7 @@ func (c *Controller) validateSparkApplication(app *v1beta1.SparkApplication) err } // Validate that any Spark resources (driver/Service/Ingress) created for the application have been deleted. 
-func (c *Controller) validateSparkResourceDeletion(app *v1beta1.SparkApplication) bool { +func (c *Controller) validateSparkResourceDeletion(app *v1beta2.SparkApplication) bool { driverPodName := app.Status.DriverInfo.PodName if driverPodName != "" { _, err := c.kubeClient.CoreV1().Pods(app.Namespace).Get(driverPodName, metav1.GetOptions{}) @@ -824,23 +824,23 @@ func (c *Controller) enqueue(obj interface{}) { c.queue.AddRateLimited(key) } -func (c *Controller) recordSparkApplicationEvent(app *v1beta1.SparkApplication) { +func (c *Controller) recordSparkApplicationEvent(app *v1beta2.SparkApplication) { switch app.Status.AppState.State { - case v1beta1.NewState: + case v1beta2.NewState: c.recorder.Eventf( app, apiv1.EventTypeNormal, "SparkApplicationAdded", "SparkApplication %s was added, enqueuing it for submission", app.Name) - case v1beta1.SubmittedState: + case v1beta2.SubmittedState: c.recorder.Eventf( app, apiv1.EventTypeNormal, "SparkApplicationSubmitted", "SparkApplication %s was submitted successfully", app.Name) - case v1beta1.FailedSubmissionState: + case v1beta2.FailedSubmissionState: c.recorder.Eventf( app, apiv1.EventTypeWarning, @@ -848,14 +848,14 @@ func (c *Controller) recordSparkApplicationEvent(app *v1beta1.SparkApplication) "failed to submit SparkApplication %s: %s", app.Name, app.Status.AppState.ErrorMessage) - case v1beta1.CompletedState: + case v1beta2.CompletedState: c.recorder.Eventf( app, apiv1.EventTypeNormal, "SparkApplicationCompleted", "SparkApplication %s completed", app.Name) - case v1beta1.FailedState: + case v1beta2.FailedState: c.recorder.Eventf( app, apiv1.EventTypeWarning, @@ -863,7 +863,7 @@ func (c *Controller) recordSparkApplicationEvent(app *v1beta1.SparkApplication) "SparkApplication %s failed: %s", app.Name, app.Status.AppState.ErrorMessage) - case v1beta1.PendingRerunState: + case v1beta2.PendingRerunState: c.recorder.Eventf( app, apiv1.EventTypeWarning, @@ -873,7 +873,7 @@ func (c *Controller) recordSparkApplicationEvent(app *v1beta1.SparkApplication) } } -func (c *Controller) recordDriverEvent(app *v1beta1.SparkApplication, phase apiv1.PodPhase, name string) { +func (c *Controller) recordDriverEvent(app *v1beta2.SparkApplication, phase apiv1.PodPhase, name string) { switch phase { case apiv1.PodSucceeded: c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverCompleted", "Driver %s completed", name) @@ -888,23 +888,23 @@ func (c *Controller) recordDriverEvent(app *v1beta1.SparkApplication, phase apiv } } -func (c *Controller) recordExecutorEvent(app *v1beta1.SparkApplication, state v1beta1.ExecutorState, name string) { +func (c *Controller) recordExecutorEvent(app *v1beta2.SparkApplication, state v1beta2.ExecutorState, name string) { switch state { - case v1beta1.ExecutorCompletedState: + case v1beta2.ExecutorCompletedState: c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorCompleted", "Executor %s completed", name) - case v1beta1.ExecutorPendingState: + case v1beta2.ExecutorPendingState: c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorPending", "Executor %s is pending", name) - case v1beta1.ExecutorRunningState: + case v1beta2.ExecutorRunningState: c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorRunning", "Executor %s is running", name) - case v1beta1.ExecutorFailedState: + case v1beta2.ExecutorFailedState: c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorFailed", "Executor %s failed", name) - case v1beta1.ExecutorUnknownState: + case v1beta2.ExecutorUnknownState: c.recorder.Eventf(app, 
apiv1.EventTypeWarning, "SparkExecutorUnknownState", "Executor %s in unknown state", name) } } -func (c *Controller) clearStatus(status *v1beta1.SparkApplicationStatus) { - if status.AppState.State == v1beta1.InvalidatingState { +func (c *Controller) clearStatus(status *v1beta2.SparkApplicationStatus) { + if status.AppState.State == v1beta2.InvalidatingState { status.SparkApplicationID = "" status.SubmissionAttempts = 0 status.ExecutionAttempts = 0 @@ -912,11 +912,11 @@ func (c *Controller) clearStatus(status *v1beta1.SparkApplicationStatus) { status.TerminationTime = metav1.Time{} status.AppState.ErrorMessage = "" status.ExecutorState = nil - } else if status.AppState.State == v1beta1.PendingRerunState { + } else if status.AppState.State == v1beta2.PendingRerunState { status.SparkApplicationID = "" status.SubmissionAttempts = 0 status.LastSubmissionAttemptTime = metav1.Time{} - status.DriverInfo = v1beta1.DriverInfo{} + status.DriverInfo = v1beta2.DriverInfo{} status.AppState.ErrorMessage = "" status.ExecutorState = nil } diff --git a/pkg/controller/sparkapplication/controller_test.go b/pkg/controller/sparkapplication/controller_test.go index b01ae1ce6..504544ba3 100644 --- a/pkg/controller/sparkapplication/controller_test.go +++ b/pkg/controller/sparkapplication/controller_test.go @@ -35,14 +35,14 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake" crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" ) -func newFakeController(app *v1beta1.SparkApplication, pods ...*apiv1.Pod) (*Controller, *record.FakeRecorder) { +func newFakeController(app *v1beta2.SparkApplication, pods ...*apiv1.Pod) (*Controller, *record.FakeRecorder) { crdclientfake.AddToScheme(scheme.Scheme) crdClient := crdclientfake.NewSimpleClientset() kubeClient := kubeclientfake.NewSimpleClientset() @@ -67,7 +67,7 @@ func newFakeController(app *v1beta1.SparkApplication, pods ...*apiv1.Pod) (*Cont controller := newSparkApplicationController(crdClient, kubeClient, informerFactory, podInformerFactory, recorder, &util.MetricConfig{}, "") - informer := informerFactory.Sparkoperator().V1beta1().SparkApplications().Informer() + informer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer() if app != nil { informer.GetIndexer().Add(app) } @@ -84,12 +84,12 @@ func newFakeController(app *v1beta1.SparkApplication, pods ...*apiv1.Pod) (*Cont func TestOnAdd(t *testing.T) { ctrl, _ := newFakeController(nil) - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{}, + Status: v1beta2.SparkApplicationStatus{}, } ctrl.onAdd(app) @@ -105,16 +105,16 @@ func TestOnAdd(t *testing.T) { func TestOnUpdate(t *testing.T) { ctrl, recorder := newFakeController(nil) - appTemplate := &v1beta1.SparkApplication{ + appTemplate := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", ResourceVersion: "1", }, - Spec: v1beta1.SparkApplicationSpec{ - Mode: v1beta1.ClusterMode, + Spec: 
v1beta2.SparkApplicationSpec{ + Mode: v1beta2.ClusterMode, Image: stringptr("foo-image:v1"), - Executor: v1beta1.ExecutorSpec{ + Executor: v1beta2.ExecutorSpec{ Instances: int32ptr(1), }, }, @@ -150,7 +150,7 @@ func TestOnUpdate(t *testing.T) { assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateFailed")) // Case3: Spec update successful. - ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(appTemplate.Namespace).Create(appTemplate) + ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Create(appTemplate) ctrl.onUpdate(appTemplate, copyWithSpecUpdate) // Verify App was enqueued. @@ -167,20 +167,20 @@ func TestOnUpdate(t *testing.T) { assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateProcessed")) // Verify the SparkApplication state was updated to InvalidatingState. - app, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(appTemplate.Namespace).Get(appTemplate.Name, metav1.GetOptions{}) + app, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Get(appTemplate.Name, metav1.GetOptions{}) assert.Nil(t, err) - assert.Equal(t, v1beta1.InvalidatingState, app.Status.AppState.State) + assert.Equal(t, v1beta2.InvalidatingState, app.Status.AppState.State) } func TestOnDelete(t *testing.T) { ctrl, recorder := newFakeController(nil) - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{}, + Status: v1beta2.SparkApplicationStatus{}, } ctrl.onAdd(app) ctrl.queue.Get() @@ -234,31 +234,31 @@ func TestSyncSparkApplication_SubmissionFailed(t *testing.T) { os.Setenv(kubernetesServiceHostEnvVar, "localhost") os.Setenv(kubernetesServicePortEnvVar, "443") - restartPolicyOnFailure := v1beta1.RestartPolicy{ - Type: v1beta1.OnFailure, + restartPolicyOnFailure := v1beta2.RestartPolicy{ + Type: v1beta2.OnFailure, OnFailureRetries: int32ptr(1), OnFailureRetryInterval: int64ptr(100), OnSubmissionFailureRetryInterval: int64ptr(100), OnSubmissionFailureRetries: int32ptr(1), } - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.NewState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.NewState, ErrorMessage: "", }, }, } ctrl, recorder := newFakeController(app) - _, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(app) + _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(app) if err != nil { t.Fatal(err) } @@ -273,9 +273,9 @@ func TestSyncSparkApplication_SubmissionFailed(t *testing.T) { // Attempt 1 err = ctrl.syncSparkApplication("default/foo") - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) + updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) - assert.Equal(t, v1beta1.FailedSubmissionState, updatedApp.Status.AppState.State) + assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State) assert.Equal(t, int32(1), updatedApp.Status.SubmissionAttempts) assert.Equal(t, float64(0), 
fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppFailedSubmissionCount, map[string]string{})) @@ -288,16 +288,16 @@ func TestSyncSparkApplication_SubmissionFailed(t *testing.T) { // Attempt 2: Retry again. updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)} ctrl, recorder = newFakeController(updatedApp) - _, err = ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(updatedApp) + _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(updatedApp) if err != nil { t.Fatal(err) } err = ctrl.syncSparkApplication("default/foo") // Verify that the application failed again. - updatedApp, err = ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) + updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) assert.Nil(t, err) - assert.Equal(t, v1beta1.FailedSubmissionState, updatedApp.Status.AppState.State) + assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State) assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts) assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) @@ -307,16 +307,16 @@ func TestSyncSparkApplication_SubmissionFailed(t *testing.T) { // Attempt 3: No more retries. updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)} ctrl, recorder = newFakeController(updatedApp) - _, err = ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(updatedApp) + _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(updatedApp) if err != nil { t.Fatal(err) } err = ctrl.syncSparkApplication("default/foo") // Verify that the application failed again. - updatedApp, err = ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) + updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) assert.Nil(t, err) - assert.Equal(t, v1beta1.FailedState, updatedApp.Status.AppState.State) + assert.Equal(t, v1beta2.FailedState, updatedApp.Status.AppState.State) // No more submission attempts made. 
assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts) } @@ -324,7 +324,7 @@ func TestSyncSparkApplication_SubmissionFailed(t *testing.T) { func TestValidateDetectsNodeSelectorSuccessNoSelector(t *testing.T) { ctrl, _ := newFakeController(nil) - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", @@ -338,12 +338,12 @@ func TestValidateDetectsNodeSelectorSuccessNoSelector(t *testing.T) { func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtAppLevel(t *testing.T) { ctrl, _ := newFakeController(nil) - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ NodeSelector: map[string]string{"mynode": "mygift"}, }, } @@ -355,14 +355,14 @@ func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtAppLevel(t *testing.T) func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtPodLevel(t *testing.T) { ctrl, _ := newFakeController(nil) - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ NodeSelector: map[string]string{"mynode": "mygift"}, }, }, @@ -372,8 +372,8 @@ func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtPodLevel(t *testing.T) err := ctrl.validateSparkApplication(app) assert.Nil(t, err) - app.Spec.Executor = v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + app.Spec.Executor = v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ NodeSelector: map[string]string{"mynode": "mygift"}, }, } @@ -385,15 +385,15 @@ func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtPodLevel(t *testing.T) func TestValidateDetectsNodeSelectorFailsAppAndPodLevel(t *testing.T) { ctrl, _ := newFakeController(nil) - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ NodeSelector: map[string]string{"mynode": "mygift"}, - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ NodeSelector: map[string]string{"mynode": "mygift"}, }, }, @@ -403,8 +403,8 @@ func TestValidateDetectsNodeSelectorFailsAppAndPodLevel(t *testing.T) { err := ctrl.validateSparkApplication(app) assert.NotNil(t, err) - app.Spec.Executor = v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + app.Spec.Executor = v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ NodeSelector: map[string]string{"mynode": "mygift"}, }, } @@ -415,7 +415,7 @@ func TestValidateDetectsNodeSelectorFailsAppAndPodLevel(t *testing.T) { func TestShouldRetry(t *testing.T) { type testcase struct { - app *v1beta1.SparkApplication + app *v1beta2.SparkApplication shouldRetry bool } @@ -424,18 +424,18 @@ func TestShouldRetry(t *testing.T) { assert.Equal(t, test.shouldRetry, shouldRetry) } - restartPolicyAlways := v1beta1.RestartPolicy{ - Type: v1beta1.Always, + restartPolicyAlways := v1beta2.RestartPolicy{ + Type: v1beta2.Always, OnSubmissionFailureRetryInterval: int64ptr(100), OnFailureRetryInterval: int64ptr(100), } - restartPolicyNever := 
v1beta1.RestartPolicy{ - Type: v1beta1.Never, + restartPolicyNever := v1beta2.RestartPolicy{ + Type: v1beta2.Never, } - restartPolicyOnFailure := v1beta1.RestartPolicy{ - Type: v1beta1.OnFailure, + restartPolicyOnFailure := v1beta2.RestartPolicy{ + Type: v1beta2.OnFailure, OnFailureRetries: int32ptr(1), OnFailureRetryInterval: int64ptr(100), OnSubmissionFailureRetryInterval: int64ptr(100), @@ -444,7 +444,7 @@ func TestShouldRetry(t *testing.T) { testcases := []testcase{ { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", @@ -452,119 +452,119 @@ func TestShouldRetry(t *testing.T) { shouldRetry: false, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.SucceedingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.SucceedingState, }, }, }, shouldRetry: true, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.SucceedingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.SucceedingState, }, }, }, shouldRetry: false, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, }, }, shouldRetry: true, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyNever, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, }, }, shouldRetry: false, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyNever, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, }, }, shouldRetry: false, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, - Status: 
v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, }, }, shouldRetry: true, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.PendingRerunState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.PendingRerunState, }, }, }, @@ -579,8 +579,8 @@ func TestShouldRetry(t *testing.T) { func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { type testcase struct { - app *v1beta1.SparkApplication - expectedState v1beta1.ApplicationStateType + app *v1beta2.SparkApplication + expectedState v1beta2.ApplicationStateType } os.Setenv(sparkHomeEnvVar, "/spark") os.Setenv(kubernetesServiceHostEnvVar, "localhost") @@ -588,7 +588,7 @@ func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { testFn := func(test testcase, t *testing.T) { ctrl, _ := newFakeController(test.app) - _, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(test.app.Namespace).Create(test.app) + _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Create(test.app) if err != nil { t.Fatal(err) } @@ -603,26 +603,26 @@ func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", test.app.Namespace, test.app.Name)) assert.Nil(t, err) - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(test.app.Namespace).Get(test.app.Name, metav1.GetOptions{}) + updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Get(test.app.Name, metav1.GetOptions{}) assert.Nil(t, err) assert.Equal(t, test.expectedState, updatedApp.Status.AppState.State) - if test.expectedState == v1beta1.SubmittedState { + if test.expectedState == v1beta2.SubmittedState { assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) } } - restartPolicyAlways := v1beta1.RestartPolicy{ - Type: v1beta1.Always, + restartPolicyAlways := v1beta2.RestartPolicy{ + Type: v1beta2.Always, OnSubmissionFailureRetryInterval: int64ptr(100), OnFailureRetryInterval: int64ptr(100), } - restartPolicyNever := v1beta1.RestartPolicy{ - Type: v1beta1.Never, + restartPolicyNever := v1beta2.RestartPolicy{ + Type: v1beta2.Never, } - restartPolicyOnFailure := v1beta1.RestartPolicy{ - Type: v1beta1.OnFailure, + restartPolicyOnFailure := v1beta2.RestartPolicy{ + Type: v1beta2.OnFailure, OnFailureRetries: int32ptr(1), OnFailureRetryInterval: int64ptr(100), OnSubmissionFailureRetryInterval: int64ptr(100), @@ -631,284 +631,284 @@ func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { testcases := []testcase{ { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }}, - expectedState: v1beta1.SubmittedState, + expectedState: v1beta2.SubmittedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: 
restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.SucceedingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.SucceedingState, }, }, }, - expectedState: v1beta1.PendingRerunState, + expectedState: v1beta2.PendingRerunState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.PendingRerunState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.PendingRerunState, }, }, }, - expectedState: v1beta1.SubmittedState, + expectedState: v1beta2.SubmittedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, }, - expectedState: v1beta1.FailedSubmissionState, + expectedState: v1beta2.FailedSubmissionState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, SubmissionAttempts: 1, LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, }, - expectedState: v1beta1.SubmittedState, + expectedState: v1beta2.SubmittedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, ExecutionAttempts: 1, TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, }, - expectedState: v1beta1.PendingRerunState, + expectedState: v1beta2.PendingRerunState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyAlways, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, }, - 
expectedState: v1beta1.FailingState, + expectedState: v1beta2.FailingState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyNever, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.InvalidatingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.InvalidatingState, }, TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, }, - expectedState: v1beta1.PendingRerunState, + expectedState: v1beta2.PendingRerunState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyNever, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.SucceedingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.SucceedingState, }, }, }, - expectedState: v1beta1.CompletedState, + expectedState: v1beta2.CompletedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyNever, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.NewState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.NewState, }, }, }, - expectedState: v1beta1.SubmittedState, + expectedState: v1beta2.SubmittedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, ExecutionAttempts: 2, }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, }, - expectedState: v1beta1.FailedState, + expectedState: v1beta2.FailedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, ExecutionAttempts: 1, TerminationTime: metav1.Now(), }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, }, - expectedState: v1beta1.FailingState, + expectedState: v1beta2.FailingState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailingState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailingState, }, ExecutionAttempts: 1, TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, - 
Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, }, - expectedState: v1beta1.PendingRerunState, + expectedState: v1beta2.PendingRerunState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, SubmissionAttempts: 3, }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, }, - expectedState: v1beta1.FailedState, + expectedState: v1beta2.FailedState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, SubmissionAttempts: 1, LastSubmissionAttemptTime: metav1.Now(), }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, }, - expectedState: v1beta1.FailedSubmissionState, + expectedState: v1beta2.FailedSubmissionState, }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.FailedSubmissionState, + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.FailedSubmissionState, }, SubmissionAttempts: 1, LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ RestartPolicy: restartPolicyOnFailure, }, }, - expectedState: v1beta1.SubmittedState, + expectedState: v1beta2.SubmittedState, }, } @@ -920,12 +920,12 @@ func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { func TestSyncSparkApplication_ExecutingState(t *testing.T) { type testcase struct { appName string - oldAppStatus v1beta1.ApplicationStateType - oldExecutorStatus map[string]v1beta1.ExecutorState + oldAppStatus v1beta2.ApplicationStateType + oldExecutorStatus map[string]v1beta2.ExecutorState driverPod *apiv1.Pod executorPod *apiv1.Pod - expectedAppState v1beta1.ApplicationStateType - expectedExecutorState map[string]v1beta1.ExecutorState + expectedAppState v1beta2.ApplicationStateType + expectedExecutorState map[string]v1beta2.ExecutorState expectedAppMetrics metrics expectedExecutorMetrics executorMetrics } @@ -936,35 +936,35 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { appName := "foo" driverPodName := appName + "-driver" - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: appName, Namespace: "test", }, - Spec: v1beta1.SparkApplicationSpec{ - RestartPolicy: v1beta1.RestartPolicy{ - Type: v1beta1.Never, + Spec: v1beta2.SparkApplicationSpec{ + RestartPolicy: v1beta2.RestartPolicy{ + Type: v1beta2.Never, }, }, - Status: v1beta1.SparkApplicationStatus{ - AppState: v1beta1.ApplicationState{ - State: v1beta1.SubmittedState, + Status: v1beta2.SparkApplicationStatus{ + AppState: 
v1beta2.ApplicationState{ + State: v1beta2.SubmittedState, ErrorMessage: "", }, - DriverInfo: v1beta1.DriverInfo{ + DriverInfo: v1beta2.DriverInfo{ PodName: driverPodName, }, - ExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorRunningState}, + ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, }, } testcases := []testcase{ { appName: appName, - oldAppStatus: v1beta1.SubmittedState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorRunningState}, - expectedAppState: v1beta1.FailingState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorFailedState}, + oldAppStatus: v1beta2.SubmittedState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, + expectedAppState: v1beta2.FailingState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, expectedAppMetrics: metrics{ failedMetricCount: 1, }, @@ -974,8 +974,8 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { }, { appName: appName, - oldAppStatus: v1beta1.SubmittedState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorRunningState}, + oldAppStatus: v1beta2.SubmittedState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, driverPod: &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: driverPodName, @@ -1004,8 +1004,8 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { Phase: apiv1.PodSucceeded, }, }, - expectedAppState: v1beta1.RunningState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorCompletedState}, + expectedAppState: v1beta2.RunningState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, expectedAppMetrics: metrics{ runningMetricCount: 1, }, @@ -1015,8 +1015,8 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { }, { appName: appName, - oldAppStatus: v1beta1.RunningState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorRunningState}, + oldAppStatus: v1beta2.RunningState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, driverPod: &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: driverPodName, @@ -1055,8 +1055,8 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { Phase: apiv1.PodFailed, }, }, - expectedAppState: v1beta1.FailingState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorFailedState}, + expectedAppState: v1beta2.FailingState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, expectedAppMetrics: metrics{ failedMetricCount: 1, }, @@ -1066,17 +1066,17 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { }, { appName: appName, - oldAppStatus: v1beta1.FailingState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorFailedState}, - expectedAppState: v1beta1.FailedState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorFailedState}, + oldAppStatus: v1beta2.FailingState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, + expectedAppState: v1beta2.FailedState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, expectedAppMetrics: metrics{}, expectedExecutorMetrics: executorMetrics{}, }, { appName: 
appName, - oldAppStatus: v1beta1.RunningState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorRunningState}, + oldAppStatus: v1beta2.RunningState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, driverPod: &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: driverPodName, @@ -1105,8 +1105,8 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { Phase: apiv1.PodSucceeded, }, }, - expectedAppState: v1beta1.SucceedingState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorCompletedState}, + expectedAppState: v1beta2.SucceedingState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, expectedAppMetrics: metrics{ successMetricCount: 1, }, @@ -1116,17 +1116,17 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { }, { appName: appName, - oldAppStatus: v1beta1.SucceedingState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorCompletedState}, - expectedAppState: v1beta1.CompletedState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorCompletedState}, + oldAppStatus: v1beta2.SucceedingState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, + expectedAppState: v1beta2.CompletedState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, expectedAppMetrics: metrics{}, expectedExecutorMetrics: executorMetrics{}, }, { appName: appName, - oldAppStatus: v1beta1.SubmittedState, - oldExecutorStatus: map[string]v1beta1.ExecutorState{}, + oldAppStatus: v1beta2.SubmittedState, + oldExecutorStatus: map[string]v1beta2.ExecutorState{}, driverPod: &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: driverPodName, @@ -1153,8 +1153,8 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { Phase: apiv1.PodPending, }, }, - expectedAppState: v1beta1.UnknownState, - expectedExecutorState: map[string]v1beta1.ExecutorState{"exec-1": v1beta1.ExecutorPendingState}, + expectedAppState: v1beta2.UnknownState, + expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState}, expectedAppMetrics: metrics{}, expectedExecutorMetrics: executorMetrics{}, }, @@ -1166,7 +1166,7 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { app.Name = test.appName app.Status.ExecutionAttempts = 1 ctrl, _ := newFakeController(app, test.driverPod, test.executorPod) - _, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Create(app) + _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(app) if err != nil { t.Fatal(err) } @@ -1180,7 +1180,7 @@ func TestSyncSparkApplication_ExecutingState(t *testing.T) { err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) assert.Nil(t, err) // Verify application and executor states. 
- updatedApp, err := ctrl.crdClient.SparkoperatorV1beta1().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) + updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(app.Name, metav1.GetOptions{}) assert.Equal(t, test.expectedAppState, updatedApp.Status.AppState.State) assert.Equal(t, test.expectedExecutorState, updatedApp.Status.ExecutorState) diff --git a/pkg/controller/sparkapplication/monitoring_config.go b/pkg/controller/sparkapplication/monitoring_config.go index 36277c41a..1d0f9318f 100644 --- a/pkg/controller/sparkapplication/monitoring_config.go +++ b/pkg/controller/sparkapplication/monitoring_config.go @@ -26,7 +26,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) @@ -38,7 +38,7 @@ const ( prometheusPathAnnotation = "prometheus.io/path" ) -func configPrometheusMonitoring(app *v1beta1.SparkApplication, kubeClient clientset.Interface) error { +func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient clientset.Interface) error { port := config.DefaultPrometheusJavaAgentPort if app.Spec.Monitoring.Prometheus.Port != nil { port = *app.Spec.Monitoring.Prometheus.Port @@ -122,7 +122,7 @@ func configPrometheusMonitoring(app *v1beta1.SparkApplication, kubeClient client return nil } -func buildPrometheusConfigMap(app *v1beta1.SparkApplication, prometheusConfigMapName string) *corev1.ConfigMap { +func buildPrometheusConfigMap(app *v1beta2.SparkApplication, prometheusConfigMapName string) *corev1.ConfigMap { metricsProperties := config.DefaultMetricsProperties if app.Spec.Monitoring.MetricsProperties != nil { metricsProperties = *app.Spec.Monitoring.MetricsProperties diff --git a/pkg/controller/sparkapplication/monitoring_config_test.go b/pkg/controller/sparkapplication/monitoring_config_test.go index 8b955b53e..819243d1f 100644 --- a/pkg/controller/sparkapplication/monitoring_config_test.go +++ b/pkg/controller/sparkapplication/monitoring_config_test.go @@ -23,13 +23,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) func TestConfigPrometheusMonitoring(t *testing.T) { type testcase struct { - app *v1beta1.SparkApplication + app *v1beta2.SparkApplication metricsProperties string prometheusConfig string port string @@ -95,16 +95,16 @@ func TestConfigPrometheusMonitoring(t *testing.T) { testcases := []testcase{ { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "app1", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ - Monitoring: &v1beta1.MonitoringSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Monitoring: &v1beta2.MonitoringSpec{ ExposeDriverMetrics: true, ExposeExecutorMetrics: true, - Prometheus: &v1beta1.PrometheusSpec{ + Prometheus: &v1beta2.PrometheusSpec{ JmxExporterJar: "/prometheus/exporter.jar", }, }, @@ -117,23 +117,23 @@ func TestConfigPrometheusMonitoring(t *testing.T) { executorJavaOptions: 
"-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", }, { - app: &v1beta1.SparkApplication{ + app: &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "app2", Namespace: "default", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), }, - Executor: v1beta1.ExecutorSpec{ + Executor: v1beta2.ExecutorSpec{ JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), }, - Monitoring: &v1beta1.MonitoringSpec{ + Monitoring: &v1beta2.MonitoringSpec{ ExposeDriverMetrics: true, ExposeExecutorMetrics: true, MetricsProperties: stringptr("dummy"), - Prometheus: &v1beta1.PrometheusSpec{ + Prometheus: &v1beta2.PrometheusSpec{ JmxExporterJar: "/prometheus/exporter.jar", Port: int32ptr(8091), Configuration: stringptr("dummy"), diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler.go b/pkg/controller/sparkapplication/spark_pod_eventhandler.go index ce116c599..6978de6f0 100644 --- a/pkg/controller/sparkapplication/spark_pod_eventhandler.go +++ b/pkg/controller/sparkapplication/spark_pod_eventhandler.go @@ -22,7 +22,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" - crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" + crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) diff --git a/pkg/controller/sparkapplication/sparkapp_metrics.go b/pkg/controller/sparkapplication/sparkapp_metrics.go index 98d92ba1d..a6e09f4b5 100644 --- a/pkg/controller/sparkapplication/sparkapp_metrics.go +++ b/pkg/controller/sparkapplication/sparkapp_metrics.go @@ -22,7 +22,7 @@ import ( "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" ) @@ -139,7 +139,7 @@ func (sm *sparkAppMetrics) registerMetrics() { sm.sparkAppExecutorRunningCount.Register() } -func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta1.SparkApplication) { +func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta2.SparkApplication) { metricLabels := fetchMetricLabels(newApp.Labels, sm.labels) glog.V(2).Infof("Exporting metrics for %s; old status: %v new status: %v", newApp.Name, oldApp.Status, newApp.Status) @@ -148,15 +148,15 @@ func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta1.SparkApplicatio newState := newApp.Status.AppState.State if newState != oldState { switch newState { - case v1beta1.SubmittedState: + case v1beta2.SubmittedState: if m, err := sm.sparkAppSubmitCount.GetMetricWith(metricLabels); err != nil { glog.Errorf("Error while exporting metrics: %v", err) } else { m.Inc() } - case v1beta1.RunningState: + case v1beta2.RunningState: sm.sparkAppRunningCount.Inc(metricLabels) - case v1beta1.SucceedingState: + case v1beta2.SucceedingState: if !newApp.Status.LastSubmissionAttemptTime.Time.IsZero() && !newApp.Status.TerminationTime.Time.IsZero() { d := newApp.Status.TerminationTime.Time.Sub(newApp.Status.LastSubmissionAttemptTime.Time) @@ -172,7 +172,7 @@ func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta1.SparkApplicatio } 
else { m.Inc() } - case v1beta1.FailingState: + case v1beta2.FailingState: if !newApp.Status.LastSubmissionAttemptTime.Time.IsZero() && !newApp.Status.TerminationTime.Time.IsZero() { d := newApp.Status.TerminationTime.Time.Sub(newApp.Status.LastSubmissionAttemptTime.Time) if m, err := sm.sparkAppFailureExecutionTime.GetMetricWith(metricLabels); err != nil { @@ -187,7 +187,7 @@ func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta1.SparkApplicatio } else { m.Inc() } - case v1beta1.FailedSubmissionState: + case v1beta2.FailedSubmissionState: if m, err := sm.sparkAppFailedSubmissionCount.GetMetricWith(metricLabels); err != nil { glog.Errorf("Error while exporting metrics: %v", err) } else { @@ -199,13 +199,13 @@ func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta1.SparkApplicatio // Potential Executor status updates for executor, newExecState := range newApp.Status.ExecutorState { switch newExecState { - case v1beta1.ExecutorRunningState: + case v1beta2.ExecutorRunningState: if oldApp.Status.ExecutorState[executor] != newExecState { glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, oldApp.Status.ExecutorState[executor], newExecState) sm.sparkAppExecutorRunningCount.Inc(metricLabels) } - case v1beta1.ExecutorCompletedState: + case v1beta2.ExecutorCompletedState: if oldApp.Status.ExecutorState[executor] != newExecState { glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, oldApp.Status.ExecutorState[executor], newExecState) @@ -216,7 +216,7 @@ func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta1.SparkApplicatio m.Inc() } } - case v1beta1.ExecutorFailedState: + case v1beta2.ExecutorFailedState: if oldApp.Status.ExecutorState[executor] != newExecState { glog.V(2).Infof("Exporting Metrics for Executor %s. 
OldState: %v NewState: %v", executor, oldApp.Status.ExecutorState[executor], newExecState) diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go index d73290f40..f572f1ac2 100644 --- a/pkg/controller/sparkapplication/sparkapp_util.go +++ b/pkg/controller/sparkapplication/sparkapp_util.go @@ -19,7 +19,7 @@ package sparkapplication import ( "fmt" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" apiv1 "k8s.io/api/core/v1" ) @@ -38,7 +38,7 @@ func getSparkApplicationID(pod *apiv1.Pod) string { return pod.Labels[config.SparkApplicationSelectorLabel] } -func getDriverPodName(app *v1beta1.SparkApplication) string { +func getDriverPodName(app *v1beta2.SparkApplication) string { name := app.Spec.Driver.PodName if name != nil && len(*name) > 0 { return *name @@ -52,15 +52,15 @@ func getDriverPodName(app *v1beta1.SparkApplication) string { return fmt.Sprintf("%s-driver", app.Name) } -func getDefaultUIServiceName(app *v1beta1.SparkApplication) string { +func getDefaultUIServiceName(app *v1beta2.SparkApplication) string { return fmt.Sprintf("%s-ui-svc", app.Name) } -func getDefaultUIIngressName(app *v1beta1.SparkApplication) string { +func getDefaultUIIngressName(app *v1beta2.SparkApplication) string { return fmt.Sprintf("%s-ui-ingress", app.Name) } -func getResourceLabels(app *v1beta1.SparkApplication) map[string]string { +func getResourceLabels(app *v1beta2.SparkApplication) map[string]string { labels := map[string]string{config.SparkAppNameLabel: app.Name} if app.Status.SubmissionID != "" { labels[config.SubmissionIDLabel] = app.Status.SubmissionID @@ -68,36 +68,36 @@ func getResourceLabels(app *v1beta1.SparkApplication) map[string]string { return labels } -func podPhaseToExecutorState(podPhase apiv1.PodPhase) v1beta1.ExecutorState { +func podPhaseToExecutorState(podPhase apiv1.PodPhase) v1beta2.ExecutorState { switch podPhase { case apiv1.PodPending: - return v1beta1.ExecutorPendingState + return v1beta2.ExecutorPendingState case apiv1.PodRunning: - return v1beta1.ExecutorRunningState + return v1beta2.ExecutorRunningState case apiv1.PodSucceeded: - return v1beta1.ExecutorCompletedState + return v1beta2.ExecutorCompletedState case apiv1.PodFailed: - return v1beta1.ExecutorFailedState + return v1beta2.ExecutorFailedState default: - return v1beta1.ExecutorUnknownState + return v1beta2.ExecutorUnknownState } } -func isExecutorTerminated(executorState v1beta1.ExecutorState) bool { - return executorState == v1beta1.ExecutorCompletedState || executorState == v1beta1.ExecutorFailedState +func isExecutorTerminated(executorState v1beta2.ExecutorState) bool { + return executorState == v1beta2.ExecutorCompletedState || executorState == v1beta2.ExecutorFailedState } -func driverPodPhaseToApplicationState(podPhase apiv1.PodPhase) v1beta1.ApplicationStateType { +func driverPodPhaseToApplicationState(podPhase apiv1.PodPhase) v1beta2.ApplicationStateType { switch podPhase { case apiv1.PodPending: - return v1beta1.SubmittedState + return v1beta2.SubmittedState case apiv1.PodRunning: - return v1beta1.RunningState + return v1beta2.RunningState case apiv1.PodSucceeded: - return v1beta1.SucceedingState + return v1beta2.SucceedingState case apiv1.PodFailed: - return v1beta1.FailingState + return v1beta2.FailingState default: - return v1beta1.UnknownState + 
return v1beta2.UnknownState } } diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go index 7fcfbef49..c58dffa2e 100644 --- a/pkg/controller/sparkapplication/sparkui.go +++ b/pkg/controller/sparkapplication/sparkui.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" clientset "k8s.io/client-go/kubernetes" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) @@ -57,7 +57,7 @@ type SparkIngress struct { ingressURL string } -func createSparkUIIngress(app *v1beta1.SparkApplication, service SparkService, ingressURLFormat string, kubeClient clientset.Interface) (*SparkIngress, error) { +func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURLFormat string, kubeClient clientset.Interface) (*SparkIngress, error) { ingressURL := getSparkUIingressURL(ingressURLFormat, app.GetName()) ingress := extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ @@ -98,7 +98,7 @@ func createSparkUIIngress(app *v1beta1.SparkApplication, service SparkService, i } func createSparkUIService( - app *v1beta1.SparkApplication, + app *v1beta2.SparkApplication, kubeClient clientset.Interface) (*SparkService, error) { portStr := getUITargetPort(app) port, err := strconv.Atoi(portStr) @@ -144,7 +144,7 @@ func createSparkUIService( // getWebUITargetPort attempts to get the Spark web UI port from configuration property spark.ui.port // in Spec.SparkConf if it is present, otherwise the default port is returned. // Note that we don't attempt to get the port from Spec.SparkConfigMap. -func getUITargetPort(app *v1beta1.SparkApplication) string { +func getUITargetPort(app *v1beta2.SparkApplication) string { port, ok := app.Spec.SparkConf[sparkUIPortConfigurationKey] if ok { return port diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go index 39c997216..915a2c94d 100644 --- a/pkg/controller/sparkapplication/sparkui_test.go +++ b/pkg/controller/sparkapplication/sparkui_test.go @@ -26,14 +26,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) func TestCreateSparkUIService(t *testing.T) { type testcase struct { name string - app *v1beta1.SparkApplication + app *v1beta2.SparkApplication expectedService SparkService expectedSelector map[string]string expectError bool @@ -84,45 +84,45 @@ func TestCreateSparkUIService(t *testing.T) { t.Fatal(err) } - app1 := &v1beta1.SparkApplication{ + app1 := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", UID: "foo-123", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ SparkConf: map[string]string{ sparkUIPortConfigurationKey: "4041", }, }, - Status: v1beta1.SparkApplicationStatus{ + Status: v1beta2.SparkApplicationStatus{ SparkApplicationID: "foo-1", ExecutionAttempts: 1, }, } - app2 := &v1beta1.SparkApplication{ + app2 := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", UID: "foo-123", }, - Status: v1beta1.SparkApplicationStatus{ + 
Status: v1beta2.SparkApplicationStatus{ SparkApplicationID: "foo-2", ExecutionAttempts: 2, }, } - app3 := &v1beta1.SparkApplication{ + app3 := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", UID: "foo-123", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ SparkConf: map[string]string{ sparkUIPortConfigurationKey: "4041x", }, }, - Status: v1beta1.SparkApplicationStatus{ + Status: v1beta2.SparkApplicationStatus{ SparkApplicationID: "foo-3", }, } @@ -166,15 +166,15 @@ func TestCreateSparkUIService(t *testing.T) { func TestCreateSparkUIIngress(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", UID: "foo-123", }, - Status: v1beta1.SparkApplicationStatus{ + Status: v1beta2.SparkApplicationStatus{ SparkApplicationID: "foo-1", - DriverInfo: v1beta1.DriverInfo{ + DriverInfo: v1beta2.DriverInfo{ WebUIServiceName: "blah-service", }, }, diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go index 46c34dd3c..5f50874bf 100644 --- a/pkg/controller/sparkapplication/submission.go +++ b/pkg/controller/sparkapplication/submission.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) @@ -44,7 +44,7 @@ type submission struct { args []string } -func newSubmission(args []string, app *v1beta1.SparkApplication) *submission { +func newSubmission(args []string, app *v1beta2.SparkApplication) *submission { return &submission{ namespace: app.Namespace, name: app.Name, @@ -82,7 +82,7 @@ func runSparkSubmit(submission *submission) (bool, error) { return true, nil } -func buildSubmissionCommandArgs(app *v1beta1.SparkApplication, driverPodName string, submissionID string) ([]string, error) { +func buildSubmissionCommandArgs(app *v1beta2.SparkApplication, driverPodName string, submissionID string) ([]string, error) { var args []string if app.Spec.MainClass != nil { args = append(args, "--class", *app.Spec.MainClass) @@ -190,18 +190,18 @@ func getMasterURL() (string, error) { return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil } -func getOwnerReference(app *v1beta1.SparkApplication) *metav1.OwnerReference { +func getOwnerReference(app *v1beta2.SparkApplication) *metav1.OwnerReference { controller := true return &metav1.OwnerReference{ - APIVersion: v1beta1.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta1.SparkApplication{}).Name(), + APIVersion: v1beta2.SchemeGroupVersion.String(), + Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(), Name: app.Name, UID: app.UID, Controller: &controller, } } -func addDependenciesConfOptions(app *v1beta1.SparkApplication) []string { +func addDependenciesConfOptions(app *v1beta2.SparkApplication) []string { var depsConfOptions []string if len(app.Spec.Deps.Jars) > 0 { @@ -237,7 +237,7 @@ func addDependenciesConfOptions(app *v1beta1.SparkApplication) []string { return depsConfOptions } -func addDriverConfOptions(app *v1beta1.SparkApplication, submissionID string) ([]string, error) { +func addDriverConfOptions(app *v1beta2.SparkApplication, submissionID string) ([]string, error) { var driverConfOptions []string 
driverConfOptions = append(driverConfOptions, @@ -254,7 +254,7 @@ func addDriverConfOptions(app *v1beta1.SparkApplication, submissionID string) ([ if app.Spec.Driver.Cores != nil { driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.cores=%f", *app.Spec.Driver.Cores)) + fmt.Sprintf("spark.driver.cores=%d", *app.Spec.Driver.Cores)) } if app.Spec.Driver.CoreLimit != nil { driverConfOptions = append(driverConfOptions, @@ -300,7 +300,7 @@ func addDriverConfOptions(app *v1beta1.SparkApplication, submissionID string) ([ return driverConfOptions, nil } -func addExecutorConfOptions(app *v1beta1.SparkApplication, submissionID string) ([]string, error) { +func addExecutorConfOptions(app *v1beta2.SparkApplication, submissionID string) ([]string, error) { var executorConfOptions []string executorConfOptions = append(executorConfOptions, diff --git a/pkg/crd/crd.go b/pkg/crd/crd.go deleted file mode 100644 index c5dd11a77..000000000 --- a/pkg/crd/crd.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crd - -import ( - "fmt" - "time" - - "github.com/golang/glog" - - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" - - ssacrd "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/crd/scheduledsparkapplication" - sacrd "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/crd/sparkapplication" -) - -// CreateOrUpdateCRDs creates or updates the relevant CRDs used by the operator. -func CreateOrUpdateCRDs(clientset apiextensionsclient.Interface) error { - err := createOrUpdateCRD(clientset, sacrd.GetCRD()) - if err != nil { - return fmt.Errorf("failed to create or update CustomResourceDefinition %s: %v", sacrd.FullName, err) - } - - err = createOrUpdateCRD(clientset, ssacrd.GetCRD()) - if err != nil { - return fmt.Errorf("failed to create or update CustomResourceDefinition %s: %v", ssacrd.FullName, err) - } - - return nil -} - -func createOrUpdateCRD( - clientset apiextensionsclient.Interface, - definition *apiextensionsv1beta1.CustomResourceDefinition) error { - existing, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(definition.Name, - metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - // Failed to get the CRD object and the failure was not because the object cannot be found. - return err - } - - if err == nil && existing != nil { - // Update case. 
- if !equality.Semantic.DeepEqual(existing.Spec, definition.Spec) { - existing.Spec = definition.Spec - glog.Infof("Updating CustomResourceDefinition %s", definition.Name) - if _, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Update(existing); err != nil { - return err - } - } - } else { - // Create case. - glog.Infof("Creating CustomResourceDefinition %s", definition.Name) - if _, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(definition); err != nil { - return err - } - } - - // Wait for the CustomResourceDefinition to become registered. - err = waitForCRDEstablishment(clientset, definition.Name) - // Try deleting the CustomResourceDefinition if it fails to be registered on time. - if err != nil { - deleteErr := deleteCRD(clientset, definition.Name) - if deleteErr != nil { - return errors.NewAggregate([]error{err, deleteErr}) - } - return err - } - - return nil -} - -// waitForCRDEstablishment waits for the CRD to be registered and established until it times out. -func waitForCRDEstablishment(clientset apiextensionsclient.Interface, name string) error { - return wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) { - crd, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, cond := range crd.Status.Conditions { - switch cond.Type { - case apiextensionsv1beta1.Established: - if cond.Status == apiextensionsv1beta1.ConditionTrue { - return true, nil - } - case apiextensionsv1beta1.NamesAccepted: - if cond.Status == apiextensionsv1beta1.ConditionFalse { - fmt.Printf("Name conflict: %v\n", cond.Reason) - } - } - } - return false, nil - }) -} - -func deleteCRD(clientset apiextensionsclient.Interface, name string) error { - err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(name, metav1.NewDeleteOptions(0)) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - return nil -} diff --git a/pkg/crd/scheduledsparkapplication/crd.go b/pkg/crd/scheduledsparkapplication/crd.go deleted file mode 100644 index d71a1f66d..000000000 --- a/pkg/crd/scheduledsparkapplication/crd.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduledsparkapplication - -import ( - "reflect" - - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" -) - -// CRD metadata. -const ( - Plural = "scheduledsparkapplications" - Singular = "scheduledsparkapplication" - ShortName = "scheduledsparkapp" - Group = sparkoperator.GroupName - Version = v1beta1.Version - FullName = Plural + "." 
+ Group -) - -func GetCRD() *apiextensionsv1beta1.CustomResourceDefinition { - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: FullName, - }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: Group, - Version: Version, - Scope: apiextensionsv1beta1.NamespaceScoped, - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: Plural, - Singular: Singular, - ShortNames: []string{ShortName}, - Kind: reflect.TypeOf(v1beta1.ScheduledSparkApplication{}).Name(), - }, - Validation: getCustomResourceValidation(), - }, - } -} - -func getCustomResourceValidation() *apiextensionsv1beta1.CustomResourceValidation { - return &apiextensionsv1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{ - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "spec": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "schedule": { - Type: "string", - }, - "concurrencyPolicy": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"Allow"`)}, - {Raw: []byte(`"Forbid"`)}, - {Raw: []byte(`"Replace"`)}, - }, - }, - "successfulRunHistoryLimit": { - Type: "integer", - Minimum: float64Ptr(1), - }, - "failedRunHistoryLimit": { - Type: "integer", - Minimum: float64Ptr(1), - }, - "template": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "type": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"Java"`)}, - {Raw: []byte(`"Scala"`)}, - {Raw: []byte(`"Python"`)}, - {Raw: []byte(`"R"`)}, - }, - }, - "mode": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"cluster"`)}, - {Raw: []byte(`"client"`)}, - }, - }, - "driver": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "cores": { - Type: "number", - Minimum: float64Ptr(0), - ExclusiveMinimum: true, - }, - "podName": { - Pattern: "[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*", - }, - }, - }, - "executor": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "cores": { - Type: "number", - Minimum: float64Ptr(0), - ExclusiveMinimum: true, - }, - "instances": { - Type: "integer", - Minimum: float64Ptr(1), - }, - }, - }, - "deps": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "downloadTimeout": { - Type: "integer", - Minimum: float64Ptr(1), - }, - "maxSimultaneousDownloads": { - Type: "integer", - Minimum: float64Ptr(1), - }, - }, - }, - "restartPolicy": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "type": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"Never"`)}, - {Raw: []byte(`"OnFailure"`)}, - {Raw: []byte(`"Always"`)}, - }, - }, - "onSubmissionFailureRetries": { - Type: "integer", - Minimum: float64Ptr(0), - }, - "onFailureRetries": { - Type: "integer", - Minimum: float64Ptr(0), - }, - "onSubmissionFailureRetryInterval": { - Type: "integer", - Minimum: float64Ptr(1), - }, - "onFailureRetryInterval": { - Type: "integer", - Minimum: float64Ptr(1), - }, - }, - }, - "pythonVersion": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"2"`)}, - {Raw: []byte(`"3"`)}, - }, - }, - "monitoring": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "prometheus": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "port": { - Type: "integer", - Minimum: float64Ptr(1024), - Maximum: float64Ptr(49151), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func float64Ptr(f float64) *float64 { - return &f -} diff --git a/pkg/crd/sparkapplication/crd.go b/pkg/crd/sparkapplication/crd.go 
deleted file mode 100644 index 35932cd6b..000000000 --- a/pkg/crd/sparkapplication/crd.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "reflect" - - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" -) - -// CRD metadata. -const ( - Plural = "sparkapplications" - Singular = "sparkapplication" - ShortName = "sparkapp" - Group = sparkoperator.GroupName - Version = v1beta1.Version - FullName = Plural + "." + Group -) - -func GetCRD() *apiextensionsv1beta1.CustomResourceDefinition { - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: FullName, - }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: Group, - Version: Version, - Scope: apiextensionsv1beta1.NamespaceScoped, - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: Plural, - Singular: Singular, - ShortNames: []string{ShortName}, - Kind: reflect.TypeOf(v1beta1.SparkApplication{}).Name(), - }, - Validation: getCustomResourceValidation(), - }, - } -} - -func getCustomResourceValidation() *apiextensionsv1beta1.CustomResourceValidation { - return &apiextensionsv1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{ - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "metadata": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "name": { - Type: "string", - MinLength: int64Ptr(1), - MaxLength: int64Ptr(63), - }, - }, - }, - "spec": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "type": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"Java"`)}, - {Raw: []byte(`"Scala"`)}, - {Raw: []byte(`"Python"`)}, - {Raw: []byte(`"R"`)}, - }, - }, - "mode": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"cluster"`)}, - {Raw: []byte(`"client"`)}, - }, - }, - "driver": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "cores": { - Type: "number", - Minimum: float64Ptr(0), - ExclusiveMinimum: true, - }, - "podName": { - Pattern: "[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*", - }, - }, - }, - "executor": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "cores": { - Type: "number", - Minimum: float64Ptr(0), - ExclusiveMinimum: true, - }, - "instances": { - Type: "integer", - Minimum: float64Ptr(1), - }, - }, - }, - "deps": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "downloadTimeout": { - Type: "integer", - Minimum: float64Ptr(1), - }, - "maxSimultaneousDownloads": { - Type: "integer", - Minimum: float64Ptr(1), - }, - }, - }, - "restartPolicy": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "type": { - Enum: []apiextensionsv1beta1.JSON{ 
- {Raw: []byte(`"Never"`)}, - {Raw: []byte(`"OnFailure"`)}, - {Raw: []byte(`"Always"`)}, - }, - }, - "onSubmissionFailureRetries": { - Type: "integer", - Minimum: float64Ptr(0), - }, - "onFailureRetries": { - Type: "integer", - Minimum: float64Ptr(0), - }, - "onSubmissionFailureRetryInterval": { - Type: "integer", - Minimum: float64Ptr(1), - }, - "onFailureRetryInterval": { - Type: "integer", - Minimum: float64Ptr(1), - }, - }, - }, - "pythonVersion": { - Enum: []apiextensionsv1beta1.JSON{ - {Raw: []byte(`"2"`)}, - {Raw: []byte(`"3"`)}, - }, - }, - "monitoring": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "prometheus": { - Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ - "port": { - Type: "integer", - Minimum: float64Ptr(1024), - Maximum: float64Ptr(49151), - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func int64Ptr(i int64) *int64 { - return &i -} - -func float64Ptr(f float64) *float64 { - return &f -} diff --git a/pkg/util/util.go b/pkg/util/util.go index bded32834..d556d045e 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -24,7 +24,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) @@ -34,11 +34,11 @@ func NewHash32() hash.Hash32 { } // GetOwnerReference returns an OwnerReference pointing to the given app. -func GetOwnerReference(app *v1beta1.SparkApplication) metav1.OwnerReference { +func GetOwnerReference(app *v1beta2.SparkApplication) metav1.OwnerReference { controller := true return metav1.OwnerReference{ - APIVersion: v1beta1.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta1.SparkApplication{}).Name(), + APIVersion: v1beta2.SchemeGroupVersion.String(), + Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(), Name: app.Name, UID: app.UID, Controller: &controller, diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go index 20fce1606..7a4680b40 100644 --- a/pkg/webhook/patch.go +++ b/pkg/webhook/patch.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" ) @@ -44,7 +44,7 @@ type patchOperation struct { Value interface{} `json:"value,omitempty"` } -func patchSparkPod(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func patchSparkPod(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var patchOps []patchOperation if util.IsDriverPod(pod) { @@ -85,7 +85,7 @@ func patchSparkPod(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperat return patchOps } -func addOwnerReference(pod *corev1.Pod, app *v1beta1.SparkApplication) patchOperation { +func addOwnerReference(pod *corev1.Pod, app *v1beta2.SparkApplication) patchOperation { ownerReference := util.GetOwnerReference(app) path := "/metadata/ownerReferences" @@ -100,7 +100,7 @@ func addOwnerReference(pod *corev1.Pod, app *v1beta1.SparkApplication) patchOper return patchOperation{Op: "add", Path: path, Value: value} } -func addVolumes(pod *corev1.Pod, app 
*v1beta1.SparkApplication) []patchOperation { +func addVolumes(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { volumes := app.Spec.Volumes volumeMap := make(map[string]corev1.Volume) for _, v := range volumes { @@ -182,7 +182,7 @@ func addEnvironmentVariable(pod *corev1.Pod, envName, envValue string) patchOper return patchOperation{Op: "add", Path: path, Value: value} } -func addSparkConfigMap(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addSparkConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var patchOps []patchOperation sparkConfigMapName := app.Spec.SparkConfigMap if sparkConfigMapName != nil { @@ -194,7 +194,7 @@ func addSparkConfigMap(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOp return patchOps } -func addHadoopConfigMap(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addHadoopConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var patchOps []patchOperation hadoopConfigMapName := app.Spec.HadoopConfigMap if hadoopConfigMapName != nil { @@ -206,8 +206,8 @@ func addHadoopConfigMap(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchO return patchOps } -func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { - var configMaps []v1beta1.NamePath +func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { + var configMaps []v1beta2.NamePath if util.IsDriverPod(pod) { configMaps = app.Spec.Driver.ConfigMaps } else if util.IsExecutorPod(pod) { @@ -227,7 +227,7 @@ func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta1.SparkApplication) []patc return patchOps } -func addPrometheusConfigMap(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addPrometheusConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { // Skip if Prometheus Monitoring is not enabled or an in-container ConfigFile is used, // in which cases a Prometheus ConfigMap won't be created. 
if !app.PrometheusMonitoringEnabled() || app.HasPrometheusConfigFile() { @@ -273,7 +273,7 @@ func addConfigMapVolumeMount(pod *corev1.Pod, configMapVolumeName string, mountP return addVolumeMount(pod, mount) } -func addAffinity(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation { +func addAffinity(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { var affinity *corev1.Affinity if util.IsDriverPod(pod) { affinity = app.Spec.Driver.Affinity @@ -287,7 +287,7 @@ func addAffinity(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation return &patchOperation{Op: "add", Path: "/spec/affinity", Value: *affinity} } -func addTolerations(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addTolerations(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var tolerations []corev1.Toleration if util.IsDriverPod(pod) { tolerations = app.Spec.Driver.Tolerations @@ -302,7 +302,7 @@ func addTolerations(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOpera return ops } -func addNodeSelectors(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addNodeSelectors(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var nodeSelector map[string]string if util.IsDriverPod(pod) { nodeSelector = app.Spec.Driver.NodeSelector @@ -317,7 +317,7 @@ func addNodeSelectors(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOpe return ops } -func addDNSConfig(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addDNSConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var dnsConfig *corev1.PodDNSConfig if util.IsDriverPod(pod) { @@ -333,7 +333,7 @@ func addDNSConfig(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperati return ops } -func addSchedulerName(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation { +func addSchedulerName(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { var schedulerName *string if util.IsDriverPod(pod) { schedulerName = app.Spec.Driver.SchedulerName @@ -360,7 +360,7 @@ func addToleration(pod *corev1.Pod, toleration corev1.Toleration) patchOperation return patchOperation{Op: "add", Path: path, Value: value} } -func addSecurityContext(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation { +func addSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { var secContext *corev1.PodSecurityContext if util.IsDriverPod(pod) { secContext = app.Spec.Driver.SecurityContenxt @@ -374,7 +374,7 @@ func addSecurityContext(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOp return &patchOperation{Op: "add", Path: "/spec/securityContext", Value: *secContext} } -func addSidecarContainers(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addSidecarContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var sidecars []corev1.Container if util.IsDriverPod(pod) { sidecars = app.Spec.Driver.Sidecars @@ -392,8 +392,8 @@ func addSidecarContainers(pod *corev1.Pod, app *v1beta1.SparkApplication) []patc return ops } -func addGPU(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation { - var gpu *v1beta1.GPUSpec +func addGPU(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { + var gpu *v1beta2.GPUSpec if util.IsDriverPod(pod) { gpu = app.Spec.Driver.GPU } @@ -433,7 +433,7 @@ func addGPU(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation { return &patchOperation{Op: "add", Path: path, Value: value} } -func 
addHostNetwork(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperation { +func addHostNetwork(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { var hostNetwork *bool if util.IsDriverPod(pod) { hostNetwork = app.Spec.Driver.HostNetwork diff --git a/pkg/webhook/patch_test.go b/pkg/webhook/patch_test.go index 999885748..bbdfdabca 100644 --- a/pkg/webhook/patch_test.go +++ b/pkg/webhook/patch_test.go @@ -28,12 +28,12 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" ) func TestPatchSparkPod_OwnerReference(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", @@ -76,12 +76,12 @@ func TestPatchSparkPod_OwnerReference(t *testing.T) { } func TestPatchSparkPod_Volumes(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ Volumes: []corev1.Volume{ corev1.Volume{ Name: "spark", @@ -92,8 +92,8 @@ func TestPatchSparkPod_Volumes(t *testing.T) { }, }, }, - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ VolumeMounts: []corev1.VolumeMount{ { Name: "spark", @@ -152,14 +152,14 @@ func TestPatchSparkPod_Volumes(t *testing.T) { } func TestPatchSparkPod_Affinity(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ Affinity: &corev1.Affinity{ PodAffinity: &corev1.PodAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ @@ -209,15 +209,15 @@ func TestPatchSparkPod_Affinity(t *testing.T) { } func TestPatchSparkPod_ConfigMaps(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ - ConfigMaps: []v1beta1.NamePath{{Name: "foo", Path: "/path/to/foo"}}, + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ + ConfigMaps: []v1beta2.NamePath{{Name: "foo", Path: "/path/to/foo"}}, }, }, }, @@ -255,12 +255,12 @@ func TestPatchSparkPod_ConfigMaps(t *testing.T) { func TestPatchSparkPod_SparkConfigMap(t *testing.T) { sparkConfMapName := "spark-conf" - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ SparkConfigMap: &sparkConfMapName, }, } @@ -299,12 +299,12 @@ func TestPatchSparkPod_SparkConfigMap(t *testing.T) { func TestPatchSparkPod_HadoopConfigMap(t *testing.T) { hadoopConfMapName := "hadoop-conf" - app := &v1beta1.SparkApplication{ + app := 
&v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ + Spec: v1beta2.SparkApplicationSpec{ HadoopConfigMap: &hadoopConfMapName, }, } @@ -342,14 +342,14 @@ func TestPatchSparkPod_HadoopConfigMap(t *testing.T) { } func TestPatchSparkPod_PrometheusConfigMaps(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Monitoring: &v1beta1.MonitoringSpec{ - Prometheus: &v1beta1.PrometheusSpec{}, + Spec: v1beta2.SparkApplicationSpec{ + Monitoring: &v1beta2.MonitoringSpec{ + Prometheus: &v1beta2.PrometheusSpec{}, ExposeDriverMetrics: true, }, }, @@ -390,14 +390,14 @@ func TestPatchSparkPod_PrometheusConfigMaps(t *testing.T) { } func TestPatchSparkPod_Tolerations(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ Tolerations: []corev1.Toleration{ { Key: "Key", @@ -441,21 +441,21 @@ func TestPatchSparkPod_Tolerations(t *testing.T) { func TestPatchSparkPod_SecurityContext(t *testing.T) { var user int64 = 1000 - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ SecurityContenxt: &corev1.PodSecurityContext{ RunAsUser: &user, }, }, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ SecurityContenxt: &corev1.PodSecurityContext{ RunAsUser: &user, }, @@ -517,19 +517,19 @@ func TestPatchSparkPod_SchedulerName(t *testing.T) { var schedulerName = "another_scheduler" var defaultScheduler = "default-scheduler" - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test-patch-schedulername", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ SchedulerName: &schedulerName, }, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{}, + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{}, }, }, } @@ -588,14 +588,14 @@ func TestPatchSparkPod_SchedulerName(t *testing.T) { } func TestPatchSparkPod_Sidecars(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ Sidecars: []corev1.Container{ { Name: "sidecar1", @@ -608,8 +608,8 @@ func TestPatchSparkPod_Sidecars(t *testing.T) { }, }, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Executor: 
v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ Sidecars: []corev1.Container{ { Name: "sidecar1", @@ -688,17 +688,17 @@ func TestPatchSparkPod_DNSConfig(t *testing.T) { }, } - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{DNSConfig: sampleDNSConfig}, + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{DNSConfig: sampleDNSConfig}, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{DNSConfig: sampleDNSConfig}, + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{DNSConfig: sampleDNSConfig}, }, }, } @@ -757,19 +757,19 @@ func TestPatchSparkPod_DNSConfig(t *testing.T) { } func TestPatchSparkPod_NodeSector(t *testing.T) { - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ NodeSelector: map[string]string{"disk": "ssd", "secondkey": "secondvalue"}, }, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{ + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ NodeSelector: map[string]string{"nodeType": "gpu", "secondkey": "secondvalue"}, }, }, @@ -834,7 +834,7 @@ func TestPatchSparkPod_GPU(t *testing.T) { cpuRequest := int64(5) type testcase struct { - gpuSpec *v1beta1.GPUSpec + gpuSpec *v1beta2.GPUSpec cpuLimits *int64 cpuRequests *int64 } @@ -886,17 +886,17 @@ func TestPatchSparkPod_GPU(t *testing.T) { assert.Equal(t, *test.cpuLimits, count) } } - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{}, + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{}, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{}, + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{}, }, }, } @@ -922,42 +922,42 @@ func TestPatchSparkPod_GPU(t *testing.T) { &cpuRequest, }, { - &v1beta1.GPUSpec{}, + &v1beta2.GPUSpec{}, nil, nil, }, { - &v1beta1.GPUSpec{}, + &v1beta2.GPUSpec{}, &cpuLimit, nil, }, { - &v1beta1.GPUSpec{}, + &v1beta2.GPUSpec{}, nil, &cpuRequest, }, { - &v1beta1.GPUSpec{}, + &v1beta2.GPUSpec{}, &cpuLimit, &cpuRequest, }, { - &v1beta1.GPUSpec{"example.com/gpu", 1}, + &v1beta2.GPUSpec{"example.com/gpu", 1}, nil, nil, }, { - &v1beta1.GPUSpec{"example.com/gpu", 1}, + &v1beta2.GPUSpec{"example.com/gpu", 1}, &cpuLimit, nil, }, { - &v1beta1.GPUSpec{"example.com/gpu", 1}, + &v1beta2.GPUSpec{"example.com/gpu", 1}, nil, &cpuRequest, }, { - &v1beta1.GPUSpec{"example.com/gpu", 1}, + &v1beta2.GPUSpec{"example.com/gpu", 1}, &cpuLimit, &cpuRequest, }, @@ -1025,17 +1025,17 @@ func TestPatchSparkPod_HostNetwork(t *testing.T) { var hostNetwork = true var defaultNetwork = false - app := &v1beta1.SparkApplication{ + app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test-hostNetwork", UID: "spark-test-1", }, - Spec: v1beta1.SparkApplicationSpec{ - Driver: 
v1beta1.DriverSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{}, + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{}, }, - Executor: v1beta1.ExecutorSpec{ - SparkPodSpec: v1beta1.SparkPodSpec{}, + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{}, }, }, } @@ -1108,7 +1108,7 @@ func TestPatchSparkPod_HostNetwork(t *testing.T) { } } -func getModifiedPod(pod *corev1.Pod, app *v1beta1.SparkApplication) (*corev1.Pod, error) { +func getModifiedPod(pod *corev1.Pod, app *v1beta2.SparkApplication) (*corev1.Pod, error) { patchOps := patchSparkPod(pod, app) patchBytes, err := json.Marshal(patchOps) if err != nil { diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index 6f7cbc702..8e45d1ddc 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -39,7 +39,7 @@ import ( "k8s.io/client-go/kubernetes" crinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" - crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" + crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" ) @@ -120,7 +120,7 @@ func New( } hook := &WebHook{ clientset: clientset, - lister: informerFactory.Sparkoperator().V1beta1().SparkApplications().Lister(), + lister: informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister(), certProvider: cert, serviceRef: serviceRef, sparkJobNamespace: jobNamespace, diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go index 7c8fb048b..2e5e4e573 100644 --- a/pkg/webhook/webhook_test.go +++ b/pkg/webhook/webhook_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - spov1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + spov1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake" crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" @@ -38,7 +38,7 @@ import ( func TestMutatePod(t *testing.T) { crdClient := crdclientfake.NewSimpleClientset() informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0*time.Second) - informer := informerFactory.Sparkoperator().V1beta1().SparkApplications() + informer := informerFactory.Sparkoperator().V1beta2().SparkApplications() lister := informer.Lister() pod1 := &corev1.Pod{ @@ -78,13 +78,13 @@ func TestMutatePod(t *testing.T) { assert.True(t, response.Allowed) // 2. Test processing Spark pod with only one patch: adding an OwnerReference. - app1 := &spov1beta1.SparkApplication{ + app1 := &spov1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-app1", Namespace: "default", }, } - crdClient.SparkoperatorV1beta1().SparkApplications(app1.Namespace).Create(app1) + crdClient.SparkoperatorV1beta2().SparkApplications(app1.Namespace).Create(app1) informer.Informer().GetIndexer().Add(app1) pod1.Labels = map[string]string{ config.SparkRoleLabel: config.SparkDriverRole, @@ -103,12 +103,12 @@ func TestMutatePod(t *testing.T) { // 3. Test processing Spark pod with patches. 
var user int64 = 1000 - app2 := &spov1beta1.SparkApplication{ + app2 := &spov1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-app2", Namespace: "default", }, - Spec: spov1beta1.SparkApplicationSpec{ + Spec: spov1beta2.SparkApplicationSpec{ Volumes: []corev1.Volume{ { Name: "spark", @@ -125,8 +125,8 @@ func TestMutatePod(t *testing.T) { }, }, }, - Driver: spov1beta1.DriverSpec{ - SparkPodSpec: spov1beta1.SparkPodSpec{ + Driver: spov1beta2.DriverSpec{ + SparkPodSpec: spov1beta2.SparkPodSpec{ VolumeMounts: []corev1.VolumeMount{ { Name: "spark", @@ -160,7 +160,7 @@ func TestMutatePod(t *testing.T) { }, }, } - crdClient.SparkoperatorV1beta1().SparkApplications(app2.Namespace).Update(app2) + crdClient.SparkoperatorV1beta2().SparkApplications(app2.Namespace).Update(app2) informer.Informer().GetIndexer().Add(app2) pod1.Labels[config.SparkAppNameLabel] = app2.Name From 680ade7bf8a79d44e671551200ec8eb8b04c2f45 Mon Sep 17 00:00:00 2001 From: Kevin Hogeland Date: Wed, 21 Aug 2019 17:05:54 -0700 Subject: [PATCH 2/6] Add short/singular CRD names --- .../sparkoperator.k8s.io_scheduledsparkapplications.yaml | 6 +++++- manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml | 6 +++++- pkg/apis/sparkoperator.k8s.io/v1beta2/types.go | 3 +++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 534888004..b331b30cf 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -10,7 +10,9 @@ spec: names: kind: ScheduledSparkApplication plural: scheduledsparkapplications - scope: "" + shortNames: + - scheduledsparkapp + scope: Namespaced subresources: status: {} validation: @@ -352,6 +354,7 @@ spec: coreLimit: type: string cores: + exclusiveMinimum: true format: int32 minimum: 0 type: integer @@ -1240,6 +1243,7 @@ spec: coreRequest: type: string cores: + exclusiveMinimum: true format: int32 minimum: 0 type: integer diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index 09dbc1ce7..310cd2a36 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -10,7 +10,9 @@ spec: names: kind: SparkApplication plural: sparkapplications - scope: "" + shortNames: + - sparkapp + scope: Namespaced subresources: status: {} validation: @@ -338,6 +340,7 @@ spec: coreLimit: type: string cores: + exclusiveMinimum: true format: int32 minimum: 0 type: integer @@ -1226,6 +1229,7 @@ spec: coreRequest: type: string cores: + exclusiveMinimum: true format: int32 minimum: 0 type: integer diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go index a5463cf4c..336dff107 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -76,6 +76,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:defaulter-gen=true // +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication type ScheduledSparkApplication struct { metav1.TypeMeta `json:",inline"` @@ -155,6 +156,7 @@ type ScheduledSparkApplicationList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // 
+k8s:defaulter-gen=true // +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication // SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager. type SparkApplication struct { @@ -354,6 +356,7 @@ type SparkPodSpec struct { // Cores is the number of CPU cores to request for the pod. // Optional. // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:ExclusiveMinimum=true Cores *int32 `json:"cores,omitempty"` // CoreLimit specifies a hard limit on CPU cores for the pod. // Optional From 11ebdabcb25841891cd696d0d259e66e9de6bca9 Mon Sep 17 00:00:00 2001 From: Kevin Hogeland Date: Thu, 5 Sep 2019 14:54:21 -0700 Subject: [PATCH 3/6] Merge upstream/master --- .gitignore | 1 + Gopkg.lock | 1 + docs/api.md | 2 +- docs/quick-start-guide.md | 14 +- docs/user-guide.md | 7 + main.go | 117 +++++--- ...tor.k8s.io_scheduledsparkapplications.yaml | 2 + ...parkoperator.k8s.io_sparkapplications.yaml | 2 + manifest/spark-operator-rbac.yaml | 5 +- .../sparkoperator.k8s.io/v1beta1/types.go | 3 + .../v1beta1/zz_generated.deepcopy.go | 6 + .../sparkoperator.k8s.io/v1beta2/types.go | 3 + pkg/batchscheduler/factory.go | 56 ---- pkg/batchscheduler/scheduler_manager.go | 80 +++++ .../volcano/volcano_scheduler.go | 20 +- pkg/controller/sparkapplication/controller.go | 39 ++- pkg/webhook/patch.go | 9 +- pkg/webhook/resourceusage/enforcer.go | 97 ++++++ pkg/webhook/resourceusage/handlers.go | 119 ++++++++ pkg/webhook/resourceusage/util.go | 241 +++++++++++++++ pkg/webhook/resourceusage/util_test.go | 25 ++ pkg/webhook/resourceusage/watcher.go | 157 ++++++++++ pkg/webhook/webhook.go | 284 ++++++++++++++---- 23 files changed, 1083 insertions(+), 207 deletions(-) delete mode 100644 pkg/batchscheduler/factory.go create mode 100644 pkg/batchscheduler/scheduler_manager.go create mode 100644 pkg/webhook/resourceusage/enforcer.go create mode 100644 pkg/webhook/resourceusage/handlers.go create mode 100644 pkg/webhook/resourceusage/util.go create mode 100644 pkg/webhook/resourceusage/util_test.go create mode 100644 pkg/webhook/resourceusage/watcher.go diff --git a/.gitignore b/.gitignore index e24d37506..bd44b47a0 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ spark-operator .idea/ **/*.iml sparkctl/sparkctl +spark-on-k8s-oprator diff --git a/Gopkg.lock b/Gopkg.lock index c332e5f0f..c31b39f14 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1229,6 +1229,7 @@ "k8s.io/client-go/discovery", "k8s.io/client-go/discovery/fake", "k8s.io/client-go/informers", + "k8s.io/client-go/informers/core/v1", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", diff --git a/docs/api.md b/docs/api.md index bdfc630f5..7c499e01c 100644 --- a/docs/api.md +++ b/docs/api.md @@ -9,7 +9,7 @@ ScheduledSparkApplication |__ SparkApplication |__ ScheduledSparkApplicationStatus -|__ SparkApplication +SparkApplication |__ SparkApplicationSpec |__ DriverSpec |__ SparkPodSpec diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index 9257707de..b12c2229c 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -185,13 +185,13 @@ If enabled, the operator generates the following metrics: #### Work Queue Metrics | Metric | Description | | ------------- | ------------- | -| `queue_depth` | Current depth of workqueue | -| `queue_adds` | Total number of adds handled by workqueue | -| `queue_latency` | Latency for workqueue | -| `queue_work_duration` | How long 
processing an item from workqueue takes | -| `queue_retries` | Total number of retries handled by workqueue | -| `queue_unfinished_work_seconds` | Unfinished work in seconds | -| `queue_longest_running_processor_microseconds` | Longest running processor in microseconds | +| `spark_application_controller_depth` | Current depth of workqueue | +| `spark_application_controller_adds` | Total number of adds handled by workqueue | +| `spark_application_controller_latency` | Latency for workqueue | +| `spark_application_controller_work_duration` | How long processing an item from workqueue takes | +| `spark_application_controller_retries` | Total number of retries handled by workqueue | +| `spark_application_controller_unfinished_work_seconds` | Unfinished work in seconds | +| `spark_application_controller_longest_running_processor_microseconds` | Longest running processor in microseconds | The following is a list of all the configurations the operator supports for metrics: diff --git a/docs/user-guide.md b/docs/user-guide.md index c21f4a095..b79a5eeba 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -38,6 +38,7 @@ The Kubernetes Operator for Apache Spark ships with a command-line tool called ` * [Configuring Automatic Application Re-submission on Submission Failures](#configuring-automatic-application-re-submission-on-submission-failures) * [Running Spark Applications on a Schedule using a ScheduledSparkApplication](#running-spark-applications-on-a-schedule-using-a-scheduledsparkapplication) * [Enabling Leader Election for High Availability](#enabling-leader-election-for-high-availability) +* [Enabling Resource Quota Enforcement](#enabling-resource-quota-enforcement) * [Customizing the Operator](#customizing-the-operator) ## Using a SparkApplication @@ -564,6 +565,12 @@ The operator supports a high-availability (HA) mode, in which there can be more | `leader-election-renew-deadline` | 14 seconds | Leader election renew deadline. | | `leader-election-retry-period` | 4 seconds | Leader election retry period. | +## Enabling Resource Quota Enforcement + +The Spark Operator provides limited support for resource quota enforcement using a validating webhook. It will count the resources of non-terminal-phase SparkApplications and Pods, and determine whether a requested SparkApplication will fit given the remaining resources. ResourceQuota scope selectors are not supported; any ResourceQuota object that does not match the entire namespace will be ignored. Like the native Pod quota enforcement, current usage is updated asynchronously, so some overscheduling is possible. + +If you are running Spark applications in namespaces that are subject to resource quota constraints, consider enabling this feature to avoid driver resource starvation. Quota enforcement can be enabled with the command-line argument `-enable-resource-quota-enforcement=true`. It is recommended to also set `-webhook-fail-on-error=true`.
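As a rough illustration (the binary name, namespace, and flag values below are placeholders, not prescribed configuration), quota enforcement together with the mutating/validating webhook might be enabled like this:

```bash
$ spark-operator \
    -namespace=spark-jobs \
    -enable-webhook=true \
    -webhook-fail-on-error=true \
    -enable-resource-quota-enforcement=true
```

Because quota enforcement relies on the webhook, the operator refuses to start if `-enable-resource-quota-enforcement=true` is set without `-enable-webhook=true`.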
+ ## Customizing the Operator To customize the operator, you can follow the steps below: diff --git a/main.go b/main.go index 894031ac3..0bc96f2c4 100644 --- a/main.go +++ b/main.go @@ -24,6 +24,7 @@ import ( "fmt" "os" "os/signal" + "strings" "syscall" "time" @@ -42,7 +43,6 @@ import ( "k8s.io/client-go/tools/record" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface" crclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" crinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" operatorConfig "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" @@ -53,24 +53,27 @@ import ( ) var ( - master = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") - kubeConfig = flag.String("kubeConfig", "", "Path to a kube config. Only required if out-of-cluster.") - controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") - resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds.") - namespace = flag.String("namespace", apiv1.NamespaceAll, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") - enableWebhook = flag.Bool("enable-webhook", false, "Whether to enable the mutating admission webhook for admitting and patching Spark pods.") - enableMetrics = flag.Bool("enable-metrics", false, "Whether to enable the metrics endpoint.") - metricsPort = flag.String("metrics-port", "10254", "Port for the metrics endpoint.") - metricsEndpoint = flag.String("metrics-endpoint", "/metrics", "Metrics endpoint.") - metricsPrefix = flag.String("metrics-prefix", "", "Prefix for the metrics.") - ingressURLFormat = flag.String("ingress-url-format", "", "Ingress URL format.") - enableLeaderElection = flag.Bool("leader-election", false, "Enable Spark operator leader election.") - leaderElectionLockNamespace = flag.String("leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.") - leaderElectionLockName = flag.String("leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.") - leaderElectionLeaseDuration = flag.Duration("leader-election-lease-duration", 15*time.Second, "Leader election lease duration.") - leaderElectionRenewDeadline = flag.Duration("leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.") - leaderElectionRetryPeriod = flag.Duration("leader-election-retry-period", 4*time.Second, "Leader election retry period.") - batchSchedulerName = flag.String("batch-scheduler-name", "", "Use specified scheduler for pods' batch scheduling.") + master = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeConfig = flag.String("kubeConfig", "", "Path to a kube config. 
Only required if out-of-cluster.") + installCRDs = flag.Bool("install-crds", true, "Whether to install CRDs") + controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") + resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds.") + namespace = flag.String("namespace", apiv1.NamespaceAll, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") + enableWebhook = flag.Bool("enable-webhook", false, "Whether to enable the mutating admission webhook for admitting and patching Spark pods.") + enableResourceQuotaEnforcement = flag.Bool("enable-resource-quota-enforcement", false, "Whether to enable ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled.") + enableMetrics = flag.Bool("enable-metrics", false, "Whether to enable the metrics endpoint.") + metricsPort = flag.String("metrics-port", "10254", "Port for the metrics endpoint.") + metricsEndpoint = flag.String("metrics-endpoint", "/metrics", "Metrics endpoint.") + metricsPrefix = flag.String("metrics-prefix", "", "Prefix for the metrics.") + ingressURLFormat = flag.String("ingress-url-format", "", "Ingress URL format.") + enableLeaderElection = flag.Bool("leader-election", false, "Enable Spark operator leader election.") + leaderElectionLockNamespace = flag.String("leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.") + leaderElectionLockName = flag.String("leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.") + leaderElectionLeaseDuration = flag.Duration("leader-election-lease-duration", 15*time.Second, "Leader election lease duration.") + leaderElectionRenewDeadline = flag.Duration("leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.") + leaderElectionRetryPeriod = flag.Duration("leader-election-retry-period", 4*time.Second, "Leader election retry period.") + enableBatchScheduler = flag.Bool("enable-batch-scheduler", false, + fmt.Sprintf("Enable batch schedulers for pods' scheduling, the available batch schedulers are: (%s).", strings.Join(batchscheduler.GetRegisteredNames(), ","))) ) func main() { @@ -142,62 +145,70 @@ func main() { glog.Fatal(err) } - var batchScheduler schedulerinterface.BatchScheduler - if *batchSchedulerName != "" { + var batchSchedulerMgr *batchscheduler.SchedulerManager + if *enableBatchScheduler { if !*enableWebhook { - glog.Fatalf( - "failed to initialize the batch scheduler %s as it requires the webhook to be enabled", *batchSchedulerName) - } - batchScheduler, err = batchscheduler.GetBatchScheduler(*batchSchedulerName, config) - if err != nil { - glog.Fatalf("failed to initialize batch scheduler %s.", err) + glog.Fatal( + "failed to initialize the batch scheduler manager as it requires the webhook to be enabled") } + batchSchedulerMgr = batchscheduler.NewSchedulerManager(config) } crInformerFactory := buildCustomResourceInformerFactory(crClient) podInformerFactory := buildPodInformerFactory(kubeClient) + var metricConfig *util.MetricConfig + if *enableMetrics { + metricConfig = &util.MetricConfig{ + MetricsEndpoint: *metricsEndpoint, + MetricsPort: *metricsPort, + MetricsPrefix: *metricsPrefix, + MetricsLabels: metricsLabels, + } + + glog.Info("Enabling metrics collecting and exporting to Prometheus") + util.InitializeMetrics(metricConfig) + } + + applicationController := 
sparkapplication.NewController( + crClient, kubeClient, crInformerFactory, podInformerFactory, metricConfig, *namespace, *ingressURLFormat, batchSchedulerMgr) + scheduledApplicationController := scheduledsparkapplication.NewController( + crClient, kubeClient, apiExtensionsClient, crInformerFactory, clock.RealClock{}) + // Start the informer factory that in turn starts the informer. go crInformerFactory.Start(stopCh) go podInformerFactory.Start(stopCh) var hook *webhook.WebHook if *enableWebhook { + var coreV1InformerFactory informers.SharedInformerFactory + if *enableResourceQuotaEnforcement { + coreV1InformerFactory = buildCoreV1InformerFactory(kubeClient) + } var err error // Don't deregister webhook on exit if leader election enabled (i.e. multiple webhooks running) - hook, err = webhook.New(kubeClient, crInformerFactory, *namespace, !*enableLeaderElection) + hook, err = webhook.New(kubeClient, crInformerFactory, *namespace, !*enableLeaderElection, *enableResourceQuotaEnforcement, coreV1InformerFactory) if err != nil { glog.Fatal(err) } - if err = hook.Start(); err != nil { + + if *enableResourceQuotaEnforcement { + go coreV1InformerFactory.Start(stopCh) + } + + if err = hook.Start(stopCh); err != nil { glog.Fatal(err) } + } else if *enableResourceQuotaEnforcement { + glog.Fatal("Webhook must be enabled to use resource quota enforcement.") } if *enableLeaderElection { - glog.Info("Waiting to be elected leader before starting application controller and metrics threads") + glog.Info("Waiting to be elected leader before starting application controller goroutines") <-startCh } - var metricConfig *util.MetricConfig - if *enableMetrics { - metricConfig = &util.MetricConfig{ - MetricsEndpoint: *metricsEndpoint, - MetricsPort: *metricsPort, - MetricsPrefix: *metricsPrefix, - MetricsLabels: metricsLabels, - } - - glog.Info("Enabling metrics collecting and exporting to Prometheus") - util.InitializeMetrics(metricConfig) - } - - applicationController := sparkapplication.NewController( - crClient, kubeClient, crInformerFactory, podInformerFactory, metricConfig, *namespace, *ingressURLFormat, batchScheduler) - scheduledApplicationController := scheduledsparkapplication.NewController( - crClient, kubeClient, apiExtensionsClient, crInformerFactory, clock.RealClock{}) - - glog.Info("Starting application controller threads") + glog.Info("Starting application controller goroutines") if err = applicationController.Start(*controllerThreads, stopCh); err != nil { glog.Fatal(err) @@ -252,3 +263,11 @@ func buildPodInformerFactory(kubeClient clientset.Interface) informers.SharedInf podFactoryOpts = append(podFactoryOpts, informers.WithTweakListOptions(tweakListOptionsFunc)) return informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Duration(*resyncInterval)*time.Second, podFactoryOpts...) } + +func buildCoreV1InformerFactory(kubeClient clientset.Interface) informers.SharedInformerFactory { + var coreV1FactoryOpts []informers.SharedInformerOption + if *namespace != apiv1.NamespaceAll { + coreV1FactoryOpts = append(coreV1FactoryOpts, informers.WithNamespace(*namespace)) + } + return informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Duration(*resyncInterval)*time.Second, coreV1FactoryOpts...) 
+} diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index b331b30cf..b587f413d 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -44,6 +44,8 @@ spec: items: type: string type: array + batchScheduler: + type: string deps: properties: downloadTimeout: diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index 310cd2a36..473b53b70 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -30,6 +30,8 @@ spec: items: type: string type: array + batchScheduler: + type: string deps: properties: downloadTimeout: diff --git a/manifest/spark-operator-rbac.yaml b/manifest/spark-operator-rbac.yaml index 55b72cc7e..9f3696448 100644 --- a/manifest/spark-operator-rbac.yaml +++ b/manifest/spark-operator-rbac.yaml @@ -45,6 +45,9 @@ rules: - apiGroups: [""] resources: ["nodes"] verbs: ["get"] +- apiGroups: [""] + resources: ["resourcequotas"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] verbs: ["create", "update", "patch"] @@ -52,7 +55,7 @@ rules: resources: ["customresourcedefinitions"] verbs: ["create", "get", "update", "delete"] - apiGroups: ["admissionregistration.k8s.io"] - resources: ["mutatingwebhookconfigurations"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] verbs: ["create", "get", "update", "delete"] - apiGroups: ["sparkoperator.k8s.io"] resources: ["sparkapplications", "scheduledsparkapplications"] diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go index 1d38b20b8..d3186dd20 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go @@ -241,6 +241,9 @@ type SparkApplicationSpec struct { // Monitoring configures how monitoring is handled. // Optional. Monitoring *MonitoringSpec `json:"monitoring,omitempty"` + // BatchScheduler configures which batch scheduler will be used for scheduling + // Optional. + BatchScheduler *string `json:"batchScheduler,omitempty"` } // ApplicationStateType represents the type of the current state of an application. diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go index ece5cf989..b524d12f2 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go @@ -618,6 +618,12 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { *out = new(MonitoringSpec) (*in).DeepCopyInto(*out) } + + if in.BatchScheduler != nil { + in, out := &in.BatchScheduler, &out.BatchScheduler + *out = new(string) + **out = **in + } return } diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go index 336dff107..74c1cd4f5 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -254,6 +254,9 @@ type SparkApplicationSpec struct { // Monitoring configures how monitoring is handled. // Optional. Monitoring *MonitoringSpec `json:"monitoring,omitempty"` + // BatchScheduler configures which batch scheduler will be used for scheduling + // Optional. 
+ BatchScheduler *string `json:"batchScheduler,omitempty"` } // ApplicationStateType represents the type of the current state of an application. diff --git a/pkg/batchscheduler/factory.go b/pkg/batchscheduler/factory.go deleted file mode 100644 index 9bdcc269e..000000000 --- a/pkg/batchscheduler/factory.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package batchscheduler - -import ( - "fmt" - "sync" - - "k8s.io/client-go/rest" - - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/volcano" -) - -type schedulerInitializeFunc func(config *rest.Config) (schedulerinterface.BatchScheduler, error) - -var ( - manageMutex sync.Mutex - schedulerContainers map[string]schedulerInitializeFunc -) - -func init() { - schedulerContainers = make(map[string]schedulerInitializeFunc) - registerBatchScheduler(volcano.GetPluginName(), volcano.New) -} - -func registerBatchScheduler(name string, iniFunc schedulerInitializeFunc) { - manageMutex.Lock() - defer manageMutex.Unlock() - schedulerContainers[name] = iniFunc -} - -func GetBatchScheduler(name string, config *rest.Config) (schedulerinterface.BatchScheduler, error) { - manageMutex.Lock() - defer manageMutex.Unlock() - for n, fc := range schedulerContainers { - if n == name { - return fc(config) - } - } - return nil, fmt.Errorf("failed to find batch scheduler named with %s", name) -} diff --git a/pkg/batchscheduler/scheduler_manager.go b/pkg/batchscheduler/scheduler_manager.go new file mode 100644 index 000000000..fe8495480 --- /dev/null +++ b/pkg/batchscheduler/scheduler_manager.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package batchscheduler + +import ( + "fmt" + "sync" + + "k8s.io/client-go/rest" + + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/volcano" +) + +type schedulerInitializeFunc func(config *rest.Config) (schedulerinterface.BatchScheduler, error) + +var schedulerContainers = map[string]schedulerInitializeFunc{ + volcano.GetPluginName(): volcano.New, +} + +func GetRegisteredNames() []string { + var pluginNames []string + for key := range schedulerContainers { + pluginNames = append(pluginNames, key) + } + return pluginNames +} + +type SchedulerManager struct { + sync.Mutex + config *rest.Config + plugins map[string]schedulerinterface.BatchScheduler +} + +func NewSchedulerManager(config *rest.Config) *SchedulerManager { + manager := SchedulerManager{ + config: config, + plugins: make(map[string]schedulerinterface.BatchScheduler), + } + return &manager +} + +func (batch *SchedulerManager) GetScheduler(schedulerName string) (schedulerinterface.BatchScheduler, error) { + iniFunc, registered := schedulerContainers[schedulerName] + if !registered { + return nil, fmt.Errorf("unregistered scheduler plugin %s", schedulerName) + } + + batch.Lock() + defer batch.Unlock() + + if plugin, existed := batch.plugins[schedulerName]; existed && plugin != nil { + return plugin, nil + } else if existed && plugin == nil { + return nil, fmt.Errorf( + "failed to get scheduler plugin %s, previous initialization has failed", schedulerName) + } else { + if plugin, err := iniFunc(batch.config); err != nil { + batch.plugins[schedulerName] = nil + return nil, err + } else { + batch.plugins[schedulerName] = plugin + return plugin, nil + } + } +} diff --git a/pkg/batchscheduler/volcano/volcano_scheduler.go b/pkg/batchscheduler/volcano/volcano_scheduler.go index 0fa7a9c95..48f02236b 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler.go +++ b/pkg/batchscheduler/volcano/volcano_scheduler.go @@ -18,7 +18,6 @@ package volcano import ( "fmt" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" @@ -51,23 +50,8 @@ func (v *VolcanoBatchScheduler) Name() string { } func (v *VolcanoBatchScheduler) ShouldSchedule(app *v1beta2.SparkApplication) bool { - - checkScheduler := func(scheduler *string) bool { - if scheduler != nil && *scheduler == v.Name() { - return true - } - return false - } - - if app.Spec.Mode == v1beta2.ClientMode { - return checkScheduler(app.Spec.Executor.SchedulerName) - } - if app.Spec.Mode == v1beta2.ClusterMode { - return checkScheduler(app.Spec.Executor.SchedulerName) && checkScheduler(app.Spec.Driver.SchedulerName) - } - - glog.Warningf("Unsupported Spark application mode %s, abandon schedule via volcano.", app.Spec.Mode) - return false + //NOTE: There is no additional requirement for volcano scheduler + return true } func (v *VolcanoBatchScheduler) DoBatchSchedulingOnSubmission(app *v1beta2.SparkApplication) (*v1beta2.SparkApplication, error) { diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go index 41d38e16c..c7d098bcb 100644 --- a/pkg/controller/sparkapplication/controller.go +++ b/pkg/controller/sparkapplication/controller.go @@ -35,12 +35,13 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - v1 
"k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface" crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" crdscheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" @@ -74,7 +75,7 @@ type Controller struct { applicationLister crdlisters.SparkApplicationLister podLister v1.PodLister ingressURLFormat string - batchScheduler schedulerinterface.BatchScheduler + batchSchedulerMgr *batchscheduler.SchedulerManager } // NewController creates a new Controller. @@ -86,7 +87,7 @@ func NewController( metricsConfig *util.MetricConfig, namespace string, ingressURLFormat string, - batchscheduler schedulerinterface.BatchScheduler) *Controller { + batchSchedulerMgr *batchscheduler.SchedulerManager) *Controller { crdscheme.AddToScheme(scheme.Scheme) eventBroadcaster := record.NewBroadcaster() @@ -96,7 +97,7 @@ func NewController( }) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "spark-operator"}) - return newSparkApplicationController(crdClient, kubeClient, crdInformerFactory, podInformerFactory, recorder, metricsConfig, ingressURLFormat, batchscheduler) + return newSparkApplicationController(crdClient, kubeClient, crdInformerFactory, podInformerFactory, recorder, metricsConfig, ingressURLFormat, batchSchedulerMgr) } func newSparkApplicationController( @@ -107,17 +108,17 @@ func newSparkApplicationController( eventRecorder record.EventRecorder, metricsConfig *util.MetricConfig, ingressURLFormat string, - batchScheduler schedulerinterface.BatchScheduler) *Controller { + batchSchedulerMgr *batchscheduler.SchedulerManager) *Controller { queue := workqueue.NewNamedRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(queueTokenRefillRate), queueTokenBucketSize)}, "spark-application-controller") controller := &Controller{ - crdClient: crdClient, - kubeClient: kubeClient, - recorder: eventRecorder, - queue: queue, - ingressURLFormat: ingressURLFormat, - batchScheduler: batchScheduler, + crdClient: crdClient, + kubeClient: kubeClient, + recorder: eventRecorder, + queue: queue, + ingressURLFormat: ingressURLFormat, + batchSchedulerMgr: batchSchedulerMgr, } if metricsConfig != nil { @@ -623,8 +624,8 @@ func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1be } // Use batch scheduler to perform scheduling task before submitting. 
- if c.shouldDoBatchScheduling(app) { - newApp, err := c.batchScheduler.DoBatchSchedulingOnSubmission(app) + if needScheduling, scheduler := c.shouldDoBatchScheduling(app); needScheduling { + newApp, err := scheduler.DoBatchSchedulingOnSubmission(app) if err != nil { glog.Errorf("failed to process batch scheduler BeforeSubmitSparkApplication with error %v", err) return app @@ -691,8 +692,16 @@ func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1be return app } -func (c *Controller) shouldDoBatchScheduling(app *v1beta2.SparkApplication) bool { - return c.batchScheduler != nil && c.batchScheduler.ShouldSchedule(app) +func (c *Controller) shouldDoBatchScheduling(app *v1beta2.SparkApplication) (bool, schedulerinterface.BatchScheduler) { + if c.batchSchedulerMgr == nil || app.Spec.BatchScheduler == nil || *app.Spec.BatchScheduler == "" { + return false, nil + } + if scheduler, err := c.batchSchedulerMgr.GetScheduler(*app.Spec.BatchScheduler); err != nil { + glog.Errorf("failed to get batch scheduler from name %s", *app.Spec.BatchScheduler) + return false, nil + } else { + return scheduler.ShouldSchedule(app), scheduler + } } func (c *Controller) updateApplicationStatusWithRetries( diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go index 4da56239f..608466551 100644 --- a/pkg/webhook/patch.go +++ b/pkg/webhook/patch.go @@ -333,10 +333,13 @@ func addDNSConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperati func addSchedulerName(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { var schedulerName *string - if util.IsDriverPod(pod) { + + //NOTE: Preferred to use `BatchScheduler` if application spec has it configured. + if app.Spec.BatchScheduler != nil { + schedulerName = app.Spec.BatchScheduler + } else if util.IsDriverPod(pod) { schedulerName = app.Spec.Driver.SchedulerName - } - if util.IsExecutorPod(pod) { + } else if util.IsExecutorPod(pod) { schedulerName = app.Spec.Executor.SchedulerName } if schedulerName == nil || *schedulerName == "" { diff --git a/pkg/webhook/resourceusage/enforcer.go b/pkg/webhook/resourceusage/enforcer.go new file mode 100644 index 000000000..4a24633bf --- /dev/null +++ b/pkg/webhook/resourceusage/enforcer.go @@ -0,0 +1,97 @@ +package resourceusage + +import ( + "fmt" + so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" + "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/informers" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/tools/cache" +) + +type ResourceQuotaEnforcer struct { + watcher ResourceUsageWatcher + resourceQuotaInformer corev1informers.ResourceQuotaInformer +} + +func NewResourceQuotaEnforcer(crdInformerFactory crdinformers.SharedInformerFactory, coreV1InformerFactory informers.SharedInformerFactory) ResourceQuotaEnforcer { + resourceUsageWatcher := newResourceUsageWatcher(crdInformerFactory, coreV1InformerFactory) + informer := coreV1InformerFactory.Core().V1().ResourceQuotas() + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) + return ResourceQuotaEnforcer{ + watcher: resourceUsageWatcher, + resourceQuotaInformer: informer, + } +} + +// TODO: There appears to be a deadlock in cache.WaitForCacheSync. Possibly related? https://github.com/kubernetes/kubernetes/issues/71450 +// For now, return immediately. 
There will be a short window after startup where quota calcuation is incorrect. +func (r ResourceQuotaEnforcer) WaitForCacheSync(stopCh <-chan struct{}) error { + /*if !cache.WaitForCacheSync(stopCh, func() bool { + return r.resourceQuotaInformer.Informer().HasSynced() + }) { + return fmt.Errorf("cache sync canceled") + }*/ + return nil +} + +func (r *ResourceQuotaEnforcer) admitResource(kind, namespace, name string, requestedResources ResourceList) (string, error) { + glog.V(2).Infof("Processing admission request for %s %s/%s, requesting: %s", kind, namespace, name, requestedResources) + resourceQuotas, err := r.resourceQuotaInformer.Lister().ResourceQuotas(namespace).List(labels.Everything()) + if err != nil { + return "", err + } + if (requestedResources.cpu.IsZero() && requestedResources.memory.IsZero()) || len(resourceQuotas) == 0 { + return "", nil + } + + currentNamespaceUsage, currentApplicationUsage := r.watcher.GetCurrentResourceUsageWithApplication(namespace, kind, name) + + for _, quota := range resourceQuotas { + // Scope selectors not currently supported, ignore any ResourceQuota that does not match everything. + if quota.Spec.ScopeSelector != nil || len(quota.Spec.Scopes) > 0 { + continue + } + + // If an existing application has increased its usage, check it against the quota again. If its usage hasn't increased, always allow it. + if requestedResources.cpu.Cmp(currentApplicationUsage.cpu) == 1 { + if cpuLimit, present := quota.Spec.Hard[corev1.ResourceCPU]; present { + availableCpu := cpuLimit + availableCpu.Sub(currentNamespaceUsage.cpu) + if requestedResources.cpu.Cmp(availableCpu) == 1 { + return fmt.Sprintf("%s %s/%s requests too many cores (%.3f cores requested, %.3f available).", kind, namespace, name, float64(requestedResources.cpu.MilliValue())/1000.0, float64(availableCpu.MilliValue())/1000.0), nil + } + } + } + + if requestedResources.memory.Cmp(currentApplicationUsage.memory) == 1 { + if memoryLimit, present := quota.Spec.Hard[corev1.ResourceMemory]; present { + availableMemory := memoryLimit + availableMemory.Sub(currentNamespaceUsage.memory) + if requestedResources.memory.Cmp(availableMemory) == 1 { + return fmt.Sprintf("%s %s/%s requests too much memory (%dMi requested, %dMi available).", kind, namespace, name, requestedResources.memory.Value()/(1<<20), availableMemory.Value()/(1<<20)), nil + } + } + } + } + return "", nil +} + +func (r *ResourceQuotaEnforcer) AdmitSparkApplication(app so.SparkApplication) (string, error) { + resourceUsage, err := sparkApplicationResourceUsage(app) + if err != nil { + return "", err + } + return r.admitResource(KindSparkApplication, app.ObjectMeta.Namespace, app.ObjectMeta.Name, resourceUsage) +} + +func (r *ResourceQuotaEnforcer) AdmitScheduledSparkApplication(app so.ScheduledSparkApplication) (string, error) { + resourceUsage, err := scheduledSparkApplicationResourceUsage(app) + if err != nil { + return "", err + } + return r.admitResource(KindScheduledSparkApplication, app.ObjectMeta.Namespace, app.ObjectMeta.Name, resourceUsage) +} diff --git a/pkg/webhook/resourceusage/handlers.go b/pkg/webhook/resourceusage/handlers.go new file mode 100644 index 000000000..d2dad511b --- /dev/null +++ b/pkg/webhook/resourceusage/handlers.go @@ -0,0 +1,119 @@ +package resourceusage + +import ( + so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + + "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" +) + +func (r *ResourceUsageWatcher) onPodAdded(obj 
interface{}) { + pod := obj.(*corev1.Pod) + // A pod launched by the Spark operator will already be accounted for by the CRD informer callback + if !launchedBySparkOperator(pod.ObjectMeta) { + r.setResources("Pod", namespaceOrDefault(pod.ObjectMeta), pod.ObjectMeta.Name, podResourceUsage(pod), r.usageByNamespacePod) + } +} + +func (r *ResourceUsageWatcher) onPodUpdated(oldObj, newObj interface{}) { + newPod := newObj.(*corev1.Pod) + if !launchedBySparkOperator(newPod.ObjectMeta) { + if newPod.Status.Phase == corev1.PodFailed || newPod.Status.Phase == corev1.PodSucceeded { + r.deleteResources("Pod", namespaceOrDefault(newPod.ObjectMeta), newPod.ObjectMeta.Name, r.usageByNamespacePod) + } else { + r.setResources("Pod", namespaceOrDefault(newPod.ObjectMeta), newPod.ObjectMeta.Name, podResourceUsage(newPod), r.usageByNamespacePod) + } + } +} + +func (r *ResourceUsageWatcher) onPodDeleted(obj interface{}) { + var pod *corev1.Pod + switch o := obj.(type) { + case *corev1.Pod: + pod = o + case cache.DeletedFinalStateUnknown: + pod = o.Obj.(*corev1.Pod) + default: + return + } + if !launchedBySparkOperator(pod.ObjectMeta) { + r.deleteResources("Pod", namespaceOrDefault(pod.ObjectMeta), pod.ObjectMeta.Name, r.usageByNamespacePod) + } +} + +func (r *ResourceUsageWatcher) onSparkApplicationAdded(obj interface{}) { + app := obj.(*so.SparkApplication) + namespace := namespaceOrDefault(app.ObjectMeta) + resources, err := sparkApplicationResourceUsage(*app) + if err != nil { + glog.Errorf("failed to determine resource usage of SparkApplication %s/%s: %v", namespace, app.ObjectMeta.Name, err) + } else { + r.setResources(KindSparkApplication, namespace, app.ObjectMeta.Name, resources, r.usageByNamespaceApplication) + } +} + +func (r *ResourceUsageWatcher) onSparkApplicationUpdated(oldObj, newObj interface{}) { + oldApp := oldObj.(*so.SparkApplication) + newApp := newObj.(*so.SparkApplication) + if oldApp.ResourceVersion == newApp.ResourceVersion { + return + } + namespace := namespaceOrDefault(newApp.ObjectMeta) + newResources, err := sparkApplicationResourceUsage(*newApp) + if err != nil { + glog.Errorf("failed to determine resource usage of SparkApplication %s/%s: %v", namespace, newApp.ObjectMeta.Name, err) + } else { + r.setResources(KindSparkApplication, namespace, newApp.ObjectMeta.Name, newResources, r.usageByNamespaceApplication) + } +} + +func (r *ResourceUsageWatcher) onSparkApplicationDeleted(obj interface{}) { + var app *so.SparkApplication + switch o := obj.(type) { + case *so.SparkApplication: + app = o + case cache.DeletedFinalStateUnknown: + app = o.Obj.(*so.SparkApplication) + default: + return + } + namespace := namespaceOrDefault(app.ObjectMeta) + r.deleteResources(KindSparkApplication, namespace, app.ObjectMeta.Name, r.usageByNamespaceApplication) +} + +func (r *ResourceUsageWatcher) onScheduledSparkApplicationAdded(obj interface{}) { + app := obj.(*so.ScheduledSparkApplication) + namespace := namespaceOrDefault(app.ObjectMeta) + resources, err := scheduledSparkApplicationResourceUsage(*app) + if err != nil { + glog.Errorf("failed to determine resource usage of ScheduledSparkApplication %s/%s: %v", namespace, app.ObjectMeta.Name, err) + } else { + r.setResources(KindScheduledSparkApplication, namespace, app.ObjectMeta.Name, resources, r.usageByNamespaceScheduledApplication) + } +} + +func (r *ResourceUsageWatcher) onScheduledSparkApplicationUpdated(oldObj, newObj interface{}) { + newApp := newObj.(*so.ScheduledSparkApplication) + namespace := namespaceOrDefault(newApp.ObjectMeta) + 
newResources, err := scheduledSparkApplicationResourceUsage(*newApp) + if err != nil { + glog.Errorf("failed to determine resource usage of ScheduledSparkApplication %s/%s: %v", namespace, newApp.ObjectMeta.Name, err) + } else { + r.setResources(KindScheduledSparkApplication, namespace, newApp.ObjectMeta.Name, newResources, r.usageByNamespaceScheduledApplication) + } +} + +func (r *ResourceUsageWatcher) onScheduledSparkApplicationDeleted(obj interface{}) { + var app *so.ScheduledSparkApplication + switch o := obj.(type) { + case *so.ScheduledSparkApplication: + app = o + case cache.DeletedFinalStateUnknown: + app = o.Obj.(*so.ScheduledSparkApplication) + default: + return + } + namespace := namespaceOrDefault(app.ObjectMeta) + r.deleteResources(KindScheduledSparkApplication, namespace, app.ObjectMeta.Name, r.usageByNamespaceScheduledApplication) +} diff --git a/pkg/webhook/resourceusage/util.go b/pkg/webhook/resourceusage/util.go new file mode 100644 index 000000000..8b0407ba5 --- /dev/null +++ b/pkg/webhook/resourceusage/util.go @@ -0,0 +1,241 @@ +package resourceusage + +import ( + "fmt" + so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "math" + "regexp" + "strconv" + "strings" +) + +// ...are you serious, Go? +func max(x, y int64) int64 { + if x > y { + return x + } + return y +} + +const ( + // https://spark.apache.org/docs/latest/configuration.html + defaultCpuMillicores = 1000 + defaultMemoryBytes = 1 << 30 // 1Gi + defaultMemoryOverhead = 0.1 + + // https://github.com/apache/spark/blob/c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala#L85 + minMemoryOverhead = 384 * (1 << 20) // 384Mi + nonJvmDefaultMemoryOverhead = 0.4 +) + +func namespaceOrDefault(meta metav1.ObjectMeta) string { + namespace := meta.Namespace + if namespace == "" { + namespace = "default" + } + return namespace +} + +func launchedBySparkOperator(meta metav1.ObjectMeta) bool { + val, present := meta.Labels[config.LaunchedBySparkOperatorLabel] + return present && val == "true" +} + +func resourcesRequiredToSchedule(resourceRequirements corev1.ResourceRequirements) (cpu int64, memoryBytes int64) { + if coresRequest, present := resourceRequirements.Requests[corev1.ResourceCPU]; present { + cpu = coresRequest.MilliValue() + } else if coresLimit, present := resourceRequirements.Limits[corev1.ResourceCPU]; present { + cpu = coresLimit.MilliValue() + } + if memoryRequest, present := resourceRequirements.Requests[corev1.ResourceMemory]; present { + memoryBytes = memoryRequest.Value() + } else if memoryLimit, present := resourceRequirements.Limits[corev1.ResourceMemory]; present { + memoryBytes = memoryLimit.Value() + } + return cpu, memoryBytes +} + +func coresRequiredForSparkPod(spec so.SparkPodSpec, instances int64) (int64, error) { + var cpu int64 + if spec.Cores != nil { + cpu = int64(*spec.Cores * 1000) + } else { + cpu = defaultCpuMillicores + } + return cpu * instances, nil +} + +var javaStringSuffixes = map[string]int64{ + "b": 1, + "kb": 1 << 10, + "k": 1 << 10, + "mb": 1 << 20, + "m": 1 << 20, + "gb": 1 << 30, + "g": 1 << 30, + "tb": 1 << 40, + "t": 1 << 40, + "pb": 1 << 50, + "p": 1 << 50, +} + +var javaStringPattern = regexp.MustCompile(`([0-9]+)([a-z]+)?`) +var javaFractionStringPattern = 
regexp.MustCompile(`([0-9]+\.[0-9]+)([a-z]+)?`) + +// Logic copied from https://github.com/apache/spark/blob/5264164a67df498b73facae207eda12ee133be7d/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java#L276 +func parseJavaMemoryString(str string) (int64, error) { + lower := strings.ToLower(str) + if matches := javaStringPattern.FindStringSubmatch(lower); matches != nil { + value, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, err + } + suffix := matches[2] + if multiplier, present := javaStringSuffixes[suffix]; present { + return multiplier * value, nil + } + } else if matches = javaFractionStringPattern.FindStringSubmatch(lower); matches != nil { + value, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return 0, err + } + suffix := matches[2] + if multiplier, present := javaStringSuffixes[suffix]; present { + return int64(float64(multiplier) * value), nil + } + } + return 0, fmt.Errorf("could not parse string '%s' as a Java-style memory value. Examples: 100kb, 1.5mb, 1g", str) +} + +// Logic copied from https://github.com/apache/spark/blob/c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala +func memoryRequiredForSparkPod(spec so.SparkPodSpec, memoryOverheadFactor *string, appType so.SparkApplicationType, replicas int64) (int64, error) { + var memoryBytes int64 + if spec.Memory != nil { + memory, err := parseJavaMemoryString(*spec.Memory) + if err != nil { + return 0, err + } + memoryBytes = memory + } else { + memoryBytes = defaultMemoryBytes + } + var memoryOverheadBytes int64 + if spec.MemoryOverhead != nil { + overhead, err := parseJavaMemoryString(*spec.MemoryOverhead) + if err != nil { + return 0, err + } + memoryOverheadBytes = overhead + } else { + var overheadFactor float64 + if memoryOverheadFactor != nil { + overheadFactorScope, err := strconv.ParseFloat(*memoryOverheadFactor, 64) + if err != nil { + return 0, err + } + overheadFactor = overheadFactorScope + } else { + if appType == so.JavaApplicationType { + overheadFactor = defaultMemoryOverhead + } else { + overheadFactor = nonJvmDefaultMemoryOverhead + } + } + memoryOverheadBytes = int64(math.Max(overheadFactor*float64(memoryBytes), minMemoryOverhead)) + } + return (memoryBytes + memoryOverheadBytes) * replicas, nil +} + +func resourceUsage(spec so.SparkApplicationSpec) (ResourceList, error) { + driverMemoryOverheadFactor := spec.MemoryOverheadFactor + executorMemoryOverheadFactor := spec.MemoryOverheadFactor + driverMemory, err := memoryRequiredForSparkPod(spec.Driver.SparkPodSpec, driverMemoryOverheadFactor, spec.Type, 1) + if err != nil { + return ResourceList{}, err + } + + var instances int64 = 1 + if spec.Executor.Instances != nil { + instances = int64(*spec.Executor.Instances) + } + executorMemory, err := memoryRequiredForSparkPod(spec.Executor.SparkPodSpec, executorMemoryOverheadFactor, spec.Type, instances) + if err != nil { + return ResourceList{}, err + } + + driverCores, err := coresRequiredForSparkPod(spec.Driver.SparkPodSpec, 1) + if err != nil { + return ResourceList{}, err + } + + executorCores, err := coresRequiredForSparkPod(spec.Executor.SparkPodSpec, instances) + if err != nil { + return ResourceList{}, err + } + + return ResourceList{ + cpu: *resource.NewMilliQuantity(driverCores+executorCores, resource.DecimalSI), + memory: *resource.NewQuantity(driverMemory+executorMemory, resource.DecimalSI), + }, nil +} + +func 
sparkApplicationResourceUsage(sparkApp so.SparkApplication) (ResourceList, error) { + // A completed/failed SparkApplication consumes no resources + if !sparkApp.Status.TerminationTime.IsZero() || sparkApp.Status.AppState.State == so.FailedState || sparkApp.Status.AppState.State == so.CompletedState { + return ResourceList{}, nil + } + return resourceUsage(sparkApp.Spec) +} + +func scheduledSparkApplicationResourceUsage(sparkApp so.ScheduledSparkApplication) (ResourceList, error) { + // Failed validation, will consume no resources + if sparkApp.Status.ScheduleState == so.FailedValidationState { + return ResourceList{}, nil + } + return resourceUsage(sparkApp.Spec.Template) +} + +func podResourceUsage(pod *corev1.Pod) ResourceList { + spec := pod.Spec + var initCores int64 + var initMemoryBytes int64 + completed := make(map[string]struct{}) + + for _, containerStatus := range pod.Status.InitContainerStatuses { + if containerStatus.State.Terminated != nil { + completed[containerStatus.Name] = struct{}{} + } + } + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.State.Terminated != nil { + completed[containerStatus.Name] = struct{}{} + } + } + + for _, container := range spec.InitContainers { + if _, present := completed[container.Name]; !present { + c, m := resourcesRequiredToSchedule(container.Resources) + initCores = max(c, initCores) + initMemoryBytes = max(m, initMemoryBytes) + } + } + var cores int64 + var memoryBytes int64 + for _, container := range spec.Containers { + if _, present := completed[container.Name]; !present { + c, m := resourcesRequiredToSchedule(container.Resources) + cores += c + memoryBytes += m + } + } + cores = max(initCores, cores) + memoryBytes = max(initMemoryBytes, memoryBytes) + return ResourceList{ + cpu: *resource.NewMilliQuantity(cores, resource.DecimalSI), + memory: *resource.NewQuantity(memoryBytes, resource.DecimalSI), + } +} diff --git a/pkg/webhook/resourceusage/util_test.go b/pkg/webhook/resourceusage/util_test.go new file mode 100644 index 000000000..c610136e0 --- /dev/null +++ b/pkg/webhook/resourceusage/util_test.go @@ -0,0 +1,25 @@ +package resourceusage + +import ( + "testing" +) + +func assertMemory(memoryString string, expectedBytes int64, t *testing.T) { + m, err := parseJavaMemoryString(memoryString) + if err != nil { + t.Error(err) + return + } + if m != expectedBytes { + t.Errorf("%s: expected %v bytes, got %v bytes", memoryString, expectedBytes, m) + return + } +} + +func TestJavaMemoryString(t *testing.T) { + assertMemory("1b", 1, t) + assertMemory("100k", 100*1024, t) + assertMemory("1gb", 1024*1024*1024, t) + assertMemory("10TB", 10*1024*1024*1024*1024, t) + assertMemory("10PB", 10*1024*1024*1024*1024*1024, t) +} diff --git a/pkg/webhook/resourceusage/watcher.go b/pkg/webhook/resourceusage/watcher.go new file mode 100644 index 000000000..46979fc84 --- /dev/null +++ b/pkg/webhook/resourceusage/watcher.go @@ -0,0 +1,157 @@ +package resourceusage + +import ( + "fmt" + "sync" + + crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/informers" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/tools/cache" +) + +type ResourceUsageWatcher struct { + currentUsageLock *sync.RWMutex + currentUsageByNamespace map[string]*ResourceList + usageByNamespacePod map[string]map[string]*ResourceList + usageByNamespaceScheduledApplication 
map[string]map[string]*ResourceList + usageByNamespaceApplication map[string]map[string]*ResourceList + crdInformerFactory crdinformers.SharedInformerFactory + coreV1InformerFactory informers.SharedInformerFactory + podInformer corev1informers.PodInformer +} + +// more convenient replacement for corev1.ResourceList +type ResourceList struct { + cpu resource.Quantity + memory resource.Quantity +} + +const ( + KindSparkApplication = "SparkApplication" + KindScheduledSparkApplication = "ScheduledSparkApplication" +) + +func (r ResourceList) String() string { + return fmt.Sprintf("cpu: %v mcpu, memory %v bytes", r.cpu.MilliValue(), r.memory.Value()) +} + +func newResourceUsageWatcher(crdInformerFactory crdinformers.SharedInformerFactory, coreV1InformerFactory informers.SharedInformerFactory) ResourceUsageWatcher { + glog.V(2).Infof("Creating new resource usage watcher") + r := ResourceUsageWatcher{ + crdInformerFactory: crdInformerFactory, + currentUsageLock: &sync.RWMutex{}, + coreV1InformerFactory: coreV1InformerFactory, + currentUsageByNamespace: make(map[string]*ResourceList), + usageByNamespacePod: make(map[string]map[string]*ResourceList), + usageByNamespaceScheduledApplication: make(map[string]map[string]*ResourceList), + usageByNamespaceApplication: make(map[string]map[string]*ResourceList), + } + // Note: Events for each handler are processed serially, so no coordination is needed between + // the different callbacks. Coordination is still needed around updating the shared state. + sparkApplicationInformer := r.crdInformerFactory.Sparkoperator().V1beta1().SparkApplications() + sparkApplicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: r.onSparkApplicationAdded, + UpdateFunc: r.onSparkApplicationUpdated, + DeleteFunc: r.onSparkApplicationDeleted, + }) + scheduledSparkApplicationInformer := r.crdInformerFactory.Sparkoperator().V1beta1().ScheduledSparkApplications() + scheduledSparkApplicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: r.onScheduledSparkApplicationAdded, + UpdateFunc: r.onScheduledSparkApplicationUpdated, + DeleteFunc: r.onScheduledSparkApplicationDeleted, + }) + r.podInformer = r.coreV1InformerFactory.Core().V1().Pods() + r.podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: r.onPodAdded, + UpdateFunc: r.onPodUpdated, + DeleteFunc: r.onPodDeleted, + }) + return r +} + +func (r *ResourceUsageWatcher) GetCurrentResourceUsage(namespace string) ResourceList { + r.currentUsageLock.RLock() + defer r.currentUsageLock.RUnlock() + if resourceUsageInternal, present := r.currentUsageByNamespace[namespace]; present { + return ResourceList{ + cpu: resourceUsageInternal.cpu, + memory: resourceUsageInternal.memory, + } + } + return ResourceList{} +} + +func (r *ResourceUsageWatcher) GetCurrentResourceUsageWithApplication(namespace, kind, name string) (namespaceResources, applicationResources ResourceList) { + r.currentUsageLock.RLock() + defer r.currentUsageLock.RUnlock() + if resourceUsageInternal, present := r.currentUsageByNamespace[namespace]; present { + var applicationResources ResourceList + var namespaceMap map[string]map[string]*ResourceList + switch kind { + case KindSparkApplication: + namespaceMap = r.usageByNamespaceApplication + case KindScheduledSparkApplication: + namespaceMap = r.usageByNamespaceScheduledApplication + } + if applicationMap, present := namespaceMap[namespace]; present { + if ar, present := applicationMap[name]; present { + applicationResources = *ar 
+ } + } + currentUsage := *resourceUsageInternal // Creates a copy + currentUsage.cpu.Sub(applicationResources.cpu) + currentUsage.memory.Sub(applicationResources.memory) + return currentUsage, applicationResources + } + return ResourceList{}, ResourceList{} +} + +func (r *ResourceUsageWatcher) unsafeSetResources(namespace, name string, resources ResourceList, resourceMap map[string]map[string]*ResourceList) { + if _, present := resourceMap[namespace]; !present { + resourceMap[namespace] = make(map[string]*ResourceList) + } + // Clear any resource usage currently stored for this object + r.unsafeDeleteResources(namespace, name, resourceMap) + resourceMap[namespace][name] = &resources + if current, present := r.currentUsageByNamespace[namespace]; present { + current.cpu.Add(resources.cpu) + current.memory.Add(resources.memory) + } else { + r.currentUsageByNamespace[namespace] = &ResourceList{ + cpu: resources.cpu, + memory: resources.memory, + } + } +} + +func (r *ResourceUsageWatcher) unsafeDeleteResources(namespace, name string, resourceMap map[string]map[string]*ResourceList) { + if namespaceMap, present := resourceMap[namespace]; present { + if resources, present := namespaceMap[name]; present { + delete(resourceMap[namespace], name) + if current, present := r.currentUsageByNamespace[namespace]; present { + current.cpu.Sub(resources.cpu) + current.memory.Sub(resources.memory) + } + } + } +} + +func (r *ResourceUsageWatcher) setResources(typeName, namespace, name string, resources ResourceList, resourceMap map[string]map[string]*ResourceList) { + glog.V(3).Infof("Updating object %s %s/%s with resources %v", typeName, namespace, name, resources) + r.currentUsageLock.Lock() + r.unsafeSetResources(namespace, name, resources, resourceMap) + r.currentUsageLock.Unlock() + glog.V(3).Infof("Current resources for namespace %s: %v", namespace, r.currentUsageByNamespace[namespace]) +} + +func (r *ResourceUsageWatcher) deleteResources(typeName, namespace, name string, resourceMap map[string]map[string]*ResourceList) { + glog.V(3).Infof("Deleting resources from object %s/%s", namespace, name) + r.currentUsageLock.Lock() + r.unsafeDeleteResources(namespace, name, resourceMap) + r.currentUsageLock.Unlock() + glog.V(3).Infof("Current resources for namespace %s: %v", namespace, r.currentUsageByNamespace[namespace]) +} diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index 8e45d1ddc..cf59a49b4 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -36,16 +36,21 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + crdapi "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" + crdv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" crinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" + "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/webhook/resourceusage" ) const ( - webhookName = "webhook.sparkoperator.k8s.io" + webhookName = "webhook.sparkoperator.k8s.io" + quotaWebhookName = "quotaenforcer.sparkoperator.k8s.io" ) var podResource = metav1.GroupVersionResource{ 
@@ -54,17 +59,33 @@ var podResource = metav1.GroupVersionResource{ Resource: "pods", } +var sparkApplicationResource = metav1.GroupVersionResource{ + Group: crdapi.GroupName, + Version: crdv1beta1.Version, + Resource: "sparkapplications", +} + +var scheduledSparkApplicationResource = metav1.GroupVersionResource{ + Group: crdapi.GroupName, + Version: crdv1beta1.Version, + Resource: "scheduledsparkapplications", +} + // WebHook encapsulates things needed to run the webhook. type WebHook struct { - clientset kubernetes.Interface - lister crdlisters.SparkApplicationLister - server *http.Server - certProvider *certProvider - serviceRef *v1beta1.ServiceReference - failurePolicy v1beta1.FailurePolicyType - selector *metav1.LabelSelector - sparkJobNamespace string - deregisterOnExit bool + clientset kubernetes.Interface + informerFactory crinformers.SharedInformerFactory + lister crdlisters.SparkApplicationLister + server *http.Server + certProvider *certProvider + serviceRef *v1beta1.ServiceReference + failurePolicy v1beta1.FailurePolicyType + selector *metav1.LabelSelector + sparkJobNamespace string + deregisterOnExit bool + enableResourceQuotaEnforcement bool + resourceQuotaEnforcer resourceusage.ResourceQuotaEnforcer + coreV1InformerFactory informers.SharedInformerFactory } // Configuration parsed from command-line flags @@ -101,7 +122,10 @@ func New( clientset kubernetes.Interface, informerFactory crinformers.SharedInformerFactory, jobNamespace string, - deregisterOnExit bool) (*WebHook, error) { + deregisterOnExit bool, + enableResourceQuotaEnforcement bool, + coreV1InformerFactory informers.SharedInformerFactory) (*WebHook, error) { + cert, err := NewCertProvider( userConfig.serverCert, userConfig.serverCertKey, @@ -119,14 +143,18 @@ func New( Path: &path, } hook := &WebHook{ - clientset: clientset, - lister: informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister(), - certProvider: cert, - serviceRef: serviceRef, - sparkJobNamespace: jobNamespace, - deregisterOnExit: deregisterOnExit, - failurePolicy: arv1beta1.Ignore, + clientset: clientset, + informerFactory: informerFactory, + lister: informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister(), + certProvider: cert, + serviceRef: serviceRef, + sparkJobNamespace: jobNamespace, + deregisterOnExit: deregisterOnExit, + failurePolicy: arv1beta1.Ignore, + coreV1InformerFactory: coreV1InformerFactory, + enableResourceQuotaEnforcement: enableResourceQuotaEnforcement, } + if userConfig.webhookFailOnError { if userConfig.webhookNamespaceSelector == "" { return nil, fmt.Errorf("webhook-namespace-selector must be set when webhook-fail-on-error is true") @@ -168,14 +196,23 @@ func parseNamespaceSelector(selectorArg string) (*metav1.LabelSelector, error) { } // Start starts the admission webhook server and registers itself to the API server. 
-func (wh *WebHook) Start() error { +func (wh *WebHook) Start(stopCh <-chan struct{}) error { wh.certProvider.Start() wh.server.TLSConfig = wh.certProvider.tlsConfig() + if wh.enableResourceQuotaEnforcement { + resourceQuotaEnforcer := resourceusage.NewResourceQuotaEnforcer(wh.informerFactory, wh.coreV1InformerFactory) + err := resourceQuotaEnforcer.WaitForCacheSync(stopCh) + if err != nil { + return err + } + wh.resourceQuotaEnforcer = resourceQuotaEnforcer + } + go func() { - glog.Info("Starting the Spark pod admission webhook server") + glog.Info("Starting the Spark admission webhook server") if err := wh.server.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { - glog.Errorf("error while serving the Spark pod admission webhook: %v\n", err) + glog.Errorf("error while serving the Spark admission webhook: %v\n", err) } }() @@ -229,15 +266,29 @@ func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { internalError(w, err) return } - - if review.Request.Resource != podResource { - denyRequest(w, fmt.Sprintf("unexpected resource type: %v", review.Request.Resource.String()), http.StatusUnsupportedMediaType) + var whErr error + var reviewResponse *admissionv1beta1.AdmissionResponse + switch review.Request.Resource { + case podResource: + reviewResponse, whErr = mutatePods(review, wh.lister, wh.sparkJobNamespace) + case sparkApplicationResource: + if !wh.enableResourceQuotaEnforcement { + unexpectedResourceType(w, review.Request.Resource.String()) + return + } + reviewResponse, whErr = admitSparkApplications(review, wh.resourceQuotaEnforcer) + case scheduledSparkApplicationResource: + if !wh.enableResourceQuotaEnforcement { + unexpectedResourceType(w, review.Request.Resource.String()) + return + } + reviewResponse, whErr = admitScheduledSparkApplications(review, wh.resourceQuotaEnforcer) + default: + unexpectedResourceType(w, review.Request.Resource.String()) return } - - reviewResponse, err := mutatePods(review, wh.lister, wh.sparkJobNamespace) - if err != nil { - internalError(w, err) + if whErr != nil { + internalError(w, whErr) return } @@ -259,6 +310,10 @@ func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { } } +func unexpectedResourceType(w http.ResponseWriter, kind string) { + denyRequest(w, fmt.Sprintf("unexpected resource type: %v", kind), http.StatusUnsupportedMediaType) +} + func internalError(w http.ResponseWriter, err error) { glog.Errorf("internal error: %v", err) denyRequest(w, err.Error(), 500) @@ -288,28 +343,39 @@ func denyRequest(w http.ResponseWriter, reason string, code int) { } func (wh *WebHook) selfRegistration(webhookConfigName string) error { - client := wh.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations() - existing, getErr := client.Get(webhookConfigName, metav1.GetOptions{}) - if getErr != nil && !errors.IsNotFound(getErr) { - return getErr - } + mwcClient := wh.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations() + vwcClient := wh.clientset.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations() caCert, err := readCertFile(wh.certProvider.caCertFile) if err != nil { return err } - webhook := v1beta1.Webhook{ - Name: webhookName, - Rules: []v1beta1.RuleWithOperations{ - { - Operations: []v1beta1.OperationType{v1beta1.Create}, - Rule: v1beta1.Rule{ - APIGroups: []string{""}, - APIVersions: []string{"v1"}, - Resources: []string{"pods"}, - }, + + mutatingRules := []v1beta1.RuleWithOperations{ + { + Operations: []v1beta1.OperationType{v1beta1.Create}, + Rule: v1beta1.Rule{ 
+ APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"pods"}, + }, + }, + } + + validatingRules := []v1beta1.RuleWithOperations{ + { + Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update}, + Rule: v1beta1.Rule{ + APIGroups: []string{crdapi.GroupName}, + APIVersions: []string{crdv1beta1.Version}, + Resources: []string{sparkApplicationResource.Resource, scheduledSparkApplicationResource.Resource}, }, }, + } + + mutatingWebhook := v1beta1.Webhook{ + Name: webhookName, + Rules: mutatingRules, ClientConfig: v1beta1.WebhookClientConfig{ Service: wh.serviceRef, CABundle: caCert, @@ -317,36 +383,140 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { FailurePolicy: &wh.failurePolicy, NamespaceSelector: wh.selector, } - webhooks := []v1beta1.Webhook{webhook} - if getErr == nil && existing != nil { - // Update case. - glog.Info("Updating existing MutatingWebhookConfiguration for the Spark pod admission webhook") - if !equality.Semantic.DeepEqual(webhooks, existing.Webhooks) { - existing.Webhooks = webhooks - if _, err := client.Update(existing); err != nil { - return err - } + validatingWebhook := v1beta1.Webhook{ + Name: quotaWebhookName, + Rules: validatingRules, + ClientConfig: v1beta1.WebhookClientConfig{ + Service: wh.serviceRef, + CABundle: caCert, + }, + FailurePolicy: &wh.failurePolicy, + NamespaceSelector: wh.selector, + } + + mutatingWebhooks := []v1beta1.Webhook{mutatingWebhook} + validatingWebhooks := []v1beta1.Webhook{validatingWebhook} + + mutatingExisting, mutatingGetErr := mwcClient.Get(webhookConfigName, metav1.GetOptions{}) + if mutatingGetErr != nil { + if !errors.IsNotFound(mutatingGetErr) { + return mutatingGetErr } - } else { // Create case. glog.Info("Creating a MutatingWebhookConfiguration for the Spark pod admission webhook") webhookConfig := &v1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: webhookConfigName, }, - Webhooks: webhooks, + Webhooks: mutatingWebhooks, } - if _, err := client.Create(webhookConfig); err != nil { + if _, err := mwcClient.Create(webhookConfig); err != nil { return err } + } else { + // Update case. + glog.Info("Updating existing MutatingWebhookConfiguration for the Spark pod admission webhook") + if !equality.Semantic.DeepEqual(mutatingWebhooks, mutatingExisting.Webhooks) { + mutatingExisting.Webhooks = mutatingWebhooks + if _, err := mwcClient.Update(mutatingExisting); err != nil { + return err + } + } + } + + if wh.enableResourceQuotaEnforcement { + validatingExisting, validatingGetErr := vwcClient.Get(webhookConfigName, metav1.GetOptions{}) + if validatingGetErr != nil { + if !errors.IsNotFound(validatingGetErr) { + return validatingGetErr + } + // Create case. + glog.Info("Creating a ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") + webhookConfig := &v1beta1.ValidatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: webhookConfigName, + }, + Webhooks: validatingWebhooks, + } + if _, err := vwcClient.Create(webhookConfig); err != nil { + return err + } + + } else { + // Update case. 
+ glog.Info("Updating existing ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") + if !equality.Semantic.DeepEqual(validatingWebhooks, validatingExisting.Webhooks) { + validatingExisting.Webhooks = validatingWebhooks + if _, err := vwcClient.Update(validatingExisting); err != nil { + return err + } + } + } } return nil } func (wh *WebHook) selfDeregistration(webhookConfigName string) error { - client := wh.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations() - return client.Delete(webhookConfigName, metav1.NewDeleteOptions(0)) + mutatingConfigs := wh.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations() + validatingConfigs := wh.clientset.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations() + if wh.enableResourceQuotaEnforcement { + err := validatingConfigs.Delete(webhookConfigName, metav1.NewDeleteOptions(0)) + if err != nil { + return err + } + } + return mutatingConfigs.Delete(webhookConfigName, metav1.NewDeleteOptions(0)) +} + +func admitSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1beta1.AdmissionResponse, error) { + if review.Request.Resource != sparkApplicationResource { + return nil, fmt.Errorf("expected resource to be %s, got %s", sparkApplicationResource, review.Request.Resource) + } + + raw := review.Request.Object.Raw + app := &crdv1beta1.SparkApplication{} + if err := json.Unmarshal(raw, app); err != nil { + return nil, fmt.Errorf("failed to unmarshal a SparkApplication from the raw data in the admission request: %v", err) + } + + reason, err := enforcer.AdmitSparkApplication(*app) + if err != nil { + return nil, fmt.Errorf("resource quota enforcement failed for SparkApplication: %v", err) + } + response := &admissionv1beta1.AdmissionResponse{Allowed: reason == ""} + if reason != "" { + response.Result = &metav1.Status{ + Message: reason, + Code: 400, + } + } + return response, nil +} + +func admitScheduledSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1beta1.AdmissionResponse, error) { + if review.Request.Resource != scheduledSparkApplicationResource { + return nil, fmt.Errorf("expected resource to be %s, got %s", scheduledSparkApplicationResource, review.Request.Resource) + } + + raw := review.Request.Object.Raw + app := &crdv1beta1.ScheduledSparkApplication{} + if err := json.Unmarshal(raw, app); err != nil { + return nil, fmt.Errorf("failed to unmarshal a ScheduledSparkApplication from the raw data in the admission request: %v", err) + } + + response := &admissionv1beta1.AdmissionResponse{Allowed: true} + reason, err := enforcer.AdmitScheduledSparkApplication(*app) + if err != nil { + return nil, fmt.Errorf("resource quota enforcement failed for ScheduledSparkApplication: %v", err) + } else if reason != "" { + response.Allowed = false + response.Result = &metav1.Status{ + Message: reason, + Code: 400, + } + } + return response, nil } func mutatePods( From 76906516318e7f85d9b9ff8014c56046245e32dc Mon Sep 17 00:00:00 2001 From: Kevin Hogeland Date: Fri, 6 Sep 2019 10:42:17 -0700 Subject: [PATCH 4/6] Tweak Cores validation --- .../sparkoperator.k8s.io_scheduledsparkapplications.yaml | 6 ++---- manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml | 6 ++---- pkg/apis/sparkoperator.k8s.io/v1beta2/types.go | 3 +-- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml 
b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index b587f413d..7fdfab5c3 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -356,9 +356,8 @@ spec: coreLimit: type: string cores: - exclusiveMinimum: true format: int32 - minimum: 0 + minimum: 1 type: integer dnsConfig: properties: @@ -1245,9 +1244,8 @@ spec: coreRequest: type: string cores: - exclusiveMinimum: true format: int32 - minimum: 0 + minimum: 1 type: integer dnsConfig: properties: diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index 473b53b70..e7726e333 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -342,9 +342,8 @@ spec: coreLimit: type: string cores: - exclusiveMinimum: true format: int32 - minimum: 0 + minimum: 1 type: integer dnsConfig: properties: @@ -1231,9 +1230,8 @@ spec: coreRequest: type: string cores: - exclusiveMinimum: true format: int32 - minimum: 0 + minimum: 1 type: integer dnsConfig: properties: diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go index 74c1cd4f5..426177e5d 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -358,8 +358,7 @@ type Dependencies struct { type SparkPodSpec struct { // Cores is the number of CPU cores to request for the pod. // Optional. - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:ExclusiveMinimum=true + // +kubebuilder:validation:Minimum=1 Cores *int32 `json:"cores,omitempty"` // CoreLimit specifies a hard limit on CPU cores for the pod. // Optional From 1d38237c4d28614c2b2092c9d4b7bcb2394141ed Mon Sep 17 00:00:00 2001 From: Kevin Hogeland Date: Fri, 13 Sep 2019 10:10:41 -0700 Subject: [PATCH 5/6] Fix typo, merge upstream --- README.md | 2 +- pkg/webhook/resourceusage/enforcer.go | 2 +- pkg/webhook/resourceusage/handlers.go | 2 +- pkg/webhook/resourceusage/util.go | 4 ++-- pkg/webhook/webhook.go | 12 ++++++------ 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 92e7cecd0..ca29279ad 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ The Kubernetes Operator for Apache Spark is under active development, but backward compatibility of the APIs is guaranteed for beta releases. -**If you are currently using the `v1alpha1` or `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/"` to `apiVersion: "sparkoperator.k8s.io/v1beta1"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f manifest/crds`.** +**If you are currently using the `v1alpha1` or `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. 
You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f manifest/crds`.** Customization of Spark pods, e.g., mounting arbitrary volumes and setting pod affinity, is currently experimental and implemented using a Kubernetes [Mutating Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which became beta in Kubernetes 1.9. diff --git a/pkg/webhook/resourceusage/enforcer.go b/pkg/webhook/resourceusage/enforcer.go index cfc7eaf4a..b4aece795 100644 --- a/pkg/webhook/resourceusage/enforcer.go +++ b/pkg/webhook/resourceusage/enforcer.go @@ -2,7 +2,7 @@ package resourceusage import ( "fmt" - so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" "github.com/golang/glog" corev1 "k8s.io/api/core/v1" diff --git a/pkg/webhook/resourceusage/handlers.go b/pkg/webhook/resourceusage/handlers.go index d2dad511b..1b99dc2c0 100644 --- a/pkg/webhook/resourceusage/handlers.go +++ b/pkg/webhook/resourceusage/handlers.go @@ -1,7 +1,7 @@ package resourceusage import ( - so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/golang/glog" corev1 "k8s.io/api/core/v1" diff --git a/pkg/webhook/resourceusage/util.go b/pkg/webhook/resourceusage/util.go index 8b0407ba5..37ee4e27e 100644 --- a/pkg/webhook/resourceusage/util.go +++ b/pkg/webhook/resourceusage/util.go @@ -2,7 +2,7 @@ package resourceusage import ( "fmt" - so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -62,7 +62,7 @@ func resourcesRequiredToSchedule(resourceRequirements corev1.ResourceRequirement func coresRequiredForSparkPod(spec so.SparkPodSpec, instances int64) (int64, error) { var cpu int64 if spec.Cores != nil { - cpu = int64(*spec.Cores * 1000) + cpu = int64(*spec.Cores) * 1000 } else { cpu = defaultCpuMillicores } diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index 33c3ab1cd..98e265a94 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -40,7 +40,7 @@ import ( "k8s.io/client-go/kubernetes" crdapi "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" - crdv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + crdv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" @@ -61,13 +61,13 @@ var podResource = metav1.GroupVersionResource{ var 
sparkApplicationResource = metav1.GroupVersionResource{ Group: crdapi.GroupName, - Version: crdv1beta1.Version, + Version: crdv1beta2.Version, Resource: "sparkapplications", } var scheduledSparkApplicationResource = metav1.GroupVersionResource{ Group: crdapi.GroupName, - Version: crdv1beta1.Version, + Version: crdv1beta2.Version, Resource: "scheduledsparkapplications", } @@ -369,7 +369,7 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update}, Rule: v1beta1.Rule{ APIGroups: []string{crdapi.GroupName}, - APIVersions: []string{crdv1beta1.Version}, + APIVersions: []string{crdv1beta2.Version}, Resources: []string{sparkApplicationResource.Resource, scheduledSparkApplicationResource.Resource}, }, }, @@ -477,7 +477,7 @@ func admitSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer r } raw := review.Request.Object.Raw - app := &crdv1beta1.SparkApplication{} + app := &crdv1beta2.SparkApplication{} if err := json.Unmarshal(raw, app); err != nil { return nil, fmt.Errorf("failed to unmarshal a SparkApplication from the raw data in the admission request: %v", err) } @@ -502,7 +502,7 @@ func admitScheduledSparkApplications(review *admissionv1beta1.AdmissionReview, e } raw := review.Request.Object.Raw - app := &crdv1beta1.ScheduledSparkApplication{} + app := &crdv1beta2.ScheduledSparkApplication{} if err := json.Unmarshal(raw, app); err != nil { return nil, fmt.Errorf("failed to unmarshal a ScheduledSparkApplication from the raw data in the admission request: %v", err) } From fc449fbf1bdf0988a25e9c62b95840b7e87735f2 Mon Sep 17 00:00:00 2001 From: Kevin Hogeland Date: Fri, 13 Sep 2019 10:14:59 -0700 Subject: [PATCH 6/6] Update remaining docs for v1beta2 --- docs/api.md | 2 +- docs/gcp.md | 4 ++-- docs/quick-start-guide.md | 6 +++--- docs/user-guide.md | 8 ++++---- docs/volcano-integration.md | 6 +++--- sparkctl/cmd/testdata/test-app.yaml | 4 ++-- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/api.md b/docs/api.md index 7c499e01c..635d48968 100644 --- a/docs/api.md +++ b/docs/api.md @@ -1,7 +1,7 @@ # SparkApplication API The Kubernetes Operator for Apache Spark uses [CustomResourceDefinitions](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) named `SparkApplication` and `ScheduledSparkApplication` for specifying one-time Spark applications and Spark applications -that are supposed to run on a standard [cron](https://en.wikipedia.org/wiki/Cron) schedule. Similarly to other kinds of Kubernetes resources, they consist of a specification in a `Spec` field and a `Status` field. The definitions are organized in the following structure. The v1beta1 version of the API definition is implemented [here](../pkg/apis/sparkoperator.k8s.io/v1beta1/types.go). +that are supposed to run on a standard [cron](https://en.wikipedia.org/wiki/Cron) schedule. Similarly to other kinds of Kubernetes resources, they consist of a specification in a `Spec` field and a `Status` field. The definitions are organized in the following structure. The v1beta2 version of the API definition is implemented [here](../pkg/apis/sparkoperator.k8s.io/v1beta2/types.go). ``` ScheduledSparkApplication diff --git a/docs/gcp.md b/docs/gcp.md index be822253a..4e63f8dfe 100644 --- a/docs/gcp.md +++ b/docs/gcp.md @@ -43,7 +43,7 @@ The ones set in `core-site.xml` apply to all applications using the image. Also variable `GCS_PROJECT_ID` must be set when using the image at `gcr.io/ynli-k8s/spark:v2.3.0-gcs`. 
```yaml -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: foo-gcs-bg @@ -58,7 +58,7 @@ spec: "google.cloud.auth.service.account.enable": "true" "google.cloud.auth.service.account.json.keyfile": "/mnt/secrets/key.json" driver: - cores: 0.1 + cores: 1 secrets: - name: "gcs-bq" path: "/mnt/secrets" diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index 67a00117d..9f4f2ee4c 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -69,15 +69,15 @@ $ kubectl get sparkapplications spark-pi -o=yaml This will show something similar to the following: ```yaml -apiVersion: sparkoperator.k8s.io/v1beta1 +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: ... spec: deps: {} driver: - coreLimit: 200m - cores: 0.1 + coreLimit: 1200m + cores: 1 labels: version: 2.3.0 memory: 512m diff --git a/docs/user-guide.md b/docs/user-guide.md index b79a5eeba..ab5e9a970 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -55,7 +55,7 @@ It also has fields for specifying the unified container image (to use for both t Below is an example showing part of a `SparkApplication` specification: ```yaml -apiVersion: sparkoperator.k8s.io/v1beta1 +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi @@ -125,7 +125,7 @@ The following is an example driver specification: ```yaml spec: driver: - cores: 0.1 + cores: 1 coreLimit: 200m memory: 512m labels: @@ -514,7 +514,7 @@ client so effectively the driver gets restarted. The operator supports running a Spark application on a standard [cron](https://en.wikipedia.org/wiki/Cron) schedule using objects of the `ScheduledSparkApplication` custom resource type. A `ScheduledSparkApplication` object specifies a cron schedule on which the application should run and a `SparkApplication` template from which a `SparkApplication` object for each run of the application is created. The following is an example `ScheduledSparkApplication`: ```yaml -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: ScheduledSparkApplication metadata: name: spark-pi-scheduled @@ -531,7 +531,7 @@ spec: mainClass: org.apache.spark.examples.SparkPi mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.11-2.3.0.jar driver: - cores: 0.5 + cores: 1 memory: 512m executor: cores: 1 diff --git a/docs/volcano-integration.md b/docs/volcano-integration.md index 2e5f13eee..332bb041d 100644 --- a/docs/volcano-integration.md +++ b/docs/volcano-integration.md @@ -23,7 +23,7 @@ $ helm install incubator/sparkoperator --namespace spark-operator --set enableBa Now, we can run a updated version of spark application (with `batchScheduler` configured), for instance: ```yaml -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: spark-pi @@ -45,8 +45,8 @@ spec: path: "/tmp" type: Directory driver: - cores: 0.1 - coreLimit: "200m" + cores: 1 + coreLimit: "1200m" memory: "512m" labels: version: 2.4.0 diff --git a/sparkctl/cmd/testdata/test-app.yaml b/sparkctl/cmd/testdata/test-app.yaml index e90aeef2a..b6e027790 100644 --- a/sparkctl/cmd/testdata/test-app.yaml +++ b/sparkctl/cmd/testdata/test-app.yaml @@ -14,7 +14,7 @@ # limitations under the License. 
# -apiVersion: "sparkoperator.k8s.io/v1beta1" +apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: example @@ -28,4 +28,4 @@ spec: image: "spark" executor: image: "spark" - instances: 1 \ No newline at end of file + instances: 1
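
A minimal sketch of how the resource quota enforcement added in this series evaluates a `v1beta2` application, assuming an illustrative namespace `spark-jobs` and the example values below (the names and numbers are assumptions, not taken from the patches): with quota enforcement enabled, the validating webhook sums the requested resources of the driver and all executors — here 3 cores and roughly 2688Mi of memory, since each pod's `512m` heap picks up the 384Mi minimum memory overhead — subtracts the namespace's current usage, and admits the SparkApplication only if the totals still fit under the ResourceQuota's `hard` limits.

```yaml
# Illustrative example only; names and values are assumptions, not part of this patch series.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: spark-quota
  namespace: spark-jobs
spec:
  hard:
    cpu: "4"      # the enforcer reads the plain "cpu" and "memory" keys from spec.hard
    memory: 4Gi
---
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
  name: spark-pi
  namespace: spark-jobs
spec:
  type: Scala
  mode: cluster
  image: "gcr.io/spark-operator/spark:v2.4.0"
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.0.jar"
  driver:
    cores: 1          # 1000 millicores
    memory: "512m"    # 512Mi + max(overhead factor * 512Mi, 384Mi) = 896Mi
  executor:
    cores: 1
    instances: 2      # 2 * 896Mi = 1792Mi, 2000 millicores
    memory: "512m"
```

A submission that exceeds the remaining quota (for example, the same manifest with many more executor instances) would be denied with a 400 response whose message carries the reason string built by `admitResource`.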