diff --git a/docs/api-docs.md b/docs/api-docs.md
index da62de645..4c1d52255 100644
--- a/docs/api-docs.md
+++ b/docs/api-docs.md
@@ -946,6 +946,19 @@ in-cluster-client.
+coreRequest
+
+string
+
+ |
+
+(Optional)
+ CoreRequest is the physical CPU core request for the driver.
+Maps to spark.kubernetes.driver.request.cores, which is available since Spark 3.0.
+ |
+
+
+
serviceAccount
string
@@ -1023,7 +1036,8 @@ string
|
(Optional)
- CoreRequest is the physical CPU core request for the executors.
+CoreRequest is the physical CPU core request for the executors.
+Maps to spark.kubernetes.executor.request.cores, which is available since Spark 2.4.
|
@@ -1034,6 +1048,7 @@ string
+(Optional)
JavaOptions is a string of extra JVM options to pass to the executors. For instance,
GC settings or other logging.
|
@@ -1046,8 +1061,9 @@ bool
- DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination
-Optional
+(Optional)
+DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination.
+Maps to spark.kubernetes.executor.deleteOnTermination, which is available since Spark 3.0.
|
@@ -2216,7 +2232,7 @@ int32
(Optional)
- Cores is the number of CPU cores to request for the pod.
+Cores maps to spark.driver.cores or spark.executor.cores for the driver and executors, respectively.
|
@@ -2515,5 +2531,5 @@ Kubernetes core/v1.PodDNSConfig
Generated with gen-crd-api-reference-docs
-on git commit 988409b.
+on git commit 1b96b7b.
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
index 43e347ad3..9fb5d8cf7 100644
--- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
+++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
@@ -382,7 +382,7 @@ type Dependencies struct {
// SparkPodSpec defines common things that can be customized for a Spark driver or executor pod.
// TODO: investigate if we should use v1.PodSpec and limit what can be set instead.
type SparkPodSpec struct {
- // Cores is the number of CPU cores to request for the pod.
+ // Cores maps to `spark.driver.cores` or `spark.executor.cores` for the driver and executors, respectively.
// +optional
// +kubebuilder:validation:Minimum=1
Cores *int32 `json:"cores,omitempty"`
@@ -467,6 +467,10 @@ type DriverSpec struct {
// +optional
// +kubebuilder:validation:Pattern=[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
PodName *string `json:"podName,omitempty"`
+ // CoreRequest is the physical CPU core request for the driver.
+ // Maps to `spark.kubernetes.driver.request.cores`, which is available since Spark 3.0.
+ // +optional
+ CoreRequest *string `json:"coreRequest,omitempty"`
// ServiceAccount is the name of the Kubernetes service account used by the driver pod
// when requesting executor pods from the API server.
ServiceAccount *string `json:"serviceAccount,omitempty"`
@@ -483,13 +487,16 @@ type ExecutorSpec struct {
// +kubebuilder:validation:Minimum=1
Instances *int32 `json:"instances,omitempty"`
// CoreRequest is the physical CPU core request for the executors.
+ // Maps to `spark.kubernetes.executor.request.cores`, which is available since Spark 2.4.
// +optional
CoreRequest *string `json:"coreRequest,omitempty"`
// JavaOptions is a string of extra JVM options to pass to the executors. For instance,
// GC settings or other logging.
+ // +optional
JavaOptions *string `json:"javaOptions,omitempty"`
- // DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination
- // Optional
+ // DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination.
+ // Maps to `spark.kubernetes.executor.deleteOnTermination`, which is available since Spark 3.0.
+ // +optional
DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"`
}
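
For orientation, here is a hedged sketch of how the new fields might be populated through these v1beta2 types. The module path and the inline `SparkPodSpec` embedding are assumptions about the surrounding repository, and the values are made up.

```go
// Illustrative sketch only: exercises the CoreRequest fields added above.
// Assumes DriverSpec and ExecutorSpec embed SparkPodSpec inline and that the
// import path below matches the repository's module; adjust as needed.
package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

func int32ptr(i int32) *int32 { return &i }
func strptr(s string) *string { return &s }
func boolptr(b bool) *bool    { return &b }

func main() {
	app := v1beta2.SparkApplication{
		Spec: v1beta2.SparkApplicationSpec{
			Driver: v1beta2.DriverSpec{
				SparkPodSpec: v1beta2.SparkPodSpec{Cores: int32ptr(1)}, // -> spark.driver.cores=1
				CoreRequest:  strptr("200m"),                          // -> spark.kubernetes.driver.request.cores=200m (Spark 3.0+)
			},
			Executor: v1beta2.ExecutorSpec{
				SparkPodSpec:        v1beta2.SparkPodSpec{Cores: int32ptr(2)}, // -> spark.executor.cores=2
				Instances:           int32ptr(2),
				CoreRequest:         strptr("500m"), // -> spark.kubernetes.executor.request.cores=500m (Spark 2.4+)
				DeleteOnTermination: boolptr(true),  // -> spark.kubernetes.executor.deleteOnTermination=true (Spark 3.0+)
			},
		},
	}
	fmt.Printf("driver request=%s, executor request=%s\n",
		*app.Spec.Driver.CoreRequest, *app.Spec.Executor.CoreRequest)
}
```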
diff --git a/pkg/config/constants.go b/pkg/config/constants.go
index 60403192e..dbf2f0ad0 100644
--- a/pkg/config/constants.go
+++ b/pkg/config/constants.go
@@ -74,12 +74,14 @@ const (
SparkDriverContainerImageKey = "spark.kubernetes.driver.container.image"
// SparkExecutorContainerImageKey is the configuration property for specifying a custom executor container image.
SparkExecutorContainerImageKey = "spark.kubernetes.executor.container.image"
+ // SparkDriverCoreRequestKey is the configuration property for specifying the physical CPU request for the driver.
+ SparkDriverCoreRequestKey = "spark.kubernetes.driver.request.cores"
+ // SparkExecutorCoreRequestKey is the configuration property for specifying the physical CPU request for executors.
+ SparkExecutorCoreRequestKey = "spark.kubernetes.executor.request.cores"
// SparkDriverCoreLimitKey is the configuration property for specifying the hard CPU limit for the driver pod.
SparkDriverCoreLimitKey = "spark.kubernetes.driver.limit.cores"
// SparkExecutorCoreLimitKey is the configuration property for specifying the hard CPU limit for the executor pods.
SparkExecutorCoreLimitKey = "spark.kubernetes.executor.limit.cores"
- // SparkExecutorCoreRequestKey is the configuration property for specifying the physical CPU request for executors.
- SparkExecutorCoreRequestKey = "spark.kubernetes.executor.request.cores"
// SparkDriverSecretKeyPrefix is the configuration property prefix for specifying secrets to be mounted into the
// driver.
SparkDriverSecretKeyPrefix = "spark.kubernetes.driver.secrets."
diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go
index 32efd5757..9cdffa274 100644
--- a/pkg/controller/sparkapplication/submission.go
+++ b/pkg/controller/sparkapplication/submission.go
@@ -269,6 +269,10 @@ func addDriverConfOptions(app *v1beta2.SparkApplication, submissionID string) ([
driverConfOptions = append(driverConfOptions,
fmt.Sprintf("spark.driver.cores=%d", *app.Spec.Driver.Cores))
}
+ if app.Spec.Driver.CoreRequest != nil {
+ driverConfOptions = append(driverConfOptions,
+ fmt.Sprintf("%s=%s", config.SparkDriverCoreRequestKey, *app.Spec.Driver.CoreRequest))
+ }
if app.Spec.Driver.CoreLimit != nil {
driverConfOptions = append(driverConfOptions,
fmt.Sprintf("%s=%s", config.SparkDriverCoreLimitKey, *app.Spec.Driver.CoreLimit))
@@ -333,15 +337,15 @@ func addExecutorConfOptions(app *v1beta2.SparkApplication, submissionID string)
fmt.Sprintf("%s=%s", config.SparkExecutorContainerImageKey, *app.Spec.Executor.Image))
}
- if app.Spec.Executor.CoreRequest != nil {
- executorConfOptions = append(executorConfOptions,
- fmt.Sprintf("%s=%s", config.SparkExecutorCoreRequestKey, *app.Spec.Executor.CoreRequest))
- }
if app.Spec.Executor.Cores != nil {
// Property "spark.executor.cores" does not allow float values.
executorConfOptions = append(executorConfOptions,
fmt.Sprintf("spark.executor.cores=%d", int32(*app.Spec.Executor.Cores)))
}
+ if app.Spec.Executor.CoreRequest != nil {
+ executorConfOptions = append(executorConfOptions,
+ fmt.Sprintf("%s=%s", config.SparkExecutorCoreRequestKey, *app.Spec.Executor.CoreRequest))
+ }
if app.Spec.Executor.CoreLimit != nil {
executorConfOptions = append(executorConfOptions,
fmt.Sprintf("%s=%s", config.SparkExecutorCoreLimitKey, *app.Spec.Executor.CoreLimit))
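
To make the reordered executor branches concrete, here is a sketch that mirrors (rather than calls) the cores/coreRequest/coreLimit portion of addExecutorConfOptions for a hypothetical spec; the values are made up, and each resulting entry ultimately becomes a `--conf` argument to spark-submit.

```go
// Illustrative sketch: mirrors the append order of addExecutorConfOptions
// above for a hypothetical spec (cores=2, coreRequest="500m", coreLimit="1").
package main

import "fmt"

func main() {
	var (
		cores       int32 = 2
		coreRequest       = "500m"
		coreLimit         = "1"
	)

	var executorConfOptions []string
	// spark.executor.cores is emitted first, then the Kubernetes CPU request,
	// then the hard CPU limit, matching the new ordering in the diff.
	executorConfOptions = append(executorConfOptions,
		fmt.Sprintf("spark.executor.cores=%d", cores))
	executorConfOptions = append(executorConfOptions,
		fmt.Sprintf("spark.kubernetes.executor.request.cores=%s", coreRequest))
	executorConfOptions = append(executorConfOptions,
		fmt.Sprintf("spark.kubernetes.executor.limit.cores=%s", coreLimit))

	for _, opt := range executorConfOptions {
		// Each entry is passed to spark-submit as --conf <key>=<value>.
		fmt.Println("--conf", opt)
	}
}
```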