Use a secret to mount small files in driver and executors. (#437)
* Use a secret to mount small files in driver and executors.

Allows bypassing the resource staging server in a few scenarios.

* Fix scalastyle

* Address comments and add tests.

* Lightly brush up formatting.

* Make the working directory empty so that added files don't clobber existing binaries.

* Address comments.

* Drop the test file size to one byte over the limit
mccheah authored and ash211 committed Aug 21, 2017
1 parent f8cf9db commit 455317d
Showing 21 changed files with 524 additions and 93 deletions.
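Before the file-by-file diff, here is the scenario this commit enables, sketched as an illustrative driver configuration. The master URL and file path below are assumptions for the example, not values taken from the commit:

import org.apache.spark.SparkConf

// A submission whose only submitter-local dependency is one small file, with
// no resource staging server deployed. Previously this required the staging
// server; with this commit the file is instead base64-encoded into a
// Kubernetes secret and mounted into the driver and executors.
val conf = new SparkConf()
  .setMaster("k8s://https://kubernetes.example.com:443")  // assumed cluster URL
  .set("spark.files", "file:///tmp/app.properties")       // total local file size must stay under 10240 bytes
// RESOURCE_STAGING_SERVER_URI (spark.kubernetes.resourceStagingServer.uri in
// this fork) is deliberately left unset.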
@@ -450,6 +450,22 @@ package object config extends Logging {
      .timeConf(TimeUnit.MINUTES)
      .createWithDefault(5)

  private[spark] val EXECUTOR_SUBMITTED_SMALL_FILES_SECRET =
    ConfigBuilder("spark.kubernetes.mountdependencies.smallfiles.executor.secretName")
      .doc("Name of the secret that should be mounted into the executor containers for" +
        " distributing submitted small files without the resource staging server.")
      .internal()
      .stringConf
      .createOptional

  private[spark] val EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH =
    ConfigBuilder("spark.kubernetes.mountdependencies.smallfiles.executor.secretMountPath")
      .doc(s"Mount path in the executors for the secret given by" +
        s" ${EXECUTOR_SUBMITTED_SMALL_FILES_SECRET.key}")
      .internal()
      .stringConf
      .createOptional

  private[spark] val EXECUTOR_INIT_CONTAINER_CONFIG_MAP =
    ConfigBuilder("spark.kubernetes.initcontainer.executor.configmapname")
      .doc("Name of the config map to use in the init-container that retrieves submitted files" +
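The executor-side consumer of these two internal properties is not among the files rendered on this page; a minimal sketch of how a scheduler component might read them, assuming the fork's config._ and submit._ imports (the val names here are hypothetical):

// conf.get on an optional config entry returns Option[String], so the
// bootstrap is only built when the driver actually created the secret.
val smallFilesBootstrap = for {
  secretName <- conf.get(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET)
  mountPath <- conf.get(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH)
} yield new MountSmallFilesBootstrapImpl(secretName, mountPath)
// Each executor pod/container pair would then pass through
// smallFilesBootstrap.map(_.mountSmallFilesSecret(pod, container)).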
@@ -69,6 +69,7 @@ package object constants {
private[spark] val ENV_MOUNTED_FILES_DIR = "SPARK_MOUNTED_FILES_DIR"
private[spark] val ENV_PYSPARK_FILES = "PYSPARK_FILES"
private[spark] val ENV_PYSPARK_PRIMARY = "PYSPARK_PRIMARY"
private[spark] val ENV_MOUNTED_FILES_FROM_SECRET_DIR = "SPARK_MOUNTED_FILES_FROM_SECRET_DIR"

// Bootstrapping dependencies with the init-container
private[spark] val INIT_CONTAINER_ANNOTATION = "pod.beta.kubernetes.io/init-containers"
@@ -91,6 +92,9 @@
private[spark] val DEFAULT_SHUFFLE_MOUNT_NAME = "shuffle"
private[spark] val INIT_CONTAINER_SECRET_VOLUME_NAME = "spark-init-secret"

// Bootstrapping dependencies via a secret
private[spark] val MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH = "/etc/spark-submitted-files"

// Miscellaneous
private[spark] val ANNOTATION_EXECUTOR_NODE_AFFINITY = "scheduler.alpha.kubernetes.io/affinity"
private[spark] val DRIVER_CONTAINER_NAME = "spark-kubernetes-driver"
@@ -20,7 +20,7 @@ import org.apache.spark.SparkConf
import org.apache.spark.deploy.kubernetes.ConfigurationUtils
import org.apache.spark.deploy.kubernetes.config._
import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.submitsteps.{BaseDriverConfigurationStep, DependencyResolutionStep, DriverConfigurationStep, DriverKubernetesCredentialsStep, InitContainerBootstrapStep, MountSmallLocalFilesStep, PythonStep}
import org.apache.spark.deploy.kubernetes.submit.submitsteps.initcontainer.InitContainerConfigurationStepsOrchestrator
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.util.Utils
@@ -99,40 +99,77 @@ private[spark] class DriverConfigurationStepsOrchestrator(
        Option(new PythonStep(mainPyResource, additionalPythonFiles, filesDownloadPath))
      case _ => Option.empty[DriverConfigurationStep]
    }

    val (localFilesDownloadPath, submittedDependenciesBootstrapSteps) =
      if (areAnyFilesNonContainerLocal(sparkJars ++ sparkFiles)) {
        val (submittedLocalFilesDownloadPath,
            sparkFilesResolvedFromInitContainer,
            mountSmallFilesWithoutInitContainerStep) =
          // If the resource staging server is specified, submit all local files through that.
          submissionSparkConf.get(RESOURCE_STAGING_SERVER_URI).map { _ =>
            (filesDownloadPath, sparkFiles, Option.empty[DriverConfigurationStep])
          }.getOrElse {
            // Else - use a small files bootstrap that submits the local files via a secret.
            // Then, indicate to the outer block that the init-container should not handle
            // those local files simply by filtering them out.
            val sparkFilesWithoutLocal = KubernetesFileUtils.getNonSubmitterLocalFiles(sparkFiles)
            val smallFilesSecretName = s"${kubernetesAppId}-submitted-files"
            val mountSmallFilesBootstrap = new MountSmallFilesBootstrapImpl(
              smallFilesSecretName, MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH)
            val mountSmallLocalFilesStep = new MountSmallLocalFilesStep(
              sparkFiles,
              smallFilesSecretName,
              MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH,
              mountSmallFilesBootstrap)
            (MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH,
              sparkFilesWithoutLocal.toArray,
              Some(mountSmallLocalFilesStep))
          }

        val initContainerBootstrapStep =
          if (areAnyFilesNonContainerLocal(sparkJars ++ sparkFilesResolvedFromInitContainer)) {
            val initContainerConfigurationStepsOrchestrator =
              new InitContainerConfigurationStepsOrchestrator(
                namespace,
                kubernetesResourceNamePrefix,
                sparkJars,
                sparkFilesResolvedFromInitContainer,
                jarsDownloadPath,
                filesDownloadPath,
                dockerImagePullPolicy,
                allDriverLabels,
                initContainerConfigMapName,
                INIT_CONTAINER_CONFIG_MAP_KEY,
                submissionSparkConf)
            val initContainerConfigurationSteps =
              initContainerConfigurationStepsOrchestrator.getAllConfigurationSteps()
            Some(new InitContainerBootstrapStep(initContainerConfigurationSteps,
              initContainerConfigMapName,
              INIT_CONTAINER_CONFIG_MAP_KEY))
          } else Option.empty[DriverConfigurationStep]

        (submittedLocalFilesDownloadPath,
          mountSmallFilesWithoutInitContainerStep.toSeq ++
            initContainerBootstrapStep.toSeq)
      } else {
        (filesDownloadPath, Seq.empty[DriverConfigurationStep])
      }
    val dependencyResolutionStep = new DependencyResolutionStep(
      sparkJars,
      sparkFiles,
      jarsDownloadPath,
      localFilesDownloadPath)
    Seq(
      initialSubmissionStep,
      kubernetesCredentialsStep,
      dependencyResolutionStep) ++
      submittedDependenciesBootstrapSteps ++
      pythonStep.toSeq
  }

  private def areAnyFilesNonContainerLocal(files: Seq[String]): Boolean = {
    files.exists { uri =>
      Option(Utils.resolveURI(uri).getScheme).getOrElse("file") != "local"
    }
  }
}
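The dispatch above has a few distinct outcomes worth tracing. The predicate is restated standalone here for illustration; all URIs and spark.files values are assumed examples:

import org.apache.spark.util.Utils

def needsDistribution(uri: String): Boolean =
  Option(Utils.resolveURI(uri).getScheme).getOrElse("file") != "local"

needsDistribution("local:///opt/spark/examples.jar")  // false: already on the image
needsDistribution("/tmp/app.conf")                    // true: no scheme defaults to "file"
needsDistribution("hdfs://nn:8020/data.txt")          // true: fetched at pod startup

// Resulting bootstrap steps for some assumed spark.files values (no jars, for brevity):
// 1. "local:///opt/data.txt"                        -> no bootstrap steps at all
// 2. "file:///tmp/small.conf", staging server set   -> InitContainerBootstrapStep only
// 3. "file:///tmp/small.conf", no staging server    -> MountSmallLocalFilesStep only
// 4. "file:///tmp/small.conf" + "hdfs://nn/x.txt",
//    no staging server                              -> MountSmallLocalFilesStep for the
//                                                      local file, plus an init-container
//                                                      for the remote one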
@@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.submit

import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, Pod, PodBuilder}

import org.apache.spark.deploy.kubernetes.constants._

private[spark] trait MountSmallFilesBootstrap {
  def mountSmallFilesSecret(pod: Pod, container: Container): (Pod, Container)
}

private[spark] class MountSmallFilesBootstrapImpl(
    secretName: String, secretMountPath: String) extends MountSmallFilesBootstrap {
  def mountSmallFilesSecret(pod: Pod, container: Container): (Pod, Container) = {
    val resolvedPod = new PodBuilder(pod)
      .editOrNewSpec()
        .addNewVolume()
          .withName("submitted-files")
          .withNewSecret()
            .withSecretName(secretName)
            .endSecret()
          .endVolume()
        .endSpec()
      .build()
    val resolvedContainer = new ContainerBuilder(container)
      .addNewEnv()
        .withName(ENV_MOUNTED_FILES_FROM_SECRET_DIR)
        .withValue(secretMountPath)
        .endEnv()
      .addNewVolumeMount()
        .withName("submitted-files")
        .withMountPath(secretMountPath)
        .endVolumeMount()
      .build()
    (resolvedPod, resolvedContainer)
  }
}
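A usage sketch for this bootstrap, assuming a KubernetesDriverSpec named driverSpec is in scope (the secret name is a hypothetical example):

// Illustrative only: attach the submitted-files secret to a driver pod.
val bootstrap = new MountSmallFilesBootstrapImpl(
  "spark-app-1234-submitted-files",   // assumed secret name
  "/etc/spark-submitted-files")       // MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH
val (podWithSecretVolume, containerWithMount) =
  bootstrap.mountSmallFilesSecret(driverSpec.driverPod, driverSpec.driverContainer)
// The container also gains SPARK_MOUNTED_FILES_FROM_SECRET_DIR, which the
// container entrypoint can consult to locate the mounted files.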
@@ -23,6 +23,7 @@ import io.fabric8.kubernetes.api.model.ContainerBuilder

import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.KubernetesFileUtils
import org.apache.spark.deploy.kubernetes.submit.submitsteps.initcontainer.InitContainerConfigurationStep
import org.apache.spark.util.Utils

@@ -36,11 +37,12 @@ private[spark] class DependencyResolutionStep(
    sparkJars: Seq[String],
    sparkFiles: Seq[String],
    jarsDownloadPath: String,
    localFilesDownloadPath: String) extends DriverConfigurationStep {

  override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
    val resolvedSparkJars = KubernetesFileUtils.resolveSubmittedUris(sparkJars, jarsDownloadPath)
    val resolvedSparkFiles = KubernetesFileUtils.resolveSubmittedUris(
      sparkFiles, localFilesDownloadPath)
    val sparkConfResolvedSparkDependencies = driverSpec.driverSparkConf.clone()
    if (resolvedSparkJars.nonEmpty) {
      sparkConfResolvedSparkDependencies.set("spark.jars", resolvedSparkJars.mkString(","))
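The effect of the swapped-in localFilesDownloadPath, with assumed URIs; the mapping behavior of resolveSubmittedUris is inferred from its use here, not shown in this diff:

// With localFilesDownloadPath = "/etc/spark-submitted-files" (the secret mount):
val resolved = KubernetesFileUtils.resolveSubmittedUris(
  Seq("file:///tmp/app.conf", "hdfs://nn/app.conf"), "/etc/spark-submitted-files")
// Assumed result: Seq("/etc/spark-submitted-files/app.conf", "hdfs://nn/app.conf")
// Submitter-local entries now point at the secret mount instead of the
// init-container's files download directory; remote URIs are left untouched.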
@@ -33,32 +33,32 @@ private[spark] class InitContainerBootstrapStep(

  override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
    var currentInitContainerSpec = InitContainerSpec(
      initContainerProperties = Map.empty[String, String],
      additionalDriverSparkConf = Map.empty[String, String],
      initContainer = new ContainerBuilder().build(),
      driverContainer = driverSpec.driverContainer,
      podToInitialize = driverSpec.driverPod,
      initContainerDependentResources = Seq.empty[HasMetadata])
    for (nextStep <- initContainerConfigurationSteps) {
      currentInitContainerSpec = nextStep.configureInitContainer(currentInitContainerSpec)
    }
    val configMap = PropertiesConfigMapFromScalaMapBuilder.buildConfigMap(
      initContainerConfigMapName,
      initContainerConfigMapKey,
      currentInitContainerSpec.initContainerProperties)
    val resolvedDriverSparkConf = driverSpec.driverSparkConf.clone()
      .set(EXECUTOR_INIT_CONTAINER_CONFIG_MAP, initContainerConfigMapName)
      .set(EXECUTOR_INIT_CONTAINER_CONFIG_MAP_KEY, initContainerConfigMapKey)
      .setAll(currentInitContainerSpec.additionalDriverSparkConf)
    val resolvedDriverPod = InitContainerUtil.appendInitContainer(
      currentInitContainerSpec.podToInitialize, currentInitContainerSpec.initContainer)
    driverSpec.copy(
      driverPod = resolvedDriverPod,
      driverContainer = currentInitContainerSpec.driverContainer,
      driverSparkConf = resolvedDriverSparkConf,
      otherKubernetesResources =
        driverSpec.otherKubernetesResources ++
          currentInitContainerSpec.initContainerDependentResources ++
          Seq(configMap))
  }
}
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.submit.submitsteps

import java.io.File

import com.google.common.io.{BaseEncoding, Files}
import io.fabric8.kubernetes.api.model.SecretBuilder
import scala.collection.JavaConverters._

import org.apache.spark.deploy.kubernetes.config._
import org.apache.spark.deploy.kubernetes.submit.{KubernetesFileUtils, MountSmallFilesBootstrap}
import org.apache.spark.util.Utils

private[spark] class MountSmallLocalFilesStep(
    sparkFiles: Seq[String],
    smallFilesSecretName: String,
    smallFilesSecretMountPath: String,
    mountSmallFilesBootstrap: MountSmallFilesBootstrap) extends DriverConfigurationStep {

  import MountSmallLocalFilesStep._

  override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
    val localFiles = KubernetesFileUtils.getOnlySubmitterLocalFiles(sparkFiles).map(new File(_))
    val totalSizeBytes = localFiles.map(_.length()).sum
    val totalSizeBytesString = Utils.bytesToString(totalSizeBytes)
    require(totalSizeBytes < MAX_SECRET_BUNDLE_SIZE_BYTES,
      s"Total size of all files submitted must be less than $MAX_SECRET_BUNDLE_SIZE_BYTES_STRING" +
        s" if you do not use a resource staging server. The total size of all submitted local" +
        s" files is $totalSizeBytesString. Please install a resource staging server and configure" +
        s" your application to use it via ${RESOURCE_STAGING_SERVER_URI.key}")
    val localFileBase64Contents = localFiles.map { file =>
      val fileBase64 = BaseEncoding.base64().encode(Files.toByteArray(file))
      (file.getName, fileBase64)
    }.toMap
    val localFilesSecret = new SecretBuilder()
      .withNewMetadata()
        .withName(smallFilesSecretName)
        .endMetadata()
      .withData(localFileBase64Contents.asJava)
      .build()
    val (resolvedDriverPod, resolvedDriverContainer) =
      mountSmallFilesBootstrap.mountSmallFilesSecret(
        driverSpec.driverPod, driverSpec.driverContainer)
    val resolvedSparkConf = driverSpec.driverSparkConf.clone()
      .set(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET, smallFilesSecretName)
      .set(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH, smallFilesSecretMountPath)
    driverSpec.copy(
      driverPod = resolvedDriverPod,
      driverContainer = resolvedDriverContainer,
      driverSparkConf = resolvedSparkConf,
      otherKubernetesResources = driverSpec.otherKubernetesResources ++ Seq(localFilesSecret))
  }
}

private[spark] object MountSmallLocalFilesStep {
  val MAX_SECRET_BUNDLE_SIZE_BYTES = 10240
  val MAX_SECRET_BUNDLE_SIZE_BYTES_STRING =
    Utils.bytesToString(MAX_SECRET_BUNDLE_SIZE_BYTES)
}
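Two details worth making concrete. The 10240-byte cap applies to raw file sizes; base64 inflates the payload by about a third, which still leaves the secret far below the roughly 1 MiB size limit Kubernetes places on secrets. And a roundtrip sketch of the encoding used above, with assumed file contents, using the same Guava BaseEncoding API the step imports:

import com.google.common.io.BaseEncoding

// The step stores base64 text in the secret's data map; the kubelet decodes
// it when materializing the secret as files at the mount path.
val payload = "spark.executor.cores=2".getBytes("UTF-8")  // assumed file contents
val encoded = BaseEncoding.base64().encode(payload)
val decoded = BaseEncoding.base64().decode(encoded)
assert(java.util.Arrays.equals(payload, decoded))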