[7.2.1] Enforce and await cleanup in StarlarkBaseExternalContext (#22814)

`StarlarkBaseExternalContext` now implements `AutoCloseable` and, in
`close()`:
1. Cancels all pending async tasks.
2. Awaits their termination.
3. Cleans up the working directory (always for module extensions, on
failure for repo rules).
4. Fails if there were pending async tasks in an otherwise successful
evaluation.

Previously, module extensions did none of these. Repo rules did 1 and 4, and 3
only in some cases.
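
To make the contract concrete, here is a minimal, standalone sketch of what such a
`close()` does. It is not Bazel's actual class: apart from `close()`, `markSuccessful()`
and the `shouldDeleteWorkingDirectoryOnClose` hook that appear in the diff below, every
name in it is hypothetical.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.stream.Stream;

// Sketch only: illustrates the close() contract described above, not Bazel's implementation.
final class SketchExternalContext implements AutoCloseable {
  private final List<Future<?>> pendingAsyncTasks = new ArrayList<>();
  private final Path workingDirectory;
  private boolean successful;

  SketchExternalContext(Path workingDirectory) {
    this.workingDirectory = workingDirectory;
  }

  void markSuccessful() {
    successful = true;
  }

  // Hook for subclasses: module extensions always delete, repo rules only on failure.
  boolean shouldDeleteWorkingDirectoryOnClose(boolean successful) {
    return !successful;
  }

  @Override
  public void close() throws IOException, InterruptedException {
    boolean hadPendingTasks = !pendingAsyncTasks.isEmpty();
    // 1. Cancel all pending async tasks (e.g. downloads still in flight).
    pendingAsyncTasks.forEach(task -> task.cancel(/* mayInterruptIfRunning= */ true));
    // 2. Await their termination so nothing touches the working directory afterwards.
    for (Future<?> task : pendingAsyncTasks) {
      try {
        task.get();
      } catch (CancellationException | ExecutionException e) {
        // Cancelled or failed tasks are fine here; they only need to be done.
      }
    }
    // 3. Clean up the working directory if the subclass asks for it.
    if (shouldDeleteWorkingDirectoryOnClose(successful) && Files.exists(workingDirectory)) {
      try (Stream<Path> tree = Files.walk(workingDirectory)) {
        tree.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
      }
    }
    // 4. Pending async tasks in an otherwise successful evaluation are an error.
    if (successful && hadPendingTasks) {
      throw new IllegalStateException("async tasks still pending after a successful evaluation");
    }
  }
}
```

Creating the context in a try-with-resources block at the evaluation site (also visible in
the diff below) then guarantees this cleanup runs on success, failure and Skyframe restarts
alike.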

This change required replacing the fixed-size thread pool in
`DownloadManager` with virtual threads, thereby resolving a TODO about
not using a fixed-size thread pool for the `GrpcRemoteDownloader`.
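
A rough sketch of that swap, assuming JDK 21+ for virtual threads; the helper class and
method names here are made up and are not `DownloadManager`'s API:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch only: contrasts the old and new executor choices.
class DownloadExecutors {
  // Before: one shared fixed-size pool sized off the HTTP download limit, which could make
  // GrpcRemoteDownloader requests queue behind unrelated HTTP downloads.
  static ExecutorService fixedPool(int maxParallelHttpDownloads) {
    return Executors.newFixedThreadPool(2 * maxParallelHttpDownloads);
  }

  // After: one virtual thread per submitted download task, so there is no pool to exhaust.
  static ExecutorService virtualThreads() {
    return Executors.newVirtualThreadPerTaskExecutor();
  }
}
```

With a virtual-thread executor, parallelism is bounded by the number of in-flight downloads
rather than by a pool size chosen up front.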

Work towards #22680
Work towards #22748

Closes #22772

PiperOrigin-RevId: 644669599
Change-Id: Ib71e5bf346830b92277ac2bd473e11c834cb2624

Closes #22775
fmeum authored Jun 20, 2024
1 parent 10e5676 commit 8fb4dce
Showing 9 changed files with 218 additions and 175 deletions.
@@ -71,6 +71,7 @@ protected ModuleExtensionContext(
timeoutScaling,
processWrapper,
starlarkSemantics,
ModuleExtensionEvaluationProgress.moduleExtensionEvaluationContextString(extensionId),
remoteExecutor,
/* allowWatchingPathsOutsideWorkspace= */ false);
this.extensionId = extensionId;
@@ -83,8 +84,10 @@ public Path getWorkingDirectory() {
}

@Override
protected String getIdentifyingStringForLogging() {
return ModuleExtensionEvaluationProgress.moduleExtensionEvaluationContextString(extensionId);
protected boolean shouldDeleteWorkingDirectoryOnClose(boolean successful) {
// The contents of the working directory are purely ephemeral, only the repos instantiated by
// the extension are considered its results.
return true;
}

@Override
@@ -891,15 +891,15 @@ public RunModuleExtensionResult run(
BazelModuleContext.of(bzlLoadValue.getModule()).repoMapping(),
directories,
env.getListener());
ModuleExtensionContext moduleContext;
Optional<ModuleExtensionMetadata> moduleExtensionMetadata;
var repoMappingRecorder = new Label.RepoMappingRecorder();
repoMappingRecorder.mergeEntries(bzlLoadValue.getRecordedRepoMappings());
try (Mutability mu =
Mutability.create("module extension", usagesValue.getExtensionUniqueName())) {
Mutability.create("module extension", usagesValue.getExtensionUniqueName());
ModuleExtensionContext moduleContext =
createContext(env, usagesValue, starlarkSemantics, extensionId)) {
StarlarkThread thread = new StarlarkThread(mu, starlarkSemantics);
thread.setPrintHandler(Event.makeDebugPrintHandler(env.getListener()));
moduleContext = createContext(env, usagesValue, starlarkSemantics, extensionId);
threadContext.storeInThread(thread);
new BazelStarlarkContext(
Phase.WORKSPACE,
@@ -938,39 +938,34 @@ public RunModuleExtensionResult run(
moduleExtensionMetadata = Optional.empty();
}
} catch (NeedsSkyframeRestartException e) {
// Clean up and restart by returning null.
try {
if (moduleContext.getWorkingDirectory().exists()) {
moduleContext.getWorkingDirectory().deleteTree();
}
} catch (IOException e1) {
ExternalDepsException externalDepsException =
ExternalDepsException.withCauseAndMessage(
ExternalDeps.Code.EXTERNAL_DEPS_UNKNOWN,
e1,
"Failed to clean up module context directory");
throw new SingleExtensionEvalFunctionException(
externalDepsException, Transience.TRANSIENT);
}
// Restart by returning null.
return null;
} catch (EvalException e) {
env.getListener().handle(Event.error(e.getMessageWithStack()));
throw new SingleExtensionEvalFunctionException(
ExternalDepsException.withMessage(
ExternalDeps.Code.BAD_MODULE,
"error evaluating module extension %s in %s",
extensionId.getExtensionName(),
extensionId.getBzlFileLabel()),
Transience.TRANSIENT);
}
moduleContext.markSuccessful();
return RunModuleExtensionResult.create(
moduleContext.getRecordedFileInputs(),
moduleContext.getRecordedDirentsInputs(),
moduleContext.getRecordedEnvVarInputs(),
threadContext.getGeneratedRepoSpecs(),
moduleExtensionMetadata,
repoMappingRecorder.recordedEntries());
} catch (EvalException e) {
env.getListener().handle(Event.error(e.getMessageWithStack()));
throw new SingleExtensionEvalFunctionException(
ExternalDepsException.withMessage(
ExternalDeps.Code.BAD_MODULE,
"error evaluating module extension %s in %s",
extensionId.getExtensionName(),
extensionId.getBzlFileLabel()),
Transience.TRANSIENT);
} catch (IOException e) {
ExternalDepsException externalDepsException =
ExternalDepsException.withCauseAndMessage(
ExternalDeps.Code.EXTERNAL_DEPS_UNKNOWN,
e,
"Failed to clean up module context directory");
throw new SingleExtensionEvalFunctionException(externalDepsException, Transience.TRANSIENT);
}
return RunModuleExtensionResult.create(
moduleContext.getRecordedFileInputs(),
moduleContext.getRecordedDirentsInputs(),
moduleContext.getRecordedEnvVarInputs(),
threadContext.getGeneratedRepoSpecs(),
moduleExtensionMetadata,
repoMappingRecorder.recordedEntries());
}

private ModuleExtensionContext createContext(
@@ -24,7 +24,6 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.devtools.build.lib.authandtls.StaticCredentials;
import com.google.devtools.build.lib.bazel.repository.cache.RepositoryCache;
import com.google.devtools.build.lib.bazel.repository.cache.RepositoryCache.KeyType;
@@ -48,7 +47,6 @@
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.annotation.Nullable;

@@ -59,17 +57,6 @@
* to disk.
*/
public class DownloadManager {
private static final ExecutorService DOWNLOAD_EXECUTOR =
    Executors.newFixedThreadPool(
        // There is also GrpcRemoteDownloader so if we set the thread pool to the same size as
        // the allowed number of HTTP downloads, it might unnecessarily block. No, this is not a
        // very principled approach; ideally, we'd grow the thread pool as needed with some
        // generous upper limit.
        2 * HttpDownloader.MAX_PARALLEL_DOWNLOADS,
        new ThreadFactoryBuilder().setNameFormat("download-manager-%d").build());

private final RepositoryCache repositoryCache;
private List<Path> distdir = ImmutableList.of();
private UrlRewriter rewriter;
@@ -115,6 +102,7 @@ public void setCredentialFactory(CredentialFactory credentialFactory) {
}

public Future<Path> startDownload(
ExecutorService executorService,
List<URL> originalUrls,
Map<String, List<String>> headers,
Map<URI, Map<String, List<String>>> authHeaders,
@@ -125,7 +113,7 @@ public Future<Path> startDownload(
ExtendedEventHandler eventHandler,
Map<String, String> clientEnv,
String context) {
return DOWNLOAD_EXECUTOR.submit(
return executorService.submit(
() -> {
try (SilentCloseable c = Profiler.instance().profile("fetching: " + context)) {
return downloadInExecutor(
@@ -154,33 +142,6 @@ public Path finalizeDownload(Future<Path> download) throws IOException, Interrup
}
}

public Path download(
List<URL> originalUrls,
Map<String, List<String>> headers,
Map<URI, Map<String, List<String>>> authHeaders,
Optional<Checksum> checksum,
String canonicalId,
Optional<String> type,
Path output,
ExtendedEventHandler eventHandler,
Map<String, String> clientEnv,
String context)
throws IOException, InterruptedException {
Future<Path> future =
startDownload(
originalUrls,
headers,
authHeaders,
checksum,
canonicalId,
type,
output,
eventHandler,
clientEnv,
context);
return finalizeDownload(future);
}

/**
* Downloads file to disk and returns path.
*
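
Since the shared `DOWNLOAD_EXECUTOR` and the blocking `download(...)` convenience method
are gone, callers now pass an executor into `startDownload` and later block in
`finalizeDownload`. The caller-side sketch below only illustrates that lifecycle; the real
argument list (URLs, headers, checksum, output path, event handler, ...) is elided, and
every name other than `startDownload`/`finalizeDownload` mentioned in the comments is
hypothetical.

```java
import java.nio.file.Path;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Sketch only: the start/finalize lifecycle enabled by startDownload(ExecutorService, ...).
class DownloadLifecycleSketch {
  public static void main(String[] args) throws ExecutionException, InterruptedException {
    ExecutorService executor = Executors.newVirtualThreadPerTaskExecutor();
    try {
      // Stands in for downloadManager.startDownload(executor, ...): the download begins on a
      // virtual thread while evaluation continues.
      Future<Path> pending = executor.submit(() -> Path.of("output", "archive.zip"));
      // Stands in for downloadManager.finalizeDownload(pending): block until the file is ready.
      Path result = pending.get();
      System.out.println("downloaded to " + result);
    } finally {
      // On close(), the owning context cancels and awaits anything still pending here.
      executor.shutdownNow();
    }
  }
}
```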
