Small wins in Engine CI job execution (#7846)
Reorder steps of Engine tests, run dry-run benchmarks only on Linux.

---------

Co-authored-by: Pavel Marek <[email protected]>
Co-authored-by: Michał W. Urbańczyk <[email protected]>
3 people authored Sep 25, 2023
1 parent 7a31dcd commit df4183e
Showing 3 changed files with 90 additions and 74 deletions.
2 changes: 1 addition & 1 deletion build/build/src/engine.rs
@@ -127,7 +127,7 @@ pub struct BuildConfigurationFlags {
pub build_benchmarks: bool,
/// Whether the Enso-written benchmarks should be checked whether they compile.
///
- /// Note that this does not benchmark, only ensures that they are buildable.
+ /// Note that this does not run benchmarks, only ensures that they are buildable.
/// Also, this does nothing if `execute_benchmarks` contains `Benchmarks::Enso`.
pub check_enso_benchmarks: bool,
/// Which benchmarks should be run.
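For orientation, this flag pair is consumed near the end of context.rs (see the hunks below), where actually executing the Enso benchmarks supersedes the compile-only dry run. A condensed sketch of that dispatch, using the names from the diff:

// Condensed from context.rs below: a real run of the Enso benchmarks already
// proves they compile, so the dry run only happens when no real run was requested.
if self.config.execute_benchmarks.contains(&Benchmarks::Enso) {
    enso.run_benchmarks(BenchmarkOptions { dry_run: false }).await?;
} else if self.config.check_enso_benchmarks {
    enso.run_benchmarks(BenchmarkOptions { dry_run: true }).await?;
}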
153 changes: 81 additions & 72 deletions build/build/src/engine/context.rs
@@ -290,6 +290,8 @@ impl RunContext
self.config.build_engine_package() && big_memory_machine && TARGET_OS != OS::Windows;


+ // === Build project-manager distribution and native image ===
+ debug!("Building project-manager distribution and Native Image");
if big_memory_machine {
let mut tasks = vec![];

@@ -314,30 +316,7 @@
if self.config.build_launcher_package() {
tasks.push("buildLauncherDistribution");
}

- // This just compiles benchmarks, not run them. At least we'll know that they can be
- // run. Actually running them, as part of this routine, would be too heavy.
- // TODO [mwu] It should be possible to run them through context config option.
- if self.config.build_benchmarks {
- tasks.extend([
- "runtime/Benchmark/compile",
- "language-server/Benchmark/compile",
- "searcher/Benchmark/compile",
- "std-benchmarks/Benchmark/compile",
- ]);
- }
-
- // We want benchmarks to run only after the other build tasks are done, as they are
- // really CPU-heavy.
- let build_command = (!tasks.is_empty()).then_some(Sbt::concurrent_tasks(tasks));
- let benchmark_tasks = self.config.execute_benchmarks.iter().flat_map(|b| b.sbt_task());
- let command_sequence = build_command.as_deref().into_iter().chain(benchmark_tasks);
- let final_command = Sbt::sequential_tasks(command_sequence);
- if !final_command.is_empty() {
- sbt.call_arg(final_command).await?;
- } else {
- debug!("No SBT tasks to run.");
- }
+ sbt.call_arg(Sbt::concurrent_tasks(tasks)).await?;
} else {
// If we are run on a weak machine (like GH-hosted runner), we need to build things one
// by one.
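The Sbt::concurrent_tasks and Sbt::sequential_tasks helpers used above are not part of this diff. Below is a plausible sketch of what they might produce, assuming sbt's standard batch semantics (the `all` command runs tasks in parallel, `;` chains commands sequentially); the bodies and exact output format are assumptions, not the repository's actual implementation:

struct Sbt;

impl Sbt {
    // One sbt command that runs every task concurrently: "all t1 t2 ...".
    // A call site should guard with `!tasks.is_empty()`, as the diff below
    // does, since a bare "all" would be meaningless.
    fn concurrent_tasks<'a>(tasks: impl IntoIterator<Item = &'a str>) -> String {
        let mut command = String::from("all");
        for task in tasks {
            command.push(' ');
            command.push_str(task);
        }
        command
    }

    // One sbt invocation that runs commands back to back: "t1; t2; ...".
    fn sequential_tasks<'a>(tasks: impl IntoIterator<Item = &'a str>) -> String {
        tasks.into_iter().collect::<Vec<_>>().join("; ")
    }
}

// Usage, with task names taken from this diff:
//   Sbt::concurrent_tasks(["buildProjectManagerDistribution", "buildLauncherDistribution"])
//     => "all buildProjectManagerDistribution buildLauncherDistribution"
//   Sbt::sequential_tasks(["all a b", "runtime/Benchmark/compile"])
//     => "all a b; runtime/Benchmark/compile"

Sequencing the CPU-heavy benchmark tasks after the parallel build batch, as the surrounding code does, keeps them from competing with compilation for cores.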
@@ -363,27 +342,8 @@

// Prepare Project Manager Distribution
sbt.call_arg("buildProjectManagerDistribution").await?;

- if self.config.build_benchmarks {
- // Check Runtime Benchmark Compilation
- sbt.call_arg("runtime/Benchmark/compile").await?;
-
- // Check Language Server Benchmark Compilation
- sbt.call_arg("language-server/Benchmark/compile").await?;
-
- // Check Searcher Benchmark Compilation
- sbt.call_arg("searcher/Benchmark/compile").await?;
-
- // Check Enso JMH benchmark compilation
- sbt.call_arg("std-benchmarks/Benchmark/compile").await?;
- }
-
- for benchmark in &self.config.execute_benchmarks {
- if let Some(task) = benchmark.sbt_task() {
- sbt.call_arg(task).await?;
- }
- }
- } // End of Sbt run.
+ }
+ // === End of Build project-manager distribution and native image ===

let ret = self.expected_artifacts();

@@ -414,13 +374,87 @@ impl RunContext {
}

let enso = BuiltEnso { paths: self.paths.clone() };

+ // === Unit tests and Enso tests ===
+ debug!("Running unit tests and Enso tests.");
+ if self.config.test_scala {
+ // Run unit tests
+ sbt.call_arg("set Global / parallelExecution := false; test").await?;
+ }
+ if self.config.test_standard_library {
+ enso.run_tests(IrCaches::No, &sbt, PARALLEL_ENSO_TESTS).await?;
+ }
+ // If we are run in CI conditions and we prepared some test results, we want to upload
+ // them as a separate artifact to ease debugging.
+ if let Some(test_results_dir) = test_results_dir && is_in_env() {
+ // Each platform gets its own log results, so we need to generate unique names.
+ let name = format!("Test_Results_{TARGET_OS}");
+ if let Err(err) = ide_ci::actions::artifacts::upload_compressed_directory(&test_results_dir, name)
+ .await {
+ // We wouldn't want to fail the whole build if we can't upload the test results.
+ // Still, it should be somehow visible in the build summary.
+ ide_ci::actions::workflow::message(MessageLevel::Warning, format!("Failed to upload test results: {err}"));
+ }
+ }
+
+ perhaps_test_java_generated_from_rust_job.await.transpose()?;
+
+ // === Run benchmarks ===
+ debug!("Running benchmarks.");
+ if big_memory_machine {
+ let mut tasks = vec![];
+ // This just compiles the benchmarks, it does not run them. At least we'll know that they can be
+ // run. Actually running them, as part of this routine, would be too heavy.
+ // TODO [mwu] It should be possible to run them through context config option.
+ if self.config.build_benchmarks {
+ tasks.extend([
+ "runtime/Benchmark/compile",
+ "language-server/Benchmark/compile",
+ "searcher/Benchmark/compile",
+ "std-benchmarks/Benchmark/compile",
+ ]);
+ }
+
+ let build_command = (!tasks.is_empty()).then_some(Sbt::concurrent_tasks(tasks));
+
+ // We want benchmarks to run only after the other build tasks are done, as they are
+ // really CPU-heavy.
+ let benchmark_tasks = self.config.execute_benchmarks.iter().flat_map(|b| b.sbt_task());
+ let command_sequence = build_command.as_deref().into_iter().chain(benchmark_tasks);
+ let final_command = Sbt::sequential_tasks(command_sequence);
+ if !final_command.is_empty() {
+ sbt.call_arg(final_command).await?;
+ } else {
+ debug!("No SBT tasks to run.");
+ }
+ } else {
+ if self.config.build_benchmarks {
+ // Check Runtime Benchmark Compilation
+ sbt.call_arg("runtime/Benchmark/compile").await?;
+
+ // Check Language Server Benchmark Compilation
+ sbt.call_arg("language-server/Benchmark/compile").await?;
+
+ // Check Searcher Benchmark Compilation
+ sbt.call_arg("searcher/Benchmark/compile").await?;
+
+ // Check Enso JMH benchmark compilation
+ sbt.call_arg("std-benchmarks/Benchmark/compile").await?;
+ }
+
+ for benchmark in &self.config.execute_benchmarks {
+ if let Some(task) = benchmark.sbt_task() {
+ sbt.call_arg(task).await?;
+ }
+ }
+ }

if self.config.execute_benchmarks.contains(&Benchmarks::Enso) {
enso.run_benchmarks(BenchmarkOptions { dry_run: false }).await?;
} else if self.config.check_enso_benchmarks {
enso.run_benchmarks(BenchmarkOptions { dry_run: true }).await?;
}


// If we were running any benchmarks, they are complete by now. Upload the report.
if is_in_env() {
for bench in &self.config.execute_benchmarks {
@@ -462,18 +496,9 @@
}
}

- if self.config.test_scala {
- // Test Enso
- sbt.call_arg("set Global / parallelExecution := false; test").await?;
- }
-
- perhaps_test_java_generated_from_rust_job.await.transpose()?;
-
// === Build Distribution ===
- if self.config.test_standard_library {
- enso.run_tests(IrCaches::No, &sbt, PARALLEL_ENSO_TESTS).await?;
- }

+ debug!("Building distribution");
if self.config.build_engine_package() {
let std_libs =
&self.repo_root.built_distribution.enso_engine_triple.engine_package.lib.standard;
@@ -487,22 +512,6 @@
}
}

- if self.config.test_standard_library {
- enso.run_tests(IrCaches::Yes, &sbt, PARALLEL_ENSO_TESTS).await?;
- }
-
- // If we are run in CI conditions and we prepared some test results, we want to upload
- // them as a separate artifact to ease debugging.
- if let Some(test_results_dir) = test_results_dir && is_in_env() {
- // Each platform gets its own log results, so we need to generate unique names.
- let name = format!("Test_Results_{TARGET_OS}");
- if let Err(err) = ide_ci::actions::artifacts::upload_compressed_directory(&test_results_dir, name)
- .await {
- // We wouldn't want to fail the whole build if we can't upload the test results.
- // Still, it should be somehow visible in the build summary.
- ide_ci::actions::workflow::message(MessageLevel::Warning, format!("Failed to upload test results: {err}"));
- }
- }

// if build_native_runner {
// let factorial_input = "6";
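Taken together, the context.rs changes reorder the engine job; a comment-level sketch of the new sequence, reconstructed from the section markers in the hunks above (not verbatim source):

// 1. Build the project-manager distribution and native image (concurrent
//    sbt tasks on big-memory machines, one task at a time on weak runners).
// 2. Run unit tests and Enso tests, then upload the per-platform test
//    results as a CI artifact.
// 3. Compile the benchmarks and execute any requested ones, which are
//    CPU-heavy, so they now run after the tests instead of inside the
//    build step.
// 4. Build the distribution packages.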
9 changes: 8 additions & 1 deletion build/cli/src/lib.rs
@@ -424,7 +424,14 @@ impl Processor {
test_standard_library: true,
test_java_generated_from_rust: true,
build_benchmarks: true,
- execute_benchmarks: once(Benchmarks::Runtime).collect(),
+ execute_benchmarks: {
+ // Run benchmarks only on Linux.
+ let mut ret = BTreeSet::new();
+ if TARGET_OS == OS::Linux {
+ ret.insert(Benchmarks::Runtime);
+ }
+ ret
+ },
execute_benchmarks_once: true,
check_enso_benchmarks: true,
verify_packages: true,
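As an aside, the same Linux-gated set can be built without the mutable block; a sketch of an equivalent construction (assuming Benchmarks implements Ord, which storing it in a BTreeSet already requires):

use std::collections::BTreeSet;

// `then_some` yields Some(Benchmarks::Runtime) only on Linux, and collecting
// an Option inserts zero or one elements into the set.
let execute_benchmarks: BTreeSet<Benchmarks> =
    (TARGET_OS == OS::Linux).then_some(Benchmarks::Runtime).into_iter().collect();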
