Skip to content

Commit

Permalink
Fix flaky tests
Browse files Browse the repository at this point in the history
* Avoid long delay in `test_archive_recovery` due to competing write
  transactions, by tweaking backoff parameters
* In aggregates test, wait for aggregator task to update statistics
  before checking
* Don't fetch parent leaves that have already been pruned
  • Loading branch information
jbearer committed Dec 4, 2024
1 parent 2eb2f4a commit c7008da
Show file tree
Hide file tree
Showing 3 changed files with 41 additions and 3 deletions.
12 changes: 12 additions & 0 deletions src/data_source/fetching/leaf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,18 @@ pub(super) fn trigger_fetch_for_parent<Types, S, P>(
// Check if we already have the parent.
match fetcher.storage.read().await {
Ok(mut tx) => {
// Don't bother fetching a pruned leaf.
if let Ok(pruned_height) = tx.load_pruned_height().await {
if !pruned_height.map_or(true, |ph| height > ph) {
tracing::info!(
height,
?pruned_height,
"not fetching pruned parent leaf"
);
return;
}
}

if tx.get_leaf(((height - 1) as usize).into()).await.is_ok() {
return;
}
Expand Down
15 changes: 13 additions & 2 deletions src/fetching/provider/query_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -944,10 +944,21 @@ mod test {
.pruner_cfg(
PrunerCfg::new()
.with_target_retention(Duration::from_secs(0))
.with_interval(Duration::from_secs(1)),
.with_interval(Duration::from_secs(5)),
)
.unwrap()
.connect(provider.clone())
.builder(provider.clone())
.await
.unwrap()
// Set a fast retry for failed operations. Occasionally storage operations will fail due
// to conflicting write-mode transactions running concurrently. This is ok as they will
// be retried. Having a fast retry interval speeds up the test.
.with_min_retry_interval(Duration::from_millis(100))
// Randomize retries a lot. This will temporally separate competing write
// transactions with high probability, so that one of them quickly gets exclusive access
// to the database.
.with_retry_randomization_factor(3.)
.build()
.await
.unwrap();

Expand Down
17 changes: 16 additions & 1 deletion src/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@ mod test {
use std::time::Duration;
use surf_disco::Client;
use tempfile::TempDir;
use tide_disco::App;
use tide_disco::{App, Error as _};
use tokio::time::sleep;
use toml::toml;

Expand Down Expand Up @@ -469,6 +469,21 @@ mod test {
}
tracing::info!(?tx_heights, ?tx_sizes, "transactions sequenced");

// Wait for the aggregator to process the inserted blocks.
while let Err(err) = client
.get::<usize>(&format!("node/transactions/count/{}", tx_heights[1]))
.send()
.await
{
if err.status() == StatusCode::NOT_FOUND {
tracing::info!(?tx_heights, "waiting for aggregator");
sleep(Duration::from_secs(1)).await;
continue;
} else {
panic!("unexpected error: {err:#}");
}
}

// Range including empty blocks (genesis block) only
assert_eq!(
0,
Expand Down

0 comments on commit c7008da

Please sign in to comment.