
[Misc] Small improvements to forge, consensus observer and state sync. #14825

Merged 3 commits on Oct 2, 2024
Changes from all commits
4 changes: 2 additions & 2 deletions config/src/config/consensus_observer_config.rs
@@ -9,8 +9,8 @@ use serde::{Deserialize, Serialize};
 use serde_yaml::Value;
 
 // Useful constants for enabling consensus observer on different node types
-const ENABLE_ON_VALIDATORS: bool = true;
-const ENABLE_ON_VALIDATOR_FULLNODES: bool = true;
+const ENABLE_ON_VALIDATORS: bool = false;
+const ENABLE_ON_VALIDATOR_FULLNODES: bool = false;
 const ENABLE_ON_PUBLIC_FULLNODES: bool = false;
 
 #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
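For context, these constants act as defaults that differ by node type. A minimal sketch of how such per-node-type defaults might be selected when building a node config (the enum and helper below are hypothetical, not code from this PR):

// Hypothetical sketch: selecting the consensus observer default by node type.
#[derive(Clone, Copy, Debug)]
enum NodeType {
    Validator,
    ValidatorFullnode,
    PublicFullnode,
}

const ENABLE_ON_VALIDATORS: bool = false;
const ENABLE_ON_VALIDATOR_FULLNODES: bool = false;
const ENABLE_ON_PUBLIC_FULLNODES: bool = false;

// Returns the default observer setting for the given node type (illustrative only)
fn observer_enabled_by_default(node_type: NodeType) -> bool {
    match node_type {
        NodeType::Validator => ENABLE_ON_VALIDATORS,
        NodeType::ValidatorFullnode => ENABLE_ON_VALIDATOR_FULLNODES,
        NodeType::PublicFullnode => ENABLE_ON_PUBLIC_FULLNODES,
    }
}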
2 changes: 1 addition & 1 deletion config/src/config/state_sync_config.rs
@@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize};
 use serde_yaml::Value;
 
 // The maximum message size per state sync message
-const MAX_MESSAGE_SIZE: usize = 8 * 1024 * 1024; /* 8 MiB */
+const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024; /* 10 MiB */
 
 // The maximum chunk sizes for data client requests and responses
 const MAX_EPOCH_CHUNK_SIZE: u64 = 200;
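The new cap works out to 10_485_760 bytes. A standalone sanity check of the arithmetic (not code from the PR):

fn main() {
    const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024; // 10 MiB, as in the diff
    assert_eq!(MAX_MESSAGE_SIZE, 10_485_760);
    // Any serialized state sync message must fit under this cap;
    // the old 8 MiB limit left 2 MiB less headroom per message.
    assert!(8 * 1024 * 1024 < MAX_MESSAGE_SIZE);
}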
23 changes: 21 additions & 2 deletions testsuite/testcases/src/fullnode_reboot_stress_test.rs
@@ -10,6 +10,12 @@ use rand::{seq::SliceRandom, thread_rng};
 use std::{sync::Arc, time::Duration};
 use tokio::time::Instant;
 
+// The buffer (in seconds) at the end of the test to allow for graceful shutdown
+const END_OF_TEST_BUFFER_SECS: u64 = 60;
+
+// The wait time (in seconds) between fullnode reboots
+const WAIT_TIME_BETWEEN_REBOOTS_SECS: u64 = 10;
+
 pub struct FullNodeRebootStressTest;
 
 impl Test for FullNodeRebootStressTest {
@@ -30,8 +36,19 @@ impl NetworkLoadTest for FullNodeRebootStressTest {
         _report: &mut TestReport,
         duration: Duration,
     ) -> Result<()> {
+        // Start the test timer
         let start = Instant::now();
 
+        // Ensure the total test duration is at least as long as the buffer
+        let end_of_test_buffer = Duration::from_secs(END_OF_TEST_BUFFER_SECS);
+        if duration <= end_of_test_buffer {
+            panic!(
+                "Total test duration must be at least: {:?}! Given duration: {:?}",
+                end_of_test_buffer, duration
+            );
+        }
+
+        // Collect all the fullnodes
         let all_fullnodes = {
             swarm
                 .read()
@@ -41,7 +58,9 @@ impl NetworkLoadTest for FullNodeRebootStressTest {
                 .collect::<Vec<_>>()
         };
 
-        while start.elapsed() < duration {
+        // Reboot fullnodes until the test duration is reached
+        let test_reboot_duration = duration - end_of_test_buffer;
+        while start.elapsed() < test_reboot_duration {
             {
                 let swarm = swarm.read().await;
                 let fullnode_to_reboot = {
@@ -53,7 +72,7 @@ impl NetworkLoadTest for FullNodeRebootStressTest {
                 fullnode_to_reboot.stop().await?;
                 fullnode_to_reboot.start().await?;
             }
-            tokio::time::sleep(Duration::from_secs(10)).await;
+            tokio::time::sleep(Duration::from_secs(WAIT_TIME_BETWEEN_REBOOTS_SECS)).await;
         }
 
         Ok(())
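The timing change reserves a fixed buffer at the end of the run, so the last reboot always has time to settle before the test ends. A standalone sketch of the same window arithmetic (the reboot_window helper is hypothetical; the constant and panic message are copied from the diff):

use std::time::Duration;

const END_OF_TEST_BUFFER_SECS: u64 = 60;

// Returns the window during which fullnodes may be rebooted,
// panicking if the total duration cannot accommodate the end-of-test buffer
fn reboot_window(duration: Duration) -> Duration {
    let end_of_test_buffer = Duration::from_secs(END_OF_TEST_BUFFER_SECS);
    if duration <= end_of_test_buffer {
        panic!(
            "Total test duration must be at least: {:?}! Given duration: {:?}",
            end_of_test_buffer, duration
        );
    }
    duration - end_of_test_buffer
}

fn main() {
    // A 5-minute test yields a 4-minute reboot window plus the 60s buffer.
    assert_eq!(reboot_window(Duration::from_secs(300)), Duration::from_secs(240));
}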