diff --git a/config/src/config/network_config.rs b/config/src/config/network_config.rs
index 9bd212636f4b7..4fb512e80f3e5 100644
--- a/config/src/config/network_config.rs
+++ b/config/src/config/network_config.rs
@@ -53,10 +53,6 @@ pub const MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; /* 64 MiB */
 pub const CONNECTION_BACKOFF_BASE: u64 = 2;
 pub const IP_BYTE_BUCKET_RATE: usize = 102400 /* 100 KiB */;
 pub const IP_BYTE_BUCKET_SIZE: usize = IP_BYTE_BUCKET_RATE;
-pub const INBOUND_TCP_RX_BUFFER_SIZE: u32 = 3 * 1024 * 1024; // 3MB ~6MB/s with 500ms latency
-pub const INBOUND_TCP_TX_BUFFER_SIZE: u32 = 512 * 1024; // 1MB use a bigger spoon
-pub const OUTBOUND_TCP_RX_BUFFER_SIZE: u32 = 3 * 1024 * 1024; // 3MB ~6MB/s with 500ms latency
-pub const OUTBOUND_TCP_TX_BUFFER_SIZE: u32 = 1024 * 1024; // 1MB use a bigger spoon
 
 #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 #[serde(default, deny_unknown_fields)]
@@ -89,6 +85,14 @@ pub struct NetworkConfig {
     pub network_id: NetworkId,
     /// Number of threads to run for networking
     pub runtime_threads: Option<usize>,
+    /// Overrides for the size of the inbound and outbound buffers for each peer.
+    /// NOTE: The defaults are None, so socket options are not called. Change to Some values with
+    /// caution. Experiments have shown that relying on Linux's default TCP auto-tuning can perform
+    /// better than setting these. In particular, for larger values to take effect, the
+    /// `net.core.rmem_max` and `net.core.wmem_max` sysctl values may need to be increased. On a
+    /// vanilla GCP machine, these are set to 212992. Without increasing the sysctl values,
+    /// setting a value here will constrain the buffer size to the sysctl value. (In contrast,
+    /// the default auto-tuning can grow the buffers beyond these values.)
     pub inbound_rx_buffer_size_bytes: Option<u32>,
     pub inbound_tx_buffer_size_bytes: Option<u32>,
     pub outbound_rx_buffer_size_bytes: Option<u32>,
     pub outbound_tx_buffer_size_bytes: Option<u32>,
@@ -157,10 +161,10 @@ impl NetworkConfig {
             inbound_rate_limit_config: None,
             outbound_rate_limit_config: None,
             max_message_size: MAX_MESSAGE_SIZE,
-            inbound_rx_buffer_size_bytes: Some(INBOUND_TCP_RX_BUFFER_SIZE),
-            inbound_tx_buffer_size_bytes: Some(INBOUND_TCP_TX_BUFFER_SIZE),
-            outbound_rx_buffer_size_bytes: Some(OUTBOUND_TCP_RX_BUFFER_SIZE),
-            outbound_tx_buffer_size_bytes: Some(OUTBOUND_TCP_TX_BUFFER_SIZE),
+            inbound_rx_buffer_size_bytes: None,
+            inbound_tx_buffer_size_bytes: None,
+            outbound_rx_buffer_size_bytes: None,
+            outbound_tx_buffer_size_bytes: None,
             max_parallel_deserialization_tasks: None,
         };
 
diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs
index 06c2278450aa4..9f8621d50b8f4 100644
--- a/testsuite/forge-cli/src/main.rs
+++ b/testsuite/forge-cli/src/main.rs
@@ -1367,12 +1367,12 @@ fn netbench_config_100_megabytes_per_sec(netbench_config: &mut NetbenchConfig) {
     netbench_config.direct_send_per_second = 1000;
 }
 
-fn netbench_config_2_megabytes_per_sec(netbench_config: &mut NetbenchConfig) {
+fn netbench_config_4_megabytes_per_sec(netbench_config: &mut NetbenchConfig) {
     netbench_config.enabled = true;
     netbench_config.max_network_channel_size = 1000;
     netbench_config.enable_direct_send_testing = true;
     netbench_config.direct_send_data_size = 100000;
-    netbench_config.direct_send_per_second = 20;
+    netbench_config.direct_send_per_second = 40;
 }
 
 fn net_bench() -> ForgeConfig {
@@ -1393,7 +1393,7 @@ fn net_bench_two_region_env() -> ForgeConfig {
         .with_validator_override_node_config_fn(Arc::new(|config, _| {
             // Not using 100 MBps here, as it will lead to throughput collapse
             let mut netbench_config = NetbenchConfig::default();
-            netbench_config_2_megabytes_per_sec(&mut netbench_config);
+            netbench_config_4_megabytes_per_sec(&mut netbench_config);
             config.netbench = Some(netbench_config);
         }))
 }
@@ -2062,13 +2062,7 @@ fn pfn_performance(
     add_network_emulation: bool,
 ) -> ForgeConfig {
     // Determine the minimum expected TPS
-    let min_expected_tps = if add_cpu_chaos {
-        3000
-    } else if add_network_emulation {
-        4000
-    } else {
-        4500
-    };
+    let min_expected_tps = 4500;
 
     // Create the forge config
     ForgeConfig::default()
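
For context on the sysctl interaction described in the new doc comment, here is a minimal standalone sketch, not part of this diff and assuming the `socket2` crate rather than whatever socket wrapper the codebase actually uses, of how an explicit buffer request gets clamped on Linux:

```rust
use socket2::{Domain, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;

    // Request a 3 MiB receive buffer, matching the old INBOUND_TCP_RX_BUFFER_SIZE.
    socket.set_recv_buffer_size(3 * 1024 * 1024)?;

    // On a machine where net.core.rmem_max is 212992 (the GCP default cited in
    // the doc comment), the kernel silently clamps the request: the value read
    // back is roughly 2 * 212992 (Linux doubles SO_RCVBUF for bookkeeping),
    // not the 3 MiB asked for. Leaving the option unset (the new None default)
    // lets auto-tuning grow the buffer past rmem_max instead.
    println!("effective receive buffer: {} bytes", socket.recv_buffer_size()?);
    Ok(())
}
```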
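The helper rename in forge-cli is just the throughput arithmetic made visible; a quick sanity check (a hypothetical standalone snippet, not from the codebase):

```rust
fn main() {
    let direct_send_data_size: u64 = 100_000; // bytes per message
    let direct_send_per_second: u64 = 40; // messages per second
    let bytes_per_second = direct_send_data_size * direct_send_per_second;
    assert_eq!(bytes_per_second, 4_000_000); // 4 MB/s, hence the new name

    // The old value (100_000 * 20 = 2_000_000 B/s) matched the previous
    // netbench_config_2_megabytes_per_sec name.
}
```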