feat(s2n-quic): expose configuration options for congestion control
toidiu committed Jun 3, 2024
1 parent d90729d commit 8932619
Showing 14 changed files with 421 additions and 153 deletions.
2 changes: 1 addition & 1 deletion dc/s2n-quic-dc/src/congestion.rs
@@ -20,7 +20,7 @@ pub struct Controller {
impl Controller {
#[inline]
pub fn new(mtu: u16) -> Self {
let mut controller = BbrCongestionController::new(mtu);
let mut controller = BbrCongestionController::new(mtu, Default::default());
let publisher = &mut NoopPublisher;
controller.on_mtu_update(mtu, publisher);
Self { controller }
93 changes: 71 additions & 22 deletions quic/s2n-quic-core/src/recovery/bbr.rs
@@ -95,21 +95,28 @@ enum State {

impl State {
/// The dynamic gain factor used to scale BBR.bw to produce BBR.pacing_rate
fn pacing_gain(&self) -> Ratio<u64> {
fn pacing_gain(&self, app_settings: &ApplicationSettings) -> Ratio<u64> {
match self {
State::Startup => startup::PACING_GAIN,
State::Drain => drain::PACING_GAIN,
State::ProbeBw(probe_bw_state) => probe_bw_state.cycle_phase().pacing_gain(),
State::ProbeBw(probe_bw_state) => {
probe_bw_state.cycle_phase().pacing_gain(app_settings)
}
State::ProbeRtt(_) => probe_rtt::PACING_GAIN,
}
}

/// The dynamic gain factor used to scale the estimated BDP to produce a congestion window (cwnd)
fn cwnd_gain(&self) -> Ratio<u64> {
fn cwnd_gain(&self, app_settings: &ApplicationSettings) -> Ratio<u64> {
let cwnd_gain = app_settings
.cwnd_gain
.map(|cwnd_gain| Ratio::new_raw(cwnd_gain as u64, 100))
.unwrap_or(probe_bw::CWND_GAIN);

match self {
State::Startup => startup::CWND_GAIN,
State::Drain => drain::CWND_GAIN,
State::ProbeBw(_) => probe_bw::CWND_GAIN,
State::ProbeBw(_) => cwnd_gain,
State::ProbeRtt(_) => probe_rtt::CWND_GAIN,
}
}
@@ -210,6 +217,14 @@ impl IntoEvent<event::builder::BbrState> for &State {
}
}

#[derive(Default, Debug, Clone)]
pub struct ApplicationSettings {
pub cwnd: Option<u32>,
pub cwnd_gain: Option<u32>,
pub loss_threshold: Option<u32>,
pub up_pacing_gain: Option<u32>,
}

/// A congestion controller that implements "Bottleneck Bandwidth and Round-trip propagation time"
/// version 2 (BBRv2) as specified in <https://datatracker.ietf.org/doc/draft-cardwell-iccrg-bbr-congestion-control/>.
///
@@ -250,6 +265,7 @@ pub struct BbrCongestionController {
//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#2.1
//# True if the connection has fully utilized its cwnd at any point in the last packet-timed round trip.
cwnd_limited_in_round: bool,
app_settings: ApplicationSettings,
}

type BytesInFlight = Counter<u32>;
@@ -325,7 +341,7 @@ impl CongestionController for BbrCongestionController {
self.pacer.initialize_pacing_rate(
self.cwnd,
rtt_estimator.smoothed_rtt(),
self.state.pacing_gain(),
self.state.pacing_gain(&self.app_settings),
publisher,
);
}
@@ -440,7 +456,7 @@ impl CongestionController for BbrCongestionController {
//# BBRSetCwnd()
self.pacer.set_pacing_rate(
self.data_rate_model.bw(),
self.state.pacing_gain(),
self.state.pacing_gain(&self.app_settings),
self.full_pipe_estimator.filled_pipe(),
publisher,
);
@@ -538,7 +554,7 @@ impl BbrCongestionController {
/// Constructs a new `BbrCongestionController`
/// max_datagram_size is the current max_datagram_size, and is
/// expected to be 1200 when the congestion controller is created.
pub fn new(max_datagram_size: u16) -> Self {
pub fn new(max_datagram_size: u16, app_settings: ApplicationSettings) -> Self {
//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.2.1
//# BBROnInit():
//# init_windowed_max_filter(filter=BBR.MaxBwFilter, value=0, time=0)
@@ -568,7 +584,7 @@ impl BbrCongestionController {
bw_estimator: Default::default(),
full_pipe_estimator: Default::default(),
bytes_in_flight: Default::default(),
cwnd: Self::initial_window(max_datagram_size),
cwnd: Self::initial_window(max_datagram_size, &app_settings),
prior_cwnd: 0,
recovery_state: recovery::State::Recovered,
congestion_state: Default::default(),
@@ -579,9 +595,10 @@
max_datagram_size,
idle_restart: false,
bw_probe_samples: false,
pacer: Pacer::new(max_datagram_size),
pacer: Pacer::new(max_datagram_size, &app_settings),
try_fast_path: false,
cwnd_limited_in_round: false,
app_settings,
}
}
/// The bandwidth-delay product
@@ -606,7 +623,7 @@ impl BbrCongestionController {
gain.checked_mul(&(bw * min_rtt).into())
.map_or(u64::MAX, |bdp| bdp.to_integer())
} else {
Self::initial_window(self.max_datagram_size).into()
Self::initial_window(self.max_datagram_size, &self.app_settings).into()
}
}

@@ -639,7 +656,10 @@ impl BbrCongestionController {
// max_inflight is calculated and returned from this function
// as needed, rather than maintained as a field

let bdp = self.bdp_multiple(self.data_rate_model.bw(), self.state.cwnd_gain());
let bdp = self.bdp_multiple(
self.data_rate_model.bw(),
self.state.cwnd_gain(&self.app_settings),
);
let inflight = bdp.saturating_add(self.data_volume_model.extra_acked());
self.quantization_budget(inflight)
}
@@ -723,12 +743,14 @@ impl BbrCongestionController {
max_datagram_size: u16,
loss_bursts: u8,
loss_burst_limit: u8,
app_settings: &ApplicationSettings,
) -> bool {
if Self::is_loss_too_high(
rate_sample.lost_bytes,
rate_sample.bytes_in_flight,
loss_bursts,
loss_burst_limit,
app_settings,
) {
return true;
}
@@ -753,12 +775,13 @@ impl BbrCongestionController {
bytes_inflight: u32,
loss_bursts: u8,
loss_burst_limit: u8,
app_settings: &ApplicationSettings,
) -> bool {
//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.5.6.2
//# IsInflightTooHigh()
//# return (rs.lost > rs.tx_in_flight * BBRLossThresh)
loss_bursts >= loss_burst_limit
&& lost_bytes > (LOSS_THRESH * bytes_inflight).to_integer() as u64
&& lost_bytes > (Self::loss_thresh(app_settings) * bytes_inflight).to_integer() as u64
}
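// Illustrative sketch, not part of this commit: a freestanding mirror of the
// two-part gate in is_loss_too_high above, with hypothetical numbers. The 2%
// ratio is the value an unset `loss_threshold` falls back to (BBRLossThresh in
// the cited draft); an application-supplied value is read the same way, in
// hundredths. Requires the num-rational crate.
use num_rational::Ratio;

fn loss_too_high_sketch(lost_bytes: u64, bytes_inflight: u32, loss_bursts: u8) -> bool {
    let loss_thresh: Ratio<u32> = Ratio::new_raw(2, 100); // 2%
    let loss_burst_limit: u8 = 8; // plausible limit; the tests below use 7 (not enough) and 8 (enough)

    // Both gates must trip: enough loss bursts AND losses above the threshold.
    loss_bursts >= loss_burst_limit
        && lost_bytes > (loss_thresh * bytes_inflight).to_integer() as u64
}

#[test]
fn loss_too_high_sketch_values() {
    assert!(!loss_too_high_sketch(2_500, 100_000, 7)); // too few loss bursts
    assert!(!loss_too_high_sketch(1_500, 100_000, 8)); // under 2% of bytes in flight
    assert!(loss_too_high_sketch(2_500, 100_000, 8)); // both conditions hold
}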

//= https://www.rfc-editor.org/rfc/rfc9002#section-7.2
@@ -767,12 +790,13 @@ impl BbrCongestionController {
//# while limiting the window to the larger of 14,720 bytes or twice the
//# maximum datagram size.
#[inline]
fn initial_window(max_datagram_size: u16) -> u32 {
fn initial_window(max_datagram_size: u16, app_settings: &ApplicationSettings) -> u32 {
const INITIAL_WINDOW_LIMIT: u32 = 14720;
min(
let default = min(
10 * max_datagram_size as u32,
max(INITIAL_WINDOW_LIMIT, 2 * max_datagram_size as u32),
)
);
app_settings.cwnd.unwrap_or(default)
}
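// Illustrative sketch, not part of this commit: worked numbers for
// initial_window above. At a 1200-byte max_datagram_size the default is
// min(10 * 1200, max(14_720, 2 * 1200)) = 12_000 bytes; a configured `cwnd`
// simply replaces that result.
fn initial_window_sketch(max_datagram_size: u16, cwnd_override: Option<u32>) -> u32 {
    const INITIAL_WINDOW_LIMIT: u32 = 14720;
    let default = core::cmp::min(
        10 * max_datagram_size as u32,
        core::cmp::max(INITIAL_WINDOW_LIMIT, 2 * max_datagram_size as u32),
    );
    cwnd_override.unwrap_or(default)
}

#[test]
fn initial_window_sketch_values() {
    assert_eq!(initial_window_sketch(1200, None), 12_000); // 10 * 1200 < 14_720
    assert_eq!(initial_window_sketch(1200, Some(32_768)), 32_768); // override wins
}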

/// The minimal cwnd value BBR targets
@@ -804,7 +828,7 @@ impl BbrCongestionController {
//# BBRModulateCwndForRecovery()

let max_inflight = self.max_inflight().try_into().unwrap_or(u32::MAX);
let initial_cwnd = Self::initial_window(self.max_datagram_size);
let initial_cwnd = Self::initial_window(self.max_datagram_size, &self.app_settings);
let mut cwnd = self.cwnd;

// Enable fast path if the cwnd has reached max_inflight
@@ -945,9 +969,14 @@ impl BbrCongestionController {
packet_info.bytes_in_flight,
self.congestion_state.loss_bursts_in_round(),
PROBE_BW_FULL_LOSS_COUNT,
&self.app_settings,
) {
let inflight_hi_from_lost_packet =
Self::inflight_hi_from_lost_packet(lost_bytes, lost_since_transmit, packet_info);
let inflight_hi_from_lost_packet = Self::inflight_hi_from_lost_packet(
lost_bytes,
lost_since_transmit,
packet_info,
&self.app_settings,
);
self.on_inflight_too_high(
packet_info.is_app_limited,
inflight_hi_from_lost_packet,
@@ -964,7 +993,9 @@
size: u32,
lost_since_transmit: u32,
packet_info: <BbrCongestionController as CongestionController>::PacketInfo,
app_settings: &ApplicationSettings,
) -> u32 {
let loss_thresh = Self::loss_thresh(app_settings);
//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.5.6.2
//# BBRInflightHiFromLostPacket(rs, packet):
//# size = packet.size
@@ -989,15 +1020,23 @@ impl BbrCongestionController {
// What was lost before this packet?
let lost_prev = lost_since_transmit - size;
// BBRLossThresh * inflight_prev - lost_prev
let loss_budget = (LOSS_THRESH * inflight_prev)
let loss_budget = (loss_thresh * inflight_prev)
.to_integer()
.saturating_sub(lost_prev);
// Multiply by the inverse of 1 - LOSS_THRESH instead of dividing
let lost_prefix = ((Ratio::one() - LOSS_THRESH).inv() * loss_budget).to_integer();
let lost_prefix = ((Ratio::one() - loss_thresh).inv() * loss_budget).to_integer();
// At what inflight value did losses cross BBRLossThresh?
inflight_prev + lost_prefix
}

#[inline]
fn loss_thresh(app_settings: &ApplicationSettings) -> Ratio<u32> {
app_settings
.loss_threshold
.map(|loss_threshold| Ratio::new_raw(loss_threshold, 100))
.unwrap_or(LOSS_THRESH)
}

/// Handles when the connection resumes transmitting after an idle period
#[inline]
fn handle_restart_from_idle<Pub: Publisher>(&mut self, now: Timestamp, publisher: &mut Pub) {
@@ -1074,7 +1113,9 @@

#[non_exhaustive]
#[derive(Debug, Default)]
pub struct Endpoint {}
pub struct Endpoint {
app_settings: ApplicationSettings,
}

impl congestion_controller::Endpoint for Endpoint {
type CongestionController = BbrCongestionController;
@@ -1083,7 +1124,15 @@ impl congestion_controller::Endpoint for Endpoint {
&mut self,
path_info: congestion_controller::PathInfo,
) -> Self::CongestionController {
BbrCongestionController::new(path_info.max_datagram_size)
BbrCongestionController::new(path_info.max_datagram_size, self.app_settings.clone())
}
}

pub struct Builder {}

impl Builder {
pub fn build_with(app_settings: ApplicationSettings) -> Endpoint {
Endpoint { app_settings }
}
}

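Taken together, the new pieces in this file compose roughly as the sketch below. This is a sketch only: the module path is inferred from the file location, the example values are hypothetical, and the remaining files in this commit (not shown above) are what plumb the settings through the endpoint providers. The integer knobs that the code divides by 100 are hundredths, so cwnd_gain: Some(250) requests a 2.5x ProbeBw cwnd gain and loss_threshold: Some(5) requests a 5% loss threshold.

// Sketch under assumptions: the import path is guessed from
// quic/s2n-quic-core/src/recovery/bbr.rs and may differ in the published crate.
use s2n_quic_core::recovery::bbr::{ApplicationSettings, Builder, Endpoint};

fn configured_bbr_endpoint() -> Endpoint {
    let settings = ApplicationSettings {
        cwnd: Some(65_536),      // bytes; overrides initial_window()
        cwnd_gain: Some(250),    // hundredths; ProbeBw cwnd gain of 2.5
        loss_threshold: Some(5), // hundredths; replaces the default LOSS_THRESH
        up_pacing_gain: None,    // left unset; consumed by the ProbeBw cycle-phase pacing gain shown above
    };

    // Every controller this endpoint creates receives a clone of `settings`
    // via BbrCongestionController::new(max_datagram_size, app_settings).
    Builder::build_with(settings)
}

Settings left as None fall back to BBR's built-in constants, so ApplicationSettings::default() reproduces the previous hard-coded behavior.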
4 changes: 2 additions & 2 deletions quic/s2n-quic-core/src/recovery/bbr/drain.rs
@@ -89,7 +89,7 @@ mod tests {

#[test]
fn enter_drain() {
let mut bbr = BbrCongestionController::new(MINIMUM_MAX_DATAGRAM_SIZE);
let mut bbr = BbrCongestionController::new(MINIMUM_MAX_DATAGRAM_SIZE, Default::default());
let mut publisher = event::testing::Publisher::snapshot();
let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());

@@ -106,7 +106,7 @@
//# BBREnterProbeBW() /* BBR estimates the queue was drained */
#[test]
fn check_drain_done() {
let mut bbr = BbrCongestionController::new(MINIMUM_MAX_DATAGRAM_SIZE);
let mut bbr = BbrCongestionController::new(MINIMUM_MAX_DATAGRAM_SIZE, Default::default());
let now = NoopClock.get_time();
let mut rng = random::testing::Generator::default();
let mut publisher = event::testing::Publisher::snapshot();
52 changes: 44 additions & 8 deletions quic/s2n-quic-core/src/recovery/bbr/full_pipe.rs
@@ -3,7 +3,11 @@

use crate::{
counter::{Counter, Saturating},
recovery::{bandwidth, bandwidth::Bandwidth, bbr::BbrCongestionController},
recovery::{
bandwidth,
bandwidth::Bandwidth,
bbr::{ApplicationSettings, BbrCongestionController},
},
};
use num_rational::Ratio;

@@ -65,13 +69,18 @@ impl Estimator {
rate_sample: bandwidth::RateSample,
loss_bursts_in_round: u8,
max_datagram_size: u16,
app_settings: &ApplicationSettings,
) {
if self.filled_pipe {
return;
}

self.filled_pipe =
self.excessive_inflight(rate_sample, loss_bursts_in_round, max_datagram_size);
self.filled_pipe = self.excessive_inflight(
rate_sample,
loss_bursts_in_round,
max_datagram_size,
app_settings,
);
}

/// Determines if the rate of increase of bandwidth has decreased enough to estimate the
@@ -135,6 +144,7 @@ impl Estimator {
rate_sample: bandwidth::RateSample,
loss_bursts_in_round: u8,
max_datagram_size: u16,
app_settings: &ApplicationSettings,
) -> bool {
//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.1.3
//# A second method BBR uses for estimating the bottleneck is full is by looking at sustained
@@ -176,6 +186,7 @@ impl Estimator {
max_datagram_size,
loss_bursts_in_round,
STARTUP_FULL_LOSS_COUNT,
app_settings,
)
}

@@ -266,12 +277,22 @@ mod tests {
};

// Only 7 loss bursts, not enough to be considered excessive loss
fp_estimator.on_loss_round_start(rate_sample, 7, MINIMUM_MAX_DATAGRAM_SIZE);
fp_estimator.on_loss_round_start(
rate_sample,
7,
MINIMUM_MAX_DATAGRAM_SIZE,
&Default::default(),
);
// The pipe has not been filled yet since there were only 7 loss bursts
assert!(!fp_estimator.filled_pipe());

// 8 loss bursts, enough to be considered excessive loss
fp_estimator.on_loss_round_start(rate_sample, 8, MINIMUM_MAX_DATAGRAM_SIZE);
fp_estimator.on_loss_round_start(
rate_sample,
8,
MINIMUM_MAX_DATAGRAM_SIZE,
&Default::default(),
);
// The pipe has been filled due to loss
assert!(fp_estimator.filled_pipe());
}
@@ -289,9 +310,19 @@
};

// Only 7 loss bursts, not enough to be considered excessive loss
fp_estimator.on_loss_round_start(rate_sample, 7, MINIMUM_MAX_DATAGRAM_SIZE);
fp_estimator.on_loss_round_start(
rate_sample,
7,
MINIMUM_MAX_DATAGRAM_SIZE,
&Default::default(),
);

fp_estimator.on_loss_round_start(rate_sample, 8, MINIMUM_MAX_DATAGRAM_SIZE);
fp_estimator.on_loss_round_start(
rate_sample,
8,
MINIMUM_MAX_DATAGRAM_SIZE,
&Default::default(),
);
// The pipe has been filled due to ECN
assert!(fp_estimator.filled_pipe());
}
@@ -308,7 +339,12 @@
..Default::default()
};
// 8 loss bursts, enough to be considered excessive loss
fp_estimator.on_loss_round_start(rate_sample, 8, MINIMUM_MAX_DATAGRAM_SIZE);
fp_estimator.on_loss_round_start(
rate_sample,
8,
MINIMUM_MAX_DATAGRAM_SIZE,
&Default::default(),
);
// The pipe has not been filled yet since the loss rate was not high enough
assert!(!fp_estimator.filled_pipe());
}