From 13720cd15be86c373a9544d5e0e9f5f93e46482b Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Fri, 29 Jan 2021 16:31:51 +0100
Subject: [PATCH 01/43] src/tests: Write and read concurrently

Most of the tests in `src/tests.rs` have the structure of a client sending data to a server, which in turn echoes the data back to the client, which reads it in full and compares it to what it initially sent. Some of the tests use randomly generated configurations where the `WindowUpdateMode` can be either `OnRead` or `OnReceive`.

When using `WindowUpdateMode::OnRead` the client cannot first send all data to the server and only then receive all data from the server. In such a case the server would eventually exhaust its window to the client, would then no longer be able to forward bytes to the client, and would therefore stop accepting new bytes from the client; the client would in turn block on sending, and the whole test would deadlock.

With this commit the client writes and reads concurrently and thus does not run into a deadlock when executing with `WindowUpdateMode::OnRead`.

---
 src/tests.rs | 46 +++++++++++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 15 deletions(-)

diff --git a/src/tests.rs b/src/tests.rs
index fc661a68..4e1f3978 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -11,6 +11,7 @@
 use crate::{Config, Connection, ConnectionError, Mode, Control, connection::State};
 use crate::WindowUpdateMode;
 use futures::{future, prelude::*};
+use futures::io::AsyncReadExt;
 use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
 use rand::Rng;
 use std::{fmt::Debug, io, net::{Ipv4Addr, SocketAddr, SocketAddrV4}};
@@ -256,43 +257,58 @@ where I: Iterator<Item = Vec<u8>>
 {
     let mut result = Vec::new();
+
     for msg in iter {
-        let mut stream = control.open_stream().await?;
+        let stream = control.open_stream().await?;
         log::debug!("C: new stream: {}", stream);
         let id = stream.id();
         let len = msg.len();
-        stream.write_all(&msg).await?;
-        log::debug!("C: {}: sent {} bytes", id, len);
-        stream.close().await?;
+        let (mut reader, mut writer) = AsyncReadExt::split(stream);
+        let write_fut = async {
+            writer.write_all(&msg).await.unwrap();
+            log::debug!("C: {}: sent {} bytes", id, len);
+            writer.close().await.unwrap();
+        };
         let mut data = Vec::new();
-        stream.read_to_end(&mut data).await?;
-        log::debug!("C: {}: received {} bytes", id, data.len());
-        result.push(data)
+        let read_fut = async {
+            reader.read_to_end(&mut data).await.unwrap();
+            log::debug!("C: {}: received {} bytes", id, data.len());
+        };
+        futures::future::join(write_fut, read_fut).await;
+        result.push(data);
     }
+
     log::debug!("C: closing connection");
     control.close().await?;
     Ok(result)
 }

-/// Open a stream, send all messages and collect the responses.
+/// Open a stream, send all messages and collect the responses. The
+/// sequence of responses will be returned.
async fn send_recv_single(mut control: Control, iter: I) -> Result>, ConnectionError> where I: Iterator> { - let mut stream = control.open_stream().await?; + let stream = control.open_stream().await?; log::debug!("C: new stream: {}", stream); + let id = stream.id(); + let (mut reader, mut writer) = AsyncReadExt::split(stream); let mut result = Vec::new(); for msg in iter { - let id = stream.id(); let len = msg.len(); - stream.write_all(&msg).await?; - log::debug!("C: {}: sent {} bytes", id, len); + let write_fut = async { + writer.write_all(&msg).await.unwrap(); + log::debug!("C: {}: sent {} bytes", id, len); + }; let mut data = vec![0; msg.len()]; - stream.read_exact(&mut data).await?; - log::debug!("C: {}: received {} bytes", id, data.len()); + let read_fut = async { + reader.read_exact(&mut data).await.unwrap(); + log::debug!("C: {}: received {} bytes", id, data.len()); + }; + futures::future::join(write_fut, read_fut).await; result.push(data) } - stream.close().await?; + writer.close().await?; log::debug!("C: closing connection"); control.close().await?; Ok(result) From 168d4f5a948549df9e3d8d1373f41118f660c808 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 29 Jan 2021 16:44:40 +0100 Subject: [PATCH 02/43] src/tests: Restrict number of quickcheck runs With quickcheck iterations potentially taking long, restrict number of iterations per test to 10. --- src/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests.rs b/src/tests.rs index 4e1f3978..b24908c9 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -47,7 +47,7 @@ fn prop_config_send_recv_single() { TestResult::from_bool(result.len() == num_requests && result.into_iter().eq(iter)) }) } - QuickCheck::new().quickcheck(prop as fn(_, _, _) -> _) + QuickCheck::new().tests(10).quickcheck(prop as fn(_, _, _) -> _) } #[test] @@ -79,7 +79,7 @@ fn prop_config_send_recv_multi() { TestResult::from_bool(result.len() == num_requests && result.into_iter().eq(iter)) }) } - QuickCheck::new().quickcheck(prop as fn(_, _, _) -> _) + QuickCheck::new().tests(10).quickcheck(prop as fn(_, _, _) -> _) } #[test] From b36a12414df1eb545ba2687c753c94edcc117547 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 1 Feb 2021 18:33:51 +0100 Subject: [PATCH 03/43] benches: Update to new bench_with_input function --- benches/concurrent.rs | 100 ++++++++++++++++-------------------------- 1 file changed, 39 insertions(+), 61 deletions(-) diff --git a/benches/concurrent.rs b/benches/concurrent.rs index 108b5d92..0b553e32 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -8,8 +8,8 @@ // at https://www.apache.org/licenses/LICENSE-2.0 and a copy of the MIT license // at https://opensource.org/licenses/MIT. 
-use criterion::{criterion_group, criterion_main, Criterion}; -use futures::{channel::mpsc, future, prelude::*, ready}; +use criterion::{BenchmarkId, criterion_group, criterion_main, Criterion, Throughput}; +use futures::{channel::mpsc, future, prelude::*, ready, io::AsyncReadExt}; use std::{fmt, io, pin::Pin, sync::Arc, task::{Context, Poll}}; use tokio::{runtime::Runtime, task}; use yamux::{Config, Connection, Mode}; @@ -20,9 +20,9 @@ criterion_main!(benches); #[derive(Copy, Clone)] struct Params { streams: usize, messages: usize } -impl fmt::Debug for Params { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "((streams {}) (messages {}))", self.streams, self.messages) +impl fmt::Display for Params { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "(streams {}, messages {})", self.streams, self.messages) } } @@ -36,40 +36,30 @@ impl AsRef<[u8]> for Bytes { } fn concurrent(c: &mut Criterion) { - let params = &[ - Params { streams: 1, messages: 1 }, - Params { streams: 10, messages: 1 }, - Params { streams: 1, messages: 10 }, - Params { streams: 100, messages: 1 }, - Params { streams: 1, messages: 100 }, - Params { streams: 10, messages: 100 }, - Params { streams: 100, messages: 10 } - ]; - - let data0 = Bytes(Arc::new(vec![0x42; 4096])); - let data1 = data0.clone(); - let data2 = data0.clone(); - - c.bench_function_over_inputs("one by one", move |b, &¶ms| { - let data = data1.clone(); - let mut rt = Runtime::new().unwrap(); - b.iter(move || { - rt.block_on(roundtrip(params.streams, params.messages, data.clone(), false)) - }) - }, - params); + let data = Bytes(Arc::new(vec![0x42; 4096])); + + let mut group = c.benchmark_group("concurrent"); - c.bench_function_over_inputs("all at once", move |b, &¶ms| { - let data = data2.clone(); + for nstreams in [1, 10, 100].iter() { + for nmessages in [1, 10, 100].iter() { + let params = Params { streams: *nstreams, messages: *nmessages }; + let data = data.clone(); let mut rt = Runtime::new().unwrap(); - b.iter(move || { - rt.block_on(roundtrip(params.streams, params.messages, data.clone(), true)) - }) - }, - params); + group.throughput(Throughput::Bytes((nstreams * nmessages * data.0.len()) as u64)); + group.bench_with_input( + BenchmarkId::from_parameter(params), + ¶ms, + |b, &Params { streams, messages }| b.iter(|| + rt.block_on(roundtrip(streams, messages, data.clone())) + ), + ); + } + } + + group.finish(); } -async fn roundtrip(nstreams: usize, nmessages: usize, data: Bytes, send_all: bool) { +async fn roundtrip(nstreams: usize, nmessages: usize, data: Bytes) { let msg_len = data.0.len(); let (server, client) = Endpoint::new(); let server = server.into_async_read(); @@ -101,33 +91,21 @@ async fn roundtrip(nstreams: usize, nmessages: usize, data: Bytes, send_all: boo let tx = tx.clone(); let mut ctrl = ctrl.clone(); task::spawn(async move { - let mut stream = ctrl.open_stream().await?; - if send_all { - // Send `nmessages` messages and receive `nmessages` messages. - for _ in 0 .. nmessages { - stream.write_all(data.as_ref()).await? - } - stream.close().await?; - let mut n = 0; - let mut b = vec![0; data.0.len()]; - loop { - let k = stream.read(&mut b).await?; - if k == 0 { break } - n += k - } - tx.unbounded_send(n).expect("unbounded_send") - } else { - // Send and receive `nmessages` messages. - let mut n = 0; - let mut b = vec![0; data.0.len()]; - for _ in 0 .. 
nmessages {
-                stream.write_all(data.as_ref()).await?;
-                stream.read_exact(&mut b[..]).await?;
-                n += b.len()
-            }
-            stream.close().await?;
-            tx.unbounded_send(n).expect("unbounded_send");
+        let stream = ctrl.open_stream().await?;
+        let (mut reader, mut writer) = AsyncReadExt::split(stream);
+        // Send and receive `nmessages` messages.
+        let mut n = 0;
+        let mut b = vec![0; data.0.len()];
+        for _ in 0 .. nmessages {
+            futures::future::join(
+                writer.write_all(data.as_ref()).map(|r| r.unwrap()),
+                reader.read_exact(&mut b[..]).map(|r| r.unwrap()),
+            ).await;
+            n += b.len()
         }
+        let mut stream = reader.reunite(writer).unwrap();
+        stream.close().await?;
+        tx.unbounded_send(n).expect("unbounded_send");
         Ok::<(), yamux::ConnectionError>(())
     });
 }

From 054ce0ab1ddedcbd57387706a32dea2b719d933b Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Tue, 2 Feb 2021 11:59:56 +0100
Subject: [PATCH 04/43] benches: Benchmark with constrained connections

Previously the benchmarks would use an unbounded channel to simulate a network connection. While this is helpful to measure the CPU impact of Yamux-specific changes, it is difficult to estimate how changes might affect network throughput in different environments.

The crate `constrained-connection` can help simulate different environments (DSL, HSDPA, GBit LAN, ...) by providing a channel enforcing a specified bandwidth and delay. That said, it does not simulate the OS network stack, routers, buffers, packet loss, ... Thus, as one would expect, this can be used to detect trends, but not to e.g. determine bandwidth expectations.

This commit makes the benchmarks run on top of a subset of the connection types provided by `constrained-connection`. In addition it retains the simulation on top of an unconstrained connection.

When simulating with low bandwidth-delay-product connections, Yamux can deadlock in the roundtrip scenario. Imagine a client sending a large payload exceeding the bandwidth-delay-product of the connection to a server. The server will try to echo the payload back to the client. Once both client and server have exhausted the buffer provided through the bandwidth-delay-product of the connection, neither will be able to make progress as the other won't read from the connection. This commit therefore rewrites the benchmarks so that only the client sends to the server, with the server no longer echoing the data back to the client.
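As a back-of-the-envelope illustration of the deadlock argument above (a minimal sketch; the link parameters are assumed values for illustration, not the ones used by `constrained-connection`):

    // Rough bandwidth-delay-product arithmetic. All numbers are assumptions.
    fn main() {
        let bandwidth = 1_250_000u64;  // bytes/s (~10 Mbit/s, assumed)
        let one_way_delay_ms = 50u64;  // assumed
        // Bytes that can be in flight in one direction before the peer must read.
        let bdp = bandwidth * one_way_delay_ms / 1_000; // 62_500 bytes
        // 100 streams x 4 KiB exceeds the buffer of both directions combined,
        // so with an echoing server both sides eventually block on write.
        let payload = 100 * 4096u64;
        assert!(payload > 2 * bdp);
    }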
--- Cargo.toml | 2 +- benches/concurrent.rs | 175 ++++++++++++++++-------------------------- 2 files changed, 66 insertions(+), 111 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4e43d3b5..84d1c85d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,8 +25,8 @@ futures = "0.3.4" quickcheck = "0.9" tokio = { version = "0.2", features = ["tcp", "rt-threaded", "macros"] } tokio-util = { version = "0.3", features = ["compat"] } +constrained-connection = "0.1" [[bench]] name = "concurrent" harness = false - diff --git a/benches/concurrent.rs b/benches/concurrent.rs index 0b553e32..604bd8e4 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -10,22 +10,14 @@ use criterion::{BenchmarkId, criterion_group, criterion_main, Criterion, Throughput}; use futures::{channel::mpsc, future, prelude::*, ready, io::AsyncReadExt}; -use std::{fmt, io, pin::Pin, sync::Arc, task::{Context, Poll}}; +use std::{fmt, io, pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration}; use tokio::{runtime::Runtime, task}; use yamux::{Config, Connection, Mode}; +use constrained_connection::{Endpoint, new_unconstrained_connection, samples}; criterion_group!(benches, concurrent); criterion_main!(benches); -#[derive(Copy, Clone)] -struct Params { streams: usize, messages: usize } - -impl fmt::Display for Params { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "(streams {}, messages {})", self.streams, self.messages) - } -} - #[derive(Debug, Clone)] struct Bytes(Arc>); @@ -37,134 +29,97 @@ impl AsRef<[u8]> for Bytes { fn concurrent(c: &mut Criterion) { let data = Bytes(Arc::new(vec![0x42; 4096])); + let networks = vec![ + ("mobile", (|| samples::mobile_hsdpa().2) as fn() -> (_, _)), + ("adsl2+", (|| samples::residential_adsl2().2) as fn() -> (_, _)), + ("gbit-lan", (|| samples::gbit_lan().2) as fn() -> (_, _)), + ("unconstrained", new_unconstrained_connection as fn() -> (_, _)), + ]; let mut group = c.benchmark_group("concurrent"); - - for nstreams in [1, 10, 100].iter() { - for nmessages in [1, 10, 100].iter() { - let params = Params { streams: *nstreams, messages: *nmessages }; - let data = data.clone(); - let mut rt = Runtime::new().unwrap(); - group.throughput(Throughput::Bytes((nstreams * nmessages * data.0.len()) as u64)); - group.bench_with_input( - BenchmarkId::from_parameter(params), - ¶ms, - |b, &Params { streams, messages }| b.iter(|| - rt.block_on(roundtrip(streams, messages, data.clone())) - ), - ); + group.sample_size(10); + + for (network_name, new_connection) in networks.into_iter() { + for nstreams in [1, 10, 100].iter() { + for nmessages in [1, 10, 100].iter() { + let data = data.clone(); + let mut rt = Runtime::new().unwrap(); + + group.throughput(Throughput::Bytes((nstreams * nmessages * data.0.len()) as u64)); + group.bench_function( + BenchmarkId::from_parameter( + format!("{}/#streams{}/#messages{}", network_name, nstreams, nmessages), + ), + |b| b.iter(|| { + let (server, client) = new_connection(); + rt.block_on(oneway(*nstreams, *nmessages, data.clone(), server, client)) + }), + ); + } } } group.finish(); } -async fn roundtrip(nstreams: usize, nmessages: usize, data: Bytes) { +fn config() -> Config { + let mut c = Config::default(); + c.set_window_update_mode(yamux::WindowUpdateMode::OnRead); + c +} + +async fn oneway( + nstreams: usize, + nmessages: usize, + data: Bytes, + server: Endpoint, + client: Endpoint, +) { let msg_len = data.0.len(); - let (server, client) = Endpoint::new(); - let server = server.into_async_read(); - let client = 
client.into_async_read(); + let (tx, rx) = mpsc::unbounded(); let server = async move { - yamux::into_stream(Connection::new(server, Config::default(), Mode::Server)) - .try_for_each_concurrent(None, |mut stream| async move { - { - let (mut r, mut w) = futures::io::AsyncReadExt::split(&mut stream); - futures::io::copy(&mut r, &mut w).await?; + let mut connection = Connection::new(server, config(), Mode::Server); + + while let Some(mut stream) = connection.next_stream().await.unwrap() { + let tx = tx.clone(); + + task::spawn(async move { + let mut n = 0; + let mut b = vec![0; msg_len]; + + for _ in 0 .. nmessages { + stream.read_exact(&mut b[..]).await.unwrap(); + n += b.len() } - stream.close().await?; - Ok(()) - }) - .await - .expect("server works") - }; + tx.unbounded_send(n).expect("unbounded_send"); + stream.close().await.unwrap(); + }); + } + }; task::spawn(server); - let (tx, rx) = mpsc::unbounded(); - let conn = Connection::new(client, Config::default(), Mode::Client); + let conn = Connection::new(client, config(), Mode::Client); let mut ctrl = conn.control(); - task::spawn(yamux::into_stream(conn).for_each(|_| future::ready(()))); + task::spawn(yamux::into_stream(conn).for_each(|r| {r.unwrap(); future::ready(())} )); for _ in 0 .. nstreams { let data = data.clone(); - let tx = tx.clone(); let mut ctrl = ctrl.clone(); task::spawn(async move { - let stream = ctrl.open_stream().await?; - let (mut reader, mut writer) = AsyncReadExt::split(stream); + let mut stream = ctrl.open_stream().await.unwrap(); + // Send and receive `nmessages` messages. - let mut n = 0; - let mut b = vec![0; data.0.len()]; for _ in 0 .. nmessages { - futures::future::join( - writer.write_all(data.as_ref()).map(|r| r.unwrap()), - reader.read_exact(&mut b[..]).map(|r| r.unwrap()), - ).await; - n += b.len() + stream.write_all(data.as_ref()).await.unwrap(); } - let mut stream = reader.reunite(writer).unwrap(); - stream.close().await?; - tx.unbounded_send(n).expect("unbounded_send"); + stream.close().await.unwrap(); Ok::<(), yamux::ConnectionError>(()) }); } let n = rx.take(nstreams).fold(0, |acc, n| future::ready(acc + n)).await; assert_eq!(n, nstreams * nmessages * msg_len); - ctrl.close().await.expect("close") -} - -#[derive(Debug)] -struct Endpoint { - incoming: mpsc::UnboundedReceiver>, - outgoing: mpsc::UnboundedSender> -} - -impl Endpoint { - fn new() -> (Self, Self) { - let (tx_a, rx_a) = mpsc::unbounded(); - let (tx_b, rx_b) = mpsc::unbounded(); - - let a = Endpoint { incoming: rx_a, outgoing: tx_b }; - let b = Endpoint { incoming: rx_b, outgoing: tx_a }; - - (a, b) - } -} - -impl Stream for Endpoint { - type Item = Result, io::Error>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - if let Some(b) = ready!(Pin::new(&mut self.incoming).poll_next(cx)) { - return Poll::Ready(Some(Ok(b))) - } - Poll::Pending - } -} - -impl AsyncWrite for Endpoint { - fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { - if ready!(Pin::new(&mut self.outgoing).poll_ready(cx)).is_err() { - return Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())) - } - let n = buf.len(); - if Pin::new(&mut self.outgoing).start_send(Vec::from(buf)).is_err() { - return Poll::Ready(Err(io::ErrorKind::ConnectionAborted.into())) - } - Poll::Ready(Ok(n)) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut self.outgoing) - .poll_flush(cx) - .map_err(|_| io::ErrorKind::ConnectionAborted.into()) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut 
Context) -> Poll> { - Pin::new(&mut self.outgoing) - .poll_close(cx) - .map_err(|_| io::ErrorKind::ConnectionAborted.into()) - } + ctrl.close().await.expect("close"); } From 7eb4658f73ab475de7a89e30661bdf82f578e5d1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Feb 2021 13:04:27 +0100 Subject: [PATCH 05/43] benches: Revert WindowUpdateMode back to OnReceive --- benches/concurrent.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/benches/concurrent.rs b/benches/concurrent.rs index 604bd8e4..b4314f5f 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -62,12 +62,6 @@ fn concurrent(c: &mut Criterion) { group.finish(); } -fn config() -> Config { - let mut c = Config::default(); - c.set_window_update_mode(yamux::WindowUpdateMode::OnRead); - c -} - async fn oneway( nstreams: usize, nmessages: usize, @@ -79,7 +73,7 @@ async fn oneway( let (tx, rx) = mpsc::unbounded(); let server = async move { - let mut connection = Connection::new(server, config(), Mode::Server); + let mut connection = Connection::new(server, Config::default(), Mode::Server); while let Some(mut stream) = connection.next_stream().await.unwrap() { let tx = tx.clone(); @@ -100,7 +94,7 @@ async fn oneway( }; task::spawn(server); - let conn = Connection::new(client, config(), Mode::Client); + let conn = Connection::new(client, Config::default(), Mode::Client); let mut ctrl = conn.control(); task::spawn(yamux::into_stream(conn).for_each(|r| {r.unwrap(); future::ready(())} )); From 4674057366c199c6d530120bfd425dd56d33db84 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 3 Feb 2021 14:14:22 +0100 Subject: [PATCH 06/43] benches/concurrent: Update doc comment --- benches/concurrent.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/benches/concurrent.rs b/benches/concurrent.rs index b4314f5f..4dc37cf7 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -82,6 +82,7 @@ async fn oneway( let mut n = 0; let mut b = vec![0; msg_len]; + // Receive `nmessages` messages. for _ in 0 .. nmessages { stream.read_exact(&mut b[..]).await.unwrap(); n += b.len() @@ -104,10 +105,11 @@ async fn oneway( task::spawn(async move { let mut stream = ctrl.open_stream().await.unwrap(); - // Send and receive `nmessages` messages. + // Send `nmessages` messages. for _ in 0 .. nmessages { stream.write_all(data.as_ref()).await.unwrap(); } + stream.close().await.unwrap(); Ok::<(), yamux::ConnectionError>(()) }); From 9e4297c3bb81646cb02ca9c3aaface31680d84f5 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 3 Feb 2021 14:16:02 +0100 Subject: [PATCH 07/43] benches/concurrent: Remove unused imports --- benches/concurrent.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benches/concurrent.rs b/benches/concurrent.rs index 4dc37cf7..da3d95db 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -8,12 +8,12 @@ // at https://www.apache.org/licenses/LICENSE-2.0 and a copy of the MIT license // at https://opensource.org/licenses/MIT. 
+use constrained_connection::{Endpoint, new_unconstrained_connection, samples}; use criterion::{BenchmarkId, criterion_group, criterion_main, Criterion, Throughput}; -use futures::{channel::mpsc, future, prelude::*, ready, io::AsyncReadExt}; -use std::{fmt, io, pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration}; +use futures::{channel::mpsc, future, prelude::*, io::AsyncReadExt}; +use std::sync::Arc; use tokio::{runtime::Runtime, task}; use yamux::{Config, Connection, Mode}; -use constrained_connection::{Endpoint, new_unconstrained_connection, samples}; criterion_group!(benches, concurrent); criterion_main!(benches); From ae4745ebff31158b40bf34910450480fe503ec2c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 4 Feb 2021 11:44:16 +0100 Subject: [PATCH 08/43] .github: Add dependabot --- .github/dependabot.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..11166769 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "daily" + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" From 1182bd497c5860df6eaaff50abacddc0f589fada Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Wed, 3 Feb 2021 18:19:15 +0100 Subject: [PATCH 09/43] Add a test for a potential write deadlock. The test is disabled for now, due to actually stalling. As an aside, update tokio in the tests. --- Cargo.toml | 5 +- benches/concurrent.rs | 2 +- src/tests.rs | 230 ++++++++++++++++++++++++++++++++++++++++-- tests/concurrent.rs | 4 +- 4 files changed, 225 insertions(+), 16 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 84d1c85d..79921321 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,10 +21,11 @@ static_assertions = "1" [dev-dependencies] anyhow = "1" criterion = "0.3" +env_logger = "0.6" futures = "0.3.4" quickcheck = "0.9" -tokio = { version = "0.2", features = ["tcp", "rt-threaded", "macros"] } -tokio-util = { version = "0.3", features = ["compat"] } +tokio = { version = "1.0", features = ["net", "rt-multi-thread", "macros", "time"] } +tokio-util = { version = "0.6", features = ["compat"] } constrained-connection = "0.1" [[bench]] diff --git a/benches/concurrent.rs b/benches/concurrent.rs index da3d95db..70f02200 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -43,7 +43,7 @@ fn concurrent(c: &mut Criterion) { for nstreams in [1, 10, 100].iter() { for nmessages in [1, 10, 100].iter() { let data = data.clone(); - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); group.throughput(Throughput::Bytes((nstreams * nmessages * data.0.len()) as u64)); group.bench_function( diff --git a/src/tests.rs b/src/tests.rs index b24908c9..fe22a059 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -16,18 +16,25 @@ use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use rand::Rng; use std::{fmt::Debug, io, net::{Ipv4Addr, SocketAddr, SocketAddrV4}}; use tokio::{net::{TcpStream, TcpListener}, runtime::Runtime, task}; -use tokio_util::compat::{Compat, Tokio02AsyncReadCompatExt}; +use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; +use futures::channel::mpsc::{unbounded, UnboundedSender, UnboundedReceiver}; +use futures::executor::LocalPool; +use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll, Waker}; +use std::pin::Pin; +use futures::future::join; +use futures::task::{Spawn, 
SpawnExt}; #[test] fn prop_config_send_recv_single() { fn prop(mut msgs: Vec, cfg1: TestConfig, cfg2: TestConfig) -> TestResult { - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); msgs.insert(0, Msg(vec![1u8; crate::DEFAULT_CREDIT as usize])); rt.block_on(async move { let num_requests = msgs.len(); let iter = msgs.into_iter().map(|m| m.0); - let (mut listener, address) = bind().await.expect("bind"); + let (listener, address) = bind().await.expect("bind"); let server = async { let socket = listener.accept().await.expect("accept").0.compat(); @@ -53,13 +60,13 @@ fn prop_config_send_recv_single() { #[test] fn prop_config_send_recv_multi() { fn prop(mut msgs: Vec, cfg1: TestConfig, cfg2: TestConfig) -> TestResult { - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); msgs.insert(0, Msg(vec![1u8; crate::DEFAULT_CREDIT as usize])); rt.block_on(async move { let num_requests = msgs.len(); let iter = msgs.into_iter().map(|m| m.0); - let (mut listener, address) = bind().await.expect("bind"); + let (listener, address) = bind().await.expect("bind"); let server = async { let socket = listener.accept().await.expect("accept").0.compat(); @@ -88,12 +95,12 @@ fn prop_send_recv() { if msgs.is_empty() { return TestResult::discard() } - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); rt.block_on(async move { let num_requests = msgs.len(); let iter = msgs.into_iter().map(|m| m.0); - let (mut listener, address) = bind().await.expect("bind"); + let (listener, address) = bind().await.expect("bind"); let server = async { let socket = listener.accept().await.expect("accept").0.compat(); @@ -123,9 +130,9 @@ fn prop_max_streams() { let mut cfg = Config::default(); cfg.set_max_num_streams(max_streams); - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); rt.block_on(async move { - let (mut listener, address) = bind().await.expect("bind"); + let (listener, address) = bind().await.expect("bind"); let cfg_s = cfg.clone(); let server = async move { @@ -158,9 +165,9 @@ fn prop_max_streams() { fn prop_send_recv_half_closed() { fn prop(msg: Msg) { let msg_len = msg.0.len(); - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); rt.block_on(async move { - let (mut listener, address) = bind().await.expect("bind"); + let (listener, address) = bind().await.expect("bind"); // Server should be able to write on a stream shutdown by the client. let server = async { @@ -199,6 +206,85 @@ fn prop_send_recv_half_closed() { QuickCheck::new().tests(7).quickcheck(prop as fn(_)) } +/// This test simulates two endpoints of a Yamux connection which may be unable to +/// write simultaneously but can make progress by reading. If both endpoints +/// don't read in-between trying to finish their writes, a deadlock occurs. +#[test] +#[ignore] +fn write_deadlock() { + let _ = env_logger::try_init(); + let mut pool = LocalPool::new(); + + // We make the message to transmit large enough s.t. the "server" + // is forced to start writing (i.e. echoing) the bytes before + // having read the entire payload. + let msg = vec![1u8; 1024 * 1024]; + + // Create a bounded channel representing the underlying "connection". + // Each endpoint gets a name and a bounded capacity for its outbound + // channel (which is the other's inbound channel). + let (server_endpoint, client_endpoint) = bounded::channel(("S", 1024), ("C", 1024)); + + // Create and spawn a "server" that echoes every message back to the client. 
+ let server = Connection::new(server_endpoint, Config::default(), Mode::Server); + pool.spawner().spawn_obj(async move { + crate::into_stream(server).try_for_each_concurrent( + None, |mut stream| async move { + { + let (mut r, mut w) = AsyncReadExt::split(&mut stream); + // Write back the bytes received. This may buffer internally. + futures::io::copy(&mut r, &mut w).await?; + } + log::debug!("S: stream {} done.", stream.id()); + stream.close().await?; + Ok(()) + }) + .await + .expect("server failed") + }.boxed().into()).unwrap(); + + // Create and spawn a "client" that sends messages expected to be echoed + // by the server. + let client = Connection::new(client_endpoint, Config::default(), Mode::Client); + let mut ctrl = client.control(); + + // Continuously advance the Yamux connection of the client in a background task. + pool.spawner().spawn_obj( + crate::into_stream(client).for_each(|_| { + panic!("Unexpected inbound stream for client"); + #[allow(unreachable_code)] + future::ready(()) + }).boxed().into() + ).unwrap(); + + // Handles to tasks on which to wait for completion. + let mut handles = Vec::new(); + + // Send the message, expecting it to be echo'd. + let msg = msg.clone(); + handles.push(pool.spawner().spawn_with_handle(async move { + let stream = ctrl.open_stream().await.unwrap(); + let (mut reader, mut writer) = AsyncReadExt::split(stream); + let mut b = vec![0; msg.len()]; + // Write & read concurrently, so that the client is able + // to start reading the echo'd bytes before it even finished + // sending them all. + let _ = join( + writer.write_all(msg.as_ref()).map_err(|e| panic!(e)), + reader.read_exact(&mut b[..]).map_err(|e| panic!(e)), + ).await; + let mut stream = reader.reunite(writer).unwrap(); + stream.close().await.unwrap(); + log::debug!("C: Stream {} done.", stream.id()); + assert_eq!(b, msg); + }.boxed()).unwrap()); + + // Wait for completion. + for h in handles { + pool.run_until(h); + } +} + #[derive(Clone, Debug)] struct Msg(Vec); @@ -314,3 +400,125 @@ where Ok(result) } +/// This module implements a duplex connection via channels with bounded +/// capacities. The channels used for the implementation are unbounded +/// as the operate at the granularity of variably-sized chunks of bytes +/// (`Vec`), whereas the capacity bounds (i.e. max. number of bytes +/// in transit in one direction) are enforced separately. +mod bounded { + use super::*; + use futures::ready; + use std::io::{Error, ErrorKind, Result}; + + pub struct Endpoint { + name: &'static str, + capacity: usize, + send: UnboundedSender>, + send_guard: Arc>, + recv: UnboundedReceiver>, + recv_buf: Vec, + recv_guard: Arc>, + } + + /// A `ChannelGuard` is used to enforce the maximum number of + /// bytes "in transit" across all chunks of an unbounded channel. 
+ #[derive(Default)] + struct ChannelGuard { + size: usize, + waker: Option, + } + + pub fn channel( + (name_a, capacity_a): (&'static str, usize), + (name_b, capacity_b): (&'static str, usize) + ) -> (Endpoint, Endpoint) { + let (a_to_b_sender, a_to_b_receiver) = unbounded(); + let (b_to_a_sender, b_to_a_receiver) = unbounded(); + + let a_to_b_guard = Arc::new(Mutex::new(ChannelGuard::default())); + let b_to_a_guard = Arc::new(Mutex::new(ChannelGuard::default())); + + let a = Endpoint { + name: name_a, + capacity: capacity_a, + send: a_to_b_sender, + send_guard: a_to_b_guard.clone(), + recv: b_to_a_receiver, + recv_buf: Vec::new(), + recv_guard: b_to_a_guard.clone(), + }; + + let b = Endpoint { + name: name_b, + capacity: capacity_b, + send: b_to_a_sender, + send_guard: b_to_a_guard, + recv: a_to_b_receiver, + recv_buf: Vec::new(), + recv_guard: a_to_b_guard, + }; + + (a, b) + } + + impl AsyncRead for Endpoint { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + if self.recv_buf.is_empty() { + match ready!(self.recv.poll_next_unpin(cx)) { + Some(bytes) => { self.recv_buf = bytes; } + None => return Poll::Ready(Ok(0)) + } + } + + let n = std::cmp::min(buf.len(), self.recv_buf.len()); + buf[0..n].copy_from_slice(&self.recv_buf[0..n]); + self.recv_buf = self.recv_buf.split_off(n); + + let mut guard = self.recv_guard.lock().unwrap(); + if let Some(waker) = guard.waker.take() { + log::debug!("{}: read: notifying waker after read of {} bytes", self.name, n); + waker.wake(); + } + guard.size -= n; + + log::debug!("{}: read: channel: {}/{}", self.name, guard.size, self.capacity); + + Poll::Ready(Ok(n)) + } + } + + impl AsyncWrite for Endpoint { + fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + debug_assert!(buf.len() > 0); + let mut guard = self.send_guard.lock().unwrap(); + let n = std::cmp::min(self.capacity - guard.size, buf.len()); + if n == 0 { + log::debug!("{}: write: channel full, registering waker", self.name); + guard.waker = Some(cx.waker().clone()); + return Poll::Pending; + } + + self.send.unbounded_send(buf[0..n].to_vec()) + .map_err(|e| Error::new(ErrorKind::ConnectionAborted, e))?; + + guard.size += n; + log::debug!("{}: write: channel: {}/{}", self.name, guard.size, self.capacity); + + Poll::Ready(Ok(n)) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(self.send.poll_flush_unpin(cx)).unwrap(); + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(self.send.poll_close_unpin(cx)).unwrap(); + Poll::Ready(Ok(())) + } + } +} diff --git a/tests/concurrent.rs b/tests/concurrent.rs index ba2f1f06..6b288daa 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -11,11 +11,11 @@ use futures::{channel::mpsc, prelude::*}; use std::{net::{Ipv4Addr, SocketAddr, SocketAddrV4}, sync::Arc}; use tokio::{net::{TcpStream, TcpListener}, task}; -use tokio_util::compat::Tokio02AsyncReadCompatExt; +use tokio_util::compat::TokioAsyncReadCompatExt; use yamux::{Config, Connection, Mode}; async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { - let mut listener = TcpListener::bind(&address).await.expect("bind"); + let listener = TcpListener::bind(&address).await.expect("bind"); let address = listener.local_addr().expect("local address"); let server = async move { From 2ed415012803c22d65d539bc5c7417cbda9dfd91 Mon Sep 17 00:00:00 2001 From: "Roman S. 
Borschel" Date: Mon, 8 Feb 2021 11:56:38 +0100 Subject: [PATCH 10/43] Small cleanup. --- src/tests.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/src/tests.rs b/src/tests.rs index fe22a059..e2ae87de 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -257,12 +257,8 @@ fn write_deadlock() { }).boxed().into() ).unwrap(); - // Handles to tasks on which to wait for completion. - let mut handles = Vec::new(); - // Send the message, expecting it to be echo'd. - let msg = msg.clone(); - handles.push(pool.spawner().spawn_with_handle(async move { + pool.run_until(pool.spawner().spawn_with_handle(async move { let stream = ctrl.open_stream().await.unwrap(); let (mut reader, mut writer) = AsyncReadExt::split(stream); let mut b = vec![0; msg.len()]; @@ -278,11 +274,6 @@ fn write_deadlock() { log::debug!("C: Stream {} done.", stream.id()); assert_eq!(b, msg); }.boxed()).unwrap()); - - // Wait for completion. - for h in handles { - pool.run_until(h); - } } #[derive(Clone, Debug)] From fb1122c0bfc83ea36d4d7f36ec9f0497222691e3 Mon Sep 17 00:00:00 2001 From: Roman Borschel Date: Mon, 8 Feb 2021 11:57:39 +0100 Subject: [PATCH 11/43] Update src/tests.rs Co-authored-by: Max Inden --- src/tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tests.rs b/src/tests.rs index e2ae87de..5b707db0 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -209,6 +209,8 @@ fn prop_send_recv_half_closed() { /// This test simulates two endpoints of a Yamux connection which may be unable to /// write simultaneously but can make progress by reading. If both endpoints /// don't read in-between trying to finish their writes, a deadlock occurs. +// +// Ignored for now as the current implementation is prone to the deadlock tested below. #[test] #[ignore] fn write_deadlock() { From e56e0f60a1e934ed183c2a43884289267acfb6f1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 3 Feb 2021 15:46:12 +0100 Subject: [PATCH 12/43] src/connection: Send WindowUpdate message early To prevent senders from being blocked every time they have sent RECEIVE_WINDOW (e.g. 256 KB) number of bytes, waiting for a WindowUpdate message granting further sending credit, this commit makes Yamux send a WindowUpdate message once half or more of the window has been received (and consumed in `WindowUpdateMode::OnRead`). Benchmarking shows that sending WindowUpdate messages early prevents senders from being blocked waiting for additional credit on common network types. For a detailed discussion as well as various benchmark results see https://github.com/paritytech/yamux/issues/100. Next to the above, this commit includes the following changes: - Use `WindowUpdateMode::OnRead` in benchmarks. I would argue that `OnRead` should be the default setting, given the importance of flow-control. With that in mind, I suggest to benchmark that default case. - Ignore WindowUpdate messages for unknown streams. With this commit WindowUpdate messages are sent more agressively. The following scenario would surface: A sender would close its channel, eventually being garbage collected. The receiver, before receiving the closing message, sends out a WindowUpdate message. The sender receives the WindowUpdate message not being able to associate it to a stream, thus resetting the whole connection. This commit ignores WindowUpdate messages for unknown streams instead. When sending very large messages, WindowUpdate messages are still not returned to the sender in time, thus the sender still being blocked in such case. 
This can be prevented by splitting large messages into smaller Yamux frames, see https://github.com/paritytech/yamux/issues/100#issuecomment-774461550. This additional optimization can be done in a future commit without interfering with the optimization introduced in this commit. --- benches/concurrent.rs | 13 ++- src/connection.rs | 29 ++---- src/connection/stream.rs | 202 +++++++++++++++++++++------------------ 3 files changed, 129 insertions(+), 115 deletions(-) diff --git a/benches/concurrent.rs b/benches/concurrent.rs index da3d95db..ad68f8ec 100644 --- a/benches/concurrent.rs +++ b/benches/concurrent.rs @@ -62,6 +62,12 @@ fn concurrent(c: &mut Criterion) { group.finish(); } +fn config() -> Config { + let mut c = Config::default(); + c.set_window_update_mode(yamux::WindowUpdateMode::OnRead); + c +} + async fn oneway( nstreams: usize, nmessages: usize, @@ -73,7 +79,7 @@ async fn oneway( let (tx, rx) = mpsc::unbounded(); let server = async move { - let mut connection = Connection::new(server, Config::default(), Mode::Server); + let mut connection = Connection::new(server, config(), Mode::Server); while let Some(mut stream) = connection.next_stream().await.unwrap() { let tx = tx.clone(); @@ -85,7 +91,7 @@ async fn oneway( // Receive `nmessages` messages. for _ in 0 .. nmessages { stream.read_exact(&mut b[..]).await.unwrap(); - n += b.len() + n += b.len(); } tx.unbounded_send(n).expect("unbounded_send"); @@ -95,7 +101,7 @@ async fn oneway( }; task::spawn(server); - let conn = Connection::new(client, Config::default(), Mode::Client); + let conn = Connection::new(client, config(), Mode::Client); let mut ctrl = conn.control(); task::spawn(yamux::into_stream(conn).for_each(|r| {r.unwrap(); future::ready(())} )); @@ -111,7 +117,6 @@ async fn oneway( } stream.close().await.unwrap(); - Ok::<(), yamux::ConnectionError>(()) }); } diff --git a/src/connection.rs b/src/connection.rs index 0458a15e..8a7758ef 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -658,7 +658,7 @@ impl Connection { let sender = self.stream_sender.clone(); Stream::new(stream_id, self.id, config, credit, credit, sender) }; - let window_update; + let mut window_update = None; { let mut shared = stream.shared(); if is_finish { @@ -666,16 +666,12 @@ impl Connection { } shared.window = shared.window.saturating_sub(frame.body_len()); shared.buffer.push(frame.into_body()); - if !is_finish - && shared.window == 0 - && self.config.window_update_mode == WindowUpdateMode::OnReceive - { - shared.window = self.config.receive_window; - let mut frame = Frame::window_update(stream_id, self.config.receive_window); + + if let Some(credit) = shared.next_window_update() { + shared.window += credit; + let mut frame = Frame::window_update(stream_id, credit); frame.header_mut().ack(); window_update = Some(frame) - } else { - window_update = None } } if window_update.is_none() { @@ -706,12 +702,9 @@ impl Connection { if let Some(w) = shared.reader.take() { w.wake() } - if !is_finish - && shared.window == 0 - && self.config.window_update_mode == WindowUpdateMode::OnReceive - { - shared.window = self.config.receive_window; - let frame = Frame::window_update(stream_id, self.config.receive_window); + if let Some(credit) = shared.next_window_update() { + shared.window += credit; + let frame = Frame::window_update(stream_id, credit); return Action::Update(frame) } } else if !is_finish { @@ -781,10 +774,7 @@ impl Connection { w.wake() } } else if !is_finish { - log::debug!("{}/{}: window update for unknown stream", self.id, stream_id); - let mut 
header = Header::data(stream_id, 0); - header.rst(); - return Action::Reset(Frame::new(header)) + log::debug!("{}/{}: window update for unknown stream, ignoring", self.id, stream_id); } Action::None @@ -939,4 +929,3 @@ where } }) } - diff --git a/src/connection/stream.rs b/src/connection/stream.rs index e9290b8b..56310dcd 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -21,6 +21,7 @@ use crate::{ use futures::{future::Either, ready, channel::mpsc, io::{AsyncRead, AsyncWrite}}; use parking_lot::{Mutex, MutexGuard}; use std::{fmt, io, pin::Pin, sync::Arc, task::{Context, Poll, Waker}}; +use std::convert::TryInto; /// The state of a Yamux stream. #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -78,7 +79,6 @@ pub struct Stream { conn: connection::Id, config: Arc, sender: mpsc::Sender, - window_update: Option>, flag: Flag, shared: Arc> } @@ -88,7 +88,6 @@ impl fmt::Debug for Stream { f.debug_struct("Stream") .field("id", &self.id.val()) .field("connection", &self.conn) - .field("window_update", &self.window_update.is_some()) .finish() } } @@ -112,11 +111,10 @@ impl Stream { Stream { id, conn, - config, + config: config.clone(), sender, - window_update: None, flag: Flag::None, - shared: Arc::new(Mutex::new(Shared::new(window, credit))), + shared: Arc::new(Mutex::new(Shared::new(window, credit, config))), } } @@ -149,7 +147,6 @@ impl Stream { conn: self.conn, config: self.config.clone(), sender: self.sender.clone(), - window_update: None, flag: self.flag, shared: self.shared.clone() } @@ -175,16 +172,28 @@ impl Stream { } } - /// Try to deliver a pending window update. - fn send_pending_window_update(&mut self, cx: &mut Context) -> Poll> { - if self.window_update.is_some() { + /// Send new credit to the sending side via a window update message if + /// permitted. + fn send_window_update(&mut self, cx: &mut Context) -> Poll> { + // When using [`WindowUpdateMode::OnReceive`] window update messages are + // send early on data receival (see [`crate::Connection::on_frame`]). + if matches!(self.config.window_update_mode, WindowUpdateMode::OnReceive) { + return Poll::Ready(Ok(())); + } + + let credit = self.shared().next_window_update(); + + if let Some(credit) = credit { ready!(self.sender.poll_ready(cx).map_err(|_| self.write_zero_err())?); - let mut frame = self.window_update.take().expect("window_update.is_some()").right(); + + let mut frame = Frame::window_update(self.id, credit).right(); self.add_flag(frame.header_mut()); let cmd = StreamCommand::SendFrame(frame); + self.sender.start_send(cmd).map_err(|_| self.write_zero_err())?; - self.shared().window = self.config.receive_window + self.shared().window += credit; } + Poll::Ready(Ok(())) } } @@ -207,48 +216,33 @@ impl futures::stream::Stream for Stream { return Poll::Ready(None) } - ready!(self.send_pending_window_update(cx))?; + ready!(self.send_window_update(cx))?; - // We need to limit the `shared` `MutexGuard` scope, or else we run into - // borrow check troubles further down. - { - let mut shared = self.shared(); + let mut shared = self.shared(); - if let Some(bytes) = shared.buffer.pop() { - let off = bytes.offset(); - let mut vec = bytes.into_vec(); - if off != 0 { - // This should generally not happen when the stream is used only as - // a `futures::stream::Stream` since the whole point of this impl is - // to consume chunks atomically. It may perhaps happen when mixing - // this impl and the `AsyncRead` one. 
- log::debug!("{}/{}: chunk has been partially consumed", self.conn, self.id); - vec = vec.split_off(off) - } - return Poll::Ready(Some(Ok(Packet(vec)))) + if let Some(bytes) = shared.buffer.pop() { + let off = bytes.offset(); + let mut vec = bytes.into_vec(); + if off != 0 { + // This should generally not happen when the stream is used only as + // a `futures::stream::Stream` since the whole point of this impl is + // to consume chunks atomically. It may perhaps happen when mixing + // this impl and the `AsyncRead` one. + log::debug!("{}/{}: chunk has been partially consumed", self.conn, self.id); + vec = vec.split_off(off) } + return Poll::Ready(Some(Ok(Packet(vec)))) + } - // Buffer is empty, let's check if we can expect to read more data. - if !shared.state().can_read() { - log::debug!("{}/{}: eof", self.conn, self.id); - return Poll::Ready(None) // stream has been reset - } - - // Since we have no more data at this point, we want to be woken up - // by the connection when more becomes available for us. - shared.reader = Some(cx.waker().clone()); - - // Finally, let's see if we need to send a window update to the remote. - if self.config.window_update_mode != WindowUpdateMode::OnRead || shared.window > 0 { - // No, time to go. - return Poll::Pending - } + // Buffer is empty, let's check if we can expect to read more data. + if !shared.state().can_read() { + log::debug!("{}/{}: eof", self.conn, self.id); + return Poll::Ready(None) // stream has been reset } - // At this point we know we have to send a window update to the remote. - debug_assert!(self.window_update.is_none()); - self.window_update = Some(Frame::window_update(self.id, self.config.receive_window)); - ready!(self.send_pending_window_update(cx))?; + // Since we have no more data at this point, we want to be woken up + // by the connection when more becomes available for us. + shared.reader = Some(cx.waker().clone()); Poll::Pending } @@ -262,54 +256,39 @@ impl AsyncRead for Stream { return Poll::Ready(Ok(0)) } - ready!(self.send_pending_window_update(cx))?; - - // We need to limit the `shared` `MutexGuard` scope, or else we run into - // borrow check troubles further down. - { - // Copy data from stream buffer. - let mut shared = self.shared(); - let mut n = 0; - while let Some(chunk) = shared.buffer.front_mut() { - if chunk.is_empty() { - shared.buffer.pop(); - continue - } - let k = std::cmp::min(chunk.len(), buf.len() - n); - (&mut buf[n .. n + k]).copy_from_slice(&chunk.as_ref()[.. k]); - n += k; - chunk.advance(k); - if n == buf.len() { - break - } - } + ready!(self.send_window_update(cx))?; - if n > 0 { - log::trace!("{}/{}: read {} bytes", self.conn, self.id, n); - return Poll::Ready(Ok(n)) + // Copy data from stream buffer. + let mut shared = self.shared(); + let mut n = 0; + while let Some(chunk) = shared.buffer.front_mut() { + if chunk.is_empty() { + shared.buffer.pop(); + continue } - - // Buffer is empty, let's check if we can expect to read more data. - if !shared.state().can_read() { - log::debug!("{}/{}: eof", self.conn, self.id); - return Poll::Ready(Ok(0)) // stream has been reset + let k = std::cmp::min(chunk.len(), buf.len() - n); + (&mut buf[n .. n + k]).copy_from_slice(&chunk.as_ref()[.. k]); + n += k; + chunk.advance(k); + if n == buf.len() { + break } + } - // Since we have no more data at this point, we want to be woken up - // by the connection when more becomes available for us. 
- shared.reader = Some(cx.waker().clone()); + if n > 0 { + log::trace!("{}/{}: read {} bytes", self.conn, self.id, n); + return Poll::Ready(Ok(n)) + } - // Finally, let's see if we need to send a window update to the remote. - if self.config.window_update_mode != WindowUpdateMode::OnRead || shared.window > 0 { - // No, time to go. - return Poll::Pending - } + // Buffer is empty, let's check if we can expect to read more data. + if !shared.state().can_read() { + log::debug!("{}/{}: eof", self.conn, self.id); + return Poll::Ready(Ok(0)) // stream has been reset } - // At this point we know we have to send a window update to the remote. - debug_assert!(self.window_update.is_none()); - self.window_update = Some(Frame::window_update(self.id, self.config.receive_window)); - ready!(self.send_pending_window_update(cx))?; + // Since we have no more data at this point, we want to be woken up + // by the connection when more becomes available for us. + shared.reader = Some(cx.waker().clone()); Poll::Pending } @@ -372,18 +351,20 @@ pub(crate) struct Shared { pub(crate) credit: u32, pub(crate) buffer: Chunks, pub(crate) reader: Option, - pub(crate) writer: Option + pub(crate) writer: Option, + config: Arc } impl Shared { - fn new(window: u32, credit: u32) -> Self { + fn new(window: u32, credit: u32, config: Arc) -> Self { Shared { state: State::Open, window, credit, buffer: Chunks::new(), reader: None, - writer: None + writer: None, + config } } @@ -414,5 +395,44 @@ impl Shared { current // Return the previous stream state for informational purposes. } + + /// Calculate the number of additional window bytes the receiving side + /// should grant the sending side via a window update message. + /// + /// Returns `None` if too small to justify a window update message. + /// + /// Note: Once a caller successfully sent a window update message, the + /// locally tracked window size needs to be updated manually by the caller. + pub(crate) fn next_window_update(&mut self) -> Option { + if !self.state.can_read() { + return None; + } + + let new_credit = match self.config.window_update_mode { + WindowUpdateMode::OnReceive => { + let bytes_received = self.config.receive_window - self.window; + bytes_received + }, + WindowUpdateMode::OnRead => { + let buffer_len: u32 = self.buffer.len() + .and_then(|l| l.try_into().ok()) + .unwrap_or(std::u32::MAX); + let bytes_received = self.config.receive_window - self.window; + let bytes_read = bytes_received.saturating_sub(buffer_len); + bytes_read + } + }; + + // Send WindowUpdate message when half or more of the configured receive + // window can be granted as additional credit to the sender. + // + // See https://github.com/paritytech/yamux/issues/100 for a detailed + // discussion. + if new_credit >= self.config.receive_window / 2 { + Some(new_credit) + } else { + None + } + } } From 79f0905d555494f1891d552dd3a3946e925c5b0d Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 8 Feb 2021 16:42:40 +0100 Subject: [PATCH 13/43] src/connection: Do not check window when receiving and in OnRead In `WindowUpdateMode::OnRead` one only grants the sender new credit once existing bytes have been read. Thus there is no need to check whether to send a new WindowUpdate message when receiving new bytes. One only needs to check whether to send a new WindowUpdate message when reading bytes. 
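The rule can be modeled in isolation as follows (a minimal sketch with stand-in types, not the crate's actual internals): on the receive path only `OnReceive` may grant credit eagerly, while `OnRead` defers entirely to the stream's read path.

    #[derive(Clone, Copy, PartialEq)]
    enum WindowUpdateMode { OnReceive, OnRead }

    fn credit_on_receive(mode: WindowUpdateMode, receive_window: u32, window: u32) -> Option<u32> {
        if mode != WindowUpdateMode::OnReceive {
            return None; // OnRead: credit is only granted once the application reads.
        }
        let consumed = receive_window.saturating_sub(window);
        // Half-window heuristic introduced by the previous commit.
        if consumed >= receive_window / 2 { Some(consumed) } else { None }
    }

    fn main() {
        // 256 KiB receive window of which 200 KiB have been received:
        assert_eq!(credit_on_receive(WindowUpdateMode::OnReceive, 262_144, 57_344), Some(204_800));
        assert_eq!(credit_on_receive(WindowUpdateMode::OnRead, 262_144, 57_344), None);
    }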
---
 src/connection.rs | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/connection.rs b/src/connection.rs
index 8a7758ef..eb944f5a 100644
--- a/src/connection.rs
+++ b/src/connection.rs
@@ -667,11 +667,13 @@ impl Connection {
             shared.window = shared.window.saturating_sub(frame.body_len());
             shared.buffer.push(frame.into_body());

-            if let Some(credit) = shared.next_window_update() {
-                shared.window += credit;
-                let mut frame = Frame::window_update(stream_id, credit);
-                frame.header_mut().ack();
-                window_update = Some(frame)
+            if matches!(self.config.window_update_mode, WindowUpdateMode::OnReceive) {
+                if let Some(credit) = shared.next_window_update() {
+                    shared.window += credit;
+                    let mut frame = Frame::window_update(stream_id, credit);
+                    frame.header_mut().ack();
+                    window_update = Some(frame)
+                }
             }
         }
         if window_update.is_none() {
@@ -702,10 +704,12 @@ impl Connection {
             if let Some(w) = shared.reader.take() {
                 w.wake()
             }
-            if let Some(credit) = shared.next_window_update() {
-                shared.window += credit;
-                let frame = Frame::window_update(stream_id, credit);
-                return Action::Update(frame)
+            if matches!(self.config.window_update_mode, WindowUpdateMode::OnReceive) {
+                if let Some(credit) = shared.next_window_update() {
+                    shared.window += credit;
+                    let frame = Frame::window_update(stream_id, credit);
+                    return Action::Update(frame)
+                }
             }
         } else if !is_finish {
             log::debug!("{}/{}: data for unknown stream", self.id, stream_id);

From faa0c91a1102d9abf298ad9aefe24b41a49ac535 Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Mon, 8 Feb 2021 16:56:18 +0100
Subject: [PATCH 14/43] src/connection/stream: Hold lock while sending window update

Hold the lock on `Shared` long enough to prevent interleaved calls to `next_window_update` from granting a total receive window larger than the configured one.
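A minimal sketch of the invariant (using `std::sync::Mutex` and simplified fields instead of the crate's `parking_lot`-based `Shared`): the credit is computed and applied under a single lock acquisition, so a second, interleaved caller always observes the already-updated window and cannot grant the same credit twice.

    use std::sync::Mutex;

    struct Shared { window: u32, receive_window: u32 }

    // Compute and apply the credit without releasing the lock in between.
    fn next_update(shared: &Mutex<Shared>) -> Option<u32> {
        let mut s = shared.lock().unwrap();
        let credit = s.receive_window.saturating_sub(s.window);
        if credit == 0 { return None }
        s.window += credit; // applied while still holding the lock
        Some(credit)        // the caller then sends the WindowUpdate frame
    }

    fn main() {
        let shared = Mutex::new(Shared { window: 0, receive_window: 262_144 });
        assert_eq!(next_update(&shared), Some(262_144));
        // An interleaved second caller cannot grant the same credit again:
        assert_eq!(next_update(&shared), None);
    }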
--- src/connection/stream.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/connection/stream.rs b/src/connection/stream.rs index 56310dcd..729c8939 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -181,17 +181,18 @@ impl Stream { return Poll::Ready(Ok(())); } - let credit = self.shared().next_window_update(); + let mut shared = self.shared.lock(); - if let Some(credit) = credit { + if let Some(credit) = shared.next_window_update() { ready!(self.sender.poll_ready(cx).map_err(|_| self.write_zero_err())?); + shared.window += credit; + drop(shared); + let mut frame = Frame::window_update(self.id, credit).right(); self.add_flag(frame.header_mut()); let cmd = StreamCommand::SendFrame(frame); - self.sender.start_send(cmd).map_err(|_| self.write_zero_err())?; - self.shared().window += credit; } Poll::Ready(Ok(())) From df117583b708aae9a8e03c5668370093dc0dd345 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 8 Feb 2021 17:17:50 +0100 Subject: [PATCH 15/43] src/connection/stream: Use saturating sub when calculating new window --- src/connection/stream.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/connection/stream.rs b/src/connection/stream.rs index 729c8939..aff6fc43 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -411,14 +411,16 @@ impl Shared { let new_credit = match self.config.window_update_mode { WindowUpdateMode::OnReceive => { - let bytes_received = self.config.receive_window - self.window; + debug_assert!(self.config.receive_window >= self.window); + let bytes_received = self.config.receive_window.saturating_sub(self.window); bytes_received }, WindowUpdateMode::OnRead => { + debug_assert!(self.config.receive_window >= self.window); + let bytes_received = self.config.receive_window.saturating_sub(self.window); let buffer_len: u32 = self.buffer.len() .and_then(|l| l.try_into().ok()) .unwrap_or(std::u32::MAX); - let bytes_received = self.config.receive_window - self.window; let bytes_read = bytes_received.saturating_sub(buffer_len); bytes_read } From 9cadf113f21e0ec311f23365d7352d27daac4c87 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Feb 2021 20:06:46 +0100 Subject: [PATCH 16/43] Update quickcheck requirement from 0.9 to 1.0 (#106) * Update quickcheck requirement from 0.9 to 1.0 Updates the requirements on [quickcheck](https://github.com/BurntSushi/quickcheck) to permit the latest version. 
- [Release notes](https://github.com/BurntSushi/quickcheck/releases) - [Commits](https://github.com/BurntSushi/quickcheck/compare/quickcheck_macros-0.9.0...1.0.3) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Max Inden --- Cargo.toml | 2 +- src/frame/header.rs | 15 ++++++--------- src/frame/io.rs | 2 +- src/tests.rs | 25 +++++++++++++++---------- 4 files changed, 23 insertions(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 84d1c85d..5aa83b34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ static_assertions = "1" anyhow = "1" criterion = "0.3" futures = "0.3.4" -quickcheck = "0.9" +quickcheck = "1.0" tokio = { version = "0.2", features = ["tcp", "rt-threaded", "macros"] } tokio-util = { version = "0.3", features = ["compat"] } constrained-connection = "0.1" diff --git a/src/frame/header.rs b/src/frame/header.rs index 8e70f225..1d356e9f 100644 --- a/src/frame/header.rs +++ b/src/frame/header.rs @@ -406,22 +406,19 @@ impl std::error::Error for HeaderDecodeError {} #[cfg(test)] mod tests { use quickcheck::{Arbitrary, Gen, QuickCheck}; - use rand::{Rng, seq::SliceRandom}; use super::*; impl Arbitrary for Header<()> { - fn arbitrary(g: &mut G) -> Self { - let tag = [Tag::Data, Tag::WindowUpdate, Tag::Ping, Tag::GoAway] - .choose(g) - .unwrap() - .clone(); + fn arbitrary(g: &mut Gen) -> Self { + let tag = *g.choose(&[Tag::Data, Tag::WindowUpdate, Tag::Ping, Tag::GoAway]) + .unwrap(); Header { version: Version(0), tag, - flags: Flags(g.gen()), - stream_id: StreamId(g.gen()), - length: Len(g.gen()), + flags: Flags(Arbitrary::arbitrary(g)), + stream_id: StreamId(Arbitrary::arbitrary(g)), + length: Len(Arbitrary::arbitrary(g)), _marker: std::marker::PhantomData } } diff --git a/src/frame/io.rs b/src/frame/io.rs index a8d24c81..8540905f 100644 --- a/src/frame/io.rs +++ b/src/frame/io.rs @@ -214,7 +214,7 @@ mod tests { use super::*; impl Arbitrary for Frame<()> { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let mut header: header::Header<()> = Arbitrary::arbitrary(g); let body = if header.tag() == header::Tag::Data { diff --git a/src/tests.rs b/src/tests.rs index b24908c9..2c7a69d3 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -13,7 +13,6 @@ use crate::WindowUpdateMode; use futures::{future, prelude::*}; use futures::io::AsyncReadExt; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; -use rand::Rng; use std::{fmt::Debug, io, net::{Ipv4Addr, SocketAddr, SocketAddrV4}}; use tokio::{net::{TcpStream, TcpListener}, runtime::Runtime, task}; use tokio_util::compat::{Compat, Tokio02AsyncReadCompatExt}; @@ -203,11 +202,17 @@ fn prop_send_recv_half_closed() { struct Msg(Vec); impl Arbitrary for Msg { - fn arbitrary(g: &mut G) -> Msg { - let n: usize = g.gen_range(1, g.size() + 1); - let mut v = vec![0; n]; - g.fill(&mut v[..]); - Msg(v) + fn arbitrary(g: &mut Gen) -> Msg { + let mut msg = Msg(Arbitrary::arbitrary(g)); + if msg.0.is_empty() { + msg.0.push(Arbitrary::arbitrary(g)); + } + + msg + } + + fn shrink(&self) -> Box> { + Box::new(self.0.shrink().filter(|v| !v.is_empty()).map(|v| Msg(v))) } } @@ -215,15 +220,15 @@ impl Arbitrary for Msg { struct TestConfig(Config); impl Arbitrary for TestConfig { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let mut c = Config::default(); - c.set_window_update_mode(if g.gen() { + c.set_window_update_mode(if bool::arbitrary(g) { WindowUpdateMode::OnRead } else { WindowUpdateMode::OnReceive }); - 
c.set_read_after_close(g.gen()); - c.set_receive_window(g.gen_range(256 * 1024, 1024 * 1024)); + c.set_read_after_close(Arbitrary::arbitrary(g)); + c.set_receive_window(256 * 1024 + u32::arbitrary(g) % (768 * 1024)); TestConfig(c) } } From 90720d796ef66b64f6453d8342c8f786ea5611ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Feb 2021 22:28:20 +0100 Subject: [PATCH 17/43] Update rand requirement from 0.7.2 to 0.8.3 (#107) Updates the requirements on [rand](https://github.com/rust-random/rand) to permit the latest version. - [Release notes](https://github.com/rust-random/rand/releases) - [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-random/rand/commits) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 5aa83b34..fb288cb6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ futures = { version = "0.3.4", default-features = false, features = ["std"] } log = "0.4.8" nohash-hasher = "0.2" parking_lot = "0.11" -rand = "0.7.2" +rand = "0.8.3" static_assertions = "1" [dev-dependencies] From 439180c8064cb77e9687ba7b5c92bda4c2d89d33 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 10 Feb 2021 11:32:42 +0100 Subject: [PATCH 18/43] src/connection: Ignore frames from unknown streams There are benign cases where frames from unknown streams can occur and where a stream reset is not appropriate. See https://github.com/paritytech/yamux/issues/110 for details. This commit ignores frames from unknown streams for data, window update and ping frames and adds documentation referencing back to issue #110. --- src/connection.rs | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index eb944f5a..12f30c24 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -711,11 +711,15 @@ impl Connection { return Action::Update(frame) } } - } else if !is_finish { - log::debug!("{}/{}: data for unknown stream", self.id, stream_id); - let mut header = Header::data(stream_id, 0); - header.rst(); - return Action::Reset(Frame::new(header)) + } else { + log::debug!("{}/{}: data for unknown stream, ignoring", self.id, stream_id); + // Previous implementations would send back a stream reset when receiving a data frame + // on an unknown stream. There are multiple benign scenarios that can lead to this + // happening and where a stream reset would not be appropriate. One such case would be + // when still in the process of sending out a frame closing the stream, while the stream + // data structure itself has already been garbage collected. + // + // See https://github.com/paritytech/yamux/issues/110 for details. } Action::None @@ -777,8 +781,15 @@ impl Connection { if let Some(w) = shared.writer.take() { w.wake() } - } else if !is_finish { + } else { log::debug!("{}/{}: window update for unknown stream, ignoring", self.id, stream_id); + // Previous implementations would send back a stream reset when receiving a window + // update frame on an unknown stream. There are multiple benign scenarios that can lead + // to this happening and where a stream reset would not be appropriate. 
One such case + // would be when still in the process of sending out a frame closing the stream, while + // the stream data structure itself has already been garbage collected. + // + // See https://github.com/paritytech/yamux/issues/110 for details. } Action::None @@ -795,9 +806,15 @@ impl Connection { return Action::Ping(Frame::new(hdr)) } log::debug!("{}/{}: ping for unknown stream", self.id, stream_id); - let mut header = Header::data(stream_id, 0); - header.rst(); - Action::Reset(Frame::new(header)) + // Previous implementations would send back a stream reset when receiving a ping frame on + // an unknown stream. There are multiple benign scenarios that can lead to this happening + // and where a stream reset would not be appropriate. One such case would be when still in + // the process of sending out a frame closing the stream, while the stream data structure + // itself has already been garbage collected. + // + // See https://github.com/paritytech/yamux/issues/110 for details. + + Action::None } fn next_stream_id(&mut self) -> Result { From 4b6a72ceac397c3cd6bedde32de1362a06591232 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 10 Feb 2021 12:04:33 +0100 Subject: [PATCH 19/43] src/connection/stream: Continue reading when blocked on window update --- src/connection/stream.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/connection/stream.rs b/src/connection/stream.rs index aff6fc43..72268ea9 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -217,7 +217,12 @@ impl futures::stream::Stream for Stream { return Poll::Ready(None) } - ready!(self.send_window_update(cx))?; + match self.send_window_update(cx) { + Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), + // Continue reading buffered data even though sending a window update blocked. + Poll::Pending => {}, + } let mut shared = self.shared(); @@ -257,7 +262,12 @@ impl AsyncRead for Stream { return Poll::Ready(Ok(0)) } - ready!(self.send_window_update(cx))?; + match self.send_window_update(cx) { + Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + // Continue reading buffered data even though sending a window update blocked. + Poll::Pending => {}, + } // Copy data from stream buffer. let mut shared = self.shared(); From d4398f0fa23870e66a3e2a69fb99092a3cfb1e77 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 10 Feb 2021 20:43:26 +0100 Subject: [PATCH 20/43] src/chunks: Track length instead of folding over each chunk --- src/chunks.rs | 18 ++++++++++-------- src/connection.rs | 2 +- src/connection/stream.rs | 4 +--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/chunks.rs b/src/chunks.rs index c22308e4..213bbec3 100644 --- a/src/chunks.rs +++ b/src/chunks.rs @@ -17,24 +17,24 @@ use std::{collections::VecDeque, io}; /// [`Chunk`] elements. #[derive(Debug)] pub(crate) struct Chunks { - seq: VecDeque + seq: VecDeque, + len: usize } impl Chunks { /// A new empty chunk list. pub(crate) fn new() -> Self { - Chunks { seq: VecDeque::new() } + Chunks { seq: VecDeque::new(), len: 0 } } - /// The total length of bytes contained in all `Chunk`s. - pub(crate) fn len(&self) -> Option { - self.seq.iter().fold(Some(0), |total, x| { - total.and_then(|n| n.checked_add(x.len())) - }) + /// The total length of bytes yet-to-be-read in all `Chunk`s. + pub(crate) fn len(&self) -> usize { + self.len - self.seq.front().map(|c| c.offset()).unwrap_or(0) } /// Add another chunk of bytes to the end. 
pub(crate) fn push(&mut self, x: Vec) { + self.len += x.len(); if !x.is_empty() { self.seq.push_back(Chunk { cursor: io::Cursor::new(x) }) } @@ -42,7 +42,9 @@ impl Chunks { /// Remove and return the first chunk. pub(crate) fn pop(&mut self) -> Option { - self.seq.pop_front() + let chunk = self.seq.pop_front(); + self.len -= chunk.as_ref().map(|c| c.len() + c.offset()).unwrap_or(0); + chunk } /// Get a mutable reference to the first chunk. diff --git a/src/connection.rs b/src/connection.rs index 12f30c24..a6e1379c 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -693,7 +693,7 @@ impl Connection { shared.update_state(self.id, stream_id, State::RecvClosed); } let max_buffer_size = self.config.max_buffer_size; - if shared.buffer.len().map(move |n| n >= max_buffer_size).unwrap_or(true) { + if shared.buffer.len() >= max_buffer_size { log::error!("{}/{}: buffer of stream grows beyond limit", self.id, stream_id); let mut header = Header::data(stream_id, 0); header.rst(); diff --git a/src/connection/stream.rs b/src/connection/stream.rs index 72268ea9..c411d9a9 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -428,9 +428,7 @@ impl Shared { WindowUpdateMode::OnRead => { debug_assert!(self.config.receive_window >= self.window); let bytes_received = self.config.receive_window.saturating_sub(self.window); - let buffer_len: u32 = self.buffer.len() - .and_then(|l| l.try_into().ok()) - .unwrap_or(std::u32::MAX); + let buffer_len: u32 = self.buffer.len().try_into().unwrap_or(std::u32::MAX); let bytes_read = bytes_received.saturating_sub(buffer_len); bytes_read } From 55db98a5133b5c563ab56bb7b06338d57bdca168 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 10 Feb 2021 20:46:19 +0100 Subject: [PATCH 21/43] src/connection: Use eloquent doc comment for not resetting stream --- src/connection.rs | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index a6e1379c..b7581f72 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -713,11 +713,11 @@ impl Connection { } } else { log::debug!("{}/{}: data for unknown stream, ignoring", self.id, stream_id); - // Previous implementations would send back a stream reset when receiving a data frame - // on an unknown stream. There are multiple benign scenarios that can lead to this - // happening and where a stream reset would not be appropriate. One such case would be - // when still in the process of sending out a frame closing the stream, while the stream - // data structure itself has already been garbage collected. + // We do not consider this a protocol violation and thus do not send a stream reset + // because we may still be processing pending `StreamCommand`s of this stream that were + // sent before it has been dropped and "garbage collected". Such a stream reset would + // interfere with the frames that still need to be sent, causing premature stream + // termination for the remote. // // See https://github.com/paritytech/yamux/issues/110 for details. } @@ -783,11 +783,11 @@ impl Connection { } } else { log::debug!("{}/{}: window update for unknown stream, ignoring", self.id, stream_id); - // Previous implementations would send back a stream reset when receiving a window - // update frame on an unknown stream. There are multiple benign scenarios that can lead - // to this happening and where a stream reset would not be appropriate. 
One such case - // would be when still in the process of sending out a frame closing the stream, while - // the stream data structure itself has already been garbage collected. + // We do not consider this a protocol violation and thus do not send a stream reset + // because we may still be processing pending `StreamCommand`s of this stream that were + // sent before it has been dropped and "garbage collected". Such a stream reset would + // interfere with the frames that still need to be sent, causing premature stream + // termination for the remote. // // See https://github.com/paritytech/yamux/issues/110 for details. } @@ -806,11 +806,10 @@ impl Connection { return Action::Ping(Frame::new(hdr)) } log::debug!("{}/{}: ping for unknown stream", self.id, stream_id); - // Previous implementations would send back a stream reset when receiving a ping frame on - // an unknown stream. There are multiple benign scenarios that can lead to this happening - // and where a stream reset would not be appropriate. One such case would be when still in - // the process of sending out a frame closing the stream, while the stream data structure - // itself has already been garbage collected. + // We do not consider this a protocol violation and thus do not send a stream reset because + // we may still be processing pending `StreamCommand`s of this stream that were sent before + // it has been dropped and "garbage collected". Such a stream reset would interfere with the + // frames that still need to be sent, causing premature stream termination for the remote. // // See https://github.com/paritytech/yamux/issues/110 for details. Action::None } From 8838373fadb898a67a164e533eccd3ad9c47adf6 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 10 Feb 2021 18:41:01 +0100 Subject: [PATCH 22/43] src: Limit data frame payload size By limiting the data frame payload size one gains the following benefits: 1. Reduce delays sending time-sensitive frames, e.g. window updates. 2. Minimize head-of-line blocking across streams. 3. Enable better interleaving of send and receive operations, as each is carried out atomically instead of concurrently with its respective counterpart. Limiting the frame size to 16KiB does not introduce a large overhead. A Yamux frame header is 12 bytes large, thus this change introduces an overhead of ~0.07%. --- src/connection/stream.rs | 1 + src/lib.rs | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/src/connection/stream.rs b/src/connection/stream.rs index e9290b8b..636f2d5a 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -330,6 +330,7 @@ impl AsyncWrite for Stream { return Poll::Pending } let k = std::cmp::min(shared.credit as usize, buf.len()); + let k = std::cmp::min(k, crate::MAX_DATA_FRAME_PAYLOAD_SIZE); shared.credit = shared.credit.saturating_sub(k as u32); Vec::from(&buf[.. k]) }; diff --git a/src/lib.rs b/src/lib.rs index 44528100..d67db9dc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,6 +40,21 @@ pub use crate::frame::{FrameDecodeError, header::{HeaderDecodeError, StreamId}}; const DEFAULT_CREDIT: u32 = 256 * 1024; // as per yamux specification +/// Maximum number of bytes a Yamux data frame might carry as its payload. +/// +/// The data frame payload size is not restricted by the yamux specification. +/// Still, this implementation restricts the size to: +/// +/// 1. Reduce delays sending time-sensitive frames, e.g. window updates. +/// 2. Minimize head-of-line blocking across streams. +/// 3.
Enable better interleaving of send and receive operations, as each is +/// carried out atomically instead of concurrently with its respective +/// counterpart. +/// +/// For details on why this concrete value was chosen, see +/// https://github.com/paritytech/yamux/issues/100. +const MAX_DATA_FRAME_PAYLOAD_SIZE: usize = 16 * 1024; + /// Specifies when window update frames are sent. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WindowUpdateMode { From 7a77cfbc665d6f88682876bfb9b78f0db063b0c1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 11 Feb 2021 15:47:57 +0100 Subject: [PATCH 23/43] src/lib: Make split_send_size configurable --- src/connection/stream.rs | 2 +- src/lib.rs | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/connection/stream.rs b/src/connection/stream.rs index 636f2d5a..d35c9ff7 100644 --- a/src/connection/stream.rs +++ b/src/connection/stream.rs @@ -330,7 +330,7 @@ impl AsyncWrite for Stream { return Poll::Pending } let k = std::cmp::min(shared.credit as usize, buf.len()); - let k = std::cmp::min(k, crate::MAX_DATA_FRAME_PAYLOAD_SIZE); + let k = std::cmp::min(k, self.config.split_send_size); shared.credit = shared.credit.saturating_sub(k as u32); Vec::from(&buf[.. k]) }; diff --git a/src/lib.rs b/src/lib.rs index d67db9dc..b2a8740c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,7 +40,8 @@ pub use crate::frame::{FrameDecodeError, header::{HeaderDecodeError, StreamId}}; const DEFAULT_CREDIT: u32 = 256 * 1024; // as per yamux specification -/// Maximum number of bytes a Yamux data frame might carry as its payload. +/// Default maximum number of bytes a Yamux data frame might carry as its +/// payload when being sent. Larger payloads will be split. /// /// The data frame payload size is not restricted by the yamux specification. /// Still, this implementation restricts the size to: @@ -53,7 +54,7 @@ const DEFAULT_CREDIT: u32 = 256 * 1024; // as per yamux specification /// /// For details on why this concrete value was chosen, see /// https://github.com/paritytech/yamux/issues/100. -const MAX_DATA_FRAME_PAYLOAD_SIZE: usize = 16 * 1024; +const DEFAULT_SPLIT_SEND_SIZE: usize = 16 * 1024; /// Specifies when window update frames are sent. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WindowUpdateMode { @@ -91,13 +92,15 @@ pub enum WindowUpdateMode { /// - max. number of streams = 8192 /// - window update mode = on receive /// - read after close = true +/// - split send size = 16 KiB #[derive(Debug, Clone)] pub struct Config { receive_window: u32, max_buffer_size: usize, max_num_streams: usize, window_update_mode: WindowUpdateMode, - read_after_close: bool + read_after_close: bool, + split_send_size: usize } impl Default for Config { @@ -107,7 +110,8 @@ max_buffer_size: 1024 * 1024, max_num_streams: 8192, window_update_mode: WindowUpdateMode::OnReceive, - read_after_close: true + read_after_close: true, + split_send_size: DEFAULT_SPLIT_SEND_SIZE } } } @@ -148,6 +152,13 @@ impl Config { self.read_after_close = b; self } + + /// Set the max. payload size used when sending data frames. Payloads larger + /// than the configured max. will be split. + pub fn set_split_send_size(&mut self, n: usize) -> &mut Self { + self.split_send_size = n; + self + } } // Check that we can safely cast a `usize` to a `u64`. From 24d5464b25c938b786519f75064e8a81f0b3d80d Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Wed, 10 Feb 2021 20:13:49 +0100 Subject: [PATCH 24/43] Allow reads concurrent to pending writes.
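The mechanism builds on the `Pausable` stream adapter that already wraps the control command receiver; this patch wraps the stream command receiver in it as well. Since the adapter's definition does not appear in this series, here is a minimal sketch of what such a wrapper can look like (an assumption for illustration; the crate's actual implementation may differ):

    use futures::stream::{Stream, StreamExt};
    use std::{pin::Pin, task::{Context, Poll}};

    /// Sketch of a pausable stream adapter: while paused, `poll_next`
    /// returns `Pending` without polling the inner stream, so queued
    /// items stay queued and senders experience back-pressure.
    struct Pausable<S> {
        paused: bool,
        inner: S
    }

    impl<S: Stream + Unpin> Pausable<S> {
        fn new(inner: S) -> Self { Pausable { paused: false, inner } }
        fn is_paused(&self) -> bool { self.paused }
        fn pause(&mut self) { self.paused = true }
        fn unpause(&mut self) { self.paused = false }
        /// Access the inner stream, e.g. to close it while paused.
        fn stream(&mut self) -> &mut S { &mut self.inner }
    }

    impl<S: Stream + Unpin> Stream for Pausable<S> {
        type Item = S::Item;
        fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
            let this = Pin::into_inner(self);
            if this.paused {
                // No waker is registered here on purpose: the connection's
                // event loop is woken by its other futures and polls this
                // stream again after `unpause`.
                return Poll::Pending
            }
            this.inner.poll_next_unpin(cx)
        }
    }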
Thereby propagate pending write back-pressure via the paused command channels, rather than by "blocking" `Connection::next()` itself until a write completes. Notably, no new buffers are introduced. When a frame write cannot complete, command channels are paused and I/O reads can continue while the write is pending. The paused command channels exercise write back-pressure on the streams and the API, and ensure that the only frames we still try to send are those produced in response to reading a frame. Such a send does wait for the completion of the prior pending send operation, if any, but because it happens as a result of a read, the remote can in turn write again, should it have been waiting to do so before being able to read again. Unexpected write deadlocks between peers that otherwise read and write concurrently on their substreams are thus avoided. --- src/connection.rs | 174 +++++++++++++++++++++++++++++++++++--------- src/frame.rs | 12 ++- src/frame/header.rs | 9 ++- src/frame/io.rs | 157 +++++++++++++++++++++++++++++++++++---- src/tests.rs | 14 +++- tests/concurrent.rs | 1 + 6 files changed, 312 insertions(+), 55 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index b7581f72..60bed2cf 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -106,7 +106,7 @@ use futures::{ stream::{Fuse, FusedStream} }; use nohash_hasher::IntMap; -use std::{fmt, sync::Arc, task::{Context, Poll}}; +use std::{fmt, io, sync::Arc, task::{Context, Poll}}; pub use control::Control; pub use stream::{Packet, State, Stream}; @@ -168,7 +168,7 @@ pub struct Connection { control_sender: mpsc::Sender, control_receiver: Pausable>, stream_sender: mpsc::Sender, - stream_receiver: mpsc::Receiver, + stream_receiver: Pausable>, garbage: Vec, // see `Connection::garbage_collect()` shutdown: Shutdown, is_closed: bool @@ -282,7 +282,7 @@ impl Connection { control_sender, control_receiver: Pausable::new(control_receiver), stream_sender, - stream_receiver, + stream_receiver: Pausable::new(stream_receiver), next_id: match mode { Mode::Client => 1, Mode::Server => 2 @@ -348,7 +348,7 @@ impl Connection { // Close and drain the stream command receiver. if !self.stream_receiver.is_terminated() { - self.stream_receiver.close(); + self.stream_receiver.stream().close(); while let Some(_cmd) = self.stream_receiver.next().await { // drop it } @@ -382,12 +382,37 @@ impl Connection { let mut num_terminated = 0; - let mut next_inbound_frame = + // Determine if the stream command receiver is paused, which + // is only the case if we have pending writes, i.e. back-pressure + // from the underlying connection. + let stream_receiver_paused = self.stream_receiver.is_paused(); + + let mut next_io_event = if self.socket.is_terminated() { num_terminated += 1; Either::Left(future::pending()) } else { - Either::Right(self.socket.try_next().err_into()) + let socket = &mut self.socket; + let io = future::poll_fn(move |cx| { + // Progress writing. + match socket.get_mut().poll_send::<()>(cx, None) { + frame::PollSend::Pending(_) => {} + frame::PollSend::Ready(res) => { + res.or(Err(ConnectionError::Closed))?; + if stream_receiver_paused { + return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) + } + } + } + // Progress reading.
+ let next_frame = match futures::ready!(socket.poll_next_unpin(cx)) { + None => Ok(None), + Some(Err(e)) => Err(e.into()), + Some(Ok(f)) => Ok(Some(f)) + }; + Poll::Ready(Ok(IoEvent::Inbound(next_frame))) + }); + Either::Right(io) }; let mut next_stream_command = @@ -415,14 +440,14 @@ impl Connection { future::poll_fn(move |cx: &mut Context| { let a = next_stream_command.poll_unpin(cx); let b = next_control_command.poll_unpin(cx); - let c = next_inbound_frame.poll_unpin(cx); + let c = next_io_event.poll_unpin(cx); if a.is_pending() && b.is_pending() && c.is_pending() { return Poll::Pending } Poll::Ready((a, b, c)) }); - let (stream_command, control_command, inbound_frame) = next_item.await; + let (stream_command, control_command, io_event) = next_item.await; if let Poll::Ready(cmd) = control_command { self.on_control_command(cmd).await? @@ -432,14 +457,25 @@ impl Connection { self.on_stream_command(cmd).await? } - if let Poll::Ready(frame) = inbound_frame { - if let Some(stream) = self.on_frame(frame).await? { - self.socket.get_mut().flush().await.or(Err(ConnectionError::Closed))?; - return Ok(Some(stream)) + match io_event? { + Poll::Ready(IoEvent::OutboundReady) => { + self.stream_receiver.unpause(); + // Only unpause the control command receiver if we're not + // shutting down already. + if let Shutdown::NotStarted = self.shutdown { + self.control_receiver.unpause(); + } + } + Poll::Ready(IoEvent::Inbound(frame)) => { + if let Some(stream) = self.on_frame(frame).await? { + self.flush_nowait().await.or(Err(ConnectionError::Closed))?; + return Ok(Some(stream)) + } } + Poll::Pending => {} } - self.socket.get_mut().flush().await.or(Err(ConnectionError::Closed))? + self.flush_nowait().await.or(Err(ConnectionError::Closed))?; } } @@ -467,8 +503,8 @@ impl Connection { if extra_credit > 0 { let mut frame = Frame::window_update(id, extra_credit); frame.header_mut().syn(); - log::trace!("{}: sending initial {}", self.id, frame.header()); - self.socket.get_mut().send(&frame).await.or(Err(ConnectionError::Closed))? + log::trace!("{}/{}: sending initial {}", self.id, id, frame.header()); + self.send(frame).await.or(Err(ConnectionError::Closed))? } let stream = { let config = self.config.clone(); @@ -489,7 +525,8 @@ impl Connection { let mut header = Header::data(id, 0); header.rst(); let frame = Frame::new(header); - self.socket.get_mut().send(&frame).await.or(Err(ConnectionError::Closed))? + log::trace!("{}/{}: sending reset", self.id, id); + self.send(frame).await.or(Err(ConnectionError::Closed))? } } } @@ -499,12 +536,14 @@ impl Connection { let _ = reply.send(()); return Ok(()) } - // Handle initial close command. + // Handle initial close command by pausing the control command + // receiver and closing the stream command receiver. I.e. we + // wait for the stream commands to drain. debug_assert!(self.shutdown.has_not_started()); self.shutdown = Shutdown::InProgress(reply); log::trace!("{}: shutting down connection", self.id); self.control_receiver.pause(); - self.stream_receiver.close() + self.stream_receiver.stream().close() } None => { // We only get here after the whole connection shutdown is complete. @@ -518,31 +557,35 @@ impl Connection { Ok(()) } + async fn send(&mut self, f: impl Into>) -> Result<()> { + send(self.id, &mut self.socket, &mut self.stream_receiver, &mut self.control_receiver, f).await + } + /// Process a command from one of our `Stream`s. 
async fn on_stream_command(&mut self, cmd: Option) -> Result<()> { match cmd { Some(StreamCommand::SendFrame(frame)) => { - log::trace!("{}: sending: {}", self.id, frame.header()); - self.socket.get_mut().send(&frame).await.or(Err(ConnectionError::Closed))? + log::trace!("{}/{}: sending: {}", self.id, frame.header().stream_id(), frame.header()); + self.send(frame).await.or(Err(ConnectionError::Closed))? } Some(StreamCommand::CloseStream { id, ack }) => { - log::trace!("{}: closing stream {} of {}", self.id, id, self); + log::trace!("{}/{}: sending close", self.id, id); let mut header = Header::data(id, 0); header.fin(); if ack { header.ack() } let frame = Frame::new(header); - self.socket.get_mut().send(&frame).await.or(Err(ConnectionError::Closed))? + self.send(frame).await.or(Err(ConnectionError::Closed))? } None => { // We only get to this point when `self.stream_receiver` // was closed which only happens in response to a close control // command. Now that we are at the end of the stream command queue, // we send the final term frame to the remote and complete the - // closure. + // closure by closing the already paused control command receiver. debug_assert!(self.shutdown.is_in_progress()); - log::debug!("{}: closing {}", self.id, self); + log::debug!("{}: sending term", self.id); let frame = Frame::term(); - self.socket.get_mut().send(&frame).await.or(Err(ConnectionError::Closed))?; + self.send(frame).await.or(Err(ConnectionError::Closed))?; let shutdown = std::mem::replace(&mut self.shutdown, Shutdown::Complete); if let Shutdown::InProgress(tx) = shutdown { // Inform the `Control` that initiated the shutdown. @@ -578,25 +621,25 @@ impl Connection { log::trace!("{}: new inbound {} of {}", self.id, stream, self); if let Some(f) = update { log::trace!("{}/{}: sending update", self.id, f.header().stream_id()); - self.socket.get_mut().send(&f).await.or(Err(ConnectionError::Closed))? + self.send(f).await.or(Err(ConnectionError::Closed))? } return Ok(Some(stream)) } Action::Update(f) => { log::trace!("{}/{}: sending update", self.id, f.header().stream_id()); - self.socket.get_mut().send(&f).await.or(Err(ConnectionError::Closed))? + self.send(f).await.or(Err(ConnectionError::Closed))? } Action::Ping(f) => { log::trace!("{}/{}: pong", self.id, f.header().stream_id()); - self.socket.get_mut().send(&f).await.or(Err(ConnectionError::Closed))? + self.send(f).await.or(Err(ConnectionError::Closed))? } Action::Reset(f) => { log::trace!("{}/{}: sending reset", self.id, f.header().stream_id()); - self.socket.get_mut().send(&f).await.or(Err(ConnectionError::Closed))? + self.send(f).await.or(Err(ConnectionError::Closed))? } Action::Terminate(f) => { log::trace!("{}: sending term", self.id); - self.socket.get_mut().send(&f).await.or(Err(ConnectionError::Closed))? + self.send(f).await.or(Err(ConnectionError::Closed))? } } Ok(None) @@ -605,7 +648,7 @@ impl Connection { log::debug!("{}: socket eof", self.id); Err(ConnectionError::Closed) } - Err(e) if e.io_kind() == Some(std::io::ErrorKind::ConnectionReset) => { + Err(e) if e.io_kind() == Some(io::ErrorKind::ConnectionReset) => { log::debug!("{}: connection reset", self.id); Err(ConnectionError::Closed) } @@ -837,6 +880,14 @@ impl Connection { } } + /// Try to flush the underlying I/O stream, without waiting for it. + async fn flush_nowait(&mut self) -> Result<()> { + future::poll_fn(|cx| { + let _ = self.socket.get_mut().poll_flush(cx)?; + Poll::Ready(Ok(())) + }).await + } + /// Remove stale streams and send necessary messages to the remote. 
/// /// If we ever get async destructors we can replace this with streams @@ -902,8 +953,9 @@ impl Connection { frame }; if let Some(f) = frame { - log::trace!("{}: sending: {}", self.id, f.header()); - self.socket.get_mut().send(&f).await.or(Err(ConnectionError::Closed))? + log::trace!("{}/{}: sending final frame: {}", self.id, stream_id, f.header()); + send(conn_id, &mut self.socket, &mut self.stream_receiver, + &mut self.control_receiver, f).await.or(Err(ConnectionError::Closed))? } self.garbage.push(stream_id) } @@ -936,6 +988,62 @@ impl Drop for Connection { } } +/// Events related to reading from or writing to the underlying socket. +enum IoEvent { + /// A new inbound frame arrived. + Inbound(Result>>), + /// We can continue sending frames after having to pause earlier. + OutboundReady, +} + +/// Sends a frame on the given `io` stream. +/// +/// If the frame is taken by `io` but cannot be fully sent, the command +/// receivers are paused, without waiting for completion. +/// +/// If a prior send operation is still pending, waits for its completion. +async fn send( + id: Id, + io: &mut Fuse>, + stream_receiver: &mut Pausable>, + control_receiver: &mut Pausable>, + frame: impl Into> +) -> Result<()> { + let mut frame = Some(frame.into()); + future::poll_fn(move |cx| { + let next = frame.take().expect("Frame has not yet been taken by `io`."); + match io.get_mut().poll_send(cx, Some(next)) { + frame::PollSend::Pending(Some(f)) => { + debug_assert!(stream_receiver.is_paused()); + log::debug!("{}: send: Prior write pending. Waiting.", id); + frame = Some(f); + return Poll::Pending + } + frame::PollSend::Pending(None) => { + // The frame could not yet fully be written to the underlying + // socket, so we pause the processing of commands in order to + // pause writing while still being able to read from the socket. + // The only frames that may still be sent while commands are paused + // are as a reaction to frames being read, which in turn allows + // the remote to make progress eventually, if it should + // currently be blocked on writing. In this way unnecessary + // deadlocks between peers blocked on writing are avoided. + log::trace!("{}: send: Write pending. Continuing with paused command streams.", id); + stream_receiver.pause(); + control_receiver.pause(); + return Poll::Ready(Ok(())) + } + frame::PollSend::Ready(Err(e)) => { + return Poll::Ready(Err(e.into())) + } + frame::PollSend::Ready(Ok(())) => { + // Note: We leave the unpausing of the command streams to `Connection::next()`. + return Poll::Ready(Ok(())) + } + } + }).await +} + /// Turn a Yamux [`Connection`] into a [`futures::Stream`]. pub fn into_stream(c: Connection) -> impl futures::stream::Stream> where diff --git a/src/frame.rs b/src/frame.rs index 1c4c6d3c..95395dac 100644 --- a/src/frame.rs +++ b/src/frame.rs @@ -15,7 +15,7 @@ use futures::future::Either; use header::{Header, StreamId, Data, WindowUpdate, GoAway, Ping}; use std::{convert::TryInto, num::TryFromIntError}; -pub(crate) use io::Io; +pub(crate) use io::{Io, PollSend}; pub use io::FrameDecodeError; /// A Yamux message frame consisting of header and body. 
@@ -49,6 +49,15 @@ impl Frame { } } +impl From> for Frame<()> { + fn from(f: Frame) -> Frame<()> { + Frame { + header: f.header.into(), + body: f.body + } + } +} + impl Frame<()> { pub(crate) fn into_data(self) -> Frame { Frame { header: self.header.into_data(), body: self.body } @@ -117,4 +126,3 @@ impl Frame { } } } - diff --git a/src/frame/header.rs b/src/frame/header.rs index 1d356e9f..b3e45c0d 100644 --- a/src/frame/header.rs +++ b/src/frame/header.rs @@ -77,6 +77,12 @@ impl Header { } } +impl From> for Header<()> { + fn from(h: Header) -> Header<()> { + h.cast() + } +} + impl Header<()> { pub(crate) fn into_data(self) -> Header { debug_assert_eq!(self.tag, Tag::Data); @@ -242,12 +248,13 @@ pub trait HasRst: private::Sealed {} impl HasRst for Data {} impl HasRst for WindowUpdate {} -mod private { +pub(super) mod private { pub trait Sealed {} impl Sealed for super::Data {} impl Sealed for super::WindowUpdate {} impl Sealed for super::Ping {} + impl Sealed for super::GoAway {} impl Sealed for super::Either {} } diff --git a/src/frame/io.rs b/src/frame/io.rs index 8540905f..f909aec8 100644 --- a/src/frame/io.rs +++ b/src/frame/io.rs @@ -18,7 +18,8 @@ use super::{Frame, header::{self, HeaderDecodeError}}; pub(crate) struct Io { id: Id, io: T, - state: ReadState, + read_state: ReadState, + write_state: WriteState, max_body_len: usize } @@ -27,24 +28,142 @@ impl Io { Io { id, io, - state: ReadState::Init, + read_state: ReadState::Init, + write_state: WriteState::Init, max_body_len: max_frame_body_len } } - pub(crate) async fn send(&mut self, frame: &Frame) -> io::Result<()> { - let header = header::encode(&frame.header); - self.io.write_all(&header).await?; - self.io.write_all(&frame.body).await + /// Continue writing on the underlying connection, possibly with a new frame. + /// + /// If there is no data pending to be written from a prior frame, an attempt + /// is made at writing the new frame, if any. + /// + /// If there is data pending to be written from a prior frame, an attempt is made to + /// complete the pending writes before accepting the new frame, if any. If + /// the new frame cannot be accepted due to pending writes, it is returned. 
+ pub(crate) fn poll_send(&mut self, cx: &mut Context<'_>, mut frame: Option>) + -> PollSend> + { + loop { + match &mut self.write_state { + WriteState::Init => { + if let Some(f) = frame.take() { + let header = header::encode(&f.header); + let buffer = f.body; + self.write_state = WriteState::Header { header, buffer, offset: 0 }; + } else { + return PollSend::Ready(Ok(())) + } + } + WriteState::Header { header, buffer, ref mut offset } => { + match Pin::new(&mut self.io).poll_write(cx, &header[*offset ..]) { + Poll::Pending => { + if let Some(f) = frame.take() { + return PollSend::Pending(Some(f)) + } + return PollSend::Pending(None) + } + Poll::Ready(Err(e)) => { + if let Some(f) = frame.take() { + return PollSend::Pending(Some(f)) + } + return PollSend::Ready(Err(e)) + } + Poll::Ready(Ok(n)) => { + if n == 0 { + return PollSend::Ready(Err(io::ErrorKind::WriteZero.into())) + } + *offset += n; + if *offset == header.len() { + if buffer.len() > 0 { + let buffer = std::mem::take(buffer); + self.write_state = WriteState::Body { buffer, offset: 0 }; + } else { + self.write_state = WriteState::Init; + } + } + } + } + } + WriteState::Body { buffer, ref mut offset } => { + match Pin::new(&mut self.io).poll_write(cx, &buffer[*offset ..]) { + Poll::Pending => { + if let Some(f) = frame.take() { + return PollSend::Pending(Some(f)) + } + return PollSend::Pending(None) + } + Poll::Ready(Err(e)) => { + if let Some(f) = frame.take() { + return PollSend::Pending(Some(f)) + } + return PollSend::Ready(Err(e)) + } + Poll::Ready(Ok(n)) => { + if n == 0 { + return PollSend::Ready(Err(io::ErrorKind::WriteZero.into())) + } + *offset += n; + if *offset == buffer.len() { + self.write_state = WriteState::Init; + } + } + } + } + } + } } - pub(crate) async fn flush(&mut self) -> io::Result<()> { - self.io.flush().await + pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + match self.poll_send(cx, Option::>::None) { + PollSend::Pending(_) => return Poll::Pending, + PollSend::Ready(r) => r? + } + Pin::new(&mut self.io).poll_flush(cx) } pub(crate) async fn close(&mut self) -> io::Result<()> { + future::poll_fn(|cx| self.poll_flush(cx)).await?; self.io.close().await } + + #[cfg(test)] + async fn flush(&mut self) -> io::Result<()> { + future::poll_fn(|cx| self.poll_flush(cx)).await + } + + #[cfg(test)] + async fn send(&mut self, frame: Frame) -> io::Result<()> { + let mut frame = Some(frame); + future::poll_fn(|cx| { + match self.poll_send(cx, Some(frame.take().expect("`frame` not yet taken"))) { + PollSend::Pending(f) => { frame = f; return Poll::Pending } + PollSend::Ready(r) => return Poll::Ready(r) + } + }).await + } +} + +/// The result of [`Io::poll_send`]. +pub enum PollSend { + Pending(Option>), + Ready(B), +} + +/// The stages of writing a new `Frame`. +#[derive(Debug)] +enum WriteState { + Init, + Header { + header: [u8; header::HEADER_SIZE], + buffer: Vec, + offset: usize + }, + Body { + buffer: Vec, + offset: usize + } } /// The stages of reading a new `Frame`. 
@@ -70,10 +189,10 @@ impl Stream for Io { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = &mut *self; loop { - log::trace!("{}: read: {:?}", this.id, this.state); - match this.state { + log::debug!("{}: poll_next: read: {:?}", this.id, this.read_state); + match this.read_state { ReadState::Init => { - this.state = ReadState::Header { + this.read_state = ReadState::Header { offset: 0, buffer: [0; header::HEADER_SIZE] }; @@ -89,7 +208,7 @@ impl Stream for Io { log::trace!("{}: read: {}", this.id, header); if header.tag() != header::Tag::Data { - this.state = ReadState::Init; + this.read_state = ReadState::Init; return Poll::Ready(Some(Ok(Frame::new(header)))) } @@ -99,7 +218,7 @@ impl Stream for Io { return Poll::Ready(Some(Err(FrameDecodeError::FrameTooLarge(body_len)))) } - this.state = ReadState::Body { + this.read_state = ReadState::Body { header, offset: 0, buffer: vec![0; body_len] @@ -124,19 +243,25 @@ impl Stream for Io { let body_len = header.len().val() as usize; if *offset == body_len { + log::debug!("Read complete frame of {} bytes", offset); let h = header.clone(); let v = std::mem::take(buffer); - this.state = ReadState::Init; + this.read_state = ReadState::Init; return Poll::Ready(Some(Ok(Frame { header: h, body: v }))) } + log::debug!("Trying to read body bytes ..."); + let buf = &mut buffer[*offset .. body_len]; match ready!(Pin::new(&mut this.io).poll_read(cx, buf))? { 0 => { let e = FrameDecodeError::Io(io::ErrorKind::UnexpectedEof.into()); return Poll::Ready(Some(Err(e))) } - n => *offset += n + n => { + log::debug!("Read {} body bytes", n); + *offset += n + } } } } @@ -235,7 +360,7 @@ mod tests { futures::executor::block_on(async move { let id = crate::connection::Id::random(); let mut io = Io::new(id, futures::io::Cursor::new(Vec::new()), f.body.len()); - if io.send(&f).await.is_err() { + if io.send(f.clone()).await.is_err() { return false } if io.flush().await.is_err() { diff --git a/src/tests.rs b/src/tests.rs index d141e886..dc98e2e4 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -211,7 +211,6 @@ fn prop_send_recv_half_closed() { // // Ignored for now as the current implementation is prone to the deadlock tested below. #[test] -#[ignore] fn write_deadlock() { let _ = env_logger::try_init(); let mut pool = LocalPool::new(); @@ -219,12 +218,21 @@ fn write_deadlock() { // We make the message to transmit large enough s.t. the "server" // is forced to start writing (i.e. echoing) the bytes before // having read the entire payload. - let msg = vec![1u8; 1024 * 1024]; + let msg = vec![1u8; 1 * 1024 * 1024]; + + // We choose a "connection capacity" that is artificially below + // the size of a receive window. If it were equal or greater, + // multiple concurrently writing streams would be needed to non-deterministically + // provoke the write deadlock. This is supposed to reflect the + // fact that the sum of receive windows of all open streams can easily + // be larger than the send capacity of the connection at any point in time. + // Using such a low capacity here therefore yields a more reproducible test. + let capacity = 1024; // Create a bounded channel representing the underlying "connection". // Each endpoint gets a name and a bounded capacity for its outbound // channel (which is the other's inbound channel). 
- let (server_endpoint, client_endpoint) = bounded::channel(("S", 1024), ("C", 1024)); + let (server_endpoint, client_endpoint) = bounded::channel(("S", capacity), ("C", capacity)); // Create and spawn a "server" that echoes every message back to the client. let server = Connection::new(server_endpoint, Config::default(), Mode::Server); diff --git a/tests/concurrent.rs b/tests/concurrent.rs index 6b288daa..5f8aee6e 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -68,6 +68,7 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { #[tokio::test] async fn concurrent_streams() { + let _ = env_logger::try_init(); let data = Arc::new(vec![0x42; 100 * 1024]); let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); roundtrip(addr, 1000, data).await From b7a8b694d700e9716b6a6ea56881871b8acbff3b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Feb 2021 07:00:58 +0000 Subject: [PATCH 25/43] Update env_logger requirement from 0.6 to 0.8 Updates the requirements on [env_logger](https://github.com/env-logger-rs/env_logger) to permit the latest version. - [Release notes](https://github.com/env-logger-rs/env_logger/releases) - [Changelog](https://github.com/env-logger-rs/env_logger/blob/master/CHANGELOG.md) - [Commits](https://github.com/env-logger-rs/env_logger/compare/v0.6.0...v0.8.3) Signed-off-by: dependabot[bot] --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index f6b5cbee..72f5a088 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ static_assertions = "1" [dev-dependencies] anyhow = "1" criterion = "0.3" -env_logger = "0.6" +env_logger = "0.8" futures = "0.3.4" quickcheck = "1.0" tokio = { version = "1.0", features = ["net", "rt-multi-thread", "macros", "time"] } From 5ae540b69d9e8098acf7c631aaeacced832d89bc Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Mon, 15 Feb 2021 11:46:05 +0100 Subject: [PATCH 26/43] Prepare v0.9 --- CHANGELOG.md | 19 +++++++++++++++++++ Cargo.toml | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16807cf0..cebc558e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +# 0.9.0 + +- Force-split larger frames, for better interleaving of + reads and writes between different substreams and to avoid + single, large writes. By default frames are capped at, and + thus split at, `16KiB`, which can be adjusted by a new + configuration option, if necessary. + +- Send window updates earlier, when half of the window has + been consumed, to minimise pauses due to transmission delays, + particularly if there is just a single dominant substream. + +- Avoid possible premature stream resets of streams that + have been properly closed and already dropped but receive + window update or other frames while the remaining buffered + frames are still sent out. Incoming frames for unknown streams + are now ignored, instead of triggering a stream reset for the + remote. + # 0.8.0 - Upgrade step 4 of 4. This version always assumes the new semantics and diff --git a/Cargo.toml b/Cargo.toml index 72f5a088..5be904fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "yamux" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] license = "Apache-2.0 OR MIT" description = "Multiplexer over reliable, ordered connections" From eb6c73201822d2835320414b133ad6c36a5bbafb Mon Sep 17 00:00:00 2001 From: "Roman S. 
Borschel" Date: Mon, 15 Feb 2021 15:32:07 +0100 Subject: [PATCH 27/43] Implement Sink for Io. --- Cargo.toml | 2 +- src/connection.rs | 48 +++++---------- src/frame.rs | 2 +- src/frame/io.rs | 152 +++++++++++++++++----------------------------- 4 files changed, 73 insertions(+), 131 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f6b5cbee..8f8c0e39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ readme = "README.md" edition = "2018" [dependencies] -futures = { version = "0.3.4", default-features = false, features = ["std"] } +futures = { version = "0.3.12", default-features = false, features = ["std"] } log = "0.4.8" nohash-hasher = "0.2" parking_lot = "0.11" diff --git a/src/connection.rs b/src/connection.rs index 60bed2cf..429e19bc 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -103,7 +103,8 @@ use futures::{ channel::{mpsc, oneshot}, future::{self, Either}, prelude::*, - stream::{Fuse, FusedStream} + stream::{Fuse, FusedStream}, + sink::SinkExt, }; use nohash_hasher::IntMap; use std::{fmt, io, sync::Arc, task::{Context, Poll}}; @@ -395,9 +396,9 @@ impl Connection { let socket = &mut self.socket; let io = future::poll_fn(move |cx| { // Progress writing. - match socket.get_mut().poll_send::<()>(cx, None) { - frame::PollSend::Pending(_) => {} - frame::PollSend::Ready(res) => { + match socket.get_mut().poll_ready_unpin(cx) { + Poll::Pending => {} + Poll::Ready(res) => { res.or(Err(ConnectionError::Closed))?; if stream_receiver_paused { return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) @@ -883,7 +884,7 @@ impl Connection { /// Try to flush the underlying I/O stream, without waiting for it. async fn flush_nowait(&mut self) -> Result<()> { future::poll_fn(|cx| { - let _ = self.socket.get_mut().poll_flush(cx)?; + let _ = self.socket.get_mut().poll_flush_unpin(cx)?; Poll::Ready(Ok(())) }).await } @@ -1009,37 +1010,18 @@ async fn send( control_receiver: &mut Pausable>, frame: impl Into> ) -> Result<()> { - let mut frame = Some(frame.into()); - future::poll_fn(move |cx| { - let next = frame.take().expect("Frame has not yet been taken by `io`."); - match io.get_mut().poll_send(cx, Some(next)) { - frame::PollSend::Pending(Some(f)) => { - debug_assert!(stream_receiver.is_paused()); - log::debug!("{}: send: Prior write pending. Waiting.", id); - frame = Some(f); - return Poll::Pending - } - frame::PollSend::Pending(None) => { - // The frame could not yet fully be written to the underlying - // socket, so we pause the processing of commands in order to - // pause writing while still being able to read from the socket. - // The only frames that may still be sent while commands are paused - // are as a reaction to frames being read, which in turn allows - // the remote to make progress eventually, if it should - // currently be blocked on writing. In this way unnecessary - // deadlocks between peers blocked on writing are avoided. - log::trace!("{}: send: Write pending. Continuing with paused command streams.", id); + SinkExt::feed(io.get_mut(), frame.into()).await?; + // Check if the write "goes through" or is pending due to back-pressure + // from the underlying socket. + future::poll_fn(|cx| { + match io.get_mut().poll_ready_unpin(cx)? { + Poll::Pending => { + log::debug!("{}: send: Write pending. 
Continuing with paused command streams.", id); stream_receiver.pause(); control_receiver.pause(); - return Poll::Ready(Ok(())) - } - frame::PollSend::Ready(Err(e)) => { - return Poll::Ready(Err(e.into())) - } - frame::PollSend::Ready(Ok(())) => { - // Note: We leave the unpausing of the command streams to `Connection::next()`. - return Poll::Ready(Ok(())) + return Poll::Ready(Result::Ok(())) } + Poll::Ready(()) => return Poll::Ready(Ok(())) } }).await } diff --git a/src/frame.rs b/src/frame.rs index 95395dac..0e93e5e1 100644 --- a/src/frame.rs +++ b/src/frame.rs @@ -15,7 +15,7 @@ use futures::future::Either; use header::{Header, StreamId, Data, WindowUpdate, GoAway, Ping}; use std::{convert::TryInto, num::TryFromIntError}; -pub(crate) use io::{Io, PollSend}; +pub(crate) use io::Io; pub use io::FrameDecodeError; /// A Yamux message frame consisting of header and body. diff --git a/src/frame/io.rs b/src/frame/io.rs index f909aec8..1e09d2fb 100644 --- a/src/frame/io.rs +++ b/src/frame/io.rs @@ -33,80 +33,65 @@ impl Io { max_body_len: max_frame_body_len } } +} + +/// The stages of writing a new `Frame`. +#[derive(Debug)] +enum WriteState { + Init, + Header { + header: [u8; header::HEADER_SIZE], + buffer: Vec, + offset: usize + }, + Body { + buffer: Vec, + offset: usize + } +} + +impl Sink> for Io { + type Error = io::Error; - /// Continue writing on the underlying connection, possibly with a new frame. - /// - /// If there is no data pending to be written from a prior frame, an attempt - /// is made at writing the new frame, if any. - /// - /// If there is data pending to be written from a prior frame, an attempt is made to - /// complete the pending writes before accepting the new frame, if any. If - /// the new frame cannot be accepted due to pending writes, it is returned. 
- pub(crate) fn poll_send(&mut self, cx: &mut Context<'_>, mut frame: Option>) - -> PollSend> - { + fn poll_ready( + self: Pin<&mut Self>, + cx: &mut Context<'_> + ) -> Poll> { + let this = Pin::into_inner(self); loop { - match &mut self.write_state { - WriteState::Init => { - if let Some(f) = frame.take() { - let header = header::encode(&f.header); - let buffer = f.body; - self.write_state = WriteState::Header { header, buffer, offset: 0 }; - } else { - return PollSend::Ready(Ok(())) - } - } + match &mut this.write_state { + WriteState::Init => return Poll::Ready(Ok(())), WriteState::Header { header, buffer, ref mut offset } => { - match Pin::new(&mut self.io).poll_write(cx, &header[*offset ..]) { - Poll::Pending => { - if let Some(f) = frame.take() { - return PollSend::Pending(Some(f)) - } - return PollSend::Pending(None) - } - Poll::Ready(Err(e)) => { - if let Some(f) = frame.take() { - return PollSend::Pending(Some(f)) - } - return PollSend::Ready(Err(e)) - } + match Pin::new(&mut this.io).poll_write(cx, &header[*offset ..]) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), Poll::Ready(Ok(n)) => { if n == 0 { - return PollSend::Ready(Err(io::ErrorKind::WriteZero.into())) + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())) } *offset += n; if *offset == header.len() { if buffer.len() > 0 { let buffer = std::mem::take(buffer); - self.write_state = WriteState::Body { buffer, offset: 0 }; + this.write_state = WriteState::Body { buffer, offset: 0 }; } else { - self.write_state = WriteState::Init; + this.write_state = WriteState::Init; } } } } } WriteState::Body { buffer, ref mut offset } => { - match Pin::new(&mut self.io).poll_write(cx, &buffer[*offset ..]) { - Poll::Pending => { - if let Some(f) = frame.take() { - return PollSend::Pending(Some(f)) - } - return PollSend::Pending(None) - } - Poll::Ready(Err(e)) => { - if let Some(f) = frame.take() { - return PollSend::Pending(Some(f)) - } - return PollSend::Ready(Err(e)) - } + match Pin::new(&mut this.io).poll_write(cx, &buffer[*offset ..]) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), Poll::Ready(Ok(n)) => { if n == 0 { - return PollSend::Ready(Err(io::ErrorKind::WriteZero.into())) + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())) } *offset += n; if *offset == buffer.len() { - self.write_state = WriteState::Init; + this.write_state = WriteState::Init; } } } @@ -115,54 +100,29 @@ impl Io { } } - pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { - match self.poll_send(cx, Option::>::None) { - PollSend::Pending(_) => return Poll::Pending, - PollSend::Ready(r) => r? 
- } - Pin::new(&mut self.io).poll_flush(cx) - } - - pub(crate) async fn close(&mut self) -> io::Result<()> { - future::poll_fn(|cx| self.poll_flush(cx)).await?; - self.io.close().await + fn start_send(self: Pin<&mut Self>, f: Frame<()>) -> Result<(), Self::Error> { + let header = header::encode(&f.header); + let buffer = f.body; + self.get_mut().write_state = WriteState::Header { header, buffer, offset: 0 }; + Ok(()) } - #[cfg(test)] - async fn flush(&mut self) -> io::Result<()> { - future::poll_fn(|cx| self.poll_flush(cx)).await + fn poll_flush( + self: Pin<&mut Self>, + cx: &mut Context<'_> + ) -> Poll> { + let this = Pin::into_inner(self); + ready!(this.poll_ready_unpin(cx))?; + Pin::new(&mut this.io).poll_flush(cx) } - #[cfg(test)] - async fn send(&mut self, frame: Frame) -> io::Result<()> { - let mut frame = Some(frame); - future::poll_fn(|cx| { - match self.poll_send(cx, Some(frame.take().expect("`frame` not yet taken"))) { - PollSend::Pending(f) => { frame = f; return Poll::Pending } - PollSend::Ready(r) => return Poll::Ready(r) - } - }).await - } -} - -/// The result of [`Io::poll_send`]. -pub enum PollSend { - Pending(Option>), - Ready(B), -} - -/// The stages of writing a new `Frame`. -#[derive(Debug)] -enum WriteState { - Init, - Header { - header: [u8; header::HEADER_SIZE], - buffer: Vec, - offset: usize - }, - Body { - buffer: Vec, - offset: usize + fn poll_close( + self: Pin<&mut Self>, + cx: &mut Context<'_> + ) -> Poll> { + let this = Pin::into_inner(self); + ready!(this.poll_ready_unpin(cx))?; + Pin::new(&mut this.io).poll_close(cx) } } From a4f871fde941819683bfb78f09d646e440afefed Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Wed, 17 Feb 2021 14:19:53 +0100 Subject: [PATCH 28/43] Some more polishing. --- src/connection.rs | 24 ++++++++++++++---------- src/frame/io.rs | 8 +++----- tests/concurrent.rs | 2 +- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index 429e19bc..816e13d5 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -395,14 +395,14 @@ impl Connection { } else { let socket = &mut self.socket; let io = future::poll_fn(move |cx| { - // Progress writing. - match socket.get_mut().poll_ready_unpin(cx) { - Poll::Pending => {} - Poll::Ready(res) => { + // If the stream command receiver was paused, we got + // back-pressure from the underlying I/O stream and + // must wait for the sink to be ready again for the + // next frame. + if stream_receiver_paused { + if let Poll::Ready(res) = socket.get_mut().poll_ready_unpin(cx) { res.or(Err(ConnectionError::Closed))?; - if stream_receiver_paused { - return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) - } + return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) } } // Progress reading. @@ -460,6 +460,7 @@ impl Connection { match io_event? { Poll::Ready(IoEvent::OutboundReady) => { + log::debug!("{}: Sink ready, unpausing command streams.", self.id); self.stream_receiver.unpause(); // Only unpause the control command receiver if we're not // shutting down already. @@ -756,7 +757,8 @@ impl Connection { } } } else { - log::debug!("{}/{}: data for unknown stream, ignoring", self.id, stream_id); + log::trace!("{}/{}: data frame for unknown stream, possibly dropped earlier: {:?}", + self.id, stream_id, frame); // We do not consider this a protocol violation and thus do not send a stream reset // because we may still be processing pending `StreamCommand`s of this stream that were // sent before it has been dropped and "garbage collected". 
Such a stream reset would @@ -826,7 +828,8 @@ impl Connection { w.wake() } } else { - log::debug!("{}/{}: window update for unknown stream, ignoring", self.id, stream_id); + log::trace!("{}/{}: window update for unknown stream, possibly dropped earlier: {:?}", + self.id, stream_id, frame); // We do not consider this a protocol violation and thus do not send a stream reset // because we may still be processing pending `StreamCommand`s of this stream that were // sent before it has been dropped and "garbage collected". Such a stream reset would @@ -849,7 +852,8 @@ impl Connection { hdr.ack(); return Action::Ping(Frame::new(hdr)) } - log::debug!("{}/{}: ping for unknown stream", self.id, stream_id); + log::trace!("{}/{}: ping for unknown stream, possibly dropped earlier: {:?}", + self.id, stream_id, frame); // We do not consider this a protocol violation and thus do not send a stream reset because // we may still be processing pending `StreamCommand`s of this stream that were sent before // it has been dropped and "garbage collected". Such a stream reset would interfere with the diff --git a/src/frame/io.rs b/src/frame/io.rs index 1e09d2fb..b5b6995a 100644 --- a/src/frame/io.rs +++ b/src/frame/io.rs @@ -149,7 +149,7 @@ impl Stream for Io { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = &mut *self; loop { - log::debug!("{}: poll_next: read: {:?}", this.id, this.read_state); + log::trace!("{}: poll_next: read: {:?}", this.id, this.read_state); match this.read_state { ReadState::Init => { this.read_state = ReadState::Header { @@ -203,15 +203,13 @@ impl Stream for Io { let body_len = header.len().val() as usize; if *offset == body_len { - log::debug!("Read complete frame of {} bytes", offset); + log::trace!("{}: Read frame of {} body bytes", this.id, body_len); let h = header.clone(); let v = std::mem::take(buffer); this.read_state = ReadState::Init; return Poll::Ready(Some(Ok(Frame { header: h, body: v }))) } - log::debug!("Trying to read body bytes ..."); - let buf = &mut buffer[*offset .. body_len]; match ready!(Pin::new(&mut this.io).poll_read(cx, buf))? { 0 => { @@ -219,7 +217,7 @@ impl Stream for Io { return Poll::Ready(Some(Err(e))) } n => { - log::debug!("Read {} body bytes", n); + log::trace!("{}: Read {} body bytes", this.id, n); *offset += n } } diff --git a/tests/concurrent.rs b/tests/concurrent.rs index 5f8aee6e..88eb4abc 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -69,7 +69,7 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { #[tokio::test] async fn concurrent_streams() { let _ = env_logger::try_init(); - let data = Arc::new(vec![0x42; 100 * 1024]); + let data = Arc::new(vec![0x42; 1024 * 1024]); let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); roundtrip(addr, 1000, data).await } From ca5910ac44752c3dcc0efa7e7a186d517cae9bf1 Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Wed, 17 Feb 2021 16:48:32 +0100 Subject: [PATCH 29/43] Set maximum split-send-size for concurrent test. 
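The concurrent test sends 1 MiB of payload per stream, so with the default 16 KiB split-send size each payload takes at least 64 data frames; at 256 KiB it takes as few as 4. A sketch of the configuration, using the `Config::set_split_send_size` setter introduced in [PATCH 23/43]:

    use yamux::Config;

    fn test_configs() -> (Config, Config) {
        // Cap outgoing data frames at 256 KiB instead of the default 16 KiB:
        // 1 MiB of payload then splits into as few as 4 frames per stream
        // rather than at least 64.
        let mut server_cfg = Config::default();
        server_cfg.set_split_send_size(256 * 1024);
        let client_cfg = server_cfg.clone();
        (server_cfg, client_cfg)
    }

`test_configs` is an illustrative helper, not part of the patch; the diff below applies the same setting inline in `roundtrip`.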
--- tests/concurrent.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/concurrent.rs b/tests/concurrent.rs index 88eb4abc..0b8a78ab 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -18,9 +18,13 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { let listener = TcpListener::bind(&address).await.expect("bind"); let address = listener.local_addr().expect("local address"); + let mut server_cfg = Config::default(); + server_cfg.set_split_send_size(256 * 1024); + let client_cfg = server_cfg.clone(); + let server = async move { let socket = listener.accept().await.expect("accept").0.compat(); - yamux::into_stream(Connection::new(socket, Config::default(), Mode::Server)) + yamux::into_stream(Connection::new(socket, server_cfg, Mode::Server)) .try_for_each_concurrent(None, |mut stream| async move { log::debug!("S: accepted new stream"); let mut len = [0; 4]; @@ -39,7 +43,7 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { let socket = TcpStream::connect(&address).await.expect("connect").compat(); let (tx, rx) = mpsc::unbounded(); - let conn = Connection::new(socket, Config::default(), Mode::Client); + let conn = Connection::new(socket, client_cfg, Mode::Client); let mut ctrl = conn.control(); task::spawn(yamux::into_stream(conn).for_each(|_| future::ready(()))); for _ in 0 .. nstreams { From 827ef8a6fb23ab2d8c4d4bc814a97055e7612747 Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Fri, 19 Feb 2021 20:38:09 +0100 Subject: [PATCH 30/43] Small refinements. --- src/connection.rs | 10 +++++----- src/frame/io.rs | 28 ++++++++++++++++++++-------- src/tests.rs | 2 +- tests/concurrent.rs | 15 ++++++++++++--- 4 files changed, 38 insertions(+), 17 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index 816e13d5..cd8141b0 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -400,7 +400,7 @@ impl Connection { // must wait for the sink to be ready again for the // next frame. if stream_receiver_paused { - if let Poll::Ready(res) = socket.get_mut().poll_ready_unpin(cx) { + if let Poll::Ready(res) = socket.poll_ready_unpin(cx) { res.or(Err(ConnectionError::Closed))?; return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) } @@ -460,7 +460,7 @@ impl Connection { match io_event? { Poll::Ready(IoEvent::OutboundReady) => { - log::debug!("{}: Sink ready, unpausing command streams.", self.id); + log::trace!("{}: Sink ready, unpausing command streams.", self.id); self.stream_receiver.unpause(); // Only unpause the control command receiver if we're not // shutting down already. @@ -628,7 +628,7 @@ impl Connection { return Ok(Some(stream)) } Action::Update(f) => { - log::trace!("{}/{}: sending update", self.id, f.header().stream_id()); + log::trace!("{}: sending update: {:?}", self.id, f.header()); self.send(f).await.or(Err(ConnectionError::Closed))? } Action::Ping(f) => { @@ -958,7 +958,7 @@ impl Connection { frame }; if let Some(f) = frame { - log::trace!("{}/{}: sending final frame: {}", self.id, stream_id, f.header()); + log::trace!("{}/{}: sending: {}", self.id, stream_id, f.header()); send(conn_id, &mut self.socket, &mut self.stream_receiver, &mut self.control_receiver, f).await.or(Err(ConnectionError::Closed))? } @@ -1020,7 +1020,7 @@ async fn send( future::poll_fn(|cx| { match io.get_mut().poll_ready_unpin(cx)? { Poll::Pending => { - log::debug!("{}: send: Write pending. Continuing with paused command streams.", id); + log::trace!("{}: send: Write pending. 
Continuing with paused command streams.", id); stream_receiver.pause(); control_receiver.pause(); return Poll::Ready(Result::Ok(())) diff --git a/src/frame/io.rs b/src/frame/io.rs index b5b6995a..d274b2cc 100644 --- a/src/frame/io.rs +++ b/src/frame/io.rs @@ -36,7 +36,6 @@ impl Io { } /// The stages of writing a new `Frame`. -#[derive(Debug)] enum WriteState { Init, Header { @@ -50,6 +49,22 @@ enum WriteState { } } +impl fmt::Debug for WriteState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + WriteState::Init => { + f.write_str("(WriteState::Init)") + } + WriteState::Header { offset, .. } => { + write!(f, "(WriteState::Header (offset {}))", offset) + } + WriteState::Body { offset, buffer } => { + write!(f, "(WriteState::Body (offset {}) (buffer-len {}))", offset, buffer.len()) + } + } + } +} + impl Sink> for Io { type Error = io::Error; @@ -59,6 +74,7 @@ impl Sink> for Io { ) -> Poll> { let this = Pin::into_inner(self); loop { + log::trace!("{}: write: {:?}", this.id, this.write_state); match &mut this.write_state { WriteState::Init => return Poll::Ready(Ok(())), WriteState::Header { header, buffer, ref mut offset } => { @@ -149,7 +165,7 @@ impl Stream for Io { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = &mut *self; loop { - log::trace!("{}: poll_next: read: {:?}", this.id, this.read_state); + log::trace!("{}: read: {:?}", this.id, this.read_state); match this.read_state { ReadState::Init => { this.read_state = ReadState::Header { @@ -203,7 +219,6 @@ impl Stream for Io { let body_len = header.len().val() as usize; if *offset == body_len { - log::trace!("{}: Read frame of {} body bytes", this.id, body_len); let h = header.clone(); let v = std::mem::take(buffer); this.read_state = ReadState::Init; @@ -216,10 +231,7 @@ impl Stream for Io { let e = FrameDecodeError::Io(io::ErrorKind::UnexpectedEof.into()); return Poll::Ready(Some(Err(e))) } - n => { - log::trace!("{}: Read {} body bytes", this.id, n); - *offset += n - } + n => *offset += n } } } @@ -234,7 +246,7 @@ impl fmt::Debug for ReadState { f.write_str("(ReadState::Init)") } ReadState::Header { offset, .. } => { - write!(f, "(ReadState::Header {})", offset) + write!(f, "(ReadState::Header (offset {}))", offset) } ReadState::Body { header, offset, buffer } => { write!(f, "(ReadState::Body (header {}) (offset {}) (buffer-len {}))", diff --git a/src/tests.rs b/src/tests.rs index dc98e2e4..854d1139 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -218,7 +218,7 @@ fn write_deadlock() { // We make the message to transmit large enough s.t. the "server" // is forced to start writing (i.e. echoing) the bytes before // having read the entire payload. - let msg = vec![1u8; 1 * 1024 * 1024]; + let msg = vec![1u8; 1024 * 1024]; // We choose a "connection capacity" that is artificially below // the size of a receive window. 
If it were equal or greater, diff --git a/tests/concurrent.rs b/tests/concurrent.rs index 0b8a78ab..e8bd4642 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -12,14 +12,23 @@ use futures::{channel::mpsc, prelude::*}; use std::{net::{Ipv4Addr, SocketAddr, SocketAddrV4}, sync::Arc}; use tokio::{net::{TcpStream, TcpListener}, task}; use tokio_util::compat::TokioAsyncReadCompatExt; -use yamux::{Config, Connection, Mode}; +use yamux::{Config, Connection, Mode, WindowUpdateMode}; async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { let listener = TcpListener::bind(&address).await.expect("bind"); let address = listener.local_addr().expect("local address"); let mut server_cfg = Config::default(); - server_cfg.set_split_send_size(256 * 1024); + // Use a large frame size to speed up the test. + server_cfg.set_split_send_size(usize::min(256 * 1024, data.len())); + // Use `WindowUpdateMode::OnRead` so window updates are sent by the + // `Stream`s and subject to backpressure from the stream command channel. Thus + // the `Connection` I/O loop will not need to send window updates + // directly as a result of reading a frame, which can otherwise + // lead to mutual write deadlocks if the socket send buffers are too small. + // With `OnRead` the socket send buffer can even be smaller than the size + // of a single frame for this test. + server_cfg.set_window_update_mode(WindowUpdateMode::OnRead); let client_cfg = server_cfg.clone(); let server = async move { @@ -73,7 +82,7 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { #[tokio::test] async fn concurrent_streams() { let _ = env_logger::try_init(); - let data = Arc::new(vec![0x42; 1024 * 1024]); + let data = Arc::new(vec![0x42; 128 * 1024]); let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); roundtrip(addr, 1000, data).await } From 037f07098fdf152f80a444693b50170d31105d31 Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Mon, 22 Feb 2021 10:48:41 +0100 Subject: [PATCH 31/43] Small test cleanup. --- tests/concurrent.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/concurrent.rs b/tests/concurrent.rs index e8bd4642..d0fb7b77 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -14,13 +14,15 @@ use tokio::{net::{TcpStream, TcpListener}, task}; use tokio_util::compat::TokioAsyncReadCompatExt; use yamux::{Config, Connection, Mode, WindowUpdateMode}; +const PAYLOAD_SIZE: usize = 128 * 1024; + async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { let listener = TcpListener::bind(&address).await.expect("bind"); let address = listener.local_addr().expect("local address"); let mut server_cfg = Config::default(); // Use a large frame size to speed up the test. - server_cfg.set_split_send_size(usize::min(256 * 1024, data.len())); + server_cfg.set_split_send_size(PAYLOAD_SIZE); // Use `WindowUpdateMode::OnRead` so window updates are sent by the // `Stream`s and subject to backpressure from the stream command channel. 
Thus // the `Connection` I/O loop will not need to send window updates @@ -82,7 +84,7 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { #[tokio::test] async fn concurrent_streams() { let _ = env_logger::try_init(); - let data = Arc::new(vec![0x42; 128 * 1024]); + let data = Arc::new(vec![0x42; PAYLOAD_SIZE]); let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); roundtrip(addr, 1000, data).await } From 34a7a9d17db6adc14a7895ad2a7c9066d4d07a7d Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 22 Feb 2021 13:25:04 +0100 Subject: [PATCH 32/43] src/connection: Remove Pausable wrapper around stream command --- src/connection.rs | 140 ++++++++++++++++++---------------------------- 1 file changed, 54 insertions(+), 86 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index cd8141b0..a0c27cf3 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -169,7 +169,7 @@ pub struct Connection { control_sender: mpsc::Sender, control_receiver: Pausable>, stream_sender: mpsc::Sender, - stream_receiver: Pausable>, + stream_receiver: mpsc::Receiver, garbage: Vec, // see `Connection::garbage_collect()` shutdown: Shutdown, is_closed: bool @@ -283,7 +283,7 @@ impl Connection { control_sender, control_receiver: Pausable::new(control_receiver), stream_sender, - stream_receiver: Pausable::new(stream_receiver), + stream_receiver: stream_receiver, next_id: match mode { Mode::Client => 1, Mode::Server => 2 @@ -349,7 +349,7 @@ impl Connection { // Close and drain the stream command receiver. if !self.stream_receiver.is_terminated() { - self.stream_receiver.stream().close(); + self.stream_receiver.close(); while let Some(_cmd) = self.stream_receiver.next().await { // drop it } @@ -381,30 +381,17 @@ impl Connection { // we properly wait until woken up because we actually can make // progress. - let mut num_terminated = 0; - - // Determine if the stream command receiver is paused, which - // is only the case if we have pending writes, i.e. back-pressure - // from the underlying connection. - let stream_receiver_paused = self.stream_receiver.is_paused(); - - let mut next_io_event = + let next_io_event = if self.socket.is_terminated() { - num_terminated += 1; Either::Left(future::pending()) } else { let socket = &mut self.socket; let io = future::poll_fn(move |cx| { - // If the stream command receiver was paused, we got - // back-pressure from the underlying I/O stream and - // must wait for the sink to be ready again for the - // next frame. - if stream_receiver_paused { - if let Poll::Ready(res) = socket.poll_ready_unpin(cx) { - res.or(Err(ConnectionError::Closed))?; - return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) - } + if let Poll::Ready(res) = socket.poll_ready_unpin(cx) { + res.or(Err(ConnectionError::Closed))?; + return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) } + // Progress reading. let next_frame = match futures::ready!(socket.poll_next_unpin(cx)) { None => Ok(None), @@ -416,6 +403,33 @@ impl Connection { Either::Right(io) }; + match next_io_event.await? { + IoEvent::OutboundReady => { + // Only unpause the control command receiver if we're not + // shutting down already. + if let Shutdown::NotStarted = self.shutdown { + self.control_receiver.unpause(); + } + } + IoEvent::Inbound(frame) => { + if let Some(stream) = self.on_frame(frame).await? { + self.flush_nowait().await.or(Err(ConnectionError::Closed))?; + return Ok(Some(stream)) + } + } + } + + // Getting this far implies that the socket is ready to start sending out a new frame. 
+ + let mut num_terminated = 0; + + let mut next_frame = if self.socket.is_terminated() { + num_terminated += 1; + Either::Left(future::pending()) + } else { + Either::Right(self.socket.next()) + }; + let mut next_stream_command = if self.stream_receiver.is_terminated() { num_terminated += 1; @@ -441,14 +455,14 @@ impl Connection { future::poll_fn(move |cx: &mut Context| { let a = next_stream_command.poll_unpin(cx); let b = next_control_command.poll_unpin(cx); - let c = next_io_event.poll_unpin(cx); + let c = next_frame.poll_unpin(cx); if a.is_pending() && b.is_pending() && c.is_pending() { return Poll::Pending } Poll::Ready((a, b, c)) }); - let (stream_command, control_command, io_event) = next_item.await; + let (stream_command, control_command, frame) = next_item.await; if let Poll::Ready(cmd) = control_command { self.on_control_command(cmd).await? @@ -458,23 +472,11 @@ impl Connection { self.on_stream_command(cmd).await? } - match io_event? { - Poll::Ready(IoEvent::OutboundReady) => { - log::trace!("{}: Sink ready, unpausing command streams.", self.id); - self.stream_receiver.unpause(); - // Only unpause the control command receiver if we're not - // shutting down already. - if let Shutdown::NotStarted = self.shutdown { - self.control_receiver.unpause(); - } + if let Poll::Ready(frame) = frame { + if let Some(stream) = self.on_frame(frame.transpose().map_err(Into::into)).await? { + self.flush_nowait().await.or(Err(ConnectionError::Closed))?; + return Ok(Some(stream)) } - Poll::Ready(IoEvent::Inbound(frame)) => { - if let Some(stream) = self.on_frame(frame).await? { - self.flush_nowait().await.or(Err(ConnectionError::Closed))?; - return Ok(Some(stream)) - } - } - Poll::Pending => {} } self.flush_nowait().await.or(Err(ConnectionError::Closed))?; @@ -506,7 +508,7 @@ impl Connection { let mut frame = Frame::window_update(id, extra_credit); frame.header_mut().syn(); log::trace!("{}/{}: sending initial {}", self.id, id, frame.header()); - self.send(frame).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? } let stream = { let config = self.config.clone(); @@ -528,7 +530,7 @@ impl Connection { header.rst(); let frame = Frame::new(header); log::trace!("{}/{}: sending reset", self.id, id); - self.send(frame).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? } } } @@ -545,7 +547,7 @@ impl Connection { self.shutdown = Shutdown::InProgress(reply); log::trace!("{}: shutting down connection", self.id); self.control_receiver.pause(); - self.stream_receiver.stream().close() + self.stream_receiver.close() } None => { // We only get here after the whole connection shutdown is complete. @@ -559,16 +561,12 @@ impl Connection { Ok(()) } - async fn send(&mut self, f: impl Into>) -> Result<()> { - send(self.id, &mut self.socket, &mut self.stream_receiver, &mut self.control_receiver, f).await - } - /// Process a command from one of our `Stream`s. async fn on_stream_command(&mut self, cmd: Option) -> Result<()> { match cmd { Some(StreamCommand::SendFrame(frame)) => { log::trace!("{}/{}: sending: {}", self.id, frame.header().stream_id(), frame.header()); - self.send(frame).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? 
} Some(StreamCommand::CloseStream { id, ack }) => { log::trace!("{}/{}: sending close", self.id, id); @@ -576,7 +574,7 @@ impl Connection { header.fin(); if ack { header.ack() } let frame = Frame::new(header); - self.send(frame).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? } None => { // We only get to this point when `self.stream_receiver` @@ -587,7 +585,7 @@ impl Connection { debug_assert!(self.shutdown.is_in_progress()); log::debug!("{}: sending term", self.id); let frame = Frame::term(); - self.send(frame).await.or(Err(ConnectionError::Closed))?; + self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))?; let shutdown = std::mem::replace(&mut self.shutdown, Shutdown::Complete); if let Shutdown::InProgress(tx) = shutdown { // Inform the `Control` that initiated the shutdown. @@ -623,25 +621,25 @@ impl Connection { log::trace!("{}: new inbound {} of {}", self.id, stream, self); if let Some(f) = update { log::trace!("{}/{}: sending update", self.id, f.header().stream_id()); - self.send(f).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? } return Ok(Some(stream)) } Action::Update(f) => { log::trace!("{}: sending update: {:?}", self.id, f.header()); - self.send(f).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? } Action::Ping(f) => { log::trace!("{}/{}: pong", self.id, f.header().stream_id()); - self.send(f).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? } Action::Reset(f) => { log::trace!("{}/{}: sending reset", self.id, f.header().stream_id()); - self.send(f).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? } Action::Terminate(f) => { log::trace!("{}: sending term", self.id); - self.send(f).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? } } Ok(None) @@ -959,8 +957,7 @@ impl Connection { }; if let Some(f) = frame { log::trace!("{}/{}: sending: {}", self.id, stream_id, f.header()); - send(conn_id, &mut self.socket, &mut self.stream_receiver, - &mut self.control_receiver, f).await.or(Err(ConnectionError::Closed))? + self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? } self.garbage.push(stream_id) } @@ -997,39 +994,10 @@ impl Drop for Connection { enum IoEvent { /// A new inbound frame arrived. Inbound(Result>>), - /// We can continue sending frames after having to pause earlier. + /// We can continue sending frames. OutboundReady, } -/// Sends a frame on the given `io` stream. -/// -/// If the frame is taken by `io` but cannot be fully sent, the command -/// receivers are paused, without waiting for completion. -/// -/// If a prior send operation is still pending, waits for its completion. -async fn send( - id: Id, - io: &mut Fuse>, - stream_receiver: &mut Pausable>, - control_receiver: &mut Pausable>, - frame: impl Into> -) -> Result<()> { - SinkExt::feed(io.get_mut(), frame.into()).await?; - // Check if the write "goes through" or is pending due to back-pressure - // from the underlying socket. - future::poll_fn(|cx| { - match io.get_mut().poll_ready_unpin(cx)? { - Poll::Pending => { - log::trace!("{}: send: Write pending. 
Continuing with paused command streams.", id); - stream_receiver.pause(); - control_receiver.pause(); - return Poll::Ready(Result::Ok(())) - } - Poll::Ready(()) => return Poll::Ready(Ok(())) - } - }).await -} - /// Turn a Yamux [`Connection`] into a [`futures::Stream`]. pub fn into_stream(c: Connection) -> impl futures::stream::Stream> where From 95167a639becb753a3247ec94e240173ef46ef14 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 23 Feb 2021 12:10:50 +0100 Subject: [PATCH 33/43] tests/concurrent: Test with varying TCP send buffer sizes --- tests/concurrent.rs | 66 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/tests/concurrent.rs b/tests/concurrent.rs index e8bd4642..0706adf7 100644 --- a/tests/concurrent.rs +++ b/tests/concurrent.rs @@ -10,12 +10,25 @@ use futures::{channel::mpsc, prelude::*}; use std::{net::{Ipv4Addr, SocketAddr, SocketAddrV4}, sync::Arc}; -use tokio::{net::{TcpStream, TcpListener}, task}; +use tokio::{net::TcpSocket, task, runtime::Runtime}; use tokio_util::compat::TokioAsyncReadCompatExt; use yamux::{Config, Connection, Mode, WindowUpdateMode}; +use quickcheck::{Arbitrary, Gen, QuickCheck}; -async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { - let listener = TcpListener::bind(&address).await.expect("bind"); +async fn roundtrip( + address: SocketAddr, + nstreams: usize, + data: Arc>, + tcp_send_buffer_size: TcpSendBufferSize, +) { + let listener = { + let socket = TcpSocket::new_v4().expect("new_v4"); + if let Some(size) = tcp_send_buffer_size.0 { + socket.set_send_buffer_size(size).expect("size set"); + } + socket.bind(address).expect("bind"); + socket.listen(1024).expect("listen") + }; let address = listener.local_addr().expect("local address"); let mut server_cfg = Config::default(); @@ -32,8 +45,8 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { let client_cfg = server_cfg.clone(); let server = async move { - let socket = listener.accept().await.expect("accept").0.compat(); - yamux::into_stream(Connection::new(socket, server_cfg, Mode::Server)) + let stream = listener.accept().await.expect("accept").0.compat(); + yamux::into_stream(Connection::new(stream, server_cfg, Mode::Server)) .try_for_each_concurrent(None, |mut stream| async move { log::debug!("S: accepted new stream"); let mut len = [0; 4]; @@ -50,9 +63,15 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { task::spawn(server); - let socket = TcpStream::connect(&address).await.expect("connect").compat(); + let conn = { + let socket = TcpSocket::new_v4().expect("new_v4"); + if let Some(size) = tcp_send_buffer_size.0 { + socket.set_send_buffer_size(size).expect("size set"); + } + let stream = socket.connect(address).await.expect("connect").compat(); + Connection::new(stream, client_cfg, Mode::Client) + }; let (tx, rx) = mpsc::unbounded(); - let conn = Connection::new(socket, client_cfg, Mode::Client); let mut ctrl = conn.control(); task::spawn(yamux::into_stream(conn).for_each(|_| future::ready(()))); for _ in 0 .. nstreams { @@ -79,10 +98,33 @@ async fn roundtrip(address: SocketAddr, nstreams: usize, data: Arc>) { assert_eq!(nstreams, n) } -#[tokio::test] -async fn concurrent_streams() { +/// Send buffer size for a TCP socket. +/// +/// Leave unchanged, i.e. use OS default value, when `None`. 
+#[derive(Clone, Debug)] +struct TcpSendBufferSize(Option); + +impl Arbitrary for TcpSendBufferSize { + fn arbitrary(g: &mut Gen) -> Self { + if bool::arbitrary(g) { + TcpSendBufferSize(None) + } else { + TcpSendBufferSize(Some(16 * 1024)) + } + } +} + + +#[test] +fn concurrent_streams() { let _ = env_logger::try_init(); - let data = Arc::new(vec![0x42; 128 * 1024]); - let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); - roundtrip(addr, 1000, data).await + + fn prop (tcp_send_buffer_size: TcpSendBufferSize) { + let data = Arc::new(vec![0x42; 128 * 1024]); + let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); + + Runtime::new().expect("new runtime").block_on(roundtrip(addr, 1000, data, tcp_send_buffer_size)); + } + + QuickCheck::new().tests(1).quickcheck(prop as fn(_) -> _) } From 912317e827e4953266322285aab606d24507abcf Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 23 Feb 2021 11:14:52 +0100 Subject: [PATCH 34/43] src/connection: Apply review suggestions --- src/connection.rs | 83 ++++++++++++++++++++++++----------------------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index a0c27cf3..2f4a36f2 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -283,7 +283,7 @@ impl Connection { control_sender, control_receiver: Pausable::new(control_receiver), stream_sender, - stream_receiver: stream_receiver, + stream_receiver, next_id: match mode { Mode::Client => 1, Mode::Server => 2 @@ -381,45 +381,48 @@ impl Connection { // we properly wait until woken up because we actually can make // progress. - let next_io_event = - if self.socket.is_terminated() { - Either::Left(future::pending()) - } else { - let socket = &mut self.socket; - let io = future::poll_fn(move |cx| { - if let Poll::Ready(res) = socket.poll_ready_unpin(cx) { - res.or(Err(ConnectionError::Closed))?; - return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) - } + // Wait for the frame sink to be ready or, if there is a pending + // write, for an incoming frame. I.e. as long as there is a pending + // write, we only read, unless a read results in needing to send a + // frame, in which case we must wait for the pending write to + // complete. When the frame sink is ready, we can proceed with + // waiting for a new stream or control command or another inbound + // frame. + let next_io_event = if self.socket.is_terminated() { + Either::Left(future::pending()) + } else { + let socket = &mut self.socket; + let io = future::poll_fn(move |cx| { + if let Poll::Ready(res) = socket.poll_ready_unpin(cx) { + res.or(Err(ConnectionError::Closed))?; + return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) + } - // Progress reading. - let next_frame = match futures::ready!(socket.poll_next_unpin(cx)) { - None => Ok(None), - Some(Err(e)) => Err(e.into()), - Some(Ok(f)) => Ok(Some(f)) - }; - Poll::Ready(Ok(IoEvent::Inbound(next_frame))) - }); - Either::Right(io) - }; + // Progress reading. + let next_frame = match futures::ready!(socket.poll_next_unpin(cx)) { + None => Ok(None), + Some(Err(e)) => Err(e.into()), + Some(Ok(f)) => Ok(Some(f)) + }; + Poll::Ready(Ok(IoEvent::Inbound(next_frame))) + }); + Either::Right(io) + }; match next_io_event.await? { - IoEvent::OutboundReady => { - // Only unpause the control command receiver if we're not - // shutting down already. 
- if let Shutdown::NotStarted = self.shutdown { - self.control_receiver.unpause(); - } - } + IoEvent::OutboundReady => {}, IoEvent::Inbound(frame) => { if let Some(stream) = self.on_frame(frame).await? { self.flush_nowait().await.or(Err(ConnectionError::Closed))?; return Ok(Some(stream)) } + continue // The sink is still pending. } } - // Getting this far implies that the socket is ready to start sending out a new frame. + // Getting this far implies that the socket is ready to start + // sending out a new frame, so we can now listen for new commands + // while waiting for the next inbound frame let mut num_terminated = 0; @@ -508,7 +511,7 @@ impl Connection { let mut frame = Frame::window_update(id, extra_credit); frame.header_mut().syn(); log::trace!("{}/{}: sending initial {}", self.id, id, frame.header()); - self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(frame.into()).await.or(Err(ConnectionError::Closed))? } let stream = { let config = self.config.clone(); @@ -530,7 +533,7 @@ impl Connection { header.rst(); let frame = Frame::new(header); log::trace!("{}/{}: sending reset", self.id, id); - self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(frame.into()).await.or(Err(ConnectionError::Closed))? } } } @@ -566,7 +569,7 @@ impl Connection { match cmd { Some(StreamCommand::SendFrame(frame)) => { log::trace!("{}/{}: sending: {}", self.id, frame.header().stream_id(), frame.header()); - self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(frame.into()).await.or(Err(ConnectionError::Closed))? } Some(StreamCommand::CloseStream { id, ack }) => { log::trace!("{}/{}: sending close", self.id, id); @@ -574,7 +577,7 @@ impl Connection { header.fin(); if ack { header.ack() } let frame = Frame::new(header); - self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(frame.into()).await.or(Err(ConnectionError::Closed))? } None => { // We only get to this point when `self.stream_receiver` @@ -585,7 +588,7 @@ impl Connection { debug_assert!(self.shutdown.is_in_progress()); log::debug!("{}: sending term", self.id); let frame = Frame::term(); - self.socket.get_mut().feed(frame.into()).await.or(Err(ConnectionError::Closed))?; + self.socket.feed(frame.into()).await.or(Err(ConnectionError::Closed))?; let shutdown = std::mem::replace(&mut self.shutdown, Shutdown::Complete); if let Shutdown::InProgress(tx) = shutdown { // Inform the `Control` that initiated the shutdown. @@ -621,25 +624,25 @@ impl Connection { log::trace!("{}: new inbound {} of {}", self.id, stream, self); if let Some(f) = update { log::trace!("{}/{}: sending update", self.id, f.header().stream_id()); - self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(f.into()).await.or(Err(ConnectionError::Closed))? } return Ok(Some(stream)) } Action::Update(f) => { log::trace!("{}: sending update: {:?}", self.id, f.header()); - self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(f.into()).await.or(Err(ConnectionError::Closed))? } Action::Ping(f) => { log::trace!("{}/{}: pong", self.id, f.header().stream_id()); - self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))? + self.socket.feed(f.into()).await.or(Err(ConnectionError::Closed))? 
             }
             Action::Reset(f) => {
                 log::trace!("{}/{}: sending reset", self.id, f.header().stream_id());
-                self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))?
+                self.socket.feed(f.into()).await.or(Err(ConnectionError::Closed))?
             }
             Action::Terminate(f) => {
                 log::trace!("{}: sending term", self.id);
-                self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))?
+                self.socket.feed(f.into()).await.or(Err(ConnectionError::Closed))?
             }
         }
         Ok(None)
@@ -957,7 +960,7 @@ impl<T: AsyncRead + AsyncWrite + Unpin> Connection<T> {
         };
         if let Some(f) = frame {
             log::trace!("{}/{}: sending: {}", self.id, stream_id, f.header());
-            self.socket.get_mut().feed(f.into()).await.or(Err(ConnectionError::Closed))?
+            self.socket.feed(f.into()).await.or(Err(ConnectionError::Closed))?
         }
         self.garbage.push(stream_id)
     }
From a3b63b485225373a7f02ab7a214100840141f1eb Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Tue, 23 Feb 2021 12:28:47 +0100
Subject: [PATCH 35/43] tests/concurrent: Remove newline

---
 tests/concurrent.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/concurrent.rs b/tests/concurrent.rs
index 0706adf7..d12f0543 100644
--- a/tests/concurrent.rs
+++ b/tests/concurrent.rs
@@ -114,7 +114,6 @@ impl Arbitrary for TcpSendBufferSize {
     }
 }
 
-
 #[test]
 fn concurrent_streams() {
     let _ = env_logger::try_init();
From 007f86da23d6744e75dfece064c5748785949ebd Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Tue, 23 Feb 2021 16:45:37 +0100
Subject: [PATCH 36/43] tests/concurrent: Vary both send and receive buffer size

---
 tests/concurrent.rs | 46 ++++++++++++++++++++++++++++----------------
 1 file changed, 29 insertions(+), 17 deletions(-)

diff --git a/tests/concurrent.rs b/tests/concurrent.rs
index d12f0543..5f361789 100644
--- a/tests/concurrent.rs
+++ b/tests/concurrent.rs
@@ -19,12 +19,13 @@
 async fn roundtrip(
     address: SocketAddr,
     nstreams: usize,
     data: Arc<Vec<u8>>,
-    tcp_send_buffer_size: TcpSendBufferSize,
+    tcp_buffer_sizes: Option<TcpBuferSizes>,
 ) {
     let listener = {
         let socket = TcpSocket::new_v4().expect("new_v4");
-        if let Some(size) = tcp_send_buffer_size.0 {
-            socket.set_send_buffer_size(size).expect("size set");
+        if let Some(size) = tcp_buffer_sizes {
+            socket.set_send_buffer_size(size.send).expect("send size set");
+            socket.set_recv_buffer_size(size.recv).expect("recv size set");
         }
         socket.bind(address).expect("bind");
         socket.listen(1024).expect("listen")
     };
     let address = listener.local_addr().expect("local address");
 
     let mut server_cfg = Config::default();
@@ -65,8 +66,9 @@ async fn roundtrip(
     let conn = {
         let socket = TcpSocket::new_v4().expect("new_v4");
-        if let Some(size) = tcp_send_buffer_size.0 {
-            socket.set_send_buffer_size(size).expect("size set");
+        if let Some(size) = tcp_buffer_sizes {
+            socket.set_send_buffer_size(size.send).expect("send size set");
+            socket.set_recv_buffer_size(size.recv).expect("recv size set");
         }
         let stream = socket.connect(address).await.expect("connect").compat();
         Connection::new(stream, client_cfg, Mode::Client)
     };
     let (tx, rx) = mpsc::unbounded();
@@ -98,19 +100,29 @@ async fn roundtrip(
     assert_eq!(nstreams, n)
 }
 
-/// Send buffer size for a TCP socket.
-///
-/// Leave unchanged, i.e. use OS default value, when `None`.
-#[derive(Clone, Debug)]
-struct TcpSendBufferSize(Option<u32>);
+/// Send and receive buffer size for a TCP socket.
+#[derive(Clone, Debug, Copy)] +struct TcpBuferSizes { + send: u32, + recv: u32, +} -impl Arbitrary for TcpSendBufferSize { +impl Arbitrary for TcpBuferSizes { fn arbitrary(g: &mut Gen) -> Self { - if bool::arbitrary(g) { - TcpSendBufferSize(None) + let send = if bool::arbitrary(g) { + 16*1024 } else { - TcpSendBufferSize(Some(16 * 1024)) - } + 32*1024 + }; + + // Have receive buffer size be some multiple of send buffer size. + let recv = if bool::arbitrary(g) { + send * 2 + } else { + send * 4 + }; + + TcpBuferSizes { send, recv } } } @@ -118,11 +130,11 @@ impl Arbitrary for TcpSendBufferSize { fn concurrent_streams() { let _ = env_logger::try_init(); - fn prop (tcp_send_buffer_size: TcpSendBufferSize) { + fn prop (tcp_buffer_sizes: Option) { let data = Arc::new(vec![0x42; 128 * 1024]); let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)); - Runtime::new().expect("new runtime").block_on(roundtrip(addr, 1000, data, tcp_send_buffer_size)); + Runtime::new().expect("new runtime").block_on(roundtrip(addr, 1000, data, tcp_buffer_sizes)); } QuickCheck::new().tests(1).quickcheck(prop as fn(_) -> _) From 96addec62463b6a294acd6a3fe26b1ad3d481664 Mon Sep 17 00:00:00 2001 From: "Roman S. Borschel" Date: Tue, 23 Feb 2021 18:30:14 +0100 Subject: [PATCH 37/43] Small cleanup. --- src/connection.rs | 48 ++++++++++++++++++++------------------------- tests/concurrent.rs | 12 ++++++------ 2 files changed, 27 insertions(+), 33 deletions(-) diff --git a/src/connection.rs b/src/connection.rs index 2f4a36f2..68e31870 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -371,16 +371,6 @@ impl Connection { loop { self.garbage_collect().await?; - // For each channel and the socket we create a future that gets - // the next item. We will poll each future and if any one of them - // yields an item, we return the tuple of poll results which are - // then all processed. - // - // For terminated sources we create non-finishing futures. - // This guarantees that if the remaining futures are pending - // we properly wait until woken up because we actually can make - // progress. - // Wait for the frame sink to be ready or, if there is a pending // write, for an incoming frame. I.e. as long as there is a pending // write, we only read, unless a read results in needing to send a @@ -398,31 +388,35 @@ impl Connection { return Poll::Ready(Result::Ok(IoEvent::OutboundReady)) } - // Progress reading. - let next_frame = match futures::ready!(socket.poll_next_unpin(cx)) { - None => Ok(None), - Some(Err(e)) => Err(e.into()), - Some(Ok(f)) => Ok(Some(f)) - }; + // At this point we know the socket sink has a pending + // write, so we try to read the next frame instead. + let next_frame = futures::ready!(socket.poll_next_unpin(cx)) + .transpose() + .map_err(ConnectionError::from); Poll::Ready(Ok(IoEvent::Inbound(next_frame))) }); Either::Right(io) }; - match next_io_event.await? { - IoEvent::OutboundReady => {}, - IoEvent::Inbound(frame) => { - if let Some(stream) = self.on_frame(frame).await? { - self.flush_nowait().await.or(Err(ConnectionError::Closed))?; - return Ok(Some(stream)) - } - continue // The sink is still pending. + if let IoEvent::Inbound(frame) = next_io_event.await? { + if let Some(stream) = self.on_frame(frame).await? { + self.flush_nowait().await.or(Err(ConnectionError::Closed))?; + return Ok(Some(stream)) } + continue // The socket sink still has a pending write. 
             }
-            }
-
-            // Getting this far implies that the socket is ready to start
-            // sending out a new frame, so we can now listen for new commands
-            // while waiting for the next inbound frame
+            // Getting this far implies that the socket is ready to accept
+            // a new frame, so we can now listen for new commands while waiting
+            // for the next inbound frame. To that end, for each channel and the
+            // socket we create a future that gets the next item. We will poll
+            // each future and if any one of them yields an item, we return the
+            // tuple of poll results which are then all processed.
+            //
+            // For terminated sources we create non-finishing futures.
+            // This guarantees that if the remaining futures are pending
+            // we properly wait until woken up because we actually can make
+            // progress.
 
             let mut num_terminated = 0;
 
diff --git a/tests/concurrent.rs b/tests/concurrent.rs
index 95d3a740..1bfe835a 100644
--- a/tests/concurrent.rs
+++ b/tests/concurrent.rs
@@ -20,7 +20,7 @@ async fn roundtrip(
     address: SocketAddr,
     nstreams: usize,
     data: Arc<Vec<u8>>,
-    tcp_buffer_sizes: Option<TcpBuferSizes>,
+    tcp_buffer_sizes: Option<TcpBufferSizes>,
 ) {
 
@@ -103,12 +103,12 @@ async fn roundtrip(
 /// Send and receive buffer size for a TCP socket.
 #[derive(Clone, Debug, Copy)]
-struct TcpBuferSizes {
+struct TcpBufferSizes {
     send: u32,
     recv: u32,
 }
 
-impl Arbitrary for TcpBuferSizes {
+impl Arbitrary for TcpBufferSizes {
     fn arbitrary(g: &mut Gen) -> Self {
         let send = if bool::arbitrary(g) {
             16*1024
@@ -123,7 +123,7 @@ impl Arbitrary for TcpBuferSizes {
             send * 4
         };
 
-        TcpBuferSizes { send, recv }
+        TcpBufferSizes { send, recv }
     }
 }
 
@@ -131,12 +131,12 @@ impl Arbitrary for TcpBuferSizes {
 fn concurrent_streams() {
     let _ = env_logger::try_init();
 
-    fn prop (tcp_buffer_sizes: Option<TcpBuferSizes>) {
+    fn prop(tcp_buffer_sizes: Option<TcpBufferSizes>) {
         let data = Arc::new(vec![0x42; PAYLOAD_SIZE]);
         let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0));
 
         Runtime::new().expect("new runtime").block_on(roundtrip(addr, 1000, data, tcp_buffer_sizes));
     }
 
-    QuickCheck::new().tests(1).quickcheck(prop as fn(_) -> _)
+    QuickCheck::new().tests(3).quickcheck(prop as fn(_) -> _)
 }
From 7dc34b8068f5507e140d7e7c8d578ca3fda19a07 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 12 May 2021 06:21:55 +0000
Subject: [PATCH 38/43] build(deps): bump actions/checkout from 2 to 2.3.4

Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 2.3.4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v2.3.4)

Signed-off-by: dependabot[bot]
---
 .github/workflows/rust.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 6833ff03..d6283adb 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v2.3.4
     - name: Build
       run: cargo build --verbose
     - name: Run tests
From ab4fe4566e759ca8bea814cc1a4eff93038de5bb Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Thu, 3 Jun 2021 11:04:33 +0200
Subject: [PATCH 39/43] src/lib: Default to WindowUpdateMode::OnRead

Default to `WindowUpdateMode::OnRead`, thus enabling full Yamux
flow-control, exercising back pressure on senders, preventing stream
resets due to reaching the buffer limit.

See the [`WindowUpdateMode` documentation] for details, especially the
section on deadlocking when sending data larger than the receiver's
window.

[`WindowUpdateMode` documentation]: https://docs.rs/yamux/0.9.0/yamux/enum.WindowUpdateMode.html
---
 CHANGELOG.md | 11 +++++++++++
 Cargo.toml   |  2 +-
 src/lib.rs   |  4 ++--
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index cebc558e..5e791d5d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,14 @@
+# 0.10.0 [unreleased]
+
+- Default to `WindowUpdateMode::OnRead`, thus enabling full Yamux flow-control,
+  exercising back pressure on senders, preventing stream resets due to reaching
+  the buffer limit.
+
+  See the [`WindowUpdateMode` documentation] for details, especially the section
+  on deadlocking when sending data larger than the receiver's window.
+
+  [`WindowUpdateMode` documentation]: https://docs.rs/yamux/0.9.0/yamux/enum.WindowUpdateMode.html
+
 # 0.9.0
 
 - Force-split larger frames, for better interleaving of
diff --git a/Cargo.toml b/Cargo.toml
index e8114653..648dc92e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "yamux"
-version = "0.9.0"
+version = "0.10.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "Apache-2.0 OR MIT"
 description = "Multiplexer over reliable, ordered connections"
diff --git a/src/lib.rs b/src/lib.rs
index b2a8740c..28e5a499 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -90,7 +90,7 @@ pub enum WindowUpdateMode {
 /// - receive window = 256 KiB
 /// - max. buffer size (per stream) = 1 MiB
 /// - max. number of streams = 8192
-/// - window update mode = on receive
+/// - window update mode = on read
 /// - read after close = true
 /// - split send size = 16 KiB
 #[derive(Debug, Clone)]
@@ -109,7 +109,7 @@ impl Default for Config {
             receive_window: DEFAULT_CREDIT,
             max_buffer_size: 1024 * 1024,
             max_num_streams: 8192,
-            window_update_mode: WindowUpdateMode::OnReceive,
+            window_update_mode: WindowUpdateMode::OnRead,
             read_after_close: true,
             split_send_size: DEFAULT_SPLIT_SEND_SIZE
         }
From 9444015d1c129d327f6f9de58f1a0d723da075d6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 14 Jul 2021 22:07:58 +0000
Subject: [PATCH 40/43] build(deps): update env_logger requirement from 0.8 to 0.9

Updates the requirements on [env_logger](https://github.com/env-logger-rs/env_logger) to permit the latest version.
- [Release notes](https://github.com/env-logger-rs/env_logger/releases) - [Changelog](https://github.com/env-logger-rs/env_logger/blob/main/CHANGELOG.md) - [Commits](https://github.com/env-logger-rs/env_logger/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: env_logger dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 648dc92e..4823e7a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ static_assertions = "1" [dev-dependencies] anyhow = "1" criterion = "0.3" -env_logger = "0.8" +env_logger = "0.9" futures = "0.3.4" quickcheck = "1.0" tokio = { version = "1.0", features = ["net", "rt-multi-thread", "macros", "time"] } From d12d69e087db889f877b7a65ac2e47258d596bef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Oct 2021 22:07:31 +0000 Subject: [PATCH 41/43] build(deps): bump actions/checkout from 2.3.4 to 2.3.5 Bumps [actions/checkout](https://github.com/actions/checkout) from 2.3.4 to 2.3.5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2.3.4...v2.3.5) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d6283adb..ceaf97f5 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Build run: cargo build --verbose - name: Run tests From c850b5364de625cd1e2b410b91edc8866e16de47 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Nov 2021 22:12:28 +0000 Subject: [PATCH 42/43] build(deps): bump actions/checkout from 2.3.5 to 2.4.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 2.3.5 to 2.4.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2.3.5...v2.4.0) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ceaf97f5..8cec79ef 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: Build run: cargo build --verbose - name: Run tests From b03804cc4ceebb0d0b47bb57032f5446bcf0e34c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 11 Jan 2022 11:37:52 +0100 Subject: [PATCH 43/43] CHANGELOG: Prepare v0.10.0 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e791d5d..804f563e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -# 0.10.0 [unreleased] +# 0.10.0 - Default to `WindowUpdateMode::OnRead`, thus enabling full Yamux flow-control, exercising back pressure on senders, preventing stream resets due to reaching
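
As an illustration of what the 0.10.0 default described above means for
users (a minimal sketch, assuming only the `Config` and
`WindowUpdateMode` API already used in the diffs above; it is not part
of the patch series itself):

    use yamux::{Config, WindowUpdateMode};

    fn main() {
        // As of 0.10.0, `Config::default()` already selects
        // `WindowUpdateMode::OnRead`, i.e. full flow control with
        // back pressure on senders.
        let cfg = Config::default();

        // Opting back into the pre-0.10.0 behaviour must now be
        // explicit.
        let mut legacy = Config::default();
        legacy.set_window_update_mode(WindowUpdateMode::OnReceive);

        let _ = (cfg, legacy);
    }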