diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 7f58de3733..01d0904951 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -98,6 +98,7 @@ jobs: else targets="--lib --bins" fi + echo cargo check -p $i $FEATURES $targets cargo check -p $i $FEATURES $targets done env: diff --git a/Cargo.lock b/Cargo.lock index 2f8e76d693..6886a853de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -163,30 +163,14 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "asn1-rs" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive 0.4.0", - "asn1-rs-impl 0.1.0", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "asn1-rs" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" dependencies = [ - "asn1-rs-derive 0.5.0", - "asn1-rs-impl 0.2.0", + "asn1-rs-derive", + "asn1-rs-impl", "displaydoc", "nom", "num-traits", @@ -195,18 +179,6 @@ dependencies = [ "time", ] -[[package]] -name = "asn1-rs-derive" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure 0.12.6", -] - [[package]] name = "asn1-rs-derive" version = "0.5.0" @@ -216,18 +188,7 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.72", - "synstructure 0.13.1", -] - -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "synstructure", ] [[package]] @@ -253,6 +214,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", +] + [[package]] name = "async-trait" version = "0.1.81" @@ -366,9 +338,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad46c3ec4e12f4a4b6835e173ba21c25e484c9d02b49770bf006ce5367c036" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" dependencies = [ "arc-swap", "bytes", @@ -379,10 +351,11 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 2.1.2", + "rustls", + "rustls-pemfile", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tower", "tower-service", ] @@ -546,9 +519,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" dependencies = [ "serde", ] @@ -611,6 +584,12 @@ version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cfg-if" 
version = "1.0.0" @@ -754,6 +733,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "comfy-table" version = "7.1.1" @@ -1127,27 +1116,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs 0.5.2", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -1412,15 +1387,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" -[[package]] -name = "encoding_rs" -version = "0.8.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" -dependencies = [ - "cfg-if", -] - [[package]] name = "endian-type" version = "0.1.2" @@ -1602,7 +1568,7 @@ dependencies = [ "futures-core", "futures-sink", "nanorand", - "spin 0.9.8", + "spin", ] [[package]] @@ -1985,7 +1951,7 @@ dependencies = [ "hash32", "rustc_version", "serde", - "spin 0.9.8", + "spin", "stable_deref_trait", ] @@ -2030,61 +1996,113 @@ dependencies = [ "ipnet", "once_cell", "rand", - "ring 0.16.20", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-proto" +version = 
"0.25.0-alpha.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8270a1857fb962b9914aafd46a89a187a4e63d0eb4190c327e7c7b8256a2d055" +dependencies = [ + "async-recursion", + "async-trait", + "bitflags 2.6.0", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.5.0", + "ipnet", + "once_cell", + "rand", + "ring", + "rustls", + "rustls-pemfile", "serde", "thiserror", + "time", "tinyvec", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tracing", "url", ] +[[package]] +name = "hickory-recursor" +version = "0.25.0-alpha.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0d2b7bc3b967b53b1b9879e319c8cc4ec037bb5d25bc7a88edd2e6de15d1a70" +dependencies = [ + "async-recursion", + "async-trait", + "bytes", + "cfg-if", + "enum-as-inner", + "futures-util", + "hickory-proto 0.25.0-alpha.2", + "hickory-resolver", + "lru-cache", + "parking_lot", + "serde", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.25.0-alpha.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "46c110355b5703070d9e29c344d79818a7cde3de9c27fc35750defea6074b0ad" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.25.0-alpha.2", "ipconfig", "lru-cache", "once_cell", "parking_lot", "rand", "resolv-conf", - "rustls 0.21.12", + "rustls", "serde", "smallvec", "thiserror", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tracing", ] [[package]] name = "hickory-server" -version = "0.24.1" +version = "0.25.0-alpha.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be0e43c556b9b3fdb6c7c71a9a32153a2275d02419e3de809e520bfcfe40c37" +checksum = "1ee9bc516413439e322999f9c3263361b0454969cd53f20d26297ed8aa1e77c1" dependencies = [ "async-trait", "bytes", "cfg-if", 
"enum-as-inner", "futures-util", - "hickory-proto", + "hickory-proto 0.25.0-alpha.2", + "hickory-recursor", "hickory-resolver", - "rustls 0.21.12", + "ipnet", + "prefix-trie", + "rustls", "serde", "thiserror", "time", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-util", "tracing", ] @@ -2272,20 +2290,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.30", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2296,12 +2300,12 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.11", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", - "webpki-roots 0.26.3", + "webpki-roots", ] [[package]] @@ -2464,7 +2468,7 @@ dependencies = [ "socket2", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] @@ -2472,6 +2476,9 @@ name = "ipnet" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +dependencies = [ + "serde", +] [[package]] name = "iroh" @@ -2604,11 +2611,11 @@ dependencies = [ "proptest", "rand", "range-collections", - "rcgen 0.12.1", + "rcgen", "redb 1.5.1", "redb 2.1.1", "reflink-copy", - "rustls 0.21.12", + "rustls", "self_cell", "serde", "serde_json", @@ -2660,7 +2667,7 @@ dependencies = [ "rand_xorshift", "ratatui", "regex", - "reqwest 0.12.5", + "reqwest", "rustyline", "serde", "serde_with", @@ -2695,7 +2702,7 @@ dependencies = [ "dirs-next", "futures-lite 2.3.0", "governor", - "hickory-proto", + "hickory-proto 0.25.0-alpha.2", "hickory-resolver", "hickory-server", "http 1.1.0", @@ -2706,16 +2713,16 @@ dependencies = [ "mainline", 
"parking_lot", "pkarr", - "rcgen 0.12.1", + "rcgen", "redb 2.1.1", "regex", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls", + "rustls-pemfile", "serde", "struct_iterable", "strum 0.26.3", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-rustls-acme", "tokio-stream", "tokio-util", @@ -2788,6 +2795,7 @@ dependencies = [ "iroh-blake3", "iroh-metrics", "iroh-net", + "iroh-quinn", "iroh-test", "postcard", "rand", @@ -2825,7 +2833,7 @@ dependencies = [ "hyper-util", "once_cell", "prometheus-client", - "reqwest 0.12.5", + "reqwest", "serde", "struct_iterable", "time", @@ -2856,7 +2864,7 @@ dependencies = [ "genawaiter", "governor", "hex", - "hickory-proto", + "hickory-proto 0.25.0-alpha.2", "hickory-resolver", "hostname", "http 1.1.0", @@ -2889,14 +2897,14 @@ dependencies = [ "rand", "rand_chacha", "rand_core", - "rcgen 0.12.1", + "rcgen", "regex", - "reqwest 0.12.5", - "ring 0.17.8", + "reqwest", + "ring", "rtnetlink", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "rustls-webpki 0.101.7", + "rustls", + "rustls-pemfile", + "rustls-webpki", "serde", "serde_json", "serde_with", @@ -2911,7 +2919,7 @@ dependencies = [ "thiserror", "time", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-rustls-acme", "tokio-stream", "tokio-tungstenite", @@ -2923,10 +2931,10 @@ dependencies = [ "tungstenite", "url", "watchable", - "webpki-roots 0.25.4", + "webpki-roots", "windows 0.51.1", "wmi", - "x509-parser 0.15.1", + "x509-parser", "z32", ] @@ -2941,9 +2949,9 @@ dependencies = [ "hdrhistogram", "iroh-metrics", "iroh-net", - "quinn 0.10.2", - "rcgen 0.11.3", - "rustls 0.21.12", + "iroh-quinn", + "rcgen", + "rustls", "socket2", "tokio", "tracing", @@ -2952,16 +2960,17 @@ dependencies = [ [[package]] name = "iroh-quinn" -version = "0.10.5" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906875956feb75d3d41d708ddaffeb11fdb10cd05f23efbcb17600037e411779" +checksum = 
"4fd590a39a14cfc168efa4d894de5039d65641e62d8da4a80733018ababe3c33" dependencies = [ "bytes", "iroh-quinn-proto", "iroh-quinn-udp", "pin-project-lite", - "rustc-hash", - "rustls 0.21.12", + "rustc-hash 2.0.0", + "rustls", + "socket2", "thiserror", "tokio", "tracing", @@ -2969,16 +2978,16 @@ dependencies = [ [[package]] name = "iroh-quinn-proto" -version = "0.10.8" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6bf92478805e67f2320459285496e1137edf5171411001a0d4d85f9bbafb792" +checksum = "5fd0538ff12efe3d61ea1deda2d7913f4270873a519d43e6995c6e87a1558538" dependencies = [ "bytes", "rand", - "ring 0.17.8", - "rustc-hash", - "rustls 0.21.12", - "rustls-native-certs", + "ring", + "rustc-hash 2.0.0", + "rustls", + "rustls-platform-verifier", "slab", "thiserror", "tinyvec", @@ -2987,15 +2996,15 @@ dependencies = [ [[package]] name = "iroh-quinn-udp" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc7915b3a31f08ee0bc02f73f4d61a5d5be146a1081ef7f70622a11627fd314" +checksum = "d0619b59471fdd393ac8a6c047f640171119c1c8b41f7d2927db91776dcdbc5f" dependencies = [ - "bytes", "libc", + "once_cell", "socket2", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -3058,6 +3067,26 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "js-sys" 
version = "0.3.69" @@ -3073,7 +3102,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.9.8", + "spin", ] [[package]] @@ -3276,7 +3305,7 @@ dependencies = [ "netlink-packet-route", "netlink-sys", "once_cell", - "system-configuration 0.6.0", + "system-configuration", "windows-sys 0.52.0", ] @@ -3580,22 +3609,13 @@ dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs 0.5.2", -] - [[package]] name = "oid-registry" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", ] [[package]] @@ -3827,9 +3847,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkarr" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8ffdac8b4b7ea5240c46b28f88de799e0efaa2b93ccb08eaae6e50835dfe137" +checksum = "7945a08031b7e14de57e8385cea3bcc7e10a88701595dc11d82551ba07bae13e" dependencies = [ "bytes", "document-features", @@ -4041,6 +4061,16 @@ dependencies = [ "ucd-parse", ] +[[package]] +name = "prefix-trie" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cb065e4407d69a5a5265221262cceeafff7f1aabc545d01ed955cce92ee78b" +dependencies = [ + "ipnet", + "num-traits", +] + [[package]] name = "pretty_assertions" version = "1.4.0" @@ -4170,9 +4200,9 @@ dependencies = [ [[package]] name = "quic-rpc" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"110f0fbbf7c4a694902e11d890157245801d89a18d8e9b8d9d2afd91358a6a7c" +checksum = "87cb85690ab1688eade9a5de4d94545a9ceef60639b3370f5e1a28f525eb5589" dependencies = [ "anyhow", "bincode", @@ -4186,6 +4216,7 @@ dependencies = [ "iroh-quinn", "pin-project", "serde", + "slab", "tokio", "tokio-serde", "tokio-util", @@ -4194,9 +4225,9 @@ dependencies = [ [[package]] name = "quic-rpc-derive" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02c2d3637609d07aa2560f178588dce09cf2f4f5cdcc9b0caac0063a188a898" +checksum = "6150a9fd3cf6c34d25730fe55a247b99d1c6e4fad6e7b7843f729a431a57e919" dependencies = [ "proc-macro2", "quic-rpc", @@ -4210,23 +4241,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quinn" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" -dependencies = [ - "bytes", - "pin-project-lite", - "quinn-proto 0.10.6", - "quinn-udp 0.4.1", - "rustc-hash", - "rustls 0.21.12", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "quinn" version = "0.11.2" @@ -4235,33 +4249,15 @@ checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" dependencies = [ "bytes", "pin-project-lite", - "quinn-proto 0.11.3", - "quinn-udp 0.5.3", - "rustc-hash", - "rustls 0.23.11", + "quinn-proto", + "quinn-udp", + "rustc-hash 1.1.0", + "rustls", "thiserror", "tokio", "tracing", ] -[[package]] -name = "quinn-proto" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" -dependencies = [ - "bytes", - "rand", - "ring 0.16.20", - "rustc-hash", - "rustls 0.21.12", - "rustls-native-certs", - "slab", - "thiserror", - "tinyvec", - "tracing", -] - 
[[package]] name = "quinn-proto" version = "0.11.3" @@ -4270,28 +4266,15 @@ checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" dependencies = [ "bytes", "rand", - "ring 0.17.8", - "rustc-hash", - "rustls 0.23.11", + "ring", + "rustc-hash 1.1.0", + "rustls", "slab", "thiserror", "tinyvec", "tracing", ] -[[package]] -name = "quinn-udp" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" -dependencies = [ - "bytes", - "libc", - "socket2", - "tracing", - "windows-sys 0.48.0", -] - [[package]] name = "quinn-udp" version = "0.5.3" @@ -4439,18 +4422,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rcgen" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" -dependencies = [ - "pem", - "ring 0.16.20", - "time", - "yasna", -] - [[package]] name = "rcgen" version = "0.12.1" @@ -4458,7 +4429,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1" dependencies = [ "pem", - "ring 0.17.8", + "ring", "time", "yasna", ] @@ -4593,50 +4564,9 @@ checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", - "hyper-rustls 0.24.2", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - 
"system-configuration 0.5.1", - "tokio", - "tokio-rustls 0.24.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots 0.25.4", - "winreg 0.50.0", -] - -[[package]] -name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", @@ -4646,7 +4576,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.4.1", - "hyper-rustls 0.27.2", + "hyper-rustls", "hyper-util", "ipnet", "js-sys", @@ -4655,23 +4585,23 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "quinn 0.11.2", - "rustls 0.23.11", - "rustls-pemfile 2.1.2", + "quinn", + "rustls", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.3", - "winreg 0.52.0", + "webpki-roots", + "windows-registry", ] [[package]] @@ -4694,21 +4624,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.8" @@ -4719,8 +4634,8 @@ dependencies = [ "cfg-if", "getrandom", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.52.0", ] @@ -4775,6 +4690,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" 
+version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.4.0" @@ -4806,18 +4727,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring 0.17.8", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.23.11" @@ -4826,34 +4735,26 @@ checksum = "4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" dependencies = [ "log", "once_cell", - "ring 0.17.8", + "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "04182dffc9091a404e0fc069ea5cd60e5b866c3adf881eff99a32d048242dffa" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.4", + "rustls-pemfile", + "rustls-pki-types", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.1.2" @@ -4871,24 +4772,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] -name = "rustls-webpki" -version = "0.101.7" +name = "rustls-platform-verifier" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "93bda3f493b9abe5b93b3e7e3ecde0df292f2bd28c0296b90586ee0055ff5123" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-roots", + "winapi", ] +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ - "ring 0.17.8", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -4971,16 +4889,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "sec1" version = "0.7.3" @@ -5005,6 +4913,7 @@ dependencies = [ "core-foundation", "core-foundation-sys", "libc", + "num-bigint", "security-framework-sys", ] @@ -5317,12 +5226,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -5577,7 +5480,7 @@ checksum = 
"39769914108ae68e261d85ceac7bce7095947130f79c29d4535e9b31fc702a40" dependencies = [ "acto", "anyhow", - "hickory-proto", + "hickory-proto 0.24.1", "rand", "socket2", "tokio", @@ -5628,17 +5531,8 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" - -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "unicode-xid", + "futures-core", ] [[package]] @@ -5666,17 +5560,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys 0.5.0", -] - [[package]] name = "system-configuration" version = "0.6.0" @@ -5685,17 +5568,7 @@ checksum = "658bc6ee10a9b4fcf576e9b0819d95ec16f4d2c02d39fd83ac1c8789785c4a42" dependencies = [ "bitflags 2.6.0", "core-foundation", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", + "system-configuration-sys", ] [[package]] @@ -5876,32 +5749,22 @@ dependencies = [ "syn 2.0.72", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.11", + "rustls", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-rustls-acme" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ebc06d846f8367f24c3a8882328707d1a5e507ef4f40943723ddbe2c17b9f24" +checksum = "e4ee7cbca7da86fa030e33b0deac55bad0e0bf8ab909f1a84666f04447f6339b" dependencies = [ "async-trait", "axum-server", @@ -5912,18 +5775,19 @@ dependencies = [ "num-bigint", "pem", "proc-macro2", - "rcgen 0.12.1", - "reqwest 0.11.27", - "ring 0.17.8", - "rustls 0.21.12", + "rcgen", + "reqwest", + "ring", + "rustls", "serde", "serde_json", "thiserror", + "time", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "url", - "webpki-roots 0.25.4", - "x509-parser 0.16.0", + "webpki-roots", + "x509-parser", ] [[package]] @@ -6310,12 +6174,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -6331,10 +6189,10 @@ dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.11", + "rustls", "rustls-pki-types", "url", - "webpki-roots 0.26.3", + "webpki-roots", ] [[package]] @@ -6507,12 +6365,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.3" @@ -6677,6 +6529,17 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + [[package]] name = "windows-result" version = "0.2.0" @@ -6714,6 +6577,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -6863,16 +6735,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wmi" version = "0.13.3" @@ -6896,35 +6758,18 @@ dependencies = [ "tap", ] -[[package]] -name = "x509-parser" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" -dependencies = [ - "asn1-rs 0.5.2", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "x509-parser" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "data-encoding", - "der-parser 9.0.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.7.0", + "oid-registry", "rusticata-macros", "thiserror", "time", diff --git a/deny.toml b/deny.toml index 5230025622..f57140ac2c 100644 --- a/deny.toml +++ b/deny.toml @@ -2,6 +2,8 @@ multiple-versions = "allow" deny = [ "aws-lc", + "aws-lc-rs", + "aws-lc-sys", "native-tls", 
"openssl", ] diff --git a/iroh-blobs/Cargo.toml b/iroh-blobs/Cargo.toml index 401daa0100..ff445724c2 100644 --- a/iroh-blobs/Cargo.toml +++ b/iroh-blobs/Cargo.toml @@ -19,7 +19,7 @@ workspace = true anyhow = { version = "1" } async-channel = "2.3.1" bao-tree = { version = "0.13", features = ["tokio_fsm", "validate"], default-features = false } -bytes = { version = "1.4", features = ["serde"] } +bytes = { version = "1.7", features = ["serde"] } chrono = "0.4.31" derive_more = { version = "=1.0.0-beta.7", features = ["debug", "display", "deref", "deref_mut", "from", "try_into", "into"] } futures-buffered = "0.2.4" @@ -36,6 +36,7 @@ oneshot = "0.1.8" parking_lot = { version = "0.12.1", optional = true } pin-project = "1.1.5" postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } +quinn = { package = "iroh-quinn", version = "0.11", features = ["ring"] } rand = "0.8" range-collections = "0.4.0" redb = { version = "2.0.0", optional = true } @@ -63,17 +64,10 @@ testresult = "0.4.0" tokio = { version = "1", features = ["macros", "test-util"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] } rcgen = "0.12.0" -rustls = { version = "0.21.11", default-features = false, features = ["quic"] } +rustls = { version = "0.23", default-features = false, features = ["ring"] } tempfile = "3.10.0" futures-util = "0.3.30" -[dev-dependencies.quinn] -# This allows writing the examples without relying on iroh-net. -# Though as they still depend on iroh-quinn this is perhaps not very -# useful right now. Changing them is a bit more work however. -package = "iroh-quinn" -version = "0.10" - [features] default = ["fs-store"] downloader = ["dep:parking_lot", "tokio-util/time", "dep:hashlink"] diff --git a/iroh-blobs/examples/connect/mod.rs b/iroh-blobs/examples/connect/mod.rs index a0df61aa50..4191275a46 100644 --- a/iroh-blobs/examples/connect/mod.rs +++ b/iroh-blobs/examples/connect/mod.rs @@ -1,5 +1,6 @@ //! 
Common code used to created quinn connections in the examples use anyhow::{bail, Context, Result}; +use quinn::crypto::rustls::{QuicClientConfig, QuicServerConfig}; use std::{path::PathBuf, sync::Arc}; use tokio::fs; @@ -17,7 +18,7 @@ pub async fn load_certs() -> Result { let path = PathBuf::from(CERT_PATH).join("cert.der"); match fs::read(path).await { Ok(cert) => { - roots.add(&rustls::Certificate(cert))?; + roots.add(rustls::pki_types::CertificateDer::from(cert))?; } Err(e) => { bail!("failed to open local server certificate: {}\nYou must run the `provide-bytes` example to create the certificate.\n\tcargo run --example provide-bytes", e); @@ -29,7 +30,10 @@ pub async fn load_certs() -> Result { // derived from `quinn/examples/server.rs` // creates a self signed certificate and saves it to "./certs" #[allow(unused)] -pub async fn make_and_write_certs() -> Result<(rustls::PrivateKey, rustls::Certificate)> { +pub async fn make_and_write_certs() -> Result<( + rustls::pki_types::PrivateKeyDer<'static>, + rustls::pki_types::CertificateDer<'static>, +)> { let path = std::path::PathBuf::from(CERT_PATH); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key_path = path.join("key.der"); @@ -47,7 +51,10 @@ pub async fn make_and_write_certs() -> Result<(rustls::PrivateKey, rustls::Certi .await .context("failed to write private key")?; - Ok((rustls::PrivateKey(key), rustls::Certificate(cert))) + Ok(( + rustls::pki_types::PrivateKeyDer::try_from(key).unwrap(), + rustls::pki_types::CertificateDer::from(cert), + )) } // derived from `quinn/examples/client.rs` @@ -55,13 +62,12 @@ pub async fn make_and_write_certs() -> Result<(rustls::PrivateKey, rustls::Certi #[allow(unused)] pub fn make_client_endpoint(roots: rustls::RootCertStore) -> Result { let mut client_crypto = rustls::ClientConfig::builder() - .with_safe_defaults() .with_root_certificates(roots) .with_no_client_auth(); client_crypto.alpn_protocols = vec![EXAMPLE_ALPN.to_vec()]; - - let 
client_config = quinn::ClientConfig::new(Arc::new(client_crypto)); + let client_config: QuicClientConfig = client_crypto.try_into()?; + let client_config = quinn::ClientConfig::new(Arc::new(client_config)); let mut endpoint = quinn::Endpoint::client("[::]:0".parse().unwrap())?; endpoint.set_default_client_config(client_config); Ok(endpoint) @@ -71,15 +77,15 @@ pub fn make_client_endpoint(roots: rustls::RootCertStore) -> Result, + cert: rustls::pki_types::CertificateDer<'static>, ) -> Result { let mut server_crypto = rustls::ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() .with_single_cert(vec![cert], key)?; server_crypto.alpn_protocols = vec![EXAMPLE_ALPN.to_vec()]; - let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_crypto)); + let server_config: QuicServerConfig = server_crypto.try_into()?; + let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_config)); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.max_concurrent_uni_streams(0_u8.into()); diff --git a/iroh-blobs/examples/provide-bytes.rs b/iroh-blobs/examples/provide-bytes.rs index 60211563f8..f916dd467a 100644 --- a/iroh-blobs/examples/provide-bytes.rs +++ b/iroh-blobs/examples/provide-bytes.rs @@ -87,13 +87,22 @@ async fn main() -> Result<()> { while let Some(incoming) = endpoint.accept().await { println!("connection incoming"); + let conn = match incoming.accept() { + Ok(conn) => conn, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; let db = db.clone(); let lp = lp.clone(); // spawn a task to handle the connection tokio::spawn(async move { - let remote_addr = incoming.remote_address(); - let conn = match incoming.await { + let remote_addr = conn.remote_address(); + let conn = match conn.await { Ok(conn) => conn, Err(err) => { warn!(%remote_addr, "Error connecting: {err:#}"); 
diff --git a/iroh-blobs/src/get.rs b/iroh-blobs/src/get.rs index 8d41e5f047..52b3f217b3 100644 --- a/iroh-blobs/src/get.rs +++ b/iroh-blobs/src/get.rs @@ -188,7 +188,10 @@ pub mod fsm { RequestTooBig, /// Error when writing the request to the [`SendStream`]. #[error("write: {0}")] - Write(#[from] endpoint::WriteError), + Write(#[from] quinn::WriteError), + /// Quic connection is closed. + #[error("closed")] + Closed(#[from] quinn::ClosedStream), /// A generic io error #[error("io {0}")] Io(io::Error), @@ -257,7 +260,7 @@ pub mod fsm { // 2. Finish writing before expecting a response let (mut writer, bytes_written) = writer.into_parts(); - writer.finish().await?; + writer.finish()?; let hash = request.hash; let ranges_iter = RangesIter::new(request.ranges); diff --git a/iroh-blobs/src/get/error.rs b/iroh-blobs/src/get/error.rs index 62caf26965..8980d04a9d 100644 --- a/iroh-blobs/src/get/error.rs +++ b/iroh-blobs/src/get/error.rs @@ -73,6 +73,11 @@ impl From for GetError { // TODO(@divma): don't see how this is reachable but let's just not use the peer GetError::Io(e.into()) } + e @ quinn::ConnectionError::CidsExhausted => { + // > The connection could not be created because not enough of the CID space + // > is available + GetError::Io(e.into()) + } } } } @@ -83,7 +88,7 @@ impl From for GetError { match value { e @ ReadError::Reset(_) => GetError::RemoteReset(e.into()), ReadError::ConnectionLost(conn_error) => conn_error.into(), - ReadError::UnknownStream + ReadError::ClosedStream | ReadError::IllegalOrderedRead | ReadError::ZeroRttRejected => { // all these errors indicate the peer is not usable at this moment @@ -92,6 +97,11 @@ impl From for GetError { } } } +impl From for GetError { + fn from(value: quinn::ClosedStream) -> Self { + GetError::Io(value.into()) + } +} impl From for GetError { fn from(value: endpoint::WriteError) -> Self { @@ -99,7 +109,7 @@ impl From for GetError { match value { e @ WriteError::Stopped(_) => GetError::RemoteReset(e.into()), 
WriteError::ConnectionLost(conn_error) => conn_error.into(), - WriteError::UnknownStream | WriteError::ZeroRttRejected => { + WriteError::ClosedStream | WriteError::ZeroRttRejected => { // all these errors indicate the peer is not usable at this moment GetError::Io(value.into()) } @@ -120,6 +130,7 @@ impl From for GetError { GetError::BadRequest(e.into()) } Write(e) => e.into(), + Closed(e) => e.into(), e @ Io(_) => { // io errors are likely recoverable GetError::Io(e.into()) diff --git a/iroh-blobs/src/provider.rs b/iroh-blobs/src/provider.rs index 4144004a1f..5cf934c207 100644 --- a/iroh-blobs/src/provider.rs +++ b/iroh-blobs/src/provider.rs @@ -292,7 +292,7 @@ pub(crate) async fn transfer_hash_seq( stats.send += tw.stats(); stats.read += blob_read_stats; if SentStatus::NotFound == status { - writer.inner.finish().await?; + writer.inner.finish()?; return Ok(status); } @@ -516,7 +516,7 @@ pub async fn handle_get( None => { debug!("not found {}", hash); writer.notify_transfer_aborted(None).await; - writer.inner.finish().await?; + writer.inner.finish()?; } }; diff --git a/iroh-cli/Cargo.toml b/iroh-cli/Cargo.toml index 3525bc4595..07923bd4ba 100644 --- a/iroh-cli/Cargo.toml +++ b/iroh-cli/Cargo.toml @@ -25,7 +25,7 @@ doc = false anyhow = "1.0.81" async-channel = "2.3.1" bao-tree = "0.13" -bytes = "1.5.0" +bytes = "1.7" clap = { version = "4", features = ["derive"] } colored = "2.0.4" comfy-table = "7.0.1" @@ -44,13 +44,13 @@ iroh = { version = "0.23.0", path = "../iroh", features = ["metrics"] } iroh-gossip = { version = "0.23.0", path = "../iroh-gossip" } iroh-metrics = { version = "0.23.0", path = "../iroh-metrics" } parking_lot = "0.12.1" -pkarr = { version = "2.0.0", default-features = false } +pkarr = { version = "2.2.0", default-features = false } portable-atomic = "1" postcard = "1.0.8" -quic-rpc = { version = "0.11", features = ["flume-transport", "quinn-transport"] } +quic-rpc = { version = "0.12", features = ["flume-transport", "quinn-transport"] } rand = 
"0.8.5" ratatui = "0.26.2" -reqwest = { version = "0.12.4", default-features = false, features = ["json", "rustls-tls"] } +reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } rustyline = "12.0.0" serde = { version = "1.0.197", features = ["derive"] } serde_with = "3.7.0" diff --git a/iroh-cli/src/commands/doctor.rs b/iroh-cli/src/commands/doctor.rs index 78c014a3e6..08d954051c 100644 --- a/iroh-cli/src/commands/doctor.rs +++ b/iroh-cli/src/commands/doctor.rs @@ -317,7 +317,7 @@ async fn handle_test_request( gui.set_send(bytes, elapsed); } } - send.finish().await?; + send.finish()?; Ok(()) } @@ -555,7 +555,7 @@ async fn echo_test( let progress = update_pb("echo", pb.cloned(), size, updates); let t0 = Instant::now(); send_blocks(&mut send, size, 1024 * 1024).await?; - send.finish().await?; + send.finish()?; let received = copying.await??; anyhow::ensure!(received == size); let duration = t0.elapsed(); @@ -578,7 +578,7 @@ async fn send_test( let t0 = Instant::now(); send_blocks(&mut send_with_progress, size, 1024 * 1024).await?; drop(send_with_progress); - send.finish().await?; + send.finish()?; drop(send); let received = copying.await??; anyhow::ensure!(received == 0); @@ -606,7 +606,7 @@ async fn recv_test( .await?; let copying = tokio::spawn(async move { tokio::io::copy(&mut recv, &mut sink).await }); let progress = update_pb("recv", pb.cloned(), size, updates); - send.finish().await?; + send.finish()?; let received = copying.await??; anyhow::ensure!(received == size); let duration = t0.elapsed(); @@ -757,7 +757,16 @@ async fn accept( ); } let connections = Arc::new(AtomicU64::default()); - while let Some(connecting) = endpoint.accept().await { + while let Some(incoming) = endpoint.accept().await { + let connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + 
continue; + } + }; let connections = connections.clone(); let endpoint = endpoint.clone(); tokio::task::spawn(async move { diff --git a/iroh-dns-server/Cargo.toml b/iroh-dns-server/Cargo.toml index 5125ed646d..aa8008a07e 100644 --- a/iroh-dns-server/Cargo.toml +++ b/iroh-dns-server/Cargo.toml @@ -13,33 +13,33 @@ readme = "README.md" anyhow = "1.0.80" async-trait = "0.1.77" axum = { version = "0.7.4", features = ["macros"] } -axum-server = { version = "0.6.0", features = ["tls-rustls"] } +axum-server = { version = "0.7", features = ["tls-rustls-no-provider"] } base64-url = "2.0.2" -bytes = "1.5.0" +bytes = "1.7" clap = { version = "4.5.1", features = ["derive"] } derive_more = { version = "=1.0.0-beta.7", features = ["debug", "display", "into", "from"] } dirs-next = "2.0.0" futures-lite = "2.3.0" governor = "0.6.3" -hickory-proto = "0.24.0" -hickory-server = { version = "0.24.0", features = ["dns-over-rustls"] } +hickory-proto = "=0.25.0-alpha.2" +hickory-server = { version = "=0.25.0-alpha.2", features = ["dns-over-rustls"] } http = "1.0.0" iroh-metrics = { version = "0.23.0", path = "../iroh-metrics" } lru = "0.12.3" mainline = "2.0.1" parking_lot = "0.12.1" -pkarr = { version = "2.0.0", features = [ "async", "relay", "dht"], default-features = false } +pkarr = { version = "2.2.0", features = [ "async", "relay", "dht"], default-features = false } rcgen = "0.12.1" redb = "2.0.0" regex = "1.10.3" -rustls = "0.21.11" -rustls-pemfile = "1" +rustls = { version = "0.23", default-features = false, features = ["ring"] } +rustls-pemfile = { version = "2.1" } serde = { version = "1.0.197", features = ["derive"] } struct_iterable = "0.1.1" strum = { version = "0.26.1", features = ["derive"] } tokio = { version = "1.36.0", features = ["full"] } -tokio-rustls = "0.24.1" -tokio-rustls-acme = { version = "0.3", features = ["axum"] } +tokio-rustls = { version = "0.26", default-features = false, features = ["logging", "ring"] } +tokio-rustls-acme = { version = "0.4", features = 
["axum"] } tokio-stream = "0.1.14" tokio-util = "0.7.10" toml = "0.8.10" @@ -52,10 +52,10 @@ url = "2.5.0" z32 = "1.1.1" [dev-dependencies] -hickory-resolver = "0.24.0" +hickory-resolver = "=0.25.0-alpha.2" iroh-net = { version = "0.23.0", path = "../iroh-net" } iroh-test = { path = "../iroh-test" } -pkarr = { version = "2.0.0", features = ["rand"] } +pkarr = { version = "2.2.0", features = ["rand"] } [package.metadata.docs.rs] all-features = true diff --git a/iroh-dns-server/src/http/doh/response.rs b/iroh-dns-server/src/http/doh/response.rs index f4cee805c0..b7ebedfa92 100644 --- a/iroh-dns-server/src/http/doh/response.rs +++ b/iroh-dns-server/src/http/doh/response.rs @@ -3,7 +3,7 @@ // This module is mostly copied from // https://github.com/fission-codes/fission-server/blob/394de877fad021260c69fdb1edd7bb4b2f98108c/fission-core/src/dns.rs -use anyhow::{anyhow, ensure, Result}; +use anyhow::{ensure, Result}; use hickory_proto as proto; use serde::{Deserialize, Serialize}; @@ -130,15 +130,11 @@ pub struct DohRecordJson { impl DohRecordJson { /// Create a new JSON record from a DNS record pub fn from_record(record: &proto::rr::Record) -> Result { - let data = record - .data() - .ok_or_else(|| anyhow!("Missing record data"))?; - Ok(Self { name: record.name().to_string(), record_type: record.record_type().into(), ttl: record.ttl(), - data: data.to_string(), + data: record.data().to_string(), }) } } diff --git a/iroh-dns-server/src/http/tls.rs b/iroh-dns-server/src/http/tls.rs index 1133d498d5..191abba2ad 100644 --- a/iroh-dns-server/src/http/tls.rs +++ b/iroh-dns-server/src/http/tls.rs @@ -75,19 +75,14 @@ impl Acce impl TlsAcceptor { async fn self_signed(domains: Vec) -> Result { let tls_cert = rcgen::generate_simple_self_signed(domains)?; - let config = RustlsConfig::from_der( - vec![tls_cert.serialize_der()?], - tls_cert.serialize_private_key_der(), - ) - .await?; + let key = tls_cert.serialize_private_key_der(); + let config = 
RustlsConfig::from_der(vec![tls_cert.serialize_der()?], key).await?; let acceptor = RustlsAcceptor::new(config); Ok(Self::Manual(acceptor)) } async fn manual(domains: Vec, dir: PathBuf) -> Result { - let config = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth(); + let config = rustls::ServerConfig::builder().with_no_client_auth(); if domains.len() != 1 { bail!("Multiple domains in manual mode are not supported"); } @@ -114,9 +109,7 @@ impl TlsAcceptor { is_production: bool, dir: PathBuf, ) -> Result { - let config = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth(); + let config = rustls::ServerConfig::builder().with_no_client_auth(); let mut state = AcmeConfig::new(domains) .contact([format!("mailto:{contact}")]) .cache_option(Some(DirCache::new(dir))) @@ -143,27 +136,35 @@ impl TlsAcceptor { } } -fn load_certs(filename: impl AsRef) -> Result> { +fn load_certs( + filename: impl AsRef, +) -> Result>> { let certfile = std::fs::File::open(filename).context("cannot open certificate file")?; let mut reader = std::io::BufReader::new(certfile); - let certs = rustls_pemfile::certs(&mut reader)? - .iter() - .map(|v| rustls::Certificate(v.clone())) - .collect(); + let certs: Result, std::io::Error> = rustls_pemfile::certs(&mut reader).collect(); + let certs = certs?; Ok(certs) } -fn load_secret_key(filename: impl AsRef) -> Result { +fn load_secret_key( + filename: impl AsRef, +) -> Result> { let keyfile = std::fs::File::open(filename.as_ref()).context("cannot open secret key file")?; let mut reader = std::io::BufReader::new(keyfile); loop { match rustls_pemfile::read_one(&mut reader).context("cannot parse secret key .pem file")? 
{ - Some(rustls_pemfile::Item::RSAKey(key)) => return Ok(rustls::PrivateKey(key)), - Some(rustls_pemfile::Item::PKCS8Key(key)) => return Ok(rustls::PrivateKey(key)), - Some(rustls_pemfile::Item::ECKey(key)) => return Ok(rustls::PrivateKey(key)), + Some(rustls_pemfile::Item::Pkcs1Key(key)) => { + return Ok(rustls::pki_types::PrivateKeyDer::Pkcs1(key)); + } + Some(rustls_pemfile::Item::Pkcs8Key(key)) => { + return Ok(rustls::pki_types::PrivateKeyDer::Pkcs8(key)); + } + Some(rustls_pemfile::Item::Sec1Key(key)) => { + return Ok(rustls::pki_types::PrivateKeyDer::Sec1(key)); + } None => break, _ => {} } diff --git a/iroh-docs/Cargo.toml b/iroh-docs/Cargo.toml index 2a4e0c6069..6b36367e4f 100644 --- a/iroh-docs/Cargo.toml +++ b/iroh-docs/Cargo.toml @@ -18,7 +18,7 @@ workspace = true anyhow = "1" async-channel = "2.3.1" blake3 = { package = "iroh-blake3", version = "1.4.5"} -bytes = { version = "1.4", features = ["serde"] } +bytes = { version = "1.7", features = ["serde"] } derive_more = { version = "=1.0.0-beta.7", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref"] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } futures-buffered = "0.2.4" diff --git a/iroh-docs/src/net.rs b/iroh-docs/src/net.rs index a3f90032e1..5879fdf7e0 100644 --- a/iroh-docs/src/net.rs +++ b/iroh-docs/src/net.rs @@ -48,7 +48,8 @@ pub async fn connect_and_sync( let res = run_alice(&mut send_stream, &mut recv_stream, sync, namespace, peer_id).await; - send_stream.finish().await.map_err(ConnectError::close)?; + send_stream.finish().map_err(ConnectError::close)?; + send_stream.stopped().await.map_err(ConnectError::close)?; recv_stream .read_to_end(0) .await @@ -145,6 +146,9 @@ where send_stream .finish() + .map_err(|error| AcceptError::close(peer, namespace, error))?; + send_stream + .stopped() .await .map_err(|error| AcceptError::close(peer, namespace, error))?; recv_stream diff --git a/iroh-gossip/Cargo.toml b/iroh-gossip/Cargo.toml index 
12f1be0cc0..45add30dc4 100644 --- a/iroh-gossip/Cargo.toml +++ b/iroh-gossip/Cargo.toml @@ -18,20 +18,23 @@ workspace = true anyhow = { version = "1" } async-channel = { version = "2.3.1", optional = true } blake3 = { package = "iroh-blake3", version = "1.4.5"} -bytes = { version = "1.4.0", features = ["serde"] } +bytes = { version = "1.7", features = ["serde"] } derive_more = { version = "=1.0.0-beta.7", features = ["add", "debug", "deref", "display", "from", "try_into", "into"] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } -futures-concurrency = { version = "7.6.1", optional = true } -futures-lite = { version = "2.3", optional = true } -futures-util = { version = "0.3.30", optional = true } indexmap = "2.0" iroh-base = { version = "0.23.0", path = "../iroh-base" } iroh-metrics = { version = "0.23.0", path = "../iroh-metrics" } -iroh-net = { path = "../iroh-net", version = "0.23.0", optional = true, default-features = false } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } rand = { version = "0.8.5", features = ["std_rng"] } rand_core = "0.6.4" serde = { version = "1.0.164", features = ["derive"] } + +# net dependencies (optional) +quinn = { package = "iroh-quinn", version = "0.11", optional = true } +futures-lite = { version = "2.3", optional = true } +futures-concurrency = { version = "7.6.1", optional = true } +futures-util = { version = "0.3.30", optional = true } +iroh-net = { path = "../iroh-net", version = "0.23.0", optional = true, default-features = false } tokio = { version = "1", optional = true, features = ["io-util", "sync", "rt", "macros", "net", "fs"] } tokio-util = { version = "0.7.8", optional = true, features = ["codec"] } tracing = "0.1" @@ -46,7 +49,15 @@ url = "2.4.0" [features] default = ["net"] -net = ["dep:futures-lite", "dep:iroh-net", "dep:tokio", "dep:tokio-util", "dep:async-channel", "dep:futures-util", "dep:futures-concurrency"] +net = [ + 
"dep:futures-lite", + "dep:iroh-net", + "dep:tokio", + "dep:tokio-util", + "dep:async-channel", + "dep:futures-util", + "dep:futures-concurrency" +] [[example]] name = "chat" diff --git a/iroh-gossip/examples/chat.rs b/iroh-gossip/examples/chat.rs index 4aa36a2f56..85f10b041e 100644 --- a/iroh-gossip/examples/chat.rs +++ b/iroh-gossip/examples/chat.rs @@ -16,6 +16,7 @@ use iroh_net::{ Endpoint, NodeAddr, }; use serde::{Deserialize, Serialize}; +use tracing::warn; /// Chat over iroh-gossip /// @@ -189,7 +190,16 @@ async fn subscribe_loop(mut receiver: GossipReceiver) -> Result<()> { } async fn endpoint_loop(endpoint: Endpoint, gossip: Gossip) { - while let Some(conn) = endpoint.accept().await { + while let Some(incoming) = endpoint.accept().await { + let conn = match incoming.accept() { + Ok(conn) => conn, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; let gossip = gossip.clone(); tokio::spawn(async move { if let Err(err) = handle_connection(conn, gossip).await { @@ -198,7 +208,11 @@ async fn endpoint_loop(endpoint: Endpoint, gossip: Gossip) { }); } } -async fn handle_connection(mut conn: iroh_net::endpoint::Connecting, gossip: Gossip) -> Result<()> { + +async fn handle_connection( + mut conn: iroh_net::endpoint::Connecting, + gossip: Gossip, +) -> anyhow::Result<()> { let alpn = conn.alpn().await?; let conn = conn.await?; let peer_id = iroh_net::endpoint::get_remote_node_id(&conn)?; diff --git a/iroh-gossip/src/net.rs b/iroh-gossip/src/net.rs index 3be8169eab..d13c400b37 100644 --- a/iroh-gossip/src/net.rs +++ b/iroh-gossip/src/net.rs @@ -861,9 +861,20 @@ mod test { tokio::select! { biased; _ = cancel.cancelled() => break, - conn = endpoint.accept() => match conn { + incoming = endpoint.accept() => match incoming { None => break, - Some(conn) => gossip.handle_connection(conn.await?).await? 
+ Some(incoming) => { + let connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; + gossip.handle_connection(connecting.await?).await? + } } } } diff --git a/iroh-metrics/Cargo.toml b/iroh-metrics/Cargo.toml index 4affde3a39..4988b3bda1 100644 --- a/iroh-metrics/Cargo.toml +++ b/iroh-metrics/Cargo.toml @@ -22,7 +22,7 @@ hyper = { version = "1", features = ["server", "http1"] } hyper-util = { version = "0.1.1", features = ["tokio"] } once_cell = "1.17.0" prometheus-client = { version = "0.22", optional = true } -reqwest = { version = "0.12.4", default-features = false, features = ["json", "rustls-tls"] } +reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } serde = { version = "1.0", features = ["derive"] } struct_iterable = "0.1" time = { version = "0.3.21", features = ["serde-well-known"] } diff --git a/iroh-net/Cargo.toml b/iroh-net/Cargo.toml index 5ac30e151d..9d2710e563 100644 --- a/iroh-net/Cargo.toml +++ b/iroh-net/Cargo.toml @@ -19,7 +19,7 @@ workspace = true anyhow = { version = "1" } base64 = "0.22.1" backoff = "0.4.0" -bytes = "1" +bytes = "1.7" netdev = "0.30.0" der = { version = "0.7", features = ["alloc", "derive"] } derive_more = { version = "=1.0.0-beta.7", features = ["debug", "display", "from", "try_into", "deref"] } @@ -30,8 +30,8 @@ futures-sink = "0.3.25" futures-util = "0.3.25" governor = "0.6.0" hex = "0.4.3" -hickory-proto = "0.24.0" -hickory-resolver = "0.24.0" +hickory-proto = "=0.25.0-alpha.2" +hickory-resolver = "=0.25.0-alpha.2" hostname = "0.3.1" http = "1" http-body-util = "0.1.0" @@ -44,17 +44,17 @@ num_enum = "0.7" once_cell = "1.18.0" parking_lot = "0.12.1" pin-project = "1" -pkarr = { version = "2.0.0", default-features = false, features = ["async", "relay"] } +pkarr = { version = "2", default-features = 
false, features = ["async", "relay"] } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } -quinn = { package = "iroh-quinn", version = "0.10.5" } -quinn-proto = { package = "iroh-quinn-proto", version = "0.10.8" } -quinn-udp = { package = "iroh-quinn-udp", version = "0.4.2" } +quinn = { package = "iroh-quinn", version = "0.11" } +quinn-proto = { package = "iroh-quinn-proto", version = "0.11" } +quinn-udp = { package = "iroh-quinn-udp", version = "0.5" } rand = "0.8" rand_core = "0.6.4" rcgen = "0.12" -reqwest = { version = "0.12.4", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } ring = "0.17" -rustls = { version = "0.21.11", default-features = false, features = ["dangerous_configuration"] } +rustls = { version = "0.23", default-features = false, features = ["ring"] } serde = { version = "1", features = ["derive", "rc"] } smallvec = "1.11.1" socket2 = "0.5.3" @@ -63,7 +63,7 @@ surge-ping = "0.8.0" thiserror = "1" time = "0.3.20" tokio = { version = "1", features = ["io-util", "macros", "sync", "rt", "net", "fs", "io-std", "signal", "process"] } -tokio-rustls = { version = "0.24" } +tokio-rustls = { version = "0.26", default-features = false, features = ["logging", "ring"] } tokio-tungstenite = "0.21" tokio-tungstenite-wasm = "0.3" tokio-util = { version = "0.7", features = ["io-util", "io", "codec"] } @@ -71,20 +71,20 @@ tracing = "0.1" tungstenite = "0.21" url = { version = "2.4", features = ["serde"] } watchable = "1.1.2" -webpki = { package = "rustls-webpki", version = "0.101.4", features = ["std"] } -webpki-roots = "0.25" -x509-parser = "0.15" +webpki = { package = "rustls-webpki", version = "0.102" } +webpki-roots = "0.26" +x509-parser = "0.16" z32 = "1.0.3" # iroh-relay axum = { version = "0.7.4", optional = true } clap = { version = "4", features = ["derive"], optional = true } regex = { version = "1.7.1", 
optional = true } -rustls-pemfile = { version = "1.0.2", optional = true } +rustls-pemfile = { version = "2.1", optional = true } serde_with = { version = "3.3", optional = true } toml = { version = "0.8", optional = true } tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = true } -tokio-rustls-acme = { version = "0.3", optional = true } +tokio-rustls-acme = { version = "0.4", optional = true } # metrics iroh-metrics = { version = "0.23.0", path = "../iroh-metrics", default-features = false } diff --git a/iroh-net/bench/Cargo.toml b/iroh-net/bench/Cargo.toml index 892dde19dc..8def078356 100644 --- a/iroh-net/bench/Cargo.toml +++ b/iroh-net/bench/Cargo.toml @@ -7,18 +7,16 @@ publish = false [dependencies] anyhow = "1.0.22" -bytes = "1" +bytes = "1.7" hdrhistogram = { version = "7.2", default-features = false } iroh-net = { path = "..", features = ["test-utils"] } iroh-metrics = { path = "../../iroh-metrics" } -rcgen = "0.11.1" -rustls = { version = "0.21.0", default-features = false, features = ["quic"] } +quinn = { package = "iroh-quinn", version = "0.11" } +rcgen = "0.12" +rustls = { version = "0.23", default-features = false, features = ["ring"] } clap = { version = "4", features = ["derive"] } tokio = { version = "1.0.1", features = ["rt", "sync"] } tracing = "0.1" tracing-subscriber = { version = "0.3.0", default-features = false, features = ["env-filter", "fmt", "ansi", "time", "local-time"] } socket2 = "0.5" futures-lite = "2.3.0" - -[target.'cfg(not(any(target_os = "freebsd", target_os = "openbsd", target_os = "netbsd")))'.dependencies] -quinn = "0.10" diff --git a/iroh-net/bench/src/iroh.rs b/iroh-net/bench/src/iroh.rs index f411640b26..3048c2062f 100644 --- a/iroh-net/bench/src/iroh.rs +++ b/iroh-net/bench/src/iroh.rs @@ -11,7 +11,7 @@ use iroh_net::{ relay::{RelayMap, RelayMode, RelayUrl}, Endpoint, NodeAddr, }; -use tracing::trace; +use tracing::{trace, warn}; use crate::{ client_handler, stats::TransferResult, ClientStats, 
ConnectionSelector, EndpointSelector, Opt, @@ -191,7 +191,11 @@ async fn send_data_on_stream(stream: &mut SendStream, stream_size: u64) -> Resul .context("failed sending data")?; } - stream.finish().await.context("failed finishing stream")?; + stream.finish().context("failed finishing stream")?; + stream + .stopped() + .await + .context("failed to wait for stream to be stopped")?; Ok(()) } @@ -225,8 +229,17 @@ pub async fn server(endpoint: Endpoint, opt: Opt) -> Result<()> { // Handle only the expected amount of clients for _ in 0..opt.clients { - let handshake = endpoint.accept().await.unwrap(); - let connection = handshake.await.context("handshake failed")?; + let incoming = endpoint.accept().await.unwrap(); + let connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; + let connection = connecting.await.context("handshake failed")?; server_tasks.push(tokio::spawn(async move { loop { diff --git a/iroh-net/bench/src/quinn.rs b/iroh-net/bench/src/quinn.rs index f3895750bd..fb579bf0f0 100644 --- a/iroh-net/bench/src/quinn.rs +++ b/iroh-net/bench/src/quinn.rs @@ -68,9 +68,9 @@ pub async fn connect_client( opt: Opt, ) -> Result<(::quinn::Endpoint, Connection)> { let secret_key = iroh_net::key::SecretKey::generate(); - let tls_client_config = + let quic_client_config = iroh_net::tls::make_client_config(&secret_key, None, vec![ALPN.to_vec()], false)?; - let mut config = quinn::ClientConfig::new(Arc::new(tls_client_config)); + let mut config = quinn::ClientConfig::new(Arc::new(quic_client_config)); let transport = transport_config(opt.max_streams, opt.initial_mtu); @@ -211,7 +211,11 @@ async fn send_data_on_stream(stream: &mut SendStream, stream_size: u64) -> Resul .context("failed sending data")?; } - stream.finish().await.context("failed finishing stream")?; + 
stream.finish().context("failed finishing stream")?; + stream + .stopped() + .await + .context("failed to wait for stream to be stopped")?; Ok(()) } @@ -245,8 +249,17 @@ pub async fn server(endpoint: Endpoint, opt: Opt) -> Result<()> { // Handle only the expected amount of clients for _ in 0..opt.clients { - let handshake = endpoint.accept().await.unwrap(); - let connection = handshake.await.context("handshake failed")?; + let incoming = endpoint.accept().await.unwrap(); + let connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; + let connection = connecting.await.context("handshake failed")?; server_tasks.push(tokio::spawn(async move { loop { diff --git a/iroh-net/examples/connect.rs b/iroh-net/examples/connect.rs index 216a4e42eb..cd1b736f27 100644 --- a/iroh-net/examples/connect.rs +++ b/iroh-net/examples/connect.rs @@ -84,10 +84,13 @@ async fn main() -> anyhow::Result<()> { send.write_all(message.as_bytes()).await?; // Call `finish` to close the send side of the connection gracefully. - send.finish().await?; + send.finish()?; let message = recv.read_to_end(100).await?; let message = String::from_utf8(message)?; println!("received: {message}"); + // We received the last message: close all connections and allow for the close + // message to be sent. 
+ endpoint.close(0u8.into(), b"bye").await?; Ok(()) } diff --git a/iroh-net/examples/dht_discovery.rs b/iroh-net/examples/dht_discovery.rs index 787b152fa2..4fe4038f97 100644 --- a/iroh-net/examples/dht_discovery.rs +++ b/iroh-net/examples/dht_discovery.rs @@ -12,6 +12,7 @@ use std::str::FromStr; use clap::Parser; use iroh_net::{endpoint::get_remote_node_id, Endpoint, NodeId}; +use tracing::warn; use url::Url; const CHAT_ALPN: &[u8] = b"pkarr-discovery-demo-chat"; @@ -75,7 +76,16 @@ async fn chat_server(args: Args) -> anyhow::Result<()> { println!("Listening on {}", node_id); println!("pkarr z32: {}", zid); println!("see https://app.pkarr.org/?pk={}", zid); - while let Some(connecting) = endpoint.accept().await { + while let Some(incoming) = endpoint.accept().await { + let connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; tokio::spawn(async move { let connection = connecting.await?; let remote_node_id = get_remote_node_id(&connection)?; diff --git a/iroh-net/examples/listen-unreliable.rs b/iroh-net/examples/listen-unreliable.rs index 7dbc5e246d..13eddc3865 100644 --- a/iroh-net/examples/listen-unreliable.rs +++ b/iroh-net/examples/listen-unreliable.rs @@ -6,7 +6,7 @@ use anyhow::Context; use futures_lite::StreamExt; use iroh_net::{key::SecretKey, relay::RelayMode, Endpoint}; -use tracing::info; +use tracing::{info, warn}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; @@ -62,9 +62,18 @@ async fn main() -> anyhow::Result<()> { ); // accept incoming connections, returns a normal QUIC connection - while let Some(mut conn) = endpoint.accept().await { - let alpn = conn.alpn().await?; - let conn = conn.await?; + while let Some(incoming) = endpoint.accept().await { + let mut connecting = match 
incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; + let alpn = connecting.alpn().await?; + let conn = connecting.await?; let node_id = iroh_net::endpoint::get_remote_node_id(&conn)?; info!( "new (unreliable) connection from {node_id} with ALPN {} (coming from {})", diff --git a/iroh-net/examples/listen.rs b/iroh-net/examples/listen.rs index 6f538534a4..7460259381 100644 --- a/iroh-net/examples/listen.rs +++ b/iroh-net/examples/listen.rs @@ -3,10 +3,15 @@ //! This example uses the default relay servers to attempt to holepunch, and will use that relay server to relay packets if the two devices cannot establish a direct UDP connection. //! run this example from the project root: //! $ cargo run --example listen +use std::time::Duration; + use anyhow::Context; use futures_lite::StreamExt; -use iroh_net::{key::SecretKey, relay::RelayMode, Endpoint}; -use tracing::{debug, info}; +use iroh_net::endpoint::ConnectionError; +use iroh_net::key::SecretKey; +use iroh_net::relay::RelayMode; +use iroh_net::Endpoint; +use tracing::{debug, info, warn}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; @@ -61,9 +66,18 @@ async fn main() -> anyhow::Result<()> { "\tcargo run --example connect -- --node-id {me} --addrs \"{local_addrs}\" --relay-url {relay_url}\n" ); // accept incoming connections, returns a normal QUIC connection - while let Some(mut conn) = endpoint.accept().await { - let alpn = conn.alpn().await?; - let conn = conn.await?; + while let Some(incoming) = endpoint.accept().await { + let mut connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; 
+ } + }; + let alpn = connecting.alpn().await?; + let conn = connecting.await?; let node_id = iroh_net::endpoint::get_remote_node_id(&conn)?; info!( "new connection from {node_id} with ALPN {} (coming from {})", @@ -84,8 +98,20 @@ async fn main() -> anyhow::Result<()> { let message = format!("hi! you connected to {me}. bye bye"); send.write_all(message.as_bytes()).await?; // call `finish` to close the connection gracefully - send.finish().await?; + send.finish()?; + // We sent the last message, so wait for the client to close the connection once + // it received this message. + let res = tokio::time::timeout(Duration::from_secs(3), async move { + let closed = conn.closed().await; + if !matches!(closed, ConnectionError::ApplicationClosed(_)) { + println!("node {node_id} disconnected with an error: {closed:#}"); + } + }) + .await; + if res.is_err() { + println!("node {node_id} did not disconnect within 3 seconds"); + } Ok::<_, anyhow::Error>(()) }); } diff --git a/iroh-net/src/bin/iroh-relay.rs b/iroh-net/src/bin/iroh-relay.rs index b220399488..298cd7750d 100644 --- a/iroh-net/src/bin/iroh-relay.rs +++ b/iroh-net/src/bin/iroh-relay.rs @@ -47,19 +47,21 @@ enum CertMode { LetsEncrypt, } -fn load_certs(filename: impl AsRef) -> Result> { +fn load_certs( + filename: impl AsRef, +) -> Result>> { let certfile = std::fs::File::open(filename).context("cannot open certificate file")?; let mut reader = std::io::BufReader::new(certfile); - let certs = rustls_pemfile::certs(&mut reader)? 
- .iter() - .map(|v| rustls::Certificate(v.clone())) - .collect(); + let certs: Result, std::io::Error> = rustls_pemfile::certs(&mut reader).collect(); + let certs = certs?; Ok(certs) } -fn load_secret_key(filename: impl AsRef) -> Result { +fn load_secret_key( + filename: impl AsRef, +) -> Result> { let filename = filename.as_ref(); let keyfile = std::fs::File::open(filename) .with_context(|| format!("cannot open secret key file {}", filename.display()))?; @@ -67,9 +69,15 @@ fn load_secret_key(filename: impl AsRef) -> Result { loop { match rustls_pemfile::read_one(&mut reader).context("cannot parse secret key .pem file")? { - Some(rustls_pemfile::Item::RSAKey(key)) => return Ok(rustls::PrivateKey(key)), - Some(rustls_pemfile::Item::PKCS8Key(key)) => return Ok(rustls::PrivateKey(key)), - Some(rustls_pemfile::Item::ECKey(key)) => return Ok(rustls::PrivateKey(key)), + Some(rustls_pemfile::Item::Pkcs1Key(key)) => { + return Ok(rustls::pki_types::PrivateKeyDer::Pkcs1(key)); + } + Some(rustls_pemfile::Item::Pkcs8Key(key)) => { + return Ok(rustls::pki_types::PrivateKeyDer::Pkcs8(key)); + } + Some(rustls_pemfile::Item::Sec1Key(key)) => { + return Ok(rustls::pki_types::PrivateKeyDer::Sec1(key)); + } None => break, _ => {} } diff --git a/iroh-net/src/discovery.rs b/iroh-net/src/discovery.rs index 7d6a171d68..432d44406e 100644 --- a/iroh-net/src/discovery.rs +++ b/iroh-net/src/discovery.rs @@ -344,7 +344,7 @@ mod tests { use parking_lot::Mutex; use rand::Rng; - use crate::{key::SecretKey, relay::RelayMode}; + use crate::{key::SecretKey, relay::RelayMode, util::AbortingJoinHandle}; use super::*; @@ -459,12 +459,12 @@ mod tests { async fn endpoint_discovery_simple_shared() -> anyhow::Result<()> { let _guard = iroh_test::logging::setup(); let disco_shared = TestDiscoveryShared::default(); - let ep1 = { + let (ep1, _guard1) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await }; - let ep2 = { + let 
(ep2, _guard2) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await @@ -481,12 +481,12 @@ mod tests { async fn endpoint_discovery_combined_with_empty() -> anyhow::Result<()> { let _guard = iroh_test::logging::setup(); let disco_shared = TestDiscoveryShared::default(); - let ep1 = { + let (ep1, _guard1) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await }; - let ep2 = { + let (ep2, _guard2) = { let secret = SecretKey::generate(); let disco1 = EmptyDiscovery; let disco2 = disco_shared.create_discovery(secret.public()); @@ -509,12 +509,12 @@ mod tests { async fn endpoint_discovery_combined_with_empty_and_wrong() -> anyhow::Result<()> { let _guard = iroh_test::logging::setup(); let disco_shared = TestDiscoveryShared::default(); - let ep1 = { + let (ep1, _guard1) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await }; - let ep2 = { + let (ep2, _guard2) = { let secret = SecretKey::generate(); let disco1 = EmptyDiscovery; let disco2 = disco_shared.create_lying_discovery(secret.public()); @@ -537,12 +537,12 @@ mod tests { async fn endpoint_discovery_combined_wrong_only() -> anyhow::Result<()> { let _guard = iroh_test::logging::setup(); let disco_shared = TestDiscoveryShared::default(); - let ep1 = { + let (ep1, _guard1) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await }; - let ep2 = { + let (ep2, _guard2) = { let secret = SecretKey::generate(); let disco1 = disco_shared.create_lying_discovery(secret.public()); let disco = ConcurrentDiscovery::from_services(vec![Box::new(disco1)]); @@ -562,12 +562,12 @@ mod tests { async fn endpoint_discovery_with_wrong_existing_addr() -> anyhow::Result<()> { let _guard = iroh_test::logging::setup(); let disco_shared 
= TestDiscoveryShared::default(); - let ep1 = { + let (ep1, _guard1) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await }; - let ep2 = { + let (ep2, _guard2) = { let secret = SecretKey::generate(); let disco = disco_shared.create_discovery(secret.public()); new_endpoint(secret, disco).await @@ -585,15 +585,33 @@ mod tests { Ok(()) } - async fn new_endpoint(secret: SecretKey, disco: impl Discovery + 'static) -> Endpoint { - Endpoint::builder() + async fn new_endpoint( + secret: SecretKey, + disco: impl Discovery + 'static, + ) -> (Endpoint, AbortingJoinHandle>) { + let ep = Endpoint::builder() .secret_key(secret) .discovery(Box::new(disco)) .relay_mode(RelayMode::Disabled) .alpns(vec![TEST_ALPN.to_vec()]) .bind(0) .await - .unwrap() + .unwrap(); + + let handle = tokio::spawn({ + let ep = ep.clone(); + async move { + // we skip accept() errors, they can be caused by retransmits + while let Some(connecting) = ep.accept().await.and_then(|inc| inc.accept().ok()) { + let _conn = connecting.await?; + // Just accept incoming connections, but don't do anything with them. 
+ } + + anyhow::Ok(()) + } + }); + + (ep, AbortingJoinHandle::from(handle)) } fn system_time_now() -> u64 { @@ -624,6 +642,7 @@ mod test_dns_pkarr { pkarr_dns_state::State, run_relay_server, DnsPkarrServer, }, + util::AbortingJoinHandle, AddrInfo, Endpoint, NodeAddr, }; @@ -696,8 +715,8 @@ mod test_dns_pkarr { let dns_pkarr_server = DnsPkarrServer::run().await?; let (relay_map, _relay_url, _relay_guard) = run_relay_server().await?; - let ep1 = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; - let ep2 = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; + let (ep1, _guard1) = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; + let (ep2, _guard2) = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; // wait until our shared state received the update from pkarr publishing dns_pkarr_server @@ -717,8 +736,8 @@ mod test_dns_pkarr { let dns_pkarr_server = DnsPkarrServer::run().await?; let (relay_map, _relay_url, _relay_guard) = run_relay_server().await?; - let ep1 = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; - let ep2 = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; + let (ep1, _guard1) = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; + let (ep2, _guard2) = ep_with_discovery(&relay_map, &dns_pkarr_server).await?; // wait until our shared state received the update from pkarr publishing dns_pkarr_server @@ -734,7 +753,7 @@ mod test_dns_pkarr { async fn ep_with_discovery( relay_map: &RelayMap, dns_pkarr_server: &DnsPkarrServer, - ) -> Result { + ) -> Result<(Endpoint, AbortingJoinHandle>)> { let secret_key = SecretKey::generate(); let ep = Endpoint::builder() .relay_mode(RelayMode::Custom(relay_map.clone())) @@ -745,6 +764,20 @@ mod test_dns_pkarr { .discovery(dns_pkarr_server.discovery(secret_key)) .bind(0) .await?; - Ok(ep) + + let handle = tokio::spawn({ + let ep = ep.clone(); + async move { + // we skip accept() errors, they can be caused by retransmits + while let Some(connecting) = 
ep.accept().await.and_then(|inc| inc.accept().ok()) { + let _conn = connecting.await?; + // Just accept incoming connections, but don't do anything with them. + } + + anyhow::Ok(()) + } + }); + + Ok((ep, AbortingJoinHandle::from(handle))) } } diff --git a/iroh-net/src/dns/node_info.rs b/iroh-net/src/dns/node_info.rs index 7e6181547b..aeef9875b1 100644 --- a/iroh-net/src/dns/node_info.rs +++ b/iroh-net/src/dns/node_info.rs @@ -320,7 +320,7 @@ impl TxtAttrs { pub fn from_hickory_records(records: &[hickory_proto::rr::Record]) -> Result { use hickory_proto::rr; let mut records = records.iter().filter_map(|rr| match rr.data() { - Some(rr::RData::TXT(txt)) => { + rr::RData::TXT(txt) => { node_id_from_hickory_name(rr.name()).map(|node_id| (node_id, txt)) } _ => None, diff --git a/iroh-net/src/endpoint.rs b/iroh-net/src/endpoint.rs index 74178c0e48..9e3644cabc 100644 --- a/iroh-net/src/endpoint.rs +++ b/iroh-net/src/endpoint.rs @@ -12,7 +12,7 @@ //! [module docs]: crate use std::any::Any; -use std::future::Future; +use std::future::{Future, IntoFuture}; use std::net::{IpAddr, SocketAddr}; use std::pin::Pin; use std::sync::Arc; @@ -22,26 +22,25 @@ use std::time::Duration; use anyhow::{anyhow, bail, Context, Result}; use derive_more::Debug; use futures_lite::{Stream, StreamExt}; +use pin_project::pin_project; use tokio_util::sync::{CancellationToken, WaitForCancellationFuture}; -use tracing::{debug, info_span, trace, warn}; +use tracing::{debug, instrument, trace, warn}; use url::Url; -use crate::{ - discovery::{Discovery, DiscoveryTask}, - dns::{default_resolver, DnsResolver}, - key::{PublicKey, SecretKey}, - magicsock::{self, Handle}, - relay::{RelayMode, RelayUrl}, - tls, NodeId, -}; +use crate::discovery::{Discovery, DiscoveryTask}; +use crate::dns::{default_resolver, DnsResolver}; +use crate::key::{PublicKey, SecretKey}; +use crate::magicsock::{self, Handle, QuicMappedAddr}; +use crate::relay::{RelayMode, RelayUrl}; +use crate::{tls, NodeId}; mod rtt_actor; use 
self::rtt_actor::RttMessage; pub use quinn::{ - Connection, ConnectionError, ReadError, RecvStream, SendStream, TransportConfig, VarInt, - WriteError, + ApplicationClose, Connection, ConnectionClose, ConnectionError, ReadError, RecvStream, + RetryError, SendStream, ServerConfig, TransportConfig, VarInt, WriteError, }; pub use super::magicsock::{ @@ -74,7 +73,6 @@ pub struct Builder { relay_mode: RelayMode, alpn_protocols: Vec>, transport_config: Option, - concurrent_connections: Option, keylog: bool, discovery: Option>, proxy_url: Option, @@ -92,7 +90,6 @@ impl Default for Builder { relay_mode: default_relay_mode(), alpn_protocols: Default::default(), transport_config: Default::default(), - concurrent_connections: Default::default(), keylog: Default::default(), discovery: Default::default(), proxy_url: None, @@ -123,7 +120,6 @@ impl Builder { let static_config = StaticConfig { transport_config: Arc::new(self.transport_config.unwrap_or_default()), keylog: self.keylog, - concurrent_connections: self.concurrent_connections, secret_key: secret_key.clone(), }; let dns_resolver = self @@ -274,15 +270,6 @@ impl Builder { self.insecure_skip_relay_cert_verify = skip_verify; self } - - /// Maximum number of simultaneous connections to accept. - /// - /// New incoming connections are only accepted if the total number of incoming or - /// outgoing connections is less than this. Outgoing connections are unaffected. - pub fn concurrent_connections(mut self, concurrent_connections: u32) -> Self { - self.concurrent_connections = Some(concurrent_connections); - self - } } /// Configuration for a [`quinn::Endpoint`] that cannot be changed at runtime. @@ -291,35 +278,35 @@ struct StaticConfig { secret_key: SecretKey, transport_config: Arc, keylog: bool, - concurrent_connections: Option, } impl StaticConfig { /// Create a [`quinn::ServerConfig`] with the specified ALPN protocols. 
fn create_server_config(&self, alpn_protocols: Vec>) -> Result { - let mut server_config = make_server_config( + let server_config = make_server_config( &self.secret_key, alpn_protocols, self.transport_config.clone(), self.keylog, )?; - if let Some(c) = self.concurrent_connections { - server_config.concurrent_connections(c); - } Ok(server_config) } } /// Creates a [`quinn::ServerConfig`] with the given secret key and limits. +// This return type can not longer be used anywhere in our public API. It is however still +// used by iroh::node::Node (or rather iroh::node::Builder) to create a plain Quinn +// endpoint. pub fn make_server_config( secret_key: &SecretKey, alpn_protocols: Vec>, transport_config: Arc, keylog: bool, ) -> Result { - let tls_server_config = tls::make_server_config(secret_key, alpn_protocols, keylog)?; - let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_server_config)); + let quic_server_config = tls::make_server_config(secret_key, alpn_protocols, keylog)?; + let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_server_config)); server_config.transport_config(transport_config); + Ok(server_config) } @@ -370,13 +357,12 @@ impl Endpoint { /// /// This is for internal use, the public interface is the [`Builder`] obtained from /// [Self::builder]. See the methods on the builder for documentation of the parameters. 
+ #[instrument("ep", skip_all, fields(me = %static_config.secret_key.public().fmt_short()))] async fn bind( static_config: StaticConfig, msock_opts: magicsock::Options, initial_alpns: Vec>, ) -> Result { - let span = info_span!("magic_ep", me = %static_config.secret_key.public().fmt_short()); - let _guard = span.enter(); let msock = magicsock::MagicSock::spawn(msock_opts).await?; trace!("created magicsock"); @@ -393,7 +379,7 @@ impl Endpoint { let endpoint = quinn::Endpoint::new_with_abstract_socket( endpoint_config, Some(server_config), - msock.clone(), + Arc::new(msock.clone()), Arc::new(quinn::TokioRuntime), )?; trace!("created quinn endpoint"); @@ -439,6 +425,7 @@ impl Endpoint { /// The `alpn`, or application-level protocol identifier, is also required. The remote /// endpoint must support this `alpn`, otherwise the connection attempt will fail with /// an error. + #[instrument(skip_all, fields(me = %self.node_id().fmt_short(), remote = %node_addr.node_id.fmt_short(), alpn = ?String::from_utf8_lossy(alpn)))] pub async fn connect(&self, node_addr: NodeAddr, alpn: &[u8]) -> Result { // Connecting to ourselves is not supported. if node_addr.node_id == self.node_id() { @@ -459,7 +446,13 @@ impl Endpoint { // address information for this node. 
let (addr, discovery) = self .get_mapping_addr_and_maybe_start_discovery(node_addr) - .await?; + .await + .with_context(|| { + format!( + "No addressing information for NodeId({}), unable to connect", + node_id.fmt_short() + ) + })?; debug!( "connecting to {}: (via {} - {:?})", @@ -497,17 +490,17 @@ impl Endpoint { &self, node_id: NodeId, alpn: &[u8], - addr: SocketAddr, + addr: QuicMappedAddr, ) -> Result { let client_config = { let alpn_protocols = vec![alpn.to_vec()]; - let tls_client_config = tls::make_client_config( + let quic_client_config = tls::make_client_config( &self.static_config.secret_key, Some(node_id), alpn_protocols, self.static_config.keylog, )?; - let mut client_config = quinn::ClientConfig::new(Arc::new(tls_client_config)); + let mut client_config = quinn::ClientConfig::new(Arc::new(quic_client_config)); let mut transport_config = quinn::TransportConfig::default(); transport_config.keep_alive_interval(Some(Duration::from_secs(1))); client_config.transport_config(Arc::new(transport_config)); @@ -517,9 +510,11 @@ impl Endpoint { // TODO: We'd eventually want to replace "localhost" with something that makes more sense. 
let connect = self .endpoint - .connect_with(client_config, addr, "localhost")?; + .connect_with(client_config, addr.0, "localhost")?; - let connection = connect.await.context("failed connecting to provider")?; + let connection = connect + .await + .context("failed connecting to remote endpoint")?; let rtt_msg = RttMessage::NewConnection { connection: connection.weak_handle(), @@ -545,7 +540,7 @@ impl Endpoint { pub fn accept(&self) -> Accept<'_> { Accept { inner: self.endpoint.accept(), - magic_ep: self.clone(), + ep: self.clone(), } } @@ -847,7 +842,7 @@ impl Endpoint { async fn get_mapping_addr_and_maybe_start_discovery( &self, node_addr: NodeAddr, - ) -> Result<(SocketAddr, Option)> { + ) -> Result<(QuicMappedAddr, Option)> { let node_id = node_addr.node_id; // Only return a mapped addr if we have some way of dialing this node, in other @@ -878,13 +873,16 @@ impl Endpoint { // So, we start a discovery task and wait for the first result to arrive, and // only then continue, because otherwise we wouldn't have any // path to the remote endpoint. - let mut discovery = DiscoveryTask::start(self.clone(), node_id)?; - discovery.first_arrived().await?; - if self.msock.has_send_address(node_id) { - let addr = self.msock.get_mapping_addr(node_id).expect("checked"); + let mut discovery = DiscoveryTask::start(self.clone(), node_id) + .context("Discovery service required due to missing addressing information")?; + discovery + .first_arrived() + .await + .context("Discovery service failed")?; + if let Some(addr) = self.msock.get_mapping_addr(node_id) { Ok((addr, Some(discovery))) } else { - bail!("Failed to retrieve the mapped address from the magic socket. Unable to dial node {node_id:?}"); + bail!("Discovery did not find addressing information"); } } } @@ -902,37 +900,155 @@ impl Endpoint { /// Future produced by [`Endpoint::accept`]. 
#[derive(Debug)] -#[pin_project::pin_project] +#[pin_project] pub struct Accept<'a> { #[pin] #[debug("quinn::Accept")] inner: quinn::Accept<'a>, - magic_ep: Endpoint, + ep: Endpoint, } impl<'a> Future for Accept<'a> { - type Output = Option; + type Output = Option; fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { let this = self.project(); match this.inner.poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(None) => Poll::Ready(None), - Poll::Ready(Some(inner)) => Poll::Ready(Some(Connecting { + Poll::Ready(Some(inner)) => Poll::Ready(Some(Incoming { inner, - magic_ep: this.magic_ep.clone(), + ep: this.ep.clone(), })), } } } +/// An incoming connection for which the server has not yet begun its parts of the +/// handshake. +#[derive(Debug)] +pub struct Incoming { + inner: quinn::Incoming, + ep: Endpoint, +} + +impl Incoming { + /// Attempts to accept this incoming connection (an error may still occur). + /// + /// Errors occurring here are likely not caused by the application or remote. The QUIC + /// connection listens on a normal UDP socket and any reachable network endpoint can + /// send datagrams to it, solicited or not. Even if the first few bytes look like a + /// QUIC packet, it might not even be a QUIC packet that is being received. + /// + /// Thus it is common to simply log the errors here and accept them as something which + /// can happen. + pub fn accept(self) -> Result { + self.inner.accept().map(|conn| Connecting { + inner: conn, + ep: self.ep, + }) + } + + /// Accepts this incoming connection using a custom configuration. + /// + /// See [`accept()`] for more details. + /// + /// [`accept()`]: Incoming::accept + pub fn accept_with( + self, + server_config: Arc, + ) -> Result { + self.inner + .accept_with(server_config) + .map(|conn| Connecting { + inner: conn, + ep: self.ep, + }) + } + + /// Rejects this incoming connection attempt. + pub fn refuse(self) { + self.inner.refuse() + } + + /// Responds with a retry packet. 
+ /// + /// This requires the client to retry with address validation. + /// + /// Errors if `remote_address_validated()` is true. + pub fn retry(self) -> Result<(), RetryError> { + self.inner.retry() + } + + /// Ignores this incoming connection attempt, not sending any packet in response. + pub fn ignore(self) { + self.inner.ignore() + } + + /// Returns the local IP address which was used when the peer established the + /// connection. + pub fn local_ip(&self) -> Option { + self.inner.local_ip() + } + + /// Returns the peer's UDP address. + pub fn remote_address(&self) -> SocketAddr { + self.inner.remote_address() + } + + /// Whether the socket address that is initiating this connection has been validated. + /// + /// This means that the sender of the initial packet has proved that they can receive + /// traffic sent to `self.remote_address()`. + pub fn remote_address_validated(&self) -> bool { + self.inner.remote_address_validated() + } +} + +impl IntoFuture for Incoming { + type Output = Result; + type IntoFuture = IncomingFuture; + + fn into_future(self) -> Self::IntoFuture { + IncomingFuture { + inner: self.inner.into_future(), + ep: self.ep, + } + } +} + +/// Adaptor to let [`Incoming`] be `await`ed like a [`Connecting`]. 
+#[derive(Debug)] +#[pin_project] +pub struct IncomingFuture { + #[pin] + inner: quinn::IncomingFuture, + ep: Endpoint, +} + +impl Future for IncomingFuture { + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let this = self.project(); + match this.inner.poll(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(Err(err)) => Poll::Ready(Err(err)), + Poll::Ready(Ok(conn)) => { + try_send_rtt_msg(&conn, this.ep); + Poll::Ready(Ok(conn)) + } + } + } +} + /// In-progress connection attempt future #[derive(Debug)] -#[pin_project::pin_project] +#[pin_project] pub struct Connecting { #[pin] inner: quinn::Connecting, - magic_ep: Endpoint, + ep: Endpoint, } impl Connecting { @@ -940,13 +1056,10 @@ impl Connecting { pub fn into_0rtt(self) -> Result<(quinn::Connection, quinn::ZeroRttAccepted), Self> { match self.inner.into_0rtt() { Ok((conn, zrtt_accepted)) => { - try_send_rtt_msg(&conn, &self.magic_ep); + try_send_rtt_msg(&conn, &self.ep); Ok((conn, zrtt_accepted)) } - Err(inner) => Err(Self { - inner, - magic_ep: self.magic_ep, - }), + Err(inner) => Err(Self { inner, ep: self.ep }), } } @@ -989,7 +1102,7 @@ impl Future for Connecting { Poll::Pending => Poll::Pending, Poll::Ready(Err(err)) => Poll::Ready(Err(err)), Poll::Ready(Ok(conn)) => { - try_send_rtt_msg(&conn, this.magic_ep); + try_send_rtt_msg(&conn, this.ep); Poll::Ready(Ok(conn)) } } @@ -997,11 +1110,12 @@ impl Future for Connecting { } /// Extract the [`PublicKey`] from the peer's TLS certificate. 
+// TODO: make this a method now pub fn get_remote_node_id(connection: &quinn::Connection) -> Result { let data = connection.peer_identity(); match data { None => bail!("no peer certificate found"), - Some(data) => match data.downcast::>() { + Some(data) => match data.downcast::>() { Ok(certs) => { if certs.len() != 1 { bail!( @@ -1182,10 +1296,10 @@ mod tests { let incoming = ep.accept().await.unwrap(); let conn = incoming.await.unwrap(); let mut stream = conn.accept_uni().await.unwrap(); - let mut buf = [0u8, 5]; + let mut buf = [0u8; 5]; stream.read_exact(&mut buf).await.unwrap(); info!("Accepted 1 stream, received {buf:?}. Closing now."); - // close the stream + // close the connection conn.close(7u8.into(), b"bye"); let res = conn.accept_uni().await; @@ -1224,6 +1338,7 @@ mod tests { // the error. stream.write_all(b"hello").await.unwrap(); + info!("waiting for closed"); // Remote now closes the connection, we should see an error sometime soon. let err = conn.closed().await; let expected_err = @@ -1233,12 +1348,7 @@ mod tests { }); assert_eq!(err, expected_err); - let res = stream.finish().await; - assert_eq!( - res.unwrap_err(), - quinn::WriteError::ConnectionLost(expected_err.clone()) - ); - + info!("opening new - expect it to fail"); let res = conn.open_uni().await; assert_eq!(res.unwrap_err(), expected_err); info!("client test completed"); @@ -1246,7 +1356,12 @@ mod tests { .instrument(info_span!("test-client")), ); - let (server, client) = tokio::join!(server, client); + let (server, client) = tokio::time::timeout( + Duration::from_secs(5), + futures_lite::future::zip(server, client), + ) + .await + .expect("timeout"); server.unwrap(); client.unwrap(); } @@ -1346,7 +1461,8 @@ mod tests { recv.read_exact(&mut buf).await.unwrap(); send.write_all(&buf).await.unwrap(); } - send.finish().await.unwrap(); + send.finish().unwrap(); + send.stopped().await.unwrap(); recv.read_to_end(0).await.unwrap(); info!(%i, peer = %peer_id.fmt_short(), "finished"); 
println!("[server] round {} done in {:?}", i + 1, now.elapsed()); @@ -1390,7 +1506,8 @@ mod tests { recv.read_exact(&mut buf).await.unwrap(); assert_eq!(buf, vec![i; chunk_size]); } - send.finish().await.unwrap(); + send.finish().unwrap(); + send.stopped().await.unwrap(); recv.read_to_end(0).await.unwrap(); info!("client finished"); ep.close(0u32.into(), &[]).await.unwrap(); @@ -1438,30 +1555,62 @@ mod tests { async fn connect_hello(ep: Endpoint, dst: NodeAddr) { let conn = ep.connect(dst, TEST_ALPN).await.unwrap(); let (mut send, mut recv) = conn.open_bi().await.unwrap(); + info!("sending hello"); send.write_all(b"hello").await.unwrap(); - send.finish().await.unwrap(); + send.finish().unwrap(); + info!("receiving world"); let m = recv.read_to_end(100).await.unwrap(); assert_eq!(m, b"world"); + conn.close(1u8.into(), b"done"); } async fn accept_world(ep: Endpoint, src: NodeId) { - let mut incoming = ep.accept().await.unwrap(); - let alpn = incoming.alpn().await.unwrap(); - let conn = incoming.await.unwrap(); + let incoming = ep.accept().await.unwrap(); + let mut iconn = incoming.accept().unwrap(); + let alpn = iconn.alpn().await.unwrap(); + let conn = iconn.await.unwrap(); let node_id = get_remote_node_id(&conn).unwrap(); assert_eq!(node_id, src); assert_eq!(alpn, TEST_ALPN); let (mut send, mut recv) = conn.accept_bi().await.unwrap(); + info!("receiving hello"); let m = recv.read_to_end(100).await.unwrap(); assert_eq!(m, b"hello"); + info!("sending hello"); send.write_all(b"world").await.unwrap(); - send.finish().await.unwrap(); + send.finish().unwrap(); + match conn.closed().await { + ConnectionError::ApplicationClosed(closed) => { + assert_eq!(closed.error_code, 1u8.into()); + } + _ => panic!("wrong close error"), + } } - let p1_accept = tokio::spawn(accept_world(ep1.clone(), ep2_nodeid)); - let p2_accept = tokio::spawn(accept_world(ep2.clone(), ep1_nodeid)); - let p1_connect = tokio::spawn(connect_hello(ep1.clone(), ep2_nodeaddr)); - let p2_connect = 
tokio::spawn(connect_hello(ep2.clone(), ep1_nodeaddr)); + let p1_accept = tokio::spawn(accept_world(ep1.clone(), ep2_nodeid).instrument(info_span!( + "p1_accept", + ep1 = %ep1.node_id().fmt_short(), + dst = %ep2_nodeid.fmt_short(), + ))); + let p2_accept = tokio::spawn(accept_world(ep2.clone(), ep1_nodeid).instrument(info_span!( + "p2_accept", + ep2 = %ep2.node_id().fmt_short(), + dst = %ep1_nodeid.fmt_short(), + ))); + let p1_connect = tokio::spawn(connect_hello(ep1.clone(), ep2_nodeaddr).instrument( + info_span!( + "p1_connect", + ep1 = %ep1.node_id().fmt_short(), + dst = %ep2_nodeid.fmt_short(), + ), + )); + let p2_connect = tokio::spawn(connect_hello(ep2.clone(), ep1_nodeaddr).instrument( + info_span!( + "p2_connect", + ep2 = %ep2.node_id().fmt_short(), + dst = %ep1_nodeid.fmt_short(), + ), + )); p1_accept.await.unwrap(); p2_accept.await.unwrap(); diff --git a/iroh-net/src/magicsock.rs b/iroh-net/src/magicsock.rs index ecaf2712bc..1f786b47ee 100644 --- a/iroh-net/src/magicsock.rs +++ b/iroh-net/src/magicsock.rs @@ -25,7 +25,7 @@ use std::{ atomic::{AtomicBool, AtomicU16, AtomicU64, Ordering}, Arc, }, - task::{ready, Context, Poll, Waker}, + task::{Context, Poll, Waker}, time::{Duration, Instant}, }; @@ -180,7 +180,7 @@ pub(crate) struct MagicSock { relay_recv_receiver: parking_lot::Mutex>, /// Stores wakers, to be called when relay_recv_ch receives new data. network_recv_wakers: parking_lot::Mutex>, - network_send_wakers: parking_lot::Mutex>, + network_send_wakers: Arc>>, /// The DNS resolver to be used in this magicsock. dns_resolver: DnsResolver, @@ -215,10 +215,7 @@ pub(crate) struct MagicSock { net_checker: netcheck::Client, /// The state for an active DiscoKey. 
disco_secrets: DiscoSecrets, - udp_state: quinn_udp::UdpState, - /// Send buffer used in `poll_send_udp` - send_buffer: parking_lot::Mutex>, /// UDP disco (ping) queue udp_disco_sender: mpsc::Sender<(SocketAddr, PublicKey, disco::Message)>, @@ -353,14 +350,9 @@ impl MagicSock { self.node_map.conn_type_stream(node_id) } - /// Returns the [`SocketAddr`] which can be used by the QUIC layer to dial this node. - /// - /// Note this is a user-facing API and does not wrap the [`SocketAddr`] in a - /// [`QuicMappedAddr`] as we do internally. - pub(crate) fn get_mapping_addr(&self, node_id: NodeId) -> Option { - self.node_map - .get_quic_mapped_addr_for_node_key(node_id) - .map(|a| a.0) + /// Returns the socket address which can be used by the QUIC layer to dial this node. + pub(crate) fn get_mapping_addr(&self, node_id: NodeId) -> Option { + self.node_map.get_quic_mapped_addr_for_node_key(node_id) } /// Add addresses for a node to the magic socket's addresbook. @@ -429,226 +421,219 @@ impl MagicSock { .ok(); } + #[cfg_attr(windows, allow(dead_code))] fn normalized_local_addr(&self) -> io::Result { let (v4, v6) = self.local_addr(); let addr = if let Some(v6) = v6 { v6 } else { v4 }; Ok(addr) } - #[instrument(skip_all, fields(me = %self.me))] - fn poll_send( - &self, - cx: &mut Context, - transmits: &[quinn_udp::Transmit], - ) -> Poll> { - let bytes_total: usize = transmits.iter().map(|t| t.contents.len()).sum(); - inc_by!(MagicsockMetrics, send_data, bytes_total as _); + fn create_io_poller(&self) -> Pin> { + // To do this properly the MagicSock would need a registry of pollers. For each + // node we would look up the poller or create one. Then on each try_send we can + // look up the correct poller and configure it to poll the paths it needs. + // + // Note however that the current quinn impl calls UdpPoller::poll_writable() + // **before** it calls try_send(), as opposed to how it is documented. 
That is a + // problem as we would not yet know the path that needs to be polled. To avoid such + // ambiguity the API could be changed to a .poll_send(&self, cx: &mut Context, + // io_poller: Pin<&mut dyn UdpPoller>, transmit: &Transmit) -> Poll> + // instead of the existing .try_send() because then we would have control over this. + // + // Right now however we have one single poller behaving the same for each + // connection. It checks all paths and returns Poll::Ready as soon as any path is + // ready. + let ipv4_poller = Arc::new(self.pconn4.clone()).create_io_poller(); + let ipv6_poller = self + .pconn6 + .as_ref() + .map(|sock| Arc::new(sock.clone()).create_io_poller()); + let relay_sender = self.relay_actor_sender.clone(); + Box::pin(IoPoller { + ipv4_poller, + ipv6_poller, + relay_sender, + relay_send_waker: self.network_send_wakers.clone(), + }) + } - let mut n = 0; - if transmits.is_empty() { - tracing::trace!(is_closed=?self.is_closed(), "poll_send without any quinn_udp::Transmit"); - return Poll::Ready(Ok(n)); - } + /// Implementation for AsyncUdpSocket::try_send + #[instrument(skip_all)] + fn try_send(&self, transmit: &quinn_udp::Transmit) -> io::Result<()> { + inc_by!(MagicsockMetrics, send_data, transmit.contents.len() as _); if self.is_closed() { - inc_by!(MagicsockMetrics, send_data_network_down, bytes_total as _); - return Poll::Ready(Err(io::Error::new( + inc_by!( + MagicsockMetrics, + send_data_network_down, + transmit.contents.len() as _ + ); + return Err(io::Error::new( io::ErrorKind::NotConnected, "connection closed", - ))); + )); } + let dest = QuicMappedAddr(transmit.destination); trace!( - "sending:\n{}", - transmits.iter().fold( - String::with_capacity(transmits.len() * 50), - |mut final_repr, t| { - final_repr.push_str( - format!( - " dest: {}, src: {:?}, content_len: {}\n", - QuicMappedAddr(t.destination), - t.src_ip, - t.contents.len() - ) - .as_str(), - ); - final_repr - } - ) + dst = %dest, + src = ?transmit.src_ip, + len = 
%transmit.contents.len(), + "sending", ); - - let dest = transmits[0].destination; - for transmit in transmits.iter() { - if transmit.destination != dest { - break; - } - n += 1; - } - - // Copy the transmits into an owned buffer, because we will have to modify the send - // addresses to translate from the quic mapped address to the actual UDP address. - // To avoid allocating on each call to `poll_send`, we use a fixed buffer. - let mut transmits = { - let mut buf = self.send_buffer.lock(); - buf.clear(); - buf.reserve(n); - buf.extend_from_slice(&transmits[..n]); - buf - }; - - let dest = QuicMappedAddr(dest); - - let mut transmits_sent = 0; + let mut transmit = transmit.clone(); match self .node_map .get_send_addrs(dest, self.ipv6_reported.load(Ordering::Relaxed)) { - Some((public_key, udp_addr, relay_url, mut msgs)) => { + Some((node_id, udp_addr, relay_url, msgs)) => { let mut pings_sent = false; // If we have pings to send, we *have* to send them out first. if !msgs.is_empty() { - if let Err(err) = ready!(self.poll_handle_ping_actions(cx, &mut msgs)) { - warn!(node = %public_key.fmt_short(), "failed to handle ping actions: {err:?}"); + if let Err(err) = self.try_send_ping_actions(msgs) { + warn!( + node = %node_id.fmt_short(), + "failed to handle ping actions: {err:#}", + ); } pings_sent = true; } let mut udp_sent = false; - let mut relay_sent = false; let mut udp_error = None; - let mut udp_pending = false; - let mut relay_pending = false; + let mut relay_sent = false; + let mut relay_error = None; // send udp if let Some(addr) = udp_addr { - // rewrite target addresses. - for t in transmits.iter_mut() { - t.destination = addr; - } - match self.poll_send_udp(addr, &transmits, cx) { - Poll::Ready(Ok(n)) => { - trace!(node = %public_key.fmt_short(), dst = %addr, transmit_count=n, "sent transmits over UDP"); - // truncate the transmits vec to `n`. these transmits will be sent to - // the relay further below. 
We only want to send those transmits to the relay that were - // sent to UDP, because the next transmits will be sent on the next - // call to poll_send, which will happen immediately after, because we - // are always returning Poll::Ready if poll_send_udp returned - // Poll::Ready. - transmits.truncate(n); - transmits_sent = transmits.len(); + // rewrite target address + transmit.destination = addr; + match self.try_send_udp(addr, &transmit) { + Ok(()) => { + trace!(node = %node_id.fmt_short(), dst = %addr, + "sent transmit over UDP"); udp_sent = true; - // record metrics. } - Poll::Ready(Err(err)) => { - error!(node = %public_key.fmt_short(), ?addr, "failed to send udp: {err:?}"); + Err(err) => { + error!(node = %node_id.fmt_short(), dst = %addr, + "failed to send udp: {err:#}"); udp_error = Some(err); } - Poll::Pending => { - udp_pending = true; - } } } // send relay if let Some(ref relay_url) = relay_url { - match self.poll_send_relay(relay_url, public_key, split_packets(&transmits)) { - Poll::Ready(sent) => { - relay_sent = sent; - transmits_sent = transmits.len(); + match self.try_send_relay(relay_url, node_id, split_packets(&transmit)) { + Ok(()) => { + relay_sent = true; } - Poll::Pending => { - self.network_send_wakers.lock().replace(cx.waker().clone()); - relay_pending = true; + Err(err) => { + relay_error = Some(err); } } } - if udp_addr.is_none() && relay_url.is_none() { - // Returning an error here would lock up the entire `Endpoint`. - // - // If we returned `Poll::Pending`, the waker driving the `poll_send` will never get woken up. - // - // Our best bet here is to log an error and return `Poll::Ready(Ok(n))`. - // - // `n` is the number of consecutive transmits in this batch that are meant for the same destination (a destination that we have no addresses for, and so we can never actually send). - // - // When we return `Poll::Ready(Ok(n))`, we are effectively dropping those n messages, by lying to QUIC and saying they were sent. 
- // (If we returned `Poll::Ready(Ok(0))` instead, QUIC would loop to attempt to re-send those messages, blocking other traffic.) - // - // When `QUIC` gets no `ACK`s for those messages, the connection will eventually timeout. - error!(node = %public_key.fmt_short(), "failed to send: no UDP or relay addr"); - return Poll::Ready(Ok(n)); - } - - if (udp_addr.is_none() || udp_pending) && (relay_url.is_none() || relay_pending) { - // Handle backpressure - // The explicit choice here is to only return pending, iff all available paths returned - // pending. - // This might result in one channel being backed up, without the system noticing, but - // for now this seems to be the best choice workable in the current implementation. - return Poll::Pending; - } - - if !relay_sent && !udp_sent && !pings_sent { - // Returning an error here would lock up the entire `Endpoint`. - // Instead, log an error and return `Poll::Pending`, the connection will timeout. - let err = udp_error.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::NotConnected, - "no UDP or relay address available for node", - ) - }); - error!(node = %public_key.fmt_short(), "{err:?}"); - return Poll::Pending; + let udp_pending = udp_error + .as_ref() + .map(|err| err.kind() == io::ErrorKind::WouldBlock) + .unwrap_or_default(); + let relay_pending = relay_error + .as_ref() + .map(|err| err.kind() == io::ErrorKind::WouldBlock) + .unwrap_or_default(); + if udp_pending && relay_pending { + // Handle backpressure. + Err(io::Error::new(io::ErrorKind::WouldBlock, "pending")) + } else { + if relay_sent || udp_sent { + trace!( + node = %node_id.fmt_short(), + send_udp = ?udp_addr, + send_relay = ?relay_url, + "sent transmit", + ); + } else if !pings_sent { + // Returning Ok here means we let QUIC handle a timeout for a lost + // packet, same would happen if we returned any errors. 
The + // philosophy of quinn-udp is that a UDP connection could come back + // at any time so these errors should be treated as transient and + // are just timeouts. Hence we opt for returning Ok. See + // test_try_send_no_udp_addr_or_relay_url to explore this further. + error!( + node = %node_id.fmt_short(), + "no UDP or relay paths available for node", + ); + } + Ok(()) } - - trace!( - node = %public_key.fmt_short(), - transmit_count = %transmits_sent, - send_udp = ?udp_addr, - send_relay = ?relay_url, - "sent transmits" - ); - Poll::Ready(Ok(transmits_sent)) } None => { - // Returning an error here would lock up the entire `Endpoint`. - // - // If we returned `Poll::Pending`, the waker driving the `poll_send` will never get woken up. - // - // Our best bet here is to log an error and return `Poll::Ready(Ok(n))`. - // - // `n` is the number of consecutive transmits in this batch that are meant for the same destination (a destination that we have no node state for, and so we can never actually send). - // - // When we return `Poll::Ready(Ok(n))`, we are effectively dropping those n messages, by lying to QUIC and saying they were sent. - // (If we returned `Poll::Ready(Ok(0))` instead, QUIC would loop to attempt to re-send those messages, blocking other traffic.) - // - // When `QUIC` gets no `ACK`s for those messages, the connection will eventually timeout. - error!(dst=%dest, "no node_state for mapped address"); - Poll::Ready(Ok(n)) + error!(%dest, "no NodeState for mapped address"); + // Returning Ok here means we let QUIC timeout. Returning WouldBlock + // triggers a hot loop. Returning an error would immediately fail a + // connection. The philosophy of quinn-udp is that a UDP connection could + // come back at any time or missing should be transient so chooses to let + // these kind of errors time out. See test_try_send_no_send_addr to try + // this out. 
+ Ok(()) } } } - fn poll_send_udp( + fn try_send_relay( &self, - addr: SocketAddr, - transmits: &[quinn_udp::Transmit], - cx: &mut Context<'_>, - ) -> Poll> { + url: &RelayUrl, + node: NodeId, + contents: RelayContents, + ) -> io::Result<()> { + trace!( + node = %node.fmt_short(), + relay_url = %url, + count = contents.len(), + len = contents.iter().map(|c| c.len()).sum::(), + "send relay", + ); + let msg = RelayActorMessage::Send { + url: url.clone(), + contents, + peer: node, + }; + match self.relay_actor_sender.try_send(msg) { + Ok(_) => { + trace!(node = %node.fmt_short(), relay_url = %url, + "send relay: message queued"); + Ok(()) + } + Err(mpsc::error::TrySendError::Closed(_)) => { + warn!(node = %node.fmt_short(), relay_url = %url, + "send relay: message dropped, channel to actor is closed"); + Err(io::Error::new( + io::ErrorKind::ConnectionReset, + "channel to actor is closed", + )) + } + Err(mpsc::error::TrySendError::Full(_)) => { + warn!(node = %node.fmt_short(), relay_url = %url, + "send relay: message dropped, channel to actor is full"); + Err(io::Error::new( + io::ErrorKind::WouldBlock, + "channel to actor is full", + )) + } + } + } + + fn try_send_udp(&self, addr: SocketAddr, transmit: &quinn_udp::Transmit) -> io::Result<()> { let conn = self.conn_for_addr(addr)?; - let n = ready!(conn.poll_send(&self.udp_state, cx, transmits))?; - let total_bytes: u64 = transmits - .iter() - .take(n) - .map(|x| x.contents.len() as u64) - .sum(); + conn.try_send(transmit)?; + let total_bytes: u64 = transmit.contents.len() as u64; if addr.is_ipv6() { inc_by!(MagicsockMetrics, send_ipv6, total_bytes); } else { inc_by!(MagicsockMetrics, send_ipv4, total_bytes); } - Poll::Ready(Ok(n)) + Ok(()) } fn conn_for_addr(&self, addr: SocketAddr) -> io::Result<&UdpConn> { @@ -663,7 +648,7 @@ impl MagicSock { } /// NOTE: Receiving on a [`Self::closed`] socket will return [`Poll::Pending`] indefinitely. 
- #[instrument(skip_all, fields(me = %self.me))] + #[instrument(skip_all)] fn poll_recv( &self, cx: &mut Context, @@ -692,7 +677,23 @@ impl MagicSock { Poll::Ready(n) => (n, true), }; + // Adding the IP address we received something on results in Quinn using this + // address on the send path to send from. However we let Quinn use a + // QuicMappedAddress, not a real address. So we used to substitute our bind address + // here so that Quinn would send on the right address. But that would sometimes + // result in the wrong address family and Windows trips up on that. + // + // What should be done is that this dst_ip from the RecvMeta is stored in the + // NodeState/PathState. Then on the send path it should be retrieved from the + // NodeState/PathSate together with the send address and substituted at send time. + // This is relevant for IPv6 link-local addresses where the OS otherwise does not + // know which intervace to send from. + #[cfg(not(windows))] let dst_ip = self.normalized_local_addr().ok().map(|addr| addr.ip()); + // Reasoning for this here: + // https://github.com/n0-computer/iroh/pull/2595#issuecomment-2290947319 + #[cfg(windows)] + let dst_ip = None; let mut quic_packets_total = 0; @@ -781,7 +782,7 @@ impl MagicSock { Poll::Ready(Ok(msgs)) } - #[instrument(skip_all, fields(name = %self.me))] + #[instrument(skip_all)] fn poll_recv_relay( &self, cx: &mut Context, @@ -993,24 +994,30 @@ impl MagicSock { } } - fn poll_send_ping(&self, ping: &SendPing, cx: &mut Context<'_>) -> Poll> { - let SendPing { - id, - dst, - dst_node, - tx_id, - purpose, - } = ping; - let msg = disco::Message::Ping(disco::Ping { - tx_id: *tx_id, - node_key: self.public_key(), - }); - ready!(self.poll_send_disco_message(dst.clone(), *dst_node, msg, cx))?; - let msg_sender = self.actor_sender.clone(); - debug!(%dst, tx = %hex::encode(tx_id), ?purpose, "ping sent (polled)"); - self.node_map - .notify_ping_sent(*id, dst.clone(), *tx_id, *purpose, msg_sender); - Poll::Ready(Ok(())) + 
/// Tries to send the ping actions. + /// + /// Note that on failure the (remaining) ping actions are simply dropped. That's bad! + /// The Endpoint will think a full ping was done and not request a new full-ping for a + /// while. We should probably be buffering the pings. + fn try_send_ping_actions(&self, msgs: Vec) -> io::Result<()> { + for msg in msgs { + // Abort sending as soon as we know we are shutting down. + if self.is_closing() || self.is_closed() { + return Ok(()); + } + match msg { + PingAction::SendCallMeMaybe { + ref relay_url, + dst_node, + } => { + self.send_or_queue_call_me_maybe(relay_url, dst_node); + } + PingAction::SendPing(ping) => { + self.try_send_ping(ping)?; + } + } + } + Ok(()) } /// Send a disco message. UDP messages will be queued. @@ -1033,22 +1040,21 @@ impl MagicSock { } /// Send a disco message. UDP messages will be polled to send directly on the UDP socket. - fn poll_send_disco_message( + fn try_send_disco_message( &self, dst: SendAddr, dst_key: PublicKey, msg: disco::Message, - cx: &mut Context<'_>, - ) -> Poll> { + ) -> io::Result<()> { match dst { SendAddr::Udp(addr) => { - ready!(self.poll_send_disco_message_udp(addr, dst_key, &msg, cx))?; + self.try_send_disco_message_udp(addr, dst_key, &msg)?; } SendAddr::Relay(ref url) => { self.send_disco_message_relay(url, dst_key, msg); } } - Poll::Ready(Ok(())) + Ok(()) } fn send_disco_message_relay( @@ -1060,111 +1066,112 @@ impl MagicSock { debug!(node = %dst_key.fmt_short(), %url, %msg, "send disco message (relay)"); let pkt = self.encode_disco_message(dst_key, &msg); inc!(MagicsockMetrics, send_disco_relay); - match self.poll_send_relay(url, dst_key, smallvec![pkt]) { - Poll::Ready(true) => { + match self.try_send_relay(url, dst_key, smallvec![pkt]) { + Ok(()) => { inc!(MagicsockMetrics, sent_disco_relay); disco_message_sent(&msg); true } - _ => false, + Err(_) => false, } } async fn send_disco_message_udp( &self, dst: SocketAddr, - dst_key: PublicKey, + dst_node: NodeId, msg: 
&disco::Message, - ) -> io::Result { - std::future::poll_fn(move |cx| self.poll_send_disco_message_udp(dst, dst_key, msg, cx)) - .await + ) -> io::Result<()> { + futures_lite::future::poll_fn(move |cx| { + loop { + match self.try_send_disco_message_udp(dst, dst_node, msg) { + Ok(()) => return Poll::Ready(Ok(())), + Err(err) if err.kind() == io::ErrorKind::WouldBlock => { + // This is the socket .try_send_disco_message_udp used. + let sock = self.conn_for_addr(dst)?; + let sock = Arc::new(sock.clone()); + let mut poller = sock.create_io_poller(); + match poller.as_mut().poll_writable(cx)? { + Poll::Ready(()) => continue, + Poll::Pending => return Poll::Pending, + } + } + Err(err) => return Poll::Ready(Err(err)), + } + } + }) + .await } - fn poll_send_disco_message_udp( + fn try_send_disco_message_udp( &self, dst: SocketAddr, - dst_key: PublicKey, + dst_node: NodeId, msg: &disco::Message, - cx: &mut Context<'_>, - ) -> Poll> { + ) -> std::io::Result<()> { trace!(%dst, %msg, "send disco message (UDP)"); if self.is_closed() { - return Poll::Ready(Err(io::Error::new( + return Err(io::Error::new( io::ErrorKind::NotConnected, "connection closed", - ))); + )); } - let pkt = self.encode_disco_message(dst_key, msg); + let pkt = self.encode_disco_message(dst_node, msg); // TODO: These metrics will be wrong with the poll impl // Also - do we need it? I'd say the `sent_disco_udp` below is enough. inc!(MagicsockMetrics, send_disco_udp); - let transmits = [quinn_udp::Transmit { + let transmit = quinn_udp::Transmit { destination: dst, - contents: pkt, + contents: &pkt, ecn: None, segment_size: None, src_ip: None, // TODO - }]; - let sent = ready!(self.poll_send_udp(dst, &transmits, cx)); - Poll::Ready(match sent { - Ok(0) => { - // Can't send. (e.g. 
no IPv6 locally) - warn!(%dst, node = %dst_key.fmt_short(), ?msg, "failed to send disco message"); - Ok(false) - } - Ok(_n) => { - trace!(%dst, node = %dst_key.fmt_short(), %msg, "sent disco message"); + }; + let sent = self.try_send_udp(dst, &transmit); + match sent { + Ok(()) => { + trace!(%dst, node = %dst_node.fmt_short(), %msg, "sent disco message"); inc!(MagicsockMetrics, sent_disco_udp); disco_message_sent(msg); - Ok(true) + Ok(()) } Err(err) => { - warn!(%dst, node = %dst_key.fmt_short(), ?msg, ?err, "failed to send disco message"); + warn!(%dst, node = %dst_node.fmt_short(), ?msg, ?err, + "failed to send disco message"); Err(err) } - }) - } - - fn poll_handle_ping_actions( - &self, - cx: &mut Context<'_>, - msgs: &mut Vec, - ) -> Poll> { - if msgs.is_empty() { - return Poll::Ready(Ok(())); } + } - while let Some(msg) = msgs.pop() { - if self.poll_handle_ping_action(cx, &msg)?.is_pending() { - msgs.push(msg); - return Poll::Pending; - } + #[instrument(skip_all)] + async fn handle_ping_actions(&mut self, msgs: Vec) { + // TODO: This used to make sure that all ping actions are sent. Though on the + // poll_send/try_send path we also do fire-and-forget. try_send_ping_actions() + // really should store any unsent pings on the Inner and send them at the next + // possible time. + if let Err(err) = self.try_send_ping_actions(msgs) { + warn!("Not all ping actions were sent: {err:#}"); } - Poll::Ready(Ok(())) } - #[instrument("handle_ping_action", skip_all)] - fn poll_handle_ping_action( - &self, - cx: &mut Context<'_>, - msg: &PingAction, - ) -> Poll> { - // Abort sending as soon as we know we are shutting down. 
- if self.is_closing() || self.is_closed() { - return Poll::Ready(Ok(())); - } - match *msg { - PingAction::SendCallMeMaybe { - ref relay_url, - dst_node, - } => { - self.send_or_queue_call_me_maybe(relay_url, dst_node); - } - PingAction::SendPing(ref ping) => { - ready!(self.poll_send_ping(ping, cx))?; - } - } - Poll::Ready(Ok(())) + fn try_send_ping(&self, ping: SendPing) -> io::Result<()> { + let SendPing { + id, + dst, + dst_node, + tx_id, + purpose, + } = ping; + let msg = disco::Message::Ping(disco::Ping { + tx_id, + node_key: self.public_key(), + }); + self.try_send_disco_message(dst.clone(), dst_node, msg)?; + debug!(%dst, tx = %hex::encode(tx_id), ?purpose, "ping sent (polled)"); + let msg_sender = self.actor_sender.clone(); + self.node_map + .notify_ping_sent(id, dst.clone(), tx_id, purpose, msg_sender); + Ok(()) } fn poll_send_relay( @@ -1409,7 +1416,6 @@ impl Handle { let node_map = node_map.unwrap_or_default(); let node_map = NodeMap::load_from_vec(node_map); - let udp_state = quinn_udp::UdpState::default(); let inner = Arc::new(MagicSock { me, port: AtomicU16::new(port), @@ -1420,7 +1426,7 @@ impl Handle { closed: AtomicBool::new(false), relay_recv_receiver: parking_lot::Mutex::new(relay_recv_receiver), network_recv_wakers: parking_lot::Mutex::new(None), - network_send_wakers: parking_lot::Mutex::new(None), + network_send_wakers: Arc::new(parking_lot::Mutex::new(None)), actor_sender: actor_sender.clone(), ipv6_reported: Arc::new(AtomicBool::new(false)), relay_map, @@ -1431,8 +1437,6 @@ impl Handle { disco_secrets: DiscoSecrets::default(), node_map, relay_actor_sender: relay_actor_sender.clone(), - udp_state, - send_buffer: Default::default(), udp_disco_sender, discovery, direct_addrs: Watchable::new(Default::default()), @@ -1657,13 +1661,12 @@ fn endpoint_sets_equal(xs: &[DirectAddr], ys: &[DirectAddr]) -> bool { } impl AsyncUdpSocket for Handle { - fn poll_send( - &self, - _udp_state: &quinn_udp::UdpState, - cx: &mut Context, - transmits: 
&[quinn_udp::Transmit], - ) -> Poll> { - self.msock.poll_send(cx, transmits) + fn create_io_poller(self: Arc) -> Pin> { + self.msock.create_io_poller() + } + + fn try_send(&self, transmit: &quinn_udp::Transmit) -> io::Result<()> { + self.msock.try_send(transmit) } /// NOTE: Receiving on a [`Self::close`]d socket will return [`Poll::Pending`] indefinitely. @@ -1690,6 +1693,74 @@ impl AsyncUdpSocket for Handle { (_, Some(ipv6)) => Ok(*ipv6), } } + + fn max_transmit_segments(&self) -> usize { + if let Some(pconn6) = self.pconn6.as_ref() { + std::cmp::min( + pconn6.max_transmit_segments(), + self.pconn4.max_transmit_segments(), + ) + } else { + self.pconn4.max_transmit_segments() + } + } + + fn max_receive_segments(&self) -> usize { + if let Some(pconn6) = self.pconn6.as_ref() { + // `max_receive_segments` controls the size of the `RecvMeta` buffer + // that quinn creates. Having buffers slightly bigger than necessary + // isn't terrible, and makes sure a single socket can read the maximum + // amount with a single poll. We considered adding these numbers instead, + // but we never get data from both sockets at the same time in `poll_recv` + // and it's impossible and unnecessary to be refactored that way. + std::cmp::max( + pconn6.max_receive_segments(), + self.pconn4.max_receive_segments(), + ) + } else { + self.pconn4.max_receive_segments() + } + } + + fn may_fragment(&self) -> bool { + if let Some(pconn6) = self.pconn6.as_ref() { + pconn6.may_fragment() || self.pconn4.may_fragment() + } else { + self.pconn4.may_fragment() + } + } +} + +#[derive(Debug)] +struct IoPoller { + ipv4_poller: Pin>, + ipv6_poller: Option>>, + relay_sender: mpsc::Sender, + relay_send_waker: Arc>>, +} + +impl quinn::UdpPoller for IoPoller { + fn poll_writable(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // This version returns Ready as soon as any of them are ready. 
+ let this = &mut *self; + match this.ipv4_poller.as_mut().poll_writable(cx) { + Poll::Ready(_) => return Poll::Ready(Ok(())), + Poll::Pending => (), + } + if let Some(ref mut ipv6_poller) = this.ipv6_poller { + match ipv6_poller.as_mut().poll_writable(cx) { + Poll::Ready(_) => return Poll::Ready(Ok(())), + Poll::Pending => (), + } + } + match this.relay_sender.capacity() { + 0 => { + self.relay_send_waker.lock().replace(cx.waker().clone()); + Poll::Pending + } + _ => Poll::Ready(Ok(())), + } + } } #[derive(Debug)] @@ -1826,14 +1897,14 @@ impl Actor { } } - async fn handle_ping_actions(&mut self, mut msgs: Vec) { - if msgs.is_empty() { - return; - } - if let Err(err) = - std::future::poll_fn(|cx| self.msock.poll_handle_ping_actions(cx, &mut msgs)).await - { - debug!("failed to send pings: {err:?}"); + #[instrument(skip_all)] + async fn handle_ping_actions(&mut self, msgs: Vec) { + // TODO: This used to make sure that all ping actions are sent. Though on the + // poll_send/try_send path we also do fire-and-forget. try_send_ping_actions() + // really should store any unsent pings on the Inner and send them at the next + // possible time. 
+ if let Err(err) = self.msock.try_send_ping_actions(msgs) { + warn!("Not all ping actions were sent: {err:#}"); } } @@ -1899,24 +1970,9 @@ impl Actor { false } + #[cfg_attr(windows, allow(dead_code))] fn normalized_local_addr(&self) -> io::Result { - let (v4, v6) = self.local_addr(); - if let Some(v6) = v6 { - return v6; - } - v4 - } - - fn local_addr(&self) -> (io::Result, Option>) { - // TODO: think more about this - // needs to pretend ipv6 always as the fake addrs are ipv6 - let mut ipv6_addr = None; - if let Some(ref conn) = self.pconn6 { - ipv6_addr = Some(conn.local_addr()); - } - let ipv4_addr = self.pconn4.local_addr(); - - (ipv4_addr, ipv6_addr) + self.msock.normalized_local_addr() } fn process_relay_read_result(&mut self, dm: RelayReadResult) -> Vec { @@ -1934,7 +1990,11 @@ impl Actor { // split the packet into these parts let parts = PacketSplitIter::new(dm.buf); // Normalize local_ip + #[cfg(not(windows))] let dst_ip = self.normalized_local_addr().ok().map(|addr| addr.ip()); + // Reasoning for this here: https://github.com/n0-computer/iroh/pull/2595#issuecomment-2290947319 + #[cfg(windows)] + let dst_ip = None; let mut out = Vec::new(); for part in parts { @@ -2485,22 +2545,24 @@ impl DiscoveredDirectAddrs { } } -/// Split a number of transmits into individual packets. +/// Split a transmit containing a GSO payload into individual packets. /// -/// For each transmit, if it has a segment size, it will be split into -/// multiple packets according to that segment size. If it does not have a -/// segment size, the contents will be sent as a single packet. 
-fn split_packets(transmits: &[quinn_udp::Transmit]) -> RelayContents { - let mut res = SmallVec::with_capacity(transmits.len()); - for transmit in transmits { - let contents = &transmit.contents; - if let Some(segment_size) = transmit.segment_size { - for chunk in contents.chunks(segment_size) { - res.push(contents.slice_ref(chunk)); - } - } else { - res.push(contents.clone()); +/// This allocates the data. +/// +/// If the transmit has a segment size it contains multiple GSO packets. It will be split +/// into multiple packets according to that segment size. If it does not have a segment +/// size, the contents will be sent as a single packet. +// TODO: If quinn stayed on bytes this would probably be much cheaper, probably. Need to +// figure out where they allocate the Vec. +fn split_packets(transmit: &quinn_udp::Transmit) -> RelayContents { + let mut res = SmallVec::with_capacity(1); + let contents = transmit.contents; + if let Some(segment_size) = transmit.segment_size { + for chunk in contents.chunks(segment_size) { + res.push(Bytes::from(chunk.to_vec())); } + } else { + res.push(Bytes::from(contents.to_vec())); } res } @@ -2563,7 +2625,7 @@ impl Iterator for PacketSplitIter { /// comes in as the inner [`SocketAddr`], in those interfaces we have to be careful to do /// the conversion to this type. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub(crate) struct QuicMappedAddr(SocketAddr); +pub(crate) struct QuicMappedAddr(pub(crate) SocketAddr); /// Counter to always generate unique addresses for [`QuicMappedAddr`]. 
static ADDR_COUNTER: AtomicU64 = AtomicU64::new(1); @@ -2742,10 +2804,16 @@ mod tests { use iroh_test::CallOnDrop; use rand::RngCore; - use crate::{defaults::staging::EU_RELAY_HOSTNAME, relay::RelayMode, tls, Endpoint}; + use crate::defaults::staging::EU_RELAY_HOSTNAME; + use crate::relay::RelayMode; + use crate::tls; + use crate::util::AbortingJoinHandle; + use crate::Endpoint; use super::*; + const ALPN: &[u8] = b"n0/test/1"; + impl MagicSock { #[track_caller] pub fn add_test_addr(&self, node_addr: NodeAddr) { @@ -2761,8 +2829,6 @@ mod tests { endpoint: Endpoint, } - const ALPN: &[u8] = b"n0/test/1"; - impl MagicStack { async fn new(relay_mode: RelayMode) -> Result { let secret_key = SecretKey::generate(); @@ -2898,7 +2964,8 @@ mod tests { } info!("finishing"); - send_bi.finish().await.context("[receiver] finishing")?; + send_bi.finish().context("[receiver] finishing")?; + send_bi.stopped().await.context("[receiver] stopped")?; let stats = conn.stats(); info!("stats: {:#?}", stats); @@ -2933,7 +3000,8 @@ mod tests { send_bi.write_all(msg).await.context("[sender] write all")?; info!("finishing"); - send_bi.finish().await.context("[sender] finish")?; + send_bi.finish().context("[sender] finish")?; + send_bi.stopped().await.context("[sender] stopped")?; info!("reading_to_end"); let val = recv_bi.read_to_end(usize::MAX).await.context("[sender]")?; @@ -3164,8 +3232,8 @@ mod tests { let key = SecretKey::generate(); let conn = std::net::UdpSocket::bind(addr)?; - let tls_server_config = tls::make_server_config(&key, vec![ALPN.to_vec()], false)?; - let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_server_config)); + let quic_server_config = tls::make_server_config(&key, vec![ALPN.to_vec()], false)?; + let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_server_config)); let mut transport_config = quinn::TransportConfig::default(); transport_config.keep_alive_interval(Some(Duration::from_secs(5))); 
transport_config.max_idle_timeout(Some(Duration::from_secs(10).try_into().unwrap())); @@ -3177,9 +3245,9 @@ mod tests { Arc::new(quinn::TokioRuntime), )?; - let tls_client_config = tls::make_client_config(&key, None, vec![ALPN.to_vec()], false)?; - let mut client_config = quinn::ClientConfig::new(Arc::new(tls_client_config)); + let quic_client_config = tls::make_client_config(&key, None, vec![ALPN.to_vec()], false)?; + let mut client_config = quinn::ClientConfig::new(Arc::new(quic_client_config)); let mut transport_config = quinn::TransportConfig::default(); transport_config.max_idle_timeout(Some(Duration::from_secs(10).try_into().unwrap())); client_config.transport_config(Arc::new(transport_config)); @@ -3206,73 +3274,79 @@ mod tests { println!("{}: {}, {}: {}", a_name, a_addr, b_name, b_addr); let b_task = tokio::task::spawn(async move { - println!("[{}] accepting conn", b_name); + println!("[{b_name}] accepting conn"); let conn = b.accept().await.expect("no conn"); println!("[{}] connecting", b_name); let conn = conn .await - .with_context(|| format!("[{}] connecting", b_name))?; + .with_context(|| format!("[{b_name}] connecting"))?; println!("[{}] accepting bi", b_name); let (mut send_bi, mut recv_bi) = conn .accept_bi() .await - .with_context(|| format!("[{}] accepting bi", b_name))?; + .with_context(|| format!("[{b_name}] accepting bi"))?; - println!("[{}] reading", b_name); + println!("[{b_name}] reading"); let val = recv_bi .read_to_end(usize::MAX) .await - .with_context(|| format!("[{}] reading to end", b_name))?; - println!("[{}] finishing", b_name); + .with_context(|| format!("[{b_name}] reading to end"))?; + println!("[{b_name}] finishing"); send_bi .finish() + .with_context(|| format!("[{b_name}] finishing"))?; + send_bi + .stopped() .await - .with_context(|| format!("[{}] finishing", b_name))?; + .with_context(|| format!("[{b_name}] stopped"))?; - println!("[{}] close", b_name); + println!("[{b_name}] close"); conn.close(0u32.into(), b"done"); - println!("[{}] closed", b_name); + println!("[{b_name}] closed"); Ok::<_, 
anyhow::Error>(val) }); - println!("[{}] connecting to {}", a_name, b_addr); + println!("[{a_name}] connecting to {b_addr}"); let conn = a .connect(b_addr, "localhost")? .await - .with_context(|| format!("[{}] connect", a_name))?; + .with_context(|| format!("[{a_name}] connect"))?; - println!("[{}] opening bi", a_name); + println!("[{a_name}] opening bi"); let (mut send_bi, mut recv_bi) = conn .open_bi() .await - .with_context(|| format!("[{}] open bi", a_name))?; - println!("[{}] writing message", a_name); + .with_context(|| format!("[{a_name}] open bi"))?; + println!("[{a_name}] writing message"); send_bi .write_all(&$msg[..]) .await - .with_context(|| format!("[{}] write all", a_name))?; + .with_context(|| format!("[{a_name}] write all"))?; - println!("[{}] finishing", a_name); + println!("[{a_name}] finishing"); send_bi .finish() + .with_context(|| format!("[{a_name}] finish"))?; + send_bi + .stopped() .await - .with_context(|| format!("[{}] finish", a_name))?; + .with_context(|| format!("[{a_name}] stopped"))?; - println!("[{}] reading_to_end", a_name); + println!("[{a_name}] reading_to_end"); let _ = recv_bi .read_to_end(usize::MAX) .await - .with_context(|| format!("[{}]", a_name))?; - println!("[{}] close", a_name); + .with_context(|| format!("[{a_name}] reading_to_end"))?; + println!("[{a_name}] close"); conn.close(0u32.into(), b"done"); - println!("[{}] wait idle", a_name); + println!("[{a_name}] wait idle"); a.wait_idle().await; drop(send_bi); // make sure the right values arrived - println!("[{}] waiting for channel", a_name); + println!("[{a_name}] waiting for channel"); let val = b_task.await??; anyhow::ensure!( val == $msg, @@ -3307,8 +3381,8 @@ mod tests { let key = SecretKey::generate(); let conn = UdpConn::bind(addr.port(), addr.ip().into())?; - let tls_server_config = tls::make_server_config(&key, vec![ALPN.to_vec()], false)?; - let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_server_config)); + let quic_server_config = 
tls::make_server_config(&key, vec![ALPN.to_vec()], false)?; + let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_server_config)); let mut transport_config = quinn::TransportConfig::default(); transport_config.keep_alive_interval(Some(Duration::from_secs(5))); transport_config.max_idle_timeout(Some(Duration::from_secs(10).try_into().unwrap())); @@ -3316,13 +3390,13 @@ mod tests { let mut quic_ep = quinn::Endpoint::new_with_abstract_socket( quinn::EndpointConfig::default(), Some(server_config), - conn, + Arc::new(conn), Arc::new(quinn::TokioRuntime), )?; - let tls_client_config = + let quic_client_config = tls::make_client_config(&key, None, vec![ALPN.to_vec()], false)?; - let mut client_config = quinn::ClientConfig::new(Arc::new(tls_client_config)); + let mut client_config = quinn::ClientConfig::new(Arc::new(quic_client_config)); let mut transport_config = quinn::TransportConfig::default(); transport_config.max_idle_timeout(Some(Duration::from_secs(10).try_into().unwrap())); client_config.transport_config(Arc::new(transport_config)); @@ -3373,8 +3447,11 @@ mod tests { println!("[{}] finishing", b_name); send_bi .finish() - .await .with_context(|| format!("[{}] finishing", b_name))?; + send_bi + .stopped() + .await + .with_context(|| format!("[{b_name}] stopped"))?; println!("[{}] close", b_name); conn.close(0u32.into(), b"done"); @@ -3403,8 +3480,11 @@ mod tests { println!("[{}] finishing", a_name); send_bi .finish() - .await .with_context(|| format!("[{}] finish", a_name))?; + send_bi + .stopped() + .await + .with_context(|| format!("[{a_name}] stopped"))?; println!("[{}] reading_to_end", a_name); let _ = recv_bi @@ -3448,12 +3528,12 @@ mod tests { #[test] fn test_split_packets() { - fn mk_transmit(contents: &[u8], segment_size: Option) -> quinn_udp::Transmit { + fn mk_transmit(contents: &[u8], segment_size: Option) -> quinn_udp::Transmit<'_> { let destination = "127.0.0.1:0".parse().unwrap(); quinn_udp::Transmit { destination, ecn: None, - 
contents: contents.to_vec().into(), + contents, segment_size, src_ip: None, } @@ -3464,36 +3544,25 @@ mod tests { .map(|p| p.as_bytes().to_vec().into()) .collect() } - // no packets - assert_eq!(split_packets(&[]), SmallVec::<[Bytes; 1]>::default()); // no split assert_eq!( - split_packets(&vec![ - mk_transmit(b"hello", None), - mk_transmit(b"world", None) - ]), - mk_expected(["hello", "world"]) + split_packets(&mk_transmit(b"hello", None)), + mk_expected(["hello"]) ); // split without rest assert_eq!( - split_packets(&[mk_transmit(b"helloworld", Some(5))]), + split_packets(&mk_transmit(b"helloworld", Some(5))), mk_expected(["hello", "world"]) ); // split with rest and second transmit assert_eq!( - split_packets(&vec![ - mk_transmit(b"hello world", Some(5)), - mk_transmit(b"!", None) - ]), - mk_expected(["hello", " worl", "d", "!"]) // spellchecker:disable-line + split_packets(&mk_transmit(b"hello world", Some(5))), + mk_expected(["hello", " worl", "d"]) // spellchecker:disable-line ); // split that results in 1 packet assert_eq!( - split_packets(&vec![ - mk_transmit(b"hello world", Some(1000)), - mk_transmit(b"!", None) - ]), - mk_expected(["hello world", "!"]) + split_packets(&mk_transmit(b"hello world", Some(1000))), + mk_expected(["hello world"]) ); } @@ -3544,4 +3613,276 @@ mod tests { Some(Some(url)) ); } + + /// Creates a new [`quinn::Endpoint`] hooked up to a [`MagicSock`]. + /// + /// This is without involving [`crate::endpoint::Endpoint`]. The socket will accept + /// connections using [`ALPN`]. + /// + /// Use [`magicsock_connect`] to establish connections. 
+ #[instrument(name = "ep", skip_all, fields(me = secret_key.public().fmt_short()))] + async fn magicsock_ep(secret_key: SecretKey) -> anyhow::Result<(quinn::Endpoint, Handle)> { + let opts = Options { + port: 0, + secret_key: secret_key.clone(), + relay_map: RelayMap::empty(), + node_map: None, + discovery: None, + dns_resolver: crate::dns::default_resolver().clone(), + proxy_url: None, + insecure_skip_relay_cert_verify: true, + }; + let msock = MagicSock::spawn(opts).await?; + let server_config = crate::endpoint::make_server_config( + &secret_key, + vec![ALPN.to_vec()], + Arc::new(quinn::TransportConfig::default()), + true, + )?; + let mut endpoint_config = quinn::EndpointConfig::default(); + endpoint_config.grease_quic_bit(false); + let endpoint = quinn::Endpoint::new_with_abstract_socket( + endpoint_config, + Some(server_config), + Arc::new(msock.clone()), + Arc::new(quinn::TokioRuntime), + )?; + Ok((endpoint, msock)) + } + + /// Connects from `ep` returned by [`magicsock_ep`] to the `node_id`. + /// + /// Uses [`ALPN`], `node_id`, must match `addr`. + #[instrument(name = "connect", skip_all, fields(me = ep_secret_key.public().fmt_short()))] + async fn magicsock_connect( + ep: &quinn::Endpoint, + ep_secret_key: SecretKey, + addr: QuicMappedAddr, + node_id: NodeId, + ) -> Result { + // Endpoint::connect sets this, do the same to have similar behaviour. + let mut transport_config = quinn::TransportConfig::default(); + transport_config.keep_alive_interval(Some(Duration::from_secs(1))); + + magicsock_connet_with_transport_config( + ep, + ep_secret_key, + addr, + node_id, + Arc::new(transport_config), + ) + .await + } + + /// Connects from `ep` returned by [`magicsock_ep`] to the `node_id`. + /// + /// This version allows customising the transport config. + /// + /// Uses [`ALPN`], `node_id`, must match `addr`. 
+ #[instrument(name = "connect", skip_all, fields(me = ep_secret_key.public().fmt_short()))] + async fn magicsock_connet_with_transport_config( + ep: &quinn::Endpoint, + ep_secret_key: SecretKey, + addr: QuicMappedAddr, + node_id: NodeId, + transport_config: Arc, + ) -> Result { + let alpns = vec![ALPN.to_vec()]; + let quic_client_config = + tls::make_client_config(&ep_secret_key, Some(node_id), alpns, true)?; + let mut client_config = quinn::ClientConfig::new(Arc::new(quic_client_config)); + client_config.transport_config(transport_config); + let connect = ep.connect_with(client_config, addr.0, "localhost")?; + let connection = connect.await?; + Ok(connection) + } + + #[tokio::test] + async fn test_try_send_no_send_addr() { + // Regression test: if there is no send_addr we should keep being able to use the + // Endpoint. + let _guard = iroh_test::logging::setup(); + + let secret_key_1 = SecretKey::from_bytes(&[1u8; 32]); + let secret_key_2 = SecretKey::from_bytes(&[2u8; 32]); + let node_id_2 = secret_key_2.public(); + let secret_key_missing_node = SecretKey::from_bytes(&[255u8; 32]); + let node_id_missing_node = secret_key_missing_node.public(); + + let (ep_1, msock_1) = magicsock_ep(secret_key_1.clone()).await.unwrap(); + + // Generate an address not present in the NodeMap. + let bad_addr = QuicMappedAddr::generate(); + + // 500ms is rather fast here. Running this locally it should always be the correct + // timeout. If this is too slow however the test will not become flaky as we are + // expecting the timeout, we might just get the timeout for the wrong reason. But + // this speeds up the test. + let res = tokio::time::timeout( + Duration::from_millis(500), + magicsock_connect(&ep_1, secret_key_1.clone(), bad_addr, node_id_missing_node), + ) + .await; + assert!(res.is_err(), "expecting timeout"); + + // Now check we can still create another connection with this endpoint. 
+ let (ep_2, msock_2) = magicsock_ep(secret_key_2.clone()).await.unwrap(); + + // This needs an accept task + let accept_task = tokio::spawn({ + async fn accept(ep: quinn::Endpoint) -> Result<()> { + let incoming = ep.accept().await.ok_or(anyhow!("no incoming"))?; + let _conn = incoming.accept()?.await?; + + // Keep this connection alive for a while + tokio::time::sleep(Duration::from_secs(10)).await; + info!("accept finished"); + Ok(()) + } + let ep_2 = ep_2.clone(); + async move { + if let Err(err) = accept(ep_2).await { + error!("{err:#}"); + } + } + .instrument(info_span!("ep2.accept", me = node_id_2.fmt_short())) + }); + let _accept_task = AbortingJoinHandle::from(accept_task); + + let node_addr_2 = NodeAddr { + node_id: node_id_2, + info: AddrInfo { + relay_url: None, + direct_addresses: msock_2 + .direct_addresses() + .next() + .await + .expect("no direct addrs") + .into_iter() + .map(|x| x.addr) + .collect(), + }, + }; + msock_1 + .add_node_addr(node_addr_2, Source::NamedApp { name: "test" }) + .unwrap(); + let addr = msock_1.get_mapping_addr(node_id_2).unwrap(); + let res = tokio::time::timeout( + Duration::from_secs(10), + magicsock_connect(&ep_1, secret_key_1.clone(), addr, node_id_2), + ) + .await + .expect("timeout while connecting"); + + // aka assert!(res.is_ok()) but with nicer error reporting. + res.unwrap(); + + // TODO: Now check if we can connect to a repaired ep_3, but we can't modify that + // much internal state for now. + } + + #[tokio::test] + async fn test_try_send_no_udp_addr_or_relay_url() { + // This specifically tests the `if udp_addr.is_none() && relay_url.is_none()` + // behaviour of MagicSock::try_send. 
+ let _logging_guard = iroh_test::logging::setup(); + + let secret_key_1 = SecretKey::from_bytes(&[1u8; 32]); + let secret_key_2 = SecretKey::from_bytes(&[2u8; 32]); + let node_id_2 = secret_key_2.public(); + + let (ep_1, msock_1) = magicsock_ep(secret_key_1.clone()).await.unwrap(); + let (ep_2, msock_2) = magicsock_ep(secret_key_2.clone()).await.unwrap(); + + // We need a task to accept the connection. + let accept_task = tokio::spawn({ + async fn accept(ep: quinn::Endpoint) -> Result<()> { + let incoming = ep.accept().await.ok_or(anyhow!("no incoming"))?; + let conn = incoming.accept()?.await?; + let mut stream = conn.accept_uni().await?; + stream.read_to_end(1 << 16).await?; + info!("accept finished"); + Ok(()) + } + let ep_2 = ep_2.clone(); + async move { + if let Err(err) = accept(ep_2).await { + error!("{err:#}"); + } + } + .instrument(info_span!("ep2.accept", me = node_id_2.fmt_short())) + }); + let _accept_task = AbortingJoinHandle::from(accept_task); + + // Add an empty entry in the NodeMap of ep_1 + msock_1.node_map.add_node_addr( + NodeAddr { + node_id: node_id_2, + info: AddrInfo::default(), + }, + Source::NamedApp { name: "test" }, + ); + let addr_2 = msock_1.get_mapping_addr(node_id_2).unwrap(); + + // Set a low max_idle_timeout so quinn gives up on this quickly and our test does + // not take forever. You need to check the log output to verify this is really + // triggering the correct error. + // In test_try_send_no_send_addr() above you may have noticed we used + // tokio::time::timeout() on the connection attempt instead. Here however we want + // Quinn itself to have fully given up on the connection attempt because we will + // later connect to **the same** node. If Quinn did not give up on the connection + // we'd close it on drop, and the retransmits of the close packets would interfere + // with the next handshake, closing it during the handshake. This makes the test a + // little slower though. 
+ let mut transport_config = quinn::TransportConfig::default(); + transport_config.max_idle_timeout(Some(Duration::from_millis(200).try_into().unwrap())); + let res = magicsock_connet_with_transport_config( + &ep_1, + secret_key_1.clone(), + addr_2, + node_id_2, + Arc::new(transport_config), + ) + .await; + assert!(res.is_err(), "expected timeout"); + info!("first connect timed out as expected"); + + // Provide correct addressing information + msock_1.node_map.add_node_addr( + NodeAddr { + node_id: node_id_2, + info: AddrInfo { + relay_url: None, + direct_addresses: msock_2 + .direct_addresses() + .next() + .await + .expect("no direct addrs") + .into_iter() + .map(|x| x.addr) + .collect(), + }, + }, + Source::NamedApp { name: "test" }, + ); + + // We can now connect + tokio::time::timeout(Duration::from_secs(10), async move { + info!("establishing new connection"); + let conn = magicsock_connect(&ep_1, secret_key_1.clone(), addr_2, node_id_2) + .await + .unwrap(); + info!("have connection"); + let mut stream = conn.open_uni().await.unwrap(); + stream.write_all(b"hello").await.unwrap(); + stream.finish().unwrap(); + stream.stopped().await.unwrap(); + info!("finished stream"); + }) + .await + .expect("connection timed out"); + + // TODO: could remove the addresses again, send, add it back and see it recover. + // But we don't have that much private access to the NodeMap. This will do for now. + } } diff --git a/iroh-net/src/magicsock/node_map.rs b/iroh-net/src/magicsock/node_map.rs index 74b7dc1132..eb76c4aa9f 100644 --- a/iroh-net/src/magicsock/node_map.rs +++ b/iroh-net/src/magicsock/node_map.rs @@ -76,7 +76,7 @@ pub(super) struct NodeMapInner { /// /// You can look up entries in [`NodeMap`] with various keys, depending on the context you /// have for the node. These are all the keys the [`NodeMap`] can use. 
-#[derive(Clone)] +#[derive(Debug, Clone)] enum NodeStateKey { Idx(usize), NodeId(NodeId), @@ -199,6 +199,7 @@ impl NodeMap { let mut inner = self.inner.lock(); let ep = inner.get_mut(NodeStateKey::QuicMappedAddr(addr))?; let public_key = *ep.public_key(); + trace!(dest = %addr, node_id = %public_key.fmt_short(), "dst mapped to NodeId"); let (udp_addr, relay_url, msgs) = ep.get_send_addrs(have_ipv6); Some((public_key, udp_addr, relay_url, msgs)) } @@ -286,7 +287,6 @@ impl NodeMapInner { active: false, source, }); - node_state.update_from_node_addr(&info); let id = node_state.id(); for addr in &info.direct_addresses { diff --git a/iroh-net/src/magicsock/udp_conn.rs b/iroh-net/src/magicsock/udp_conn.rs index e6d6444d09..8352786a4b 100644 --- a/iroh-net/src/magicsock/udp_conn.rs +++ b/iroh-net/src/magicsock/udp_conn.rs @@ -1,13 +1,16 @@ use std::{ fmt::Debug, + future::Future, io, net::SocketAddr, + pin::Pin, sync::Arc, task::{ready, Context, Poll}, }; use anyhow::{bail, Context as _}; use quinn::AsyncUdpSocket; +use quinn_udp::{Transmit, UdpSockRef}; use tokio::io::Interest; use tracing::{debug, trace, warn}; @@ -18,7 +21,7 @@ use crate::net::UdpSocket; #[derive(Clone, Debug)] pub struct UdpConn { io: Arc, - state: Arc, + inner: Arc, } impl UdpConn { @@ -28,9 +31,10 @@ impl UdpConn { pub(super) fn bind(port: u16, network: IpFamily) -> anyhow::Result { let sock = bind(port, network)?; + let state = quinn_udp::UdpSocketState::new(quinn_udp::UdpSockRef::from(&sock))?; Ok(Self { io: Arc::new(sock), - state: Default::default(), + inner: Arc::new(state), }) } @@ -46,32 +50,22 @@ impl UdpConn { } impl AsyncUdpSocket for UdpConn { - fn poll_send( - &self, - state: &quinn_udp::UdpState, - cx: &mut Context, - transmits: &[quinn_udp::Transmit], - ) -> Poll> { - let inner = &self.state; - let io = &self.io; - loop { - ready!(io.poll_send_ready(cx))?; - if let Ok(res) = io.try_io(Interest::WRITABLE, || { - inner.send(Arc::as_ref(io).into(), state, transmits) - }) { - for t in 
transmits.iter().take(res) { - trace!( - dst = %t.destination, - len = t.contents.len(), - count = t.segment_size.map(|ss| t.contents.len() / ss).unwrap_or(1), - src = %t.src_ip.map(|x| x.to_string()).unwrap_or_default(), - "UDP send" - ); - } + fn create_io_poller(self: Arc) -> Pin> { + let sock = self.io.clone(); + Box::pin(IoPoller { + next_waiter: move || { + let sock = sock.clone(); + async move { sock.writable().await } + }, + waiter: None, + }) + } - return Poll::Ready(Ok(res)); - } - } + fn try_send(&self, transmit: &Transmit<'_>) -> io::Result<()> { + self.io.try_io(Interest::WRITABLE, || { + let sock_ref = UdpSockRef::from(&self.io); + self.inner.send(sock_ref, transmit) + }) } fn poll_recv( @@ -83,7 +77,7 @@ impl AsyncUdpSocket for UdpConn { loop { ready!(self.io.poll_recv_ready(cx))?; if let Ok(res) = self.io.try_io(Interest::READABLE, || { - self.state.recv(Arc::as_ref(&self.io).into(), bufs, meta) + self.inner.recv(Arc::as_ref(&self.io).into(), bufs, meta) }) { for meta in meta.iter().take(res) { trace!( @@ -103,6 +97,18 @@ impl AsyncUdpSocket for UdpConn { fn local_addr(&self) -> io::Result { self.io.local_addr() } + + fn may_fragment(&self) -> bool { + self.inner.may_fragment() + } + + fn max_transmit_segments(&self) -> usize { + self.inner.max_gso_segments() + } + + fn max_receive_segments(&self) -> usize { + self.inner.gro_segments() + } } fn bind(port: u16, network: IpFamily) -> anyhow::Result { @@ -145,6 +151,53 @@ fn bind(port: u16, network: IpFamily) -> anyhow::Result { ); } +/// Poller for when the socket is writable. +/// +/// The tricky part is that we only have `tokio::net::UdpSocket::writable()` to create the +/// waiter we need, which does not return a named future type. In order to be able to store +/// this waiter in a struct without boxing we need to specify the future itself as a type +/// parameter, which we can only do if we introduce a second type parameter which returns +/// the future. 
So we end up with a function which we do not need, but it makes the types +/// work. +#[derive(derive_more::Debug)] +#[pin_project::pin_project] +struct IoPoller +where + F: Fn() -> Fut + Send + Sync + 'static, + Fut: Future> + Send + Sync + 'static, +{ + /// Function which can create a new waiter if there is none. + #[debug("next_waiter")] + next_waiter: F, + /// The waiter which tells us when the socket is writable. + #[debug("waiter")] + #[pin] + waiter: Option, +} + +impl quinn::UdpPoller for IoPoller +where + F: Fn() -> Fut + Send + Sync + 'static, + Fut: Future> + Send + Sync + 'static, +{ + fn poll_writable(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + if this.waiter.is_none() { + this.waiter.set(Some((this.next_waiter)())); + } + let result = this + .waiter + .as_mut() + .as_pin_mut() + .expect("just set") + .poll(cx); + if result.is_ready() { + this.waiter.set(None); + } + result + } +} + #[cfg(test)] mod tests { use crate::{key, tls}; @@ -152,33 +205,36 @@ mod tests { use super::*; use anyhow::Result; use tokio::sync::mpsc; + use tracing::{info_span, Instrument}; const ALPN: &[u8] = b"n0/test/1"; fn wrap_socket(conn: impl AsyncUdpSocket) -> Result<(quinn::Endpoint, key::SecretKey)> { let key = key::SecretKey::generate(); - let tls_server_config = tls::make_server_config(&key, vec![ALPN.to_vec()], false)?; - let server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_server_config)); + let quic_server_config = tls::make_server_config(&key, vec![ALPN.to_vec()], false)?; + let server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_server_config)); let mut quic_ep = quinn::Endpoint::new_with_abstract_socket( quinn::EndpointConfig::default(), Some(server_config), - conn, + Arc::new(conn), Arc::new(quinn::TokioRuntime), )?; - let tls_client_config = tls::make_client_config(&key, None, vec![ALPN.to_vec()], false)?; - let client_config = quinn::ClientConfig::new(Arc::new(tls_client_config)); + let 
quic_client_config = tls::make_client_config(&key, None, vec![ALPN.to_vec()], false)?; + let client_config = quinn::ClientConfig::new(Arc::new(quic_client_config)); quic_ep.set_default_client_config(client_config); Ok((quic_ep, key)) } #[tokio::test] async fn test_rebinding_conn_send_recv_ipv4() -> Result<()> { + let _guard = iroh_test::logging::setup(); rebinding_conn_send_recv(IpFamily::V4).await } #[tokio::test] async fn test_rebinding_conn_send_recv_ipv6() -> Result<()> { + let _guard = iroh_test::logging::setup(); if !crate::netcheck::os_has_ipv6() { return Ok(()); } @@ -195,24 +251,29 @@ mod tests { let m1_addr = SocketAddr::new(network.local_addr(), m1.local_addr()?.port()); let (m1_send, mut m1_recv) = mpsc::channel(8); - let m1_task = tokio::task::spawn(async move { - if let Some(conn) = m1.accept().await { - let conn = conn.await?; - let (mut send_bi, mut recv_bi) = conn.accept_bi().await?; + let m1_task = tokio::task::spawn( + async move { + // we skip accept() errors, they can be caused by retransmits + if let Some(conn) = m1.accept().await.and_then(|inc| inc.accept().ok()) { + let conn = conn.await?; + let (mut send_bi, mut recv_bi) = conn.accept_bi().await?; + + let val = recv_bi.read_to_end(usize::MAX).await?; + m1_send.send(val).await?; + send_bi.finish()?; + send_bi.stopped().await?; + } - let val = recv_bi.read_to_end(usize::MAX).await?; - m1_send.send(val).await?; - send_bi.finish().await?; + Ok::<_, anyhow::Error>(()) } - - Ok::<_, anyhow::Error>(()) - }); + .instrument(info_span!("m1_task")), + ); let conn = m2.connect(m1_addr, "localhost")?.await?; let (mut send_bi, mut recv_bi) = conn.open_bi().await?; send_bi.write_all(b"hello").await?; - send_bi.finish().await?; + send_bi.finish()?; let _ = recv_bi.read_to_end(usize::MAX).await?; conn.close(0u32.into(), b"done"); diff --git a/iroh-net/src/net/udp.rs b/iroh-net/src/net/udp.rs index ad21f27e4e..3aba36277f 100644 --- a/iroh-net/src/net/udp.rs +++ b/iroh-net/src/net/udp.rs @@ -37,21 +37,21 @@ 
impl UdpSocket { /// Bind to the given port only on localhost. pub fn bind_local(network: IpFamily, port: u16) -> Result { let addr = SocketAddr::new(network.local_addr(), port); - Self::bind_raw(addr, true).with_context(|| format!("{addr:?}")) + Self::bind_raw(addr).with_context(|| format!("{addr:?}")) } /// Bind to the given port and listen on all interfaces. pub fn bind(network: IpFamily, port: u16) -> Result { let addr = SocketAddr::new(network.unspecified_addr(), port); - Self::bind_raw(addr, true).with_context(|| format!("{addr:?}")) + Self::bind_raw(addr).with_context(|| format!("{addr:?}")) } - /// Bind to any provided [`SocketAddr`]. Does not prepare for using the socket as QUIC socket. + /// Bind to any provided [`SocketAddr`]. pub fn bind_full(addr: impl Into) -> Result { - Self::bind_raw(addr, false) + Self::bind_raw(addr) } - fn bind_raw(addr: impl Into, prepare_for_quinn: bool) -> Result { + fn bind_raw(addr: impl Into) -> Result { let addr = addr.into(); let network = IpFamily::from(addr.ip()); let socket = socket2::Socket::new( @@ -82,9 +82,6 @@ impl UdpSocket { // is not yet available on all OSes. 
socket.bind(&addr.into()).context("binding")?; - if prepare_for_quinn { - quinn_udp::UdpSocketState::configure((&socket).into()).context("QUIC config")?; - } // Ensure nonblocking socket.set_nonblocking(true).context("nonblocking: true")?; diff --git a/iroh-net/src/relay/client.rs b/iroh-net/src/relay/client.rs index 31ebfe219b..ef2db9e43b 100644 --- a/iroh-net/src/relay/client.rs +++ b/iroh-net/src/relay/client.rs @@ -305,18 +305,16 @@ impl ClientBuilder { /// Build the [`Client`] pub fn build(self, key: SecretKey, dns_resolver: DnsResolver) -> (Client, ClientReceiver) { // TODO: review TLS config - let mut roots = rustls::RootCertStore::empty(); - roots.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { - rustls::OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject, - ta.spki, - ta.name_constraints, - ) - })); - let mut config = rustls::client::ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(roots) - .with_no_client_auth(); + let roots = rustls::RootCertStore { + roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), + }; + let mut config = rustls::client::ClientConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_safe_default_protocol_versions() + .expect("protocols supported by ring") + .with_root_certificates(roots) + .with_no_client_auth(); #[cfg(any(test, feature = "test-utils"))] if self.insecure_skip_cert_verify { warn!("Insecure config: SSL certificates from relay servers will be trusted without verification"); @@ -656,6 +654,7 @@ impl Actor { let hostname = self .tls_servername() .ok_or_else(|| ClientError::InvalidUrl("No tls servername".into()))?; + let hostname = hostname.to_owned(); let tls_stream = self.tls_connector.connect(hostname, tcp_stream).await?; debug!("tls_connector connect success"); Self::start_upgrade(tls_stream).await? 
@@ -828,10 +827,10 @@ impl Actor { self.conn_gen } - fn tls_servername(&self) -> Option { + fn tls_servername(&self) -> Option { self.url .host_str() - .and_then(|s| rustls::ServerName::try_from(s).ok()) + .and_then(|s| rustls::pki_types::ServerName::try_from(s).ok()) } fn use_tls(&self) -> bool { @@ -909,7 +908,7 @@ impl Actor { } else { let hostname = proxy_url .host_str() - .and_then(|s| rustls::ServerName::try_from(s).ok()) + .and_then(|s| rustls::pki_types::ServerName::try_from(s.to_string()).ok()) .ok_or_else(|| ClientError::InvalidUrl("No tls servername for proxy url".into()))?; let tls_stream = self.tls_connector.connect(hostname, tcp_stream).await?; MaybeTlsStream::Tls(tls_stream) @@ -1050,20 +1049,43 @@ async fn resolve_host( /// Used to allow self signed certificates in tests #[cfg(any(test, feature = "test-utils"))] +#[derive(Debug)] struct NoCertVerifier; #[cfg(any(test, feature = "test-utils"))] -impl rustls::client::ServerCertVerifier for NoCertVerifier { +impl rustls::client::danger::ServerCertVerifier for NoCertVerifier { fn verify_server_cert( &self, - _end_entity: &rustls::Certificate, - _intermediates: &[rustls::Certificate], - _server_name: &rustls::ServerName, - _scts: &mut dyn Iterator, + _end_entity: &rustls::pki_types::CertificateDer, + _intermediates: &[rustls::pki_types::CertificateDer], + _server_name: &rustls::pki_types::ServerName, _ocsp_response: &[u8], - _now: std::time::SystemTime, - ) -> Result { - Ok(rustls::client::ServerCertVerified::assertion()) + _now: rustls::pki_types::UnixTime, + ) -> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) + } + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn verify_tls13_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: 
&rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn supported_verify_schemes(&self) -> Vec { + rustls::crypto::ring::default_provider() + .signature_verification_algorithms + .supported_schemes() } } diff --git a/iroh-net/src/relay/server.rs b/iroh-net/src/relay/server.rs index adc4674416..e3544c0dc5 100644 --- a/iroh-net/src/relay/server.rs +++ b/iroh-net/src/relay/server.rs @@ -157,9 +157,9 @@ pub enum CertConfig { /// Use a static TLS key and certificate chain. Manual { /// The TLS private key. - private_key: rustls::PrivateKey, + private_key: rustls::pki_types::PrivateKeyDer<'static>, /// The TLS certificate chain. - certs: Vec, + certs: Vec>, }, } @@ -255,9 +255,12 @@ impl Server { .request_handler(Method::GET, "/robots.txt", Box::new(robots_handler)); let http_addr = match relay_config.tls { Some(tls_config) => { - let server_config = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth(); + let server_config = rustls::ServerConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_safe_default_protocol_versions() + .expect("protocols supported by ring") + .with_no_client_auth(); let server_tls_config = match tls_config.cert { CertConfig::LetsEncrypt { config } => { let mut state = config.state(); @@ -283,8 +286,8 @@ impl Server { }) } CertConfig::Manual { private_key, certs } => { - let server_config = server_config - .with_single_cert(certs.clone(), private_key.clone())?; + let server_config = + server_config.with_single_cert(certs.clone(), private_key)?; let server_config = Arc::new(server_config); let acceptor = tokio_rustls::TlsAcceptor::from(server_config.clone()); diff --git a/iroh-net/src/relay/server/http_server.rs b/iroh-net/src/relay/server/http_server.rs index fb1dd03e1a..d571cfdb13 100644 --- a/iroh-net/src/relay/server/http_server.rs +++ b/iroh-net/src/relay/server/http_server.rs @@ -678,7 +678,7 @@ impl 
std::ops::DerefMut for Handlers { #[cfg(test)] mod tests { - use super::*; + use std::sync::Arc; use anyhow::Result; use bytes::Bytes; @@ -692,19 +692,27 @@ mod tests { use crate::relay::client::conn::ReceivedMessage; use crate::relay::client::{Client, ClientBuilder}; + use super::*; + pub(crate) fn make_tls_config() -> TlsConfig { let subject_alt_names = vec!["localhost".to_string()]; let cert = rcgen::generate_simple_self_signed(subject_alt_names).unwrap(); - let rustls_certificate = rustls::Certificate(cert.serialize_der().unwrap()); - let rustls_key = rustls::PrivateKey(cert.get_key_pair().serialize_der()); - let config = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_single_cert(vec![(rustls_certificate)], rustls_key) - .unwrap(); - - let config = std::sync::Arc::new(config); + let rustls_certificate = + rustls::pki_types::CertificateDer::from(cert.serialize_der().unwrap()); + let rustls_key = + rustls::pki_types::PrivatePkcs8KeyDer::from(cert.get_key_pair().serialize_der()); + let rustls_key = rustls::pki_types::PrivateKeyDer::from(rustls_key); + let config = rustls::ServerConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_safe_default_protocol_versions() + .expect("protocols supported by ring") + .with_no_client_auth() + .with_single_cert(vec![(rustls_certificate)], rustls_key) + .expect("cert is right"); + + let config = Arc::new(config); let acceptor = tokio_rustls::TlsAcceptor::from(config.clone()); TlsConfig { diff --git a/iroh-net/src/test_utils.rs b/iroh-net/src/test_utils.rs index 6a8b411daa..93b66b4537 100644 --- a/iroh-net/src/test_utils.rs +++ b/iroh-net/src/test_utils.rs @@ -30,8 +30,10 @@ pub struct CleanupDropGuard(pub(crate) oneshot::Sender<()>); pub async fn run_relay_server() -> Result<(RelayMap, RelayUrl, Server)> { let secret_key = SecretKey::generate(); let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()]).unwrap(); - let 
rustls_cert = rustls::Certificate(cert.serialize_der().unwrap()); - let private_key = rustls::PrivateKey(cert.get_key_pair().serialize_der()); + let rustls_cert = rustls::pki_types::CertificateDer::from(cert.serialize_der().unwrap()); + let private_key = + rustls::pki_types::PrivatePkcs8KeyDer::from(cert.get_key_pair().serialize_der()); + let private_key = rustls::pki_types::PrivateKeyDer::from(private_key); let config = ServerConfig { relay: Some(RelayConfig { diff --git a/iroh-net/src/tls.rs b/iroh-net/src/tls.rs index b95525da4b..d24b8259ba 100644 --- a/iroh-net/src/tls.rs +++ b/iroh-net/src/tls.rs @@ -5,13 +5,27 @@ use std::sync::Arc; +use quinn::crypto::rustls::{NoInitialCipherSuite, QuicClientConfig, QuicServerConfig}; use tracing::warn; use crate::key::{PublicKey, SecretKey}; +use self::certificate::AlwaysResolvesCert; + pub mod certificate; mod verifier; +/// Error for generating iroh p2p TLS configs. +#[derive(Debug, thiserror::Error)] +pub enum CreateConfigError { + /// Error generating the certificate. + #[error("Error generating the certificate")] + CertError(#[from] certificate::GenError), + /// Error creating QUIC config. + #[error("Error creating QUIC config")] + ConfigError(#[from] NoInitialCipherSuite), +} + /// Create a TLS client configuration. 
/// /// If *keylog* is `true` this will enable logging of the pre-master key to the file in the @@ -22,26 +36,31 @@ pub fn make_client_config( remote_peer_id: Option, alpn_protocols: Vec>, keylog: bool, -) -> Result { +) -> Result { let (certificate, secret_key) = certificate::generate(secret_key)?; - let mut crypto = rustls::ClientConfig::builder() - .with_cipher_suites(verifier::CIPHERSUITES) - .with_safe_default_kx_groups() - .with_protocol_versions(verifier::PROTOCOL_VERSIONS) - .expect("Cipher suites and kx groups are configured; qed") - .with_custom_certificate_verifier(Arc::new( - verifier::Libp2pCertificateVerifier::with_remote_peer_id(remote_peer_id), - )) - .with_client_auth_cert(vec![certificate], secret_key) - .expect("Client cert key DER is valid; qed"); + let cert_resolver = Arc::new( + AlwaysResolvesCert::new(certificate, &secret_key) + .expect("Client cert key DER is valid; qed"), + ); + + let mut crypto = rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_protocol_versions(verifier::PROTOCOL_VERSIONS) + .expect("version supported by ring") + .dangerous() + .with_custom_certificate_verifier(Arc::new( + verifier::Libp2pCertificateVerifier::with_remote_peer_id(remote_peer_id), + )) + .with_client_cert_resolver(cert_resolver); crypto.alpn_protocols = alpn_protocols; if keylog { warn!("enabling SSLKEYLOGFILE for TLS pre-master keys"); crypto.key_log = Arc::new(rustls::KeyLogFile::new()); } - - Ok(crypto) + let config = crypto.try_into()?; + Ok(config) } /// Create a TLS server configuration. 
@@ -53,21 +72,26 @@ pub fn make_server_config( secret_key: &SecretKey, alpn_protocols: Vec>, keylog: bool, -) -> Result { +) -> Result { let (certificate, secret_key) = certificate::generate(secret_key)?; - let mut crypto = rustls::ServerConfig::builder() - .with_cipher_suites(verifier::CIPHERSUITES) - .with_safe_default_kx_groups() - .with_protocol_versions(verifier::PROTOCOL_VERSIONS) - .expect("Cipher suites and kx groups are configured; qed") - .with_client_cert_verifier(Arc::new(verifier::Libp2pCertificateVerifier::new())) - .with_single_cert(vec![certificate], secret_key) - .expect("Server cert key DER is valid; qed"); + let cert_resolver = Arc::new( + AlwaysResolvesCert::new(certificate, &secret_key) + .expect("Server cert key DER is valid; qed"), + ); + + let mut crypto = rustls::ServerConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_protocol_versions(verifier::PROTOCOL_VERSIONS) + .expect("fixed config") + .with_client_cert_verifier(Arc::new(verifier::Libp2pCertificateVerifier::new())) + .with_cert_resolver(cert_resolver); crypto.alpn_protocols = alpn_protocols; if keylog { warn!("enabling SSLKEYLOGFILE for TLS pre-master keys"); crypto.key_log = Arc::new(rustls::KeyLogFile::new()); } - Ok(crypto) + let config = crypto.try_into()?; + Ok(config) } diff --git a/iroh-net/src/tls/certificate.rs b/iroh-net/src/tls/certificate.rs index 8bfad4cd30..c0900a0435 100644 --- a/iroh-net/src/tls/certificate.rs +++ b/iroh-net/src/tls/certificate.rs @@ -10,6 +10,8 @@ use x509_parser::prelude::*; use crate::key::{PublicKey, SecretKey, Signature}; +use std::sync::Arc; + /// The libp2p Public Key Extension is a X.509 extension /// with the Object Identifier 1.3.6.1.4.1.53594.1.1, /// allocated by IANA to the libp2p project at Protocol Labs. @@ -26,6 +28,45 @@ const P2P_SIGNING_PREFIX: [u8; 21] = *b"libp2p-tls-handshake:"; // Similarly, hash functions with an output length less than 256 bits MUST NOT be used. 
static P2P_SIGNATURE_ALGORITHM: &rcgen::SignatureAlgorithm = &rcgen::PKCS_ECDSA_P256_SHA256; +#[derive(Debug)] +pub(crate) struct AlwaysResolvesCert(Arc); + +impl AlwaysResolvesCert { + pub(crate) fn new( + cert: rustls::pki_types::CertificateDer<'static>, + key: &rustls::pki_types::PrivateKeyDer<'_>, + ) -> Result { + let certified_key = rustls::sign::CertifiedKey::new( + vec![cert], + rustls::crypto::ring::sign::any_ecdsa_type(key)?, + ); + Ok(Self(Arc::new(certified_key))) + } +} + +impl rustls::client::ResolvesClientCert for AlwaysResolvesCert { + fn resolve( + &self, + _root_hint_subjects: &[&[u8]], + _sigschemes: &[rustls::SignatureScheme], + ) -> Option> { + Some(Arc::clone(&self.0)) + } + + fn has_certs(&self) -> bool { + true + } +} + +impl rustls::server::ResolvesServerCert for AlwaysResolvesCert { + fn resolve( + &self, + _client_hello: rustls::server::ClientHello<'_>, + ) -> Option> { + Some(Arc::clone(&self.0)) + } +} + /// The public host key and the signature are ANS.1-encoded /// into the SignedKey data structure, which is carried in the libp2p Public Key Extension. #[derive(Clone, Debug, Eq, PartialEq, Sequence)] @@ -38,15 +79,21 @@ struct SignedKey<'a> { /// certificate extension containing the public key of the given secret key. pub fn generate( identity_secret_key: &SecretKey, -) -> Result<(rustls::Certificate, rustls::PrivateKey), GenError> { +) -> Result< + ( + rustls::pki_types::CertificateDer<'static>, + rustls::pki_types::PrivateKeyDer<'static>, + ), + GenError, +> { // SecretKey used to sign the certificate. // SHOULD NOT be related to the host's key. // Endpoints MAY generate a new key and certificate // for every connection attempt, or they MAY reuse the same key // and certificate for multiple connections. 
let certificate_keypair = rcgen::KeyPair::generate(P2P_SIGNATURE_ALGORITHM)?; - let rustls_key = rustls::PrivateKey(certificate_keypair.serialize_der()); - + let rustls_key = + rustls::pki_types::PrivateKeyDer::try_from(certificate_keypair.serialize_der()).unwrap(); let certificate = { let mut params = rcgen::CertificateParams::new(vec![]); params.distinguished_name = rcgen::DistinguishedName::new(); @@ -59,7 +106,7 @@ pub fn generate( rcgen::Certificate::from_params(params)? }; - let rustls_certificate = rustls::Certificate(certificate.serialize_der()?); + let rustls_certificate = rustls::pki_types::CertificateDer::from(certificate.serialize_der()?); Ok((rustls_certificate, rustls_key)) } @@ -68,7 +115,9 @@ pub fn generate( /// /// For this to succeed, the certificate must contain the specified extension and the signature must /// match the embedded public key. -pub fn parse(certificate: &rustls::Certificate) -> Result, ParseError> { +pub fn parse<'a>( + certificate: &'a rustls::pki_types::CertificateDer<'_>, +) -> Result, ParseError> { let certificate = parse_unverified(certificate.as_ref())?; certificate.verify()?; diff --git a/iroh-net/src/tls/verifier.rs b/iroh-net/src/tls/verifier.rs index ebeb676efc..686e3ce0b9 100644 --- a/iroh-net/src/tls/verifier.rs +++ b/iroh-net/src/tls/verifier.rs @@ -7,14 +7,12 @@ //! Technologies (UK) Ltd. 
use std::sync::Arc; +use rustls::pki_types::CertificateDer as Certificate; use rustls::{ - cipher_suite::{ - TLS13_AES_128_GCM_SHA256, TLS13_AES_256_GCM_SHA384, TLS13_CHACHA20_POLY1305_SHA256, - }, - client::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, - server::{ClientCertVerified, ClientCertVerifier}, - Certificate, CertificateError, DigitallySignedStruct, DistinguishedName, PeerMisbehaved, - SignatureScheme, SupportedCipherSuite, SupportedProtocolVersion, + client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, + server::danger::{ClientCertVerified, ClientCertVerifier}, + CertificateError, DigitallySignedStruct, DistinguishedName, OtherError, PeerMisbehaved, + SignatureScheme, SupportedProtocolVersion, }; use crate::key::PublicKey; @@ -29,18 +27,10 @@ use super::certificate; /// > Endpoints MUST NOT negotiate lower TLS versions. pub static PROTOCOL_VERSIONS: &[&SupportedProtocolVersion] = &[&rustls::version::TLS13]; -/// A list of the TLS 1.3 cipher suites supported by rustls. -// By default rustls creates client/server configs with both -// TLS 1.3 __and__ 1.2 cipher suites. But we don't need 1.2. -pub static CIPHERSUITES: &[SupportedCipherSuite] = &[ - TLS13_AES_128_GCM_SHA256, - TLS13_AES_256_GCM_SHA384, - TLS13_CHACHA20_POLY1305_SHA256, -]; - /// Implementation of the `rustls` certificate verification traits for libp2p. /// /// Only TLS 1.3 is supported. TLS 1.2 should be disabled in the configuration of `rustls`. 
+#[derive(Debug)] pub struct Libp2pCertificateVerifier { /// The peer ID we intend to connect to remote_peer_id: Option, @@ -83,10 +73,9 @@ impl ServerCertVerifier for Libp2pCertificateVerifier { &self, end_entity: &Certificate, intermediates: &[Certificate], - _server_name: &rustls::ServerName, - _scts: &mut dyn Iterator, + _server_name: &rustls::pki_types::ServerName, _ocsp_response: &[u8], - _now: std::time::SystemTime, + _now: rustls::pki_types::UnixTime, ) -> Result { let peer_id = verify_presented_certs(end_entity, intermediates)?; @@ -140,15 +129,11 @@ impl ClientCertVerifier for Libp2pCertificateVerifier { true } - fn client_auth_root_subjects(&self) -> &[DistinguishedName] { - &[][..] - } - fn verify_client_cert( &self, end_entity: &Certificate, intermediates: &[Certificate], - _now: std::time::SystemTime, + _now: rustls::pki_types::UnixTime, ) -> Result { verify_presented_certs(end_entity, intermediates)?; @@ -176,6 +161,10 @@ impl ClientCertVerifier for Libp2pCertificateVerifier { fn supported_verify_schemes(&self) -> Vec { Self::verification_schemes() } + + fn root_hint_subjects(&self) -> &[DistinguishedName] { + &[][..] 
+ } } /// When receiving the certificate chain, an endpoint @@ -215,7 +204,9 @@ impl From for rustls::Error { use webpki::Error::*; match e { BadDer => rustls::Error::InvalidCertificate(CertificateError::BadEncoding), - e => rustls::Error::InvalidCertificate(CertificateError::Other(Arc::new(e))), + e => { + rustls::Error::InvalidCertificate(CertificateError::Other(OtherError(Arc::new(e)))) + } } } } @@ -229,7 +220,9 @@ impl From for rustls::Error { UnsupportedSignatureAlgorithm | UnsupportedSignatureAlgorithmForPublicKey => { rustls::Error::InvalidCertificate(CertificateError::BadSignature) } - e => rustls::Error::InvalidCertificate(CertificateError::Other(Arc::new(e))), + e => { + rustls::Error::InvalidCertificate(CertificateError::Other(OtherError(Arc::new(e)))) + } } } } diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 243330410c..81649e7b18 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -19,7 +19,7 @@ workspace = true anyhow = { version = "1" } async-channel = "2.3.1" bao-tree = { version = "0.13", features = ["tokio_fsm"], default-features = false } -bytes = "1" +bytes = "1.7" derive_more = { version = "=1.0.0-beta.7", features = ["debug", "display", "from", "try_into", "from_str"] } futures-buffered = "0.2.4" futures-lite = "2.3" @@ -38,9 +38,9 @@ iroh-docs = { version = "0.23.0", path = "../iroh-docs" } iroh-gossip = { version = "0.23.0", path = "../iroh-gossip" } parking_lot = "0.12.1" postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } -quic-rpc = { version = "0.11", default-features = false, features = ["flume-transport", "quinn-transport"] } -quic-rpc-derive = { version = "0.11" } -quinn = { package = "iroh-quinn", version = "0.10" } +quic-rpc = { version = "0.12", default-features = false, features = ["flume-transport", "quinn-transport"] } +quic-rpc-derive = { version = "0.12" } +quinn = { package = "iroh-quinn", version = "0.11" } rand = "0.8" serde = { version = "1", features = 
["derive"] } strum = { version = "0.25", features = ["derive"] } @@ -73,7 +73,6 @@ test-utils = ["iroh-net/test-utils"] [dev-dependencies] anyhow = { version = "1" } -bytes = "1" genawaiter = { version = "0.99", features = ["futures03"] } iroh = { path = ".", features = ["test-utils"] } iroh-test = { path = "../iroh-test" } diff --git a/iroh/examples/custom-protocol.rs b/iroh/examples/custom-protocol.rs index 02ad0ca0e1..c9b70fe58d 100644 --- a/iroh/examples/custom-protocol.rs +++ b/iroh/examples/custom-protocol.rs @@ -169,7 +169,11 @@ impl ProtocolHandler for BlobSearch { // By calling `finish` on the send stream we signal that we will not send anything // further, which makes the receive stream on the other end terminate. - send.finish().await?; + send.finish()?; + // By calling stopped we wait until the remote iroh Endpoint has acknowledged + // all data. This does not mean the remote application has received all data + // from the Endpoint. + send.stopped().await?; Ok(()) }) } @@ -200,7 +204,11 @@ impl BlobSearch { // Finish the send stream, signalling that no further data will be sent. // This makes the `read_to_end` call on the accepting side terminate. - send.finish().await?; + send.finish()?; + // By calling stopped we wait until the remote iroh Endpoint has acknowledged all + // data. This does not mean the remote application has received all data from the + // Endpoint. + send.stopped().await?; // In this example, we simply collect all results into a vector. // For real protocols, you'd usually want to return a stream of results instead. @@ -214,7 +222,7 @@ impl BlobSearch { match recv.read_exact(&mut hash_bytes).await { // FinishedEarly means that the remote side did not send further data, // so in this case we break our loop. - Err(quinn::ReadExactError::FinishedEarly) => break, + Err(quinn::ReadExactError::FinishedEarly(_)) => break, // Other errors are connection errors, so we bail. 
Err(err) => return Err(err.into()), Ok(_) => {} diff --git a/iroh/src/node.rs b/iroh/src/node.rs index 2b8a52edf2..a3697c7107 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -439,10 +439,10 @@ impl NodeInner { } }, // handle incoming p2p connections. - Some(connecting) = self.endpoint.accept() => { + Some(incoming) = self.endpoint.accept() => { let protocols = protocols.clone(); join_set.spawn(async move { - handle_connection(connecting, protocols).await; + handle_connection(incoming, protocols).await; Ok(()) }); }, @@ -597,14 +597,18 @@ impl NodeInner { } } -async fn handle_connection( - mut connecting: iroh_net::endpoint::Connecting, - protocols: Arc, -) { +async fn handle_connection(incoming: iroh_net::endpoint::Incoming, protocols: Arc) { + let mut connecting = match incoming.accept() { + Ok(conn) => conn, + Err(err) => { + warn!("Ignoring connection: accepting failed: {err:#}"); + return; + } + }; let alpn = match connecting.alpn().await { Ok(alpn) => alpn, Err(err) => { - warn!("Ignoring connection: invalid handshake: {:?}", err); + warn!("Ignoring connection: invalid handshake: {err:#}"); return; } }; diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 8ca6c05d21..f9ce3d4d9b 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -54,8 +54,6 @@ const ENDPOINT_WAIT: Duration = Duration::from_secs(5); /// Default interval between GC runs. const DEFAULT_GC_INTERVAL: Duration = Duration::from_secs(60 * 5); -const MAX_CONNECTIONS: u32 = 1024; - /// Storage backend for documents. 
#[derive(Debug, Clone)] pub enum DocsStorage { @@ -535,7 +533,6 @@ where .secret_key(self.secret_key.clone()) .proxy_from_env() .keylog(self.keylog) - .concurrent_connections(MAX_CONNECTIONS) .relay_mode(self.relay_mode); let endpoint = match discovery { Some(discovery) => endpoint.discovery(discovery), @@ -851,14 +848,15 @@ impl Default for GcPolicy { } const DEFAULT_RPC_PORT: u16 = 0x1337; -const MAX_RPC_CONNECTIONS: u32 = 16; const MAX_RPC_STREAMS: u32 = 1024; /// The default bind addr of the RPC . pub const DEFAULT_RPC_ADDR: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, DEFAULT_RPC_PORT)); -/// Makes a an RPC endpoint that uses a QUIC transport +/// Makes a an RPC endpoint that uses a QUIC transport. +/// +/// Note that this uses the Quinn version used by quic-rpc. fn make_rpc_endpoint( secret_key: &SecretKey, mut rpc_addr: SocketAddr, @@ -867,13 +865,12 @@ fn make_rpc_endpoint( transport_config .max_concurrent_bidi_streams(MAX_RPC_STREAMS.into()) .max_concurrent_uni_streams(0u32.into()); - let mut server_config = iroh_net::endpoint::make_server_config( + let server_config = iroh_net::endpoint::make_server_config( secret_key, vec![RPC_ALPN.to_vec()], Arc::new(transport_config), false, )?; - server_config.concurrent_connections(MAX_RPC_CONNECTIONS); let rpc_quinn_endpoint = quinn::Endpoint::server(server_config.clone(), rpc_addr); let rpc_quinn_endpoint = match rpc_quinn_endpoint { diff --git a/iroh/tests/provide.rs b/iroh/tests/provide.rs index 3769ba53fc..b912a8133a 100644 --- a/iroh/tests/provide.rs +++ b/iroh/tests/provide.rs @@ -239,8 +239,9 @@ where #[tokio::test] async fn test_server_close() { - // Prepare a Provider transferring a file. let _guard = iroh_test::logging::setup(); + + // Prepare a Provider transferring a file. 
let mut db = iroh_blobs::store::readonly_mem::Store::default(); let child_hash = db.insert(b"hello there"); let collection = Collection::from_iter([("hello", child_hash)]); @@ -297,7 +298,7 @@ async fn test_ipv6() { /// Simulate a node that has nothing #[tokio::test] async fn test_not_found() { - let _ = iroh_test::logging::setup(); + let _guard = iroh_test::logging::setup(); let db = iroh_blobs::store::readonly_mem::Store::default(); let hash = blake3::hash(b"hello").into(); @@ -337,7 +338,7 @@ async fn test_not_found() { /// Simulate a node that has just begun downloading a blob, but does not yet have any data #[tokio::test] async fn test_chunk_not_found_1() { - let _ = iroh_test::logging::setup(); + let _guard = iroh_test::logging::setup(); let db = iroh_blobs::store::mem::Store::new(); let data = (0..1024 * 64).map(|i| i as u8).collect::>(); @@ -378,6 +379,8 @@ async fn test_chunk_not_found_1() { #[tokio::test] async fn test_run_ticket() { + let _guard = iroh_test::logging::setup(); + let (db, hash) = create_test_db([("test", b"hello")]); let node = test_node(db).spawn().await.unwrap(); let _drop_guard = node.cancel_token().drop_guard(); @@ -428,6 +431,8 @@ async fn run_collection_get_request( #[tokio::test] async fn test_run_fsm() { + let _guard = iroh_test::logging::setup(); + let (db, hash) = create_test_db([("a", b"hello"), ("b", b"world")]); let node = test_node(db).spawn().await.unwrap(); let addrs = node.local_endpoint_addresses().await.unwrap(); @@ -474,6 +479,8 @@ fn make_test_data(n: usize) -> Vec { /// The verified last chunk also verifies the size. 
#[tokio::test] async fn test_size_request_blob() { + let _guard = iroh_test::logging::setup(); + let expected = make_test_data(1024 * 64 + 1234); let last_chunk = last_chunk(&expected); let (db, hashes) = iroh_blobs::store::readonly_mem::Store::new([("test", &expected)]); @@ -502,6 +509,8 @@ async fn test_size_request_blob() { #[tokio::test] async fn test_collection_stat() { + let _guard = iroh_test::logging::setup(); + let child1 = make_test_data(123456); let child2 = make_test_data(345678); let (db, hash) = create_test_db([("a", &child1), ("b", &child2)]); diff --git a/iroh/tests/sync.rs b/iroh/tests/sync.rs index 67638c6869..78d809351b 100644 --- a/iroh/tests/sync.rs +++ b/iroh/tests/sync.rs @@ -100,7 +100,7 @@ async fn sync_simple() -> Result<()> { let peer1 = nodes[1].node_id(); let doc1 = clients[1].docs().import(ticket.clone()).await?; let mut events1 = doc1.subscribe().await?; - info!("node1: assert 4 events"); + info!("node1: assert 5 events"); assert_next_unordered( &mut events1, TIMEOUT,