diff --git a/.gitignore b/.gitignore index 46bf68e8..9422e6ec 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ target Cargo.lock +fuzz/hfuzz_workspace/ +fuzz/hfuzz_target/ diff --git a/fuzz/.gitignore b/fuzz/.gitignore new file mode 100644 index 00000000..572e03bd --- /dev/null +++ b/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml new file mode 100644 index 00000000..c5b0d03b --- /dev/null +++ b/fuzz/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "jsonrpc-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[features] +honggfuzz_fuzz = ["honggfuzz"] + +[dependencies] +honggfuzz = { version = "0.5", optional = true, default-features = false } +jsonrpc = { path = ".." } + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "simple_http" +path = "fuzz_targets/simple_http.rs" + diff --git a/fuzz/fuzz_targets/simple_http.rs b/fuzz/fuzz_targets/simple_http.rs new file mode 100644 index 00000000..53812b0c --- /dev/null +++ b/fuzz/fuzz_targets/simple_http.rs @@ -0,0 +1,60 @@ + +extern crate jsonrpc; + +#[cfg(not(fuzzing))] +compile_error!("You must set RUSTFLAGS=--cfg=fuzzing to run these test, or run the actual fuzz harness."); + +use jsonrpc::Client; +use jsonrpc::simple_http::SimpleHttpTransport; +use jsonrpc::simple_http::FUZZ_TCP_SOCK; + +use std::io; + +fn do_test(data: &[u8]) { + *FUZZ_TCP_SOCK.lock().unwrap() = Some(io::Cursor::new(data.to_vec())); + + let t = SimpleHttpTransport::builder() + .url("localhost:123").expect("parse url") + .auth("", None) + .build(); + + let client = Client::with_transport(t); + let request = client.build_request("uptime", &[]); + let _ = client.send_request(request); +} + +#[cfg(feature = "honggfuzz")] +fn main() { + loop { + honggfuzz::fuzz!(|data| { + do_test(data); + }); + } +} + +#[cfg(test)] +mod tests { + fn extend_vec_from_hex(hex: &str) -> Vec<u8>
{ + let mut out = vec![]; + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => panic!("Bad hex"), + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + out + } + + #[test] + fn duplicate_crash() { + super::do_test(&extend_vec_from_hex("00")); + } +} diff --git a/fuzz/travis-fuzz.sh b/fuzz/travis-fuzz.sh new file mode 100755 index 00000000..71bff44a --- /dev/null +++ b/fuzz/travis-fuzz.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -e + +# Check that input files are correct Windows file names +incorrectFilenames=$(find . -type f -name "*,*" -o -name "*:*" -o -name "*<*" -o -name "*>*" -o -name "*|*" -o -name "*\?*" -o -name "*\**" -o -name "*\"*" | wc -l) + +if [ ${incorrectFilenames} -gt 0 ]; then + echo 'Exiting early due to Windows-incompatible filenames in this tree. If this is happening' + echo 'to you on a local run, deleting the `hfuzz_workspace/` directory will probably fix it.' 
+ exit 2 +fi + +if [ "$1" == "" ]; then + TARGETS=fuzz_targets/* +else + TARGETS=fuzz_targets/"$1".rs +fi + +cargo --version +rustc --version + +# Testing +cargo install --force honggfuzz --no-default-features +for TARGET in $TARGETS; do + echo "Fuzzing target $TARGET" + FILENAME=$(basename $TARGET) + FILE="${FILENAME%.*}" + if [ -d hfuzz_input/$FILE ]; then + HFUZZ_INPUT_ARGS="-f hfuzz_input/$FILE/input" + fi + HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" HFUZZ_RUN_ARGS="--run_time 30 --exit_upon_crash -v $HFUZZ_INPUT_ARGS" cargo hfuzz run $FILE + + if [ -f hfuzz_workspace/$FILE/HONGGFUZZ.REPORT.TXT ]; then + cat hfuzz_workspace/$FILE/HONGGFUZZ.REPORT.TXT + for CASE in hfuzz_workspace/$FILE/SIG*; do + cat $CASE | xxd -p -c1000 + done + exit 1 + fi +done diff --git a/src/simple_http.rs b/src/simple_http.rs index 87b09783..d6d9a5a6 100644 --- a/src/simple_http.rs +++ b/src/simple_http.rs @@ -5,12 +5,13 @@ #[cfg(feature = "proxy")] use socks::Socks5Stream; -use std::io::{BufRead, BufReader, Read, Write}; +use std::io::{BufRead, BufReader, BufWriter, Read, Write}; +#[cfg(not(fuzzing))] use std::net::TcpStream; use std::net::{SocketAddr, ToSocketAddrs}; -use std::sync::{Arc, Mutex}; -use std::time::{Duration, Instant}; -use std::{error, fmt, io, net, thread}; +use std::sync::{Arc, Mutex, MutexGuard}; +use std::time::Duration; +use std::{error, fmt, io, net, num}; use base64; use serde; @@ -19,6 +20,43 @@ use serde_json; use crate::client::Transport; use crate::{Request, Response}; +#[cfg(fuzzing)] +/// Global mutex used by the fuzzing harness to inject data into the read +/// end of the TCP stream. 
+pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None); + +#[cfg(fuzzing)] +#[derive(Clone, Debug)] +struct TcpStream; + +#[cfg(fuzzing)] +mod impls { + use super::*; + impl Read for TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + match *FUZZ_TCP_SOCK.lock().unwrap() { + Some(ref mut cursor) => io::Read::read(cursor, buf), + None => Ok(0), + } + } + } + impl Write for TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + io::sink().write(buf) + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + + impl TcpStream { + pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> { Ok(TcpStream) } + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { Ok(()) } + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { Ok(()) } + } +} + + /// The default TCP port to use for connections. /// Set to 8332, the default RPC port for bitcoind. pub const DEFAULT_PORT: u16 = 8332; @@ -26,6 +64,9 @@ pub const DEFAULT_PORT: u16 = 8332; /// The Default SOCKS5 Port to use for proxy connection. pub const DEFAULT_PROXY_PORT: u16 = 9050; +/// Absolute maximum content length we will allow before cutting off the response +const FINAL_RESP_ALLOC: u64 = 1024 * 1024 * 1024; + /// Simple HTTP transport that implements the necessary subset of HTTP for /// running a bitcoind RPC client.
#[derive(Clone, Debug)] @@ -39,7 +80,7 @@ pub struct SimpleHttpTransport { proxy_addr: net::SocketAddr, #[cfg(feature = "proxy")] proxy_auth: Option<(String, String)>, - sock: Arc<Mutex<Option<TcpStream>>>, + sock: Arc<Mutex<Option<BufReader<TcpStream>>>>, } impl Default for SimpleHttpTransport { @@ -50,6 +91,9 @@ impl Default for SimpleHttpTransport { DEFAULT_PORT, ), path: "/".to_owned(), + #[cfg(fuzzing)] + timeout: Duration::from_millis(1), + #[cfg(not(fuzzing))] timeout: Duration::from_secs(15), basic_auth: None, #[cfg(feature = "proxy")] @@ -79,13 +123,11 @@ impl SimpleHttpTransport { where R: for<'a> serde::de::Deserialize<'a>, { - // `try_request` should not panic, so the mutex shouldn't be poisoned - // and unwrapping should be safe - let mut sock = self.sock.lock().expect("poisoned mutex"); - match self.try_request(req, &mut sock) { + match self.try_request(req) { Ok(response) => Ok(response), Err(err) => { - *sock = None; + // No part of this codebase should panic, so unwrapping a mutex lock is fine + *self.sock.lock().expect("poisoned mutex") = None; Err(err) } } @@ -94,15 +136,14 @@ impl SimpleHttpTransport { fn try_request<R>( &self, req: impl serde::Serialize, - sock: &mut Option<TcpStream>, ) -> Result<R, Error> where R: for<'a> serde::de::Deserialize<'a>, { - // Open connection - let request_deadline = Instant::now() + self.timeout; - if sock.is_none() { - *sock = Some({ + // No part of this codebase should panic, so unwrapping a mutex lock is fine + let mut sock_lock: MutexGuard<Option<_>> = self.sock.lock().expect("poisoned mutex"); + if sock_lock.is_none() { + *sock_lock = Some(BufReader::new({ #[cfg(feature = "proxy")] { if let Some((username, password)) = &self.proxy_auth { @@ -125,61 +166,77 @@ impl SimpleHttpTransport { stream.set_write_timeout(Some(self.timeout))?; stream } - }) + })); }; - let sock = sock.as_mut().unwrap(); + // In the immediately preceding block, we made sure that `sock` is non-`None`, + // so unwrapping here is fine.
+ let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap(); // Serialize the body first so we can set the Content-Length header. let body = serde_json::to_vec(&req)?; // Send HTTP request - sock.write_all(b"POST ")?; - sock.write_all(self.path.as_bytes())?; - sock.write_all(b" HTTP/1.1\r\n")?; - // Write headers - sock.write_all(b"Content-Type: application/json\r\n")?; - sock.write_all(b"Content-Length: ")?; - sock.write_all(body.len().to_string().as_bytes())?; - sock.write_all(b"\r\n")?; - if let Some(ref auth) = self.basic_auth { - sock.write_all(b"Authorization: ")?; - sock.write_all(auth.as_ref())?; + { + let mut sock = BufWriter::new(sock.get_ref()); + sock.write_all(b"POST ")?; + sock.write_all(self.path.as_bytes())?; + sock.write_all(b" HTTP/1.1\r\n")?; + // Write headers + sock.write_all(b"Content-Type: application/json\r\n")?; + sock.write_all(b"Content-Length: ")?; + sock.write_all(body.len().to_string().as_bytes())?; + sock.write_all(b"\r\n")?; + if let Some(ref auth) = self.basic_auth { + sock.write_all(b"Authorization: ")?; + sock.write_all(auth.as_ref())?; + sock.write_all(b"\r\n")?; + } + // Write body sock.write_all(b"\r\n")?; + sock.write_all(&body)?; + sock.flush()?; + } - // Write body - sock.write_all(b"\r\n")?; - sock.write_all(&body)?; - sock.flush()?; - - // Receive response - let mut reader = BufReader::new(sock); // Parse first HTTP response header line - let http_response = get_line(&mut reader, request_deadline)?; - if http_response.len() < 12 || !http_response.starts_with("HTTP/1.1 ") { - return Err(Error::HttpParseError); + let mut header_buf = String::new(); + sock.read_line(&mut header_buf)?; + if header_buf.len() < 12 { + return Err(Error::HttpResponseTooShort { actual: header_buf.len(), needed: 12 }); + } + if !header_buf.as_bytes()[..12].is_ascii() { + return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec())); } - let response_code = match http_response[9..12].parse::<u16>() { + if
!header_buf.starts_with("HTTP/1.1 ") { + return Err(Error::HttpResponseBadHello { + actual: header_buf[0..9].into(), + expected: "HTTP/1.1 ".into(), + }); + } + let response_code = match header_buf[9..12].parse::<u16>() { Ok(n) => n, - Err(_) => return Err(Error::HttpParseError), + Err(e) => return Err(Error::HttpResponseBadStatus( + header_buf[9..12].into(), + e, + )), }; // Parse response header fields let mut content_length = None; loop { - let line = get_line(&mut reader, request_deadline)?; - - if line == "\r\n" { + header_buf.clear(); + sock.read_line(&mut header_buf)?; + if header_buf == "\r\n" { break; } + header_buf.make_ascii_lowercase(); const CONTENT_LENGTH: &str = "content-length: "; - if line.to_lowercase().starts_with(CONTENT_LENGTH) { + if header_buf.starts_with(CONTENT_LENGTH) { content_length = Some( - line[CONTENT_LENGTH.len()..] + header_buf[CONTENT_LENGTH.len()..] .trim() - .parse::<usize>() - .map_err(|_| Error::HttpParseError)?, + .parse::<u64>() + .map_err(|e| Error::HttpResponseBadContentLength(header_buf[CONTENT_LENGTH.len()..].into(), e))? ); } } @@ -189,16 +246,32 @@ return Err(Error::HttpErrorCode(response_code)); } - let content_length = content_length.ok_or(Error::HttpParseError)?; - - let mut buffer = vec![0; content_length]; + // Read up to `content_length` bytes. Note that if there is no content-length + // header, we will assume an effectively infinite content length, i.e. we will + // just keep reading from the socket until it is closed. + let mut reader = match content_length { + None => sock.take(FINAL_RESP_ALLOC), + Some(n) if n > FINAL_RESP_ALLOC => { + return Err(Error::HttpResponseContentLengthTooLarge { + length: n, + max: FINAL_RESP_ALLOC, + }); + }, + Some(n) => sock.take(n), + }; - // Even if it's != 200, we parse the response as we may get a JSONRPC error instead - // of the less meaningful HTTP error code.
- reader.read_exact(&mut buffer)?; - match serde_json::from_slice(&buffer) { - Ok(s) => Ok(s), + // Attempt to parse the response. Don't check the HTTP error code until + // after parsing, since Bitcoin Core will often return a descriptive JSON + // error structure which is more useful than the error code. + match serde_json::from_reader(&mut reader) { + Ok(s) => { + if content_length.is_some() { + reader.bytes().count(); // consume any trailing bytes + } + Ok(s) + } Err(e) => { + // If the response was not 200, assume the parse failed because of that if response_code != 200 { Err(Error::HttpErrorCode(response_code)) } else { @@ -222,12 +295,43 @@ pub enum Error { }, /// An error occurred on the socket layer. SocketError(io::Error), - /// The HTTP header of the response couldn't be parsed. - HttpParseError, + /// The HTTP response was too short to even fit a HTTP 1.1 header + HttpResponseTooShort { + /// The total length of the response + actual: usize, + /// Minimum length we can parse + needed: usize, + }, + /// The HTTP response started with a HTTP/1.1 line which was not ASCII + HttpResponseNonAsciiHello(Vec<u8>), + /// The HTTP response did not start with HTTP/1.1 + HttpResponseBadHello { + /// Actual HTTP-whatever string + actual: String, + /// The hello string of the HTTP version we support + expected: String, + }, + /// Could not parse the status value as a number + HttpResponseBadStatus(String, num::ParseIntError), + /// Could not parse the content-length value as a number + HttpResponseBadContentLength(String, num::ParseIntError), + /// The indicated content-length header exceeded our maximum + HttpResponseContentLengthTooLarge { + /// The length indicated in the content-length header + length: u64, + /// Our hard maximum on number of bytes we'll try to read + max: u64, + }, /// Unexpected HTTP error code (non-200). HttpErrorCode(u16), - /// We didn't receive a complete response till the deadline ran out.
- Timeout, + /// Received EOF before getting as many bytes as were indicated by the + /// content-length header + IncompleteResponse { + /// The content-length header + content_length: u64, + /// The number of bytes we actually read + n_read: u64, + }, /// JSON parsing error. Json(serde_json::Error), } @@ -250,9 +354,28 @@ impl fmt::Display for Error { ref reason, } => write!(f, "invalid URL '{}': {}", url, reason), Error::SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e), - Error::HttpParseError => f.write_str("Couldn't parse response header."), + Error::HttpResponseTooShort { ref actual, ref needed } => { + write!(f, "HTTP response too short: length {}, needed {}.", actual, needed) + }, + Error::HttpResponseNonAsciiHello(ref bytes) => { + write!(f, "HTTP response started with non-ASCII {:?}", bytes) + }, + Error::HttpResponseBadHello { ref actual, ref expected } => { + write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected) + }, + Error::HttpResponseBadStatus(ref status, ref err) => { + write!(f, "HTTP response had bad status code `{}`: {}.", status, err) + }, + Error::HttpResponseBadContentLength(ref len, ref err) => { + write!(f, "HTTP response had bad content length `{}`: {}.", len, err) + }, + Error::HttpResponseContentLengthTooLarge { length, max } => { + write!(f, "HTTP response content length {} exceeds our max {}.", length, max) + }, Error::HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c), - Error::Timeout => f.write_str("Didn't receive response data in time, timed out."), + Error::IncompleteResponse { content_length, n_read } => { + write!(f, "Read {} bytes but HTTP response content-length header was {}.", n_read, content_length) + }, Error::Json(ref e) => write!(f, "JSON error: {}", e), } } @@ -266,9 +389,14 @@ impl error::Error for Error { InvalidUrl { .. } - | HttpParseError + | HttpResponseTooShort { .. } + | HttpResponseNonAsciiHello(..) + | HttpResponseBadHello { .. } + | HttpResponseBadStatus(..) 
+ | HttpResponseBadContentLength(..) + | HttpResponseContentLengthTooLarge { .. } | HttpErrorCode(_) - | Timeout => None, + | IncompleteResponse { .. } => None, SocketError(ref e) => Some(e), Json(ref e) => Some(e), } @@ -296,23 +424,6 @@ impl From<Error> for crate::Error { } } -/// Tries to read a line from a buffered reader. If no line can be read till the deadline is reached -/// returns a timeout error. -fn get_line<R: BufRead>(reader: &mut R, deadline: Instant) -> Result<String, Error> { - let mut line = String::new(); - while deadline > Instant::now() { - match reader.read_line(&mut line) { - // EOF reached for now, try again later - Ok(0) => thread::sleep(Duration::from_millis(5)), - // received useful data, return it - Ok(_) => return Ok(line), - // io error occurred, abort - Err(e) => return Err(Error::SocketError(e)), - } - } - Err(Error::Timeout) -} - /// Does some very basic manual URL parsing because the uri/url crates /// all have unicode-normalization as a dependency and that's broken. fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {