From 4e9ac109b49d84167db5aed8deb504856fbfeac8 Mon Sep 17 00:00:00 2001
From: Anna Henningsen
Date: Mon, 26 Feb 2018 15:23:25 +0100
Subject: [PATCH] http2: no stream destroy while its data is on the wire
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This fixes a crash that occurred when a `Http2Stream` write is
completed after it is already destroyed. Instead, don’t destroy the
stream in that case and wait for GC to take over.

Backport-PR-URL: https://github.com/nodejs/node/pull/19185
PR-URL: https://github.com/nodejs/node/pull/19002
Fixes: https://github.com/nodejs/node/issues/18973
Reviewed-By: James M Snell
Reviewed-By: Anatoli Papirovski
---
 src/node_http2.cc                             | 20 ++++--
 src/node_http2.h                              |  3 +
 ...tp2-write-finishes-after-stream-destroy.js | 62 +++++++++++++++++++
 3 files changed, 80 insertions(+), 5 deletions(-)
 create mode 100644 test/parallel/test-http2-write-finishes-after-stream-destroy.js

diff --git a/src/node_http2.cc b/src/node_http2.cc
index f1454aa11e58e9..7650969f8639ce 100644
--- a/src/node_http2.cc
+++ b/src/node_http2.cc
@@ -1700,6 +1700,14 @@ void Http2Session::OnStreamRead(ssize_t nread, const uv_buf_t& buf) {
   stream_buf_ = uv_buf_init(nullptr, 0);
 }
 
+bool Http2Session::HasWritesOnSocketForStream(Http2Stream* stream) {
+  for (const nghttp2_stream_write& wr : outgoing_buffers_) {
+    if (wr.req_wrap != nullptr && wr.req_wrap->stream() == stream)
+      return true;
+  }
+  return false;
+}
+
 // Every Http2Session session is tightly bound to a single i/o StreamBase
 // (typically a net.Socket or tls.TLSSocket). The lifecycle of the two is
 // tightly coupled with all data transfer between the two happening at the
@@ -1753,13 +1761,11 @@ Http2Stream::Http2Stream(
 
 
 Http2Stream::~Http2Stream() {
+  DEBUG_HTTP2STREAM(this, "tearing down stream");
   if (session_ != nullptr) {
     session_->RemoveStream(this);
     session_ = nullptr;
   }
-
-  if (!object().IsEmpty())
-    ClearWrap(object());
 }
 
 // Notify the Http2Stream that a new block of HEADERS is being processed.
@@ -1837,7 +1843,7 @@ inline void Http2Stream::Destroy() {
     Http2Stream* stream = static_cast<Http2Stream*>(data);
     // Free any remaining outgoing data chunks here. This should be done
     // here because it's possible for destroy to have been called while
-    // we still have qeueued outbound writes.
+    // we still have queued outbound writes.
     while (!stream->queue_.empty()) {
       nghttp2_stream_write& head = stream->queue_.front();
       if (head.req_wrap != nullptr)
@@ -1845,7 +1851,11 @@ inline void Http2Stream::Destroy() {
       stream->queue_.pop();
     }
 
-    delete stream;
+    // We can destroy the stream now if there are no writes for it
+    // already on the socket. Otherwise, we'll wait for the garbage collector
+    // to take care of cleaning up.
+    if (!stream->session()->HasWritesOnSocketForStream(stream))
+      delete stream;
   }, this, this->object());
 
   statistics_.end_time = uv_hrtime();
diff --git a/src/node_http2.h b/src/node_http2.h
index 217c19c09287af..8f6662a0160bec 100644
--- a/src/node_http2.h
+++ b/src/node_http2.h
@@ -878,6 +878,9 @@ class Http2Session : public AsyncWrap, public StreamListener {
   // Removes a stream instance from this session
   inline void RemoveStream(Http2Stream* stream);
 
+  // Indicates whether there currently exist outgoing buffers for this stream.
+  bool HasWritesOnSocketForStream(Http2Stream* stream);
+
   // Write data to the session
   inline ssize_t Write(const uv_buf_t* bufs, size_t nbufs);
 
diff --git a/test/parallel/test-http2-write-finishes-after-stream-destroy.js b/test/parallel/test-http2-write-finishes-after-stream-destroy.js
new file mode 100644
index 00000000000000..3b2dd4bcd4e548
--- /dev/null
+++ b/test/parallel/test-http2-write-finishes-after-stream-destroy.js
@@ -0,0 +1,62 @@
+// Flags: --expose-gc
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const http2 = require('http2');
+const makeDuplexPair = require('../common/duplexpair');
+
+// Make sure the Http2Stream destructor works, since we don't clean the
+// stream up like we would otherwise do.
+process.on('exit', global.gc);
+
+{
+  const { clientSide, serverSide } = makeDuplexPair();
+
+  let serverSideHttp2Stream;
+  let serverSideHttp2StreamDestroyed = false;
+  const server = http2.createServer();
+  server.on('stream', common.mustCall((stream, headers) => {
+    serverSideHttp2Stream = stream;
+    stream.respond({
+      'content-type': 'text/html',
+      ':status': 200
+    });
+
+    const originalWrite = serverSide._write;
+    serverSide._write = (buf, enc, cb) => {
+      if (serverSideHttp2StreamDestroyed) {
+        serverSide.destroy();
+        serverSide.write = () => {};
+      } else {
+        setImmediate(() => {
+          originalWrite.call(serverSide, buf, enc, () => setImmediate(cb));
+        });
+      }
+    };
+
+    // Enough data to fit into a single *session* window,
+    // not enough data to fit into a single *stream* window.
+    stream.write(Buffer.alloc(40000));
+  }));
+
+  server.emit('connection', serverSide);
+
+  const client = http2.connect('http://localhost:80', {
+    createConnection: common.mustCall(() => clientSide)
+  });
+
+  const req = client.request({ ':path': '/' });
+
+  req.on('response', common.mustCall((headers) => {
+    assert.strictEqual(headers[':status'], 200);
+  }));
+
+  req.on('data', common.mustCallAtLeast(() => {
+    if (!serverSideHttp2StreamDestroyed) {
+      serverSideHttp2Stream.destroy();
+      serverSideHttp2StreamDestroyed = true;
+    }
+  }));
+}
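
The core idea of the patch: before freeing an Http2Stream, ask the session whether any write already handed to the socket still references that stream, and skip deletion if so, leaving cleanup to GC as the commit message describes. The following is a minimal standalone C++ sketch of that pattern; PendingWrite, HasWritesOnSocketFor and MaybeDestroy are illustrative, hypothetical names, not Node.js internals.

// A minimal standalone sketch (assumed types, not Node.js internals) of the
// pattern applied by the patch: scan the writes still queued on the socket
// and skip deleting the stream while any of them references it.
#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

struct Stream;

// Hypothetical stand-in for a queued socket write that keeps a back-pointer
// to the stream whose data it carries (as req_wrap->stream() does in the patch).
struct PendingWrite {
  Stream* stream;
};

struct Stream {
  bool destroy_requested = false;
};

// In the spirit of Http2Session::HasWritesOnSocketForStream(): true if any
// queued write still refers to `stream`.
bool HasWritesOnSocketFor(const std::vector<PendingWrite>& outgoing,
                          const Stream* stream) {
  return std::any_of(outgoing.begin(), outgoing.end(),
                     [&](const PendingWrite& w) { return w.stream == stream; });
}

// Delete the stream only when no queued write still points at it; otherwise
// defer and let the stream's owner clean it up later (GC in Node's case).
void MaybeDestroy(std::unique_ptr<Stream>& stream,
                  const std::vector<PendingWrite>& outgoing) {
  if (HasWritesOnSocketFor(outgoing, stream.get())) {
    stream->destroy_requested = true;  // data is still on the wire: defer
    std::printf("destruction deferred\n");
  } else {
    stream.reset();  // safe to free now
    std::printf("destroyed\n");
  }
}

int main() {
  auto stream = std::make_unique<Stream>();
  std::vector<PendingWrite> outgoing{{stream.get()}};  // one write in flight

  MaybeDestroy(stream, outgoing);  // deferred: the write references the stream
  outgoing.clear();                // the socket write has completed
  MaybeDestroy(stream, outgoing);  // now actually freed
}

In the actual patch no explicit flag is needed: when writes are still on the socket, `delete stream` is simply skipped and the native object is reclaimed once the garbage collector takes over.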