src: re-add `Realloc()` shrink after reading stream data
This would otherwise keep a lot of unused memory lying around, and in particular add up to a page per chunk of memory overhead for network reads, potentially opening a DoS vector if the resulting `Buffer` objects are kept around indefinitely (e.g. stored in a list and not concatenated until the socket finishes).

This fixes CVE-2018-7164.

Refs: https://github.com/nodejs-private/security/issues/186
Refs: 7c4b09b
PR-URL: https://github.com/nodejs-private/node-private/pull/129
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Evan Lucas <[email protected]>
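For context, the following is a minimal sketch (not part of this commit; server setup and variable names are illustrative) of the application pattern the commit message warns about: chunks received from a socket are kept in a list and only concatenated once the socket finishes, so any extra allocation pinned by each chunk stays resident for the lifetime of the connection.

'use strict';
// Sketch of the "stored in a list and not concatenated until the socket
// finishes" pattern described above. Before the Realloc() shrink, each
// small chunk could keep the much larger allocation made for the
// underlying libuv read call alive.
const net = require('net');

const server = net.createServer((socket) => {
  const chunks = [];
  // Every received chunk is retained indefinitely instead of being processed.
  socket.on('data', (chunk) => chunks.push(chunk));
  socket.on('end', () => {
    // Memory is only consolidated here, once the socket has finished.
    const message = Buffer.concat(chunks);
    console.log(`received ${message.length} bytes`);
    server.close();
  });
});

server.listen(0, () => {
  console.log(`listening on port ${server.address().port}`);
});

With the `Realloc()` shrink in place, each stored chunk retains memory proportional to its own length rather than to the size of the original read allocation.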
Showing 2 changed files with 43 additions and 1 deletion.
test/sequential/test-net-bytes-per-incoming-chunk-overhead.js (41 additions, 0 deletions)
@@ -0,0 +1,41 @@
// Flags: --expose-gc
'use strict';

const common = require('../common');
const assert = require('assert');
const net = require('net');

// Tests that, when receiving small chunks, we do not keep the full length
// of the original allocation for the libuv read call in memory.

let client;
let baseRSS;
const receivedChunks = [];
const N = 250000;

const server = net.createServer(common.mustCall((socket) => {
  baseRSS = process.memoryUsage().rss;

  socket.setNoDelay(true);
  socket.on('data', (chunk) => {
    receivedChunks.push(chunk);
    if (receivedChunks.length < N) {
      client.write('a');
    } else {
      client.end();
      server.close();
    }
  });
})).listen(0, common.mustCall(() => {
  client = net.connect(server.address().port);
  client.setNoDelay(true);
  client.write('hello!');
}));

process.on('exit', () => {
  global.gc();
  const bytesPerChunk =
    (process.memoryUsage().rss - baseRSS) / receivedChunks.length;
  // We should always have less than one page (usually ~ 4 kB) per chunk.
  assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`);
});
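For a rough sense of scale: with N = 250000 chunks, retaining a full ~4 KiB page per chunk (the overhead described in the commit message) would amount to roughly 1 GB of resident memory, while the asserted bound of 512 bytes per chunk keeps the measured overhead to about 128 MB at most.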