{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-deprecations #-}
module Network.Wai.Handler.Warp.Request (
FirstRequest(..),
recvRequest,
headerLines,
pauseTimeoutKey,
getFileInfoKey,
#ifdef MIN_VERSION_crypton_x509
getClientCertificateKey,
#endif
NoKeepAliveRequest (..),
) where

import qualified Control.Concurrent as Conc (yield)
import Data.Array ((!))
import qualified Data.ByteString as S
import qualified Data.ByteString.Unsafe as SU
import qualified Data.CaseInsensitive as CI
import qualified Data.IORef as I
import Data.Typeable (Typeable)
import qualified Data.Vault.Lazy as Vault
import Data.Word8 (_cr, _lf)
#ifdef MIN_VERSION_crypton_x509
import Data.X509
#endif
import Control.Exception (Exception, throwIO)
import qualified Network.HTTP.Types as H
import Network.Socket (SockAddr)
import Network.Wai
import Network.Wai.Handler.Warp.Types
import Network.Wai.Internal
import System.IO.Unsafe (unsafePerformIO)
import qualified System.TimeManager as Timeout
import Prelude hiding (lines)
import Network.Wai.Handler.Warp.Conduit
import Network.Wai.Handler.Warp.FileInfoCache
import Network.Wai.Handler.Warp.Header
import Network.Wai.Handler.Warp.Imports hiding (readInt)
import Network.Wai.Handler.Warp.ReadInt
import Network.Wai.Handler.Warp.RequestHeader
import Network.Wai.Handler.Warp.Settings (
Settings,
settingsMaxTotalHeaderLength,
settingsNoParsePath,
)

----------------------------------------------------------------
-- | Is this the first request on this connection?
data FirstRequest = FirstRequest | SubsequentRequest

-- | Receive an HTTP request from the 'Connection' and parse its header
-- to create a 'Request'.
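--
-- A minimal calling sketch; the settings, connection, timeout handle and
-- source values are assumed to be set up by the caller (as Warp's
-- connection loop does):
--
-- > (req, _mremaining, _idxhdr, _flushBody) <-
-- >     recvRequest FirstRequest settings conn ii th addr src transport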
recvRequest
:: FirstRequest
-> Settings
-> Connection
-> InternalInfo
-> Timeout.Handle
-> SockAddr
-- ^ Peer's address.
-> Source
    -- ^ Where the HTTP request comes from.
-> Transport
-> IO
( Request
, Maybe (I.IORef Int)
, IndexedHeader
, IO ByteString
)
    -- ^
    -- The 'Request' passed to the 'Application',
    -- how many bytes remain to be consumed (if known),
    -- the 'IndexedHeader' of the HTTP request for internal use, and
    -- a body-producing action used for flushing the request body.
recvRequest firstRequest settings conn ii th addr src transport = do
hdrlines <- headerLines (settingsMaxTotalHeaderLength settings) firstRequest src
(method, unparsedPath, path, query, httpversion, hdr) <-
parseHeaderLines hdrlines
let idxhdr = indexRequestHeader hdr
expect = idxhdr ! fromEnum ReqExpect
handle100Continue = handleExpect conn httpversion expect
(rbody, remainingRef, bodyLength) <- bodyAndSource src idxhdr
    -- Body-producing function which will send 100 Continue, if needed.
    rbody' <- timeoutBody remainingRef th rbody handle100Continue
    -- Body-producing function which will never send 100 Continue.
    rbodyFlush <- timeoutBody remainingRef th rbody (return ())
let rawPath = if settingsNoParsePath settings then unparsedPath else path
vaultValue =
Vault.insert pauseTimeoutKey (Timeout.pause th)
. Vault.insert getFileInfoKey (getFileInfo ii)
#ifdef MIN_VERSION_crypton_x509
. Vault.insert getClientCertificateKey (getTransportClientCertificate transport)
#endif
$ Vault.empty
req =
Request
{ requestMethod = method
, httpVersion = httpversion
, pathInfo = H.decodePathSegments path
, rawPathInfo = rawPath
, rawQueryString = query
, queryString = H.parseQuery query
, requestHeaders = hdr
, isSecure = isTransportSecure transport
, remoteHost = addr
, requestBody = rbody'
, vault = vaultValue
, requestBodyLength = bodyLength
, requestHeaderHost = idxhdr ! fromEnum ReqHost
, requestHeaderRange = idxhdr ! fromEnum ReqRange
, requestHeaderReferer = idxhdr ! fromEnum ReqReferer
, requestHeaderUserAgent = idxhdr ! fromEnum ReqUserAgent
}
return (req, remainingRef, idxhdr, rbodyFlush)
----------------------------------------------------------------
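-- | Read bytes from the 'Source' and split them into the request's header
-- lines, enforcing the given maximum total header length. Throws
-- 'ConnectionClosedByPeer' (first request) or 'NoKeepAliveRequest'
-- (subsequent requests) if the connection yields no data at all.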
headerLines :: Int -> FirstRequest -> Source -> IO [ByteString]
headerLines maxTotalHeaderLength firstRequest src = do
bs <- readSource src
if S.null bs
then -- When we're working on a keep-alive connection and trying to
-- get the second or later request, we don't want to treat the
-- lack of data as a real exception. See the http1 function in
-- the Run module for more details.
case firstRequest of
FirstRequest -> throwIO ConnectionClosedByPeer
SubsequentRequest -> throwIO NoKeepAliveRequest
else push maxTotalHeaderLength src (THStatus 0 0 id id) bs
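
-- | Thrown when a keep-alive connection delivers no further data, i.e. the
-- peer closed the connection instead of sending another request.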
data NoKeepAliveRequest = NoKeepAliveRequest
deriving (Show, Typeable)
instance Exception NoKeepAliveRequest
----------------------------------------------------------------
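-- | If the request carried an @Expect: 100-continue@ header, send an
-- interim 100 Continue response matching the request's HTTP version and
-- yield the scheduler.  Within this module it is only run when the
-- application first reads the request body (see 'timeoutBody').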
handleExpect
:: Connection
-> H.HttpVersion
-> Maybe HeaderValue
-> IO ()
handleExpect conn ver (Just "100-continue") = do
connSendAll conn continue
Conc.yield
where
continue
| ver == H.http11 = "HTTP/1.1 100 Continue\r\n\r\n"
| otherwise = "HTTP/1.0 100 Continue\r\n\r\n"
handleExpect _ _ _ = return ()
----------------------------------------------------------------
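-- | Pick the body reader for this request: for a chunked
-- @Transfer-Encoding@ the source is wrapped in a chunked decoder and the
-- body length is 'ChunkedBody'; otherwise exactly @Content-Length@ bytes
-- are read through an 'ISource', together with an 'I.IORef' tracking how
-- many bytes remain.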
bodyAndSource
:: Source
-> IndexedHeader
-> IO
( IO ByteString
, Maybe (I.IORef Int)
, RequestBodyLength
)
bodyAndSource src idxhdr
| chunked = do
csrc <- mkCSource src
return (readCSource csrc, Nothing, ChunkedBody)
| otherwise = do
let len = toLength $ idxhdr ! fromEnum ReqContentLength
bodyLen = KnownLength $ fromIntegral len
isrc@(ISource _ remaining) <- mkISource src len
return (readISource isrc, Just remaining, bodyLen)
where
chunked = isChunked $ idxhdr ! fromEnum ReqTransferEncoding
toLength :: Maybe HeaderValue -> Int
toLength Nothing = 0
toLength (Just bs) = readInt bs
isChunked :: Maybe HeaderValue -> Bool
isChunked (Just bs) = CI.foldCase bs == "chunked"
isChunked _ = False
----------------------------------------------------------------
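-- | Wrap a body-reading action with timeout bookkeeping: on the first read
-- the 100 Continue action is run and the slowloris timeout is resumed; once
-- the body has been fully consumed, the timeout is paused again.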
timeoutBody
:: Maybe (I.IORef Int)
-- ^ remaining
-> Timeout.Handle
-> IO ByteString
-> IO ()
-> IO (IO ByteString)
timeoutBody remainingRef timeoutHandle rbody handle100Continue = do
isFirstRef <- I.newIORef True
let checkEmpty =
case remainingRef of
Nothing -> return . S.null
Just ref -> \bs ->
if S.null bs
then return True
else do
x <- I.readIORef ref
return $! x <= 0
return $ do
isFirst <- I.readIORef isFirstRef
when isFirst $ do
-- Only check if we need to produce the 100 Continue status
-- when asking for the first chunk of the body
handle100Continue
-- Timeout handling was paused after receiving the full request
-- headers. Now we need to resume it to avoid a slowloris
-- attack during request body sending.
Timeout.resume timeoutHandle
I.writeIORef isFirstRef False
bs <- rbody
-- As soon as we finish receiving the request body, whether
-- because the application is not interested in more bytes, or
-- because there is no more data available, pause the timeout
-- handler again.
isEmpty <- checkEmpty bs
when isEmpty (Timeout.pause timeoutHandle)
return bs
----------------------------------------------------------------
type BSEndo = S.ByteString -> S.ByteString
type BSEndoList = [ByteString] -> [ByteString]
data THStatus
= THStatus
Int -- running total byte count (excluding current header chunk)
Int -- current header chunk byte count
BSEndoList -- previously parsed lines
BSEndo -- bytestrings to be prepended
----------------------------------------------------------------
{- FIXME
close :: Sink ByteString IO a
close = throwIO IncompleteHeaders
-}
-- | Assumes the 'ByteString' is never 'S.null'
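--
-- Splits the incoming bytes into header lines, pulling further chunks from
-- the 'Source' as needed and enforcing the maximum total header length,
-- until the blank line that terminates the header block is reached.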
push :: Int -> Source -> THStatus -> ByteString -> IO [ByteString]
push maxTotalHeaderLength src (THStatus totalLen chunkLen reqLines prepend) bs
-- Newline found at index 'ix'
| Just ix <- S.elemIndex _lf bs = do
-- Too many bytes
when (currentTotal > maxTotalHeaderLength) $ throwIO OverLargeHeader
newlineFound ix
-- No newline found
| otherwise = do
-- Early easy abort
when (currentTotal + bsLen > maxTotalHeaderLength) $ throwIO OverLargeHeader
withNewChunk noNewlineFound
where
bsLen = S.length bs
currentTotal = totalLen + chunkLen
{-# INLINE withNewChunk #-}
withNewChunk :: (S.ByteString -> IO a) -> IO a
withNewChunk f = do
newChunk <- readSource' src
when (S.null newChunk) $ throwIO IncompleteHeaders
f newChunk
{-# INLINE noNewlineFound #-}
noNewlineFound newChunk
-- The chunk split the CRLF in half
| SU.unsafeLast bs == _cr && S.head newChunk == _lf =
let bs' = SU.unsafeDrop 1 newChunk
in if bsLen == 1 && chunkLen == 0
-- first part is only CRLF, we're done
then do
when (not $ S.null bs') $ leftoverSource src bs'
pure $ reqLines []
else do
rest <- if S.null bs'
-- new chunk is only LF, we need more to check for multiline
then withNewChunk pure
else pure bs'
let status = addLine (bsLen + 1) (SU.unsafeTake (bsLen - 1) bs)
push maxTotalHeaderLength src status rest
-- chunk and keep going
| otherwise = do
let newChunkTotal = chunkLen + bsLen
newPrepend = prepend . (bs <>)
status = THStatus totalLen newChunkTotal reqLines newPrepend
push maxTotalHeaderLength src status newChunk
{-# INLINE newlineFound #-}
newlineFound ix
-- Is end of headers
| chunkLen == 0 && startsWithLF = do
let rest = SU.unsafeDrop end bs
when (not $ S.null rest) $ leftoverSource src rest
pure $ reqLines []
| otherwise = do
            -- Is the byte just before the LF a CR? If so, exclude it from the line.
let p = ix - 1
chunk =
if ix > 0 && SU.unsafeIndex bs p == _cr then p else ix
status = addLine end (SU.unsafeTake chunk bs)
continue = push maxTotalHeaderLength src status
if end == bsLen
then withNewChunk continue
else continue $ SU.unsafeDrop end bs
where
end = ix + 1
startsWithLF =
case ix of
0 -> True
1 -> SU.unsafeHead bs == _cr
_ -> False
-- addLine: take the current chunk and, if there's nothing to prepend,
-- add straight to 'reqLines', otherwise first prepend then add.
{-# INLINE addLine #-}
addLine len chunk =
let newTotal = currentTotal + len
newLine =
if chunkLen == 0 then chunk else prepend chunk
in THStatus newTotal 0 (reqLines . (newLine:)) id
{- HLint ignore push "Use unless" -}
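
-- | Vault key whose value pauses this connection's slowloris timeout.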
pauseTimeoutKey :: Vault.Key (IO ())
pauseTimeoutKey = unsafePerformIO Vault.newKey
{-# NOINLINE pauseTimeoutKey #-}
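
-- | Vault key whose value looks up (cached) file information via
-- 'getFileInfo'.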
getFileInfoKey :: Vault.Key (FilePath -> IO FileInfo)
getFileInfoKey = unsafePerformIO Vault.newKey
{-# NOINLINE getFileInfoKey #-}
#ifdef MIN_VERSION_crypton_x509
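-- | Vault key whose value is the client's certificate chain, if one was
-- presented during the TLS handshake.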
getClientCertificateKey :: Vault.Key (Maybe CertificateChain)
getClientCertificateKey = unsafePerformIO Vault.newKey
{-# NOINLINE getClientCertificateKey #-}
#endif