diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 5f77e7b5dba..9cbb01e15b5 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -144,7 +144,7 @@ enable_xframe_options = false ; x_forwarded_proto = X-Forwarded-Proto ; x_forwarded_ssl = X-Forwarded-Ssl ; Maximum allowed http request size. Applies to both clustered and local port. -max_http_request_size = 67108864 ; 64 MB +max_http_request_size = 4294967296 ; 4 GB ; [httpd_design_handlers] ; _view = diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl index 16edd66cea7..500ac220dbc 100644 --- a/src/couch/src/couch_att.erl +++ b/src/couch/src/couch_att.erl @@ -47,7 +47,8 @@ -export([ upgrade/1, - downgrade/1 + downgrade/1, + to_tuple/1 ]). -export([ @@ -708,6 +709,9 @@ upgrade(#att{} = Att) -> upgrade(Att) -> Att. +to_tuple(#att{name=Name, att_len=Len, type=Type, encoding=Encoding}) -> + {att, Name, Len, Type, Encoding}. + %% Downgrade is exposed for interactive convenience. In practice, unless done %% manually, upgrades are always one-way. diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index f960ec5c2d9..c55725a3e9b 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -133,15 +133,30 @@ from_json_obj_validate(EJson) -> from_json_obj_validate(EJson, DbName) -> MaxSize = config:get_integer("couchdb", "max_document_size", 4294967296), Doc = from_json_obj(EJson, DbName), - case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of + BodySize = couch_ejson_size:encoded_size(Doc#doc.body), + case BodySize =< MaxSize of true -> validate_attachment_sizes(Doc#doc.atts), + validate_total_document_size(Doc, BodySize), Doc; false -> throw({request_entity_too_large, Doc#doc.id}) end. 
+% sum up the json body size + attachment body size and +% make sure it is < max_http_request_size +validate_total_document_size(#doc{id=DocId, atts=Atts0}=Doc, BodySize) -> + MaxReqSize = config:get_integer("httpd", "max_http_request_size", 4294967296), % 4 GB + Atts = lists:map(fun couch_att:to_tuple/1, Atts0), + {_, DocSum} = couch_httpd_multipart:length_multipart_stream(32, BodySize, + Atts), + case DocSum =< MaxReqSize of + true -> ok; + false -> throw({request_entity_too_large, DocId}) + end. + + validate_attachment_sizes([]) -> ok; validate_attachment_sizes(Atts) -> @@ -421,7 +436,13 @@ merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) -> len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) -> AttsToInclude = lists:filter(fun(Att) -> not couch_att:is_stub(Att) end, Atts), AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts), - couch_httpd_multipart:length_multipart_stream(Boundary, JsonBytes, AttsDecoded). + case couch_httpd_multipart:length_multipart_stream(byte_size(Boundary), + iolist_size(JsonBytes), AttsDecoded) of + {json, Len} -> + {<<"application/json">>, Len}; + {multipart, Len} -> + {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>, Len} + end. doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun, diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl index 33795a3a1bf..e0b313e77d3 100644 --- a/src/couch/src/couch_httpd_multipart.erl +++ b/src/couch/src/couch_httpd_multipart.erl @@ -263,14 +263,15 @@ atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun, WriteFun(<<"\r\n--", Boundary/binary>>), atts_to_mp(RestAtts, Boundary, WriteFun, AttFun). 
-length_multipart_stream(Boundary, JsonBytes, Atts) -> +length_multipart_stream(BoundarySize, JsonByteSize, Atts) when + is_integer(BoundarySize), is_integer(JsonByteSize) -> AttsSize = lists:foldl(fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) -> AccAttsSize + 4 + % "\r\n\r\n" length(integer_to_list(Len)) + Len + 4 + % "\r\n--" - size(Boundary) + + BoundarySize + % attachment headers % (the length of the Content-Length has already been set) size(Name) + @@ -287,15 +288,15 @@ length_multipart_stream(Boundary, JsonBytes, Atts) -> end end, 0, Atts), if AttsSize == 0 -> - {<<"application/json">>, iolist_size(JsonBytes)}; + {json, JsonByteSize}; true -> - {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>, + {multipart, 2 + % "--" - size(Boundary) + + BoundarySize + 36 + % "\r\ncontent-type: application/json\r\n\r\n" - iolist_size(JsonBytes) + + JsonByteSize + 4 + % "\r\n--" - size(Boundary) + + BoundarySize + + AttsSize + 2 % "--" } diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl index bcff0646a57..484acdf3e8d 100644 --- a/src/couch/test/couch_doc_json_tests.erl +++ b/src/couch/test/couch_doc_json_tests.erl @@ -38,8 +38,11 @@ mock(couch_log) -> ok; mock(config) -> meck:new(config, [passthrough]), - meck:expect(config, get_integer, - fun("couchdb", "max_document_size", 4294967296) -> 1024 end), + meck:expect(config, get_integer, fun + ("couchdb", "max_document_size", 4294967296) -> 1024; + ("httpd", "max_http_request_size", 4294967296) -> 1024 + end), + meck:expect(config, get, fun(_, _) -> undefined end), meck:expect(config, get, fun(_, _, Default) -> Default end), ok. @@ -124,6 +127,44 @@ from_json_success_cases() -> ]}, "Attachments are parsed correctly." }, + % see if we count our bytes correctly. 
This doc should be *exactly* 1024 bytes + { + {[ + {<<"_attachments">>, {[ + {<<"big.xml">>, {[ + {<<"content_type">>, <<"xml/yay">>}, + {<<"revpos">>, 1}, + {<<"length">>, 319}, + {<<"stub">>, true} + ]}}, + {<<"big.json">>, {[ + {<<"content_type">>, <<"json/ftw">>}, + {<<"revpos">>, 1}, + {<<"length">>, 319}, + {<<"stub">>, true} + ]}} + ]}} + ]}, + #doc{atts = [ + couch_att:new([ + {name, <<"big.xml">>}, + {data, stub}, + {type, <<"xml/yay">>}, + {att_len, 319}, + {disk_len, 319}, + {revpos, 1} + ]), + couch_att:new([ + {name, <<"big.json">>}, + {data, stub}, + {type, <<"json/ftw">>}, + {att_len, 319}, + {disk_len, 319}, + {revpos, 1} + ]) + ]}, + "Document and attachments == max_http_request_size" + }, { {[{<<"_deleted">>, true}]}, #doc{deleted = true}, @@ -281,6 +322,49 @@ from_json_error_cases() -> end, {request_entity_too_large, <<"large_doc">>}, "Document too large." + }, + % doc json body and each attachment are small enough, but combined are > + % max_http_request_size + { + {[ + {<<"_id">>, <<"normal_doc_with_atts">>}, + {<<"_attachments">>, {[ + {<<"big.xml">>, {[ + {<<"content_type">>, <<"xml/yay">>}, + {<<"revpos">>, 1}, + {<<"length">>, 768}, + {<<"stub">>, true} + ]}}, + {<<"big.json">>, {[ + {<<"content_type">>, <<"json/ftw">>}, + {<<"revpos">>, 1}, + {<<"length">>, 768}, + {<<"stub">>, true} + ]}} + ]}} + ]}, + {request_entity_too_large, <<"normal_doc_with_atts">>}, + "Document too large because of attachments." + }, + % see if we count our bytes correctly. 
This doc should be *exactly* 1025 bytes
+    {
+        {[
+            {<<"_attachments">>, {[
+                {<<"big.xml">>, {[
+                    {<<"content_type">>, <<"xml/yay">>},
+                    {<<"revpos">>, 1},
+                    {<<"length">>, 320},
+                    {<<"stub">>, true}
+                ]}},
+                {<<"big.json">>, {[
+                    {<<"content_type">>, <<"json/ftw">>},
+                    {<<"revpos">>, 1},
+                    {<<"length">>, 319},
+                    {<<"stub">>, true}
+                ]}}
+            ]}}
+        ]},
+        {request_entity_too_large, <<>>},
+        "Document and attachments == max_http_request_size + 1"
     }
 ],
diff --git a/test/javascript/tests/attachments.js b/test/javascript/tests/attachments.js
index 2e831a731eb..bd446e9916c 100644
--- a/test/javascript/tests/attachments.js
+++ b/test/javascript/tests/attachments.js
@@ -291,7 +291,8 @@ couchTests.attachments= function(debug) {
     _attachments:{
       "foo.txt": {
         content_type:"text/plain",
-        data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+        data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=",
+        length: "This is a base64 encoded text".length
       }
     }
   };