From ed935b7bca77faba183b7f7d53dfdd7090707f2b Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Thu, 29 Mar 2018 13:10:55 +0200 Subject: [PATCH] feat: validate new document writes against max_http_request_size The validation path is now the following: If a new doc body is > max_document_size, we throw an error. If a new attachment is > max_attachment_size, we throw an error. If the new doc body in combination with new and/or existing attachments is > max_http_request_size, we throw an error. This also sets the max_http_request_size to 4 GB, to restore 1.x and 2.0.x compatibility. Closes #1200 --- rel/overlay/etc/default.ini | 2 +- src/couch/src/couch_att.erl | 6 +- src/couch/src/couch_doc.erl | 15 +++++ src/couch/test/couch_doc_json_tests.erl | 88 ++++++++++++++++++++++++- 4 files changed, 107 insertions(+), 4 deletions(-) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index df438773543..e37cba123d7 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -135,7 +135,7 @@ enable_xframe_options = false ; x_forwarded_proto = X-Forwarded-Proto ; x_forwarded_ssl = X-Forwarded-Ssl ; Maximum allowed http request size. Applies to both clustered and local port. -max_http_request_size = 67108864 ; 64 MB +max_http_request_size = 4294967296 ; 4 GB ; [httpd_design_handlers] ; _view = diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl index 16edd66cea7..500ac220dbc 100644 --- a/src/couch/src/couch_att.erl +++ b/src/couch/src/couch_att.erl @@ -47,7 +47,8 @@ -export([ upgrade/1, - downgrade/1 + downgrade/1, + to_tuple/1 ]). -export([ @@ -708,6 +709,9 @@ upgrade(#att{} = Att) -> upgrade(Att) -> Att. +to_tuple(#att{name=Name, att_len=Len, type=Type, encoding=Encoding}) -> + {att, Name, Len, Type, Encoding}. + %% Downgrade is exposed for interactive convenience. In practice, unless done %% manually, upgrades are always one-way. 
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index f960ec5c2d9..b80354d5eb2 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -136,12 +136,27 @@ from_json_obj_validate(EJson, DbName) -> case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of true -> validate_attachment_sizes(Doc#doc.atts), + validate_total_document_size(Doc), Doc; false -> throw({request_entity_too_large, Doc#doc.id}) end. +% sum up the json body size + attachment body size and +% make sure it is < max_http_request_size +validate_total_document_size(#doc{id=DocId, body=Body, atts=Atts0}) -> + MaxReqSize = config:get_integer("httpd", "max_http_request_size", 4294967296), % 4 GB + Boundary = couch_uuids:random(), % mock boundary, is only used for the length + Atts = lists:map(fun couch_att:to_tuple/1, Atts0), + {_, DocSum} = couch_httpd_multipart:length_multipart_stream(Boundary, + ?JSON_ENCODE(Body), Atts), + case DocSum =< MaxReqSize of + true -> ok; + false -> throw({request_entity_too_large, DocId}) + end. + + validate_attachment_sizes([]) -> ok; validate_attachment_sizes(Atts) -> diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl index bcff0646a57..35d7fcb7b43 100644 --- a/src/couch/test/couch_doc_json_tests.erl +++ b/src/couch/test/couch_doc_json_tests.erl @@ -38,8 +38,11 @@ mock(couch_log) -> ok; mock(config) -> meck:new(config, [passthrough]), - meck:expect(config, get_integer, - fun("couchdb", "max_document_size", 4294967296) -> 1024 end), + meck:expect(config, get_integer, fun + ("couchdb", "max_document_size", 4294967296) -> 1024; + ("httpd", "max_http_request_size", 4294967296) -> 1024 + end), + meck:expect(config, get, fun(_, _) -> undefined end), meck:expect(config, get, fun(_, _, Default) -> Default end), ok. @@ -124,6 +127,44 @@ from_json_success_cases() -> ]}, "Attachments are parsed correctly." }, + % see if we count our bytes correctly. 
This doc should be *exactly* 1024 bytes + { + {[ + {<<"_attachments">>, {[ + {<<"big.xml">>, {[ + {<<"content_type">>, <<"xml/yay">>}, + {<<"revpos">>, 1}, + {<<"length">>, 319}, + {<<"stub">>, true} + ]}}, + {<<"big.json">>, {[ + {<<"content_type">>, <<"json/ftw">>}, + {<<"revpos">>, 1}, + {<<"length">>, 319}, + {<<"stub">>, true} + ]}} + ]}} + ]}, + #doc{atts = [ + couch_att:new([ + {name, <<"big.xml">>}, + {data, stub}, + {type, <<"xml/yay">>}, + {att_len, 319}, + {disk_len, 319}, + {revpos, 1} + ]), + couch_att:new([ + {name, <<"big.json">>}, + {data, stub}, + {type, <<"json/ftw">>}, + {att_len, 319}, + {disk_len, 319}, + {revpos, 1} + ]) + ]}, + "Document and attachments == max_http_request_size" + }, { {[{<<"_deleted">>, true}]}, #doc{deleted = true}, @@ -281,6 +322,50 @@ from_json_error_cases() -> end, {request_entity_too_large, <<"large_doc">>}, "Document too large." + }, + % doc json body and each attachment are small enough, but combined are > + % max_http_request_size + { + {[ + {<<"_id">>, <<"normal_doc_with_atts">>}, + {<<"_attachments">>, {[ + {<<"big.xml">>, {[ + {<<"content_type">>, <<"xml/yay">>}, + {<<"revpos">>, 1}, + {<<"length">>, 768}, + {<<"stub">>, true} + ]}}, + {<<"big.json">>, {[ + {<<"content_type">>, <<"json/ftw">>}, + {<<"revpos">>, 1}, + {<<"length">>, 768}, + {<<"stub">>, true} + ]}} + ]}} + ]}, + {request_entity_too_large, <<"normal_doc_with_atts">>}, + "Document too large because of attachments." + }, + % see if we count our bytes correctly. This doc should be *exactly* 1025 bytes + { + {[ + {<<"_attachments">>, {[ + {<<"big.xml">>, {[ + {<<"content_type">>, <<"xml/yay">>}, + {<<"revpos">>, 1}, + {<<"length">>, 320}, + {<<"stub">>, true} + ]}}, + {<<"big.json">>, {[ + {<<"content_type">>, <<"json/ftw">>}, + {<<"revpos">>, 1}, + {<<"length">>, 319}, + {<<"stub">>, true} + ]}} + ]}} + ]}, + {request_entity_too_large, <<"">>}, + "Document and attachments == max_http_request_size + 1" } ],