From bf9cda02bd4cadcb50e444ec6efc10a7a43fd6e0 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Fri, 12 Nov 2021 12:47:30 +0800
Subject: [PATCH] btrfs: fix an out-of-bounds access in copy_compressed_data_to_page()

[BUG]
The following script can cause btrfs to crash:

  mount -o compress-force=lzo $DEV /mnt
  dd if=/dev/urandom of=/mnt/foo bs=4k count=1
  sync

The call trace looks like this:

  general protection fault, probably for non-canonical address 0xe04b37fccce3b000: 0000 [#1] PREEMPT SMP NOPTI
  CPU: 5 PID: 164 Comm: kworker/u20:3 Not tainted 5.15.0-rc7-custom+ #4
  Workqueue: btrfs-delalloc btrfs_work_helper [btrfs]
  RIP: 0010:__memcpy+0x12/0x20
  Call Trace:
   lzo_compress_pages+0x236/0x540 [btrfs]
   btrfs_compress_pages+0xaa/0xf0 [btrfs]
   compress_file_range+0x431/0x8e0 [btrfs]
   async_cow_start+0x12/0x30 [btrfs]
   btrfs_work_helper+0xf6/0x3e0 [btrfs]
   process_one_work+0x294/0x5d0
   worker_thread+0x55/0x3c0
   kthread+0x140/0x170
   ret_from_fork+0x22/0x30
  ---[ end trace 63c3c0f131e61982 ]---

[CAUSE]
In lzo_compress_pages(), the parameter @out_pages is not only an output
parameter (the number of compressed pages), but also an input parameter,
namely the upper limit of compressed pages we may use.

Commit d4088803f511 ("btrfs: subpage: make lzo_compress_pages()
compatible") refactored the function without taking @out_pages as an
input, thus completely ignoring the limit.

With compress-force we can hit incompressible data whose compressed size
exceeds that page limit, causing the above crash.

[FIX]
Save the initial @out_pages value as @max_nr_page in
lzo_compress_pages(), pass it down to copy_compressed_data_to_page(),
and check that we are still within the limit before accessing the pages.

Reported-by: Omar Sandoval
Fixes: d4088803f511 ("btrfs: subpage: make lzo_compress_pages() compatible")
Signed-off-by: Qu Wenruo
Reviewed-by: Omar Sandoval
Reviewed-by: Josef Bacik
Signed-off-by: David Sterba
---
 fs/btrfs/lzo.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 65cb0766e62d6..2fdfd09043136 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
 static int copy_compressed_data_to_page(char *compressed_data,
 					 size_t compressed_size,
 					 struct page **out_pages,
+					 unsigned long max_nr_page,
 					 u32 *cur_out,
 					 const u32 sectorsize)
 {
@@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
 	struct page *cur_page;
 	char *kaddr;
 
+	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+		return -E2BIG;
+
 	/*
 	 * We never allow a segment header crossing sector boundary, previous
 	 * run should ensure we have enough space left inside the sector.
@@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data,
 				     orig_out + compressed_size - *cur_out);
 
 		kunmap(cur_page);
+
+		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+			return -E2BIG;
+
 		cur_page = out_pages[*cur_out / PAGE_SIZE];
 		/* Allocate a new page */
 		if (!cur_page) {
@@ -202,6 +210,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
 	struct page *page_in = NULL;
+	const unsigned long max_nr_page = *out_pages;
 	char *sizes_ptr;
 	int ret = 0;
 	/* Points to the file offset of input data */
@@ -210,6 +219,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	u32 cur_out = 0;
 	u32 len = *total_out;
 
+	ASSERT(max_nr_page > 0);
 	*out_pages = 0;
 	*total_out = 0;
 	*total_in = 0;
@@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 		}
 
 		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
-						   pages, &cur_out, sectorsize);
+						   pages, max_nr_page,
+						   &cur_out, sectorsize);
 		if (ret < 0)
 			goto out;
 
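
Editor's note: to illustrate the in/out parameter convention described in
[CAUSE], here is a minimal userspace sketch, not kernel code and not part
of the patch. copy_to_pages(), MAX_PAGES and the static page array are
made-up names for illustration. The caller passes the page-array capacity
in, the copy helper checks the page index against that capacity before
every access and bails out with -E2BIG, which is the same pattern the
patch restores in copy_compressed_data_to_page().

/*
 * Minimal userspace sketch of the bounds check the patch adds.
 * copy_to_pages(), MAX_PAGES and the static page array are illustrative
 * only; they are not part of the btrfs code.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define MAX_PAGES	2UL		/* capacity handed in by the caller */

static char pages[MAX_PAGES][PAGE_SIZE];

static int copy_to_pages(const char *data, size_t len,
			 unsigned long max_nr_page, unsigned long *cur_out)
{
	while (len) {
		unsigned long index = *cur_out / PAGE_SIZE;
		unsigned long offset = *cur_out % PAGE_SIZE;
		size_t copy_len = PAGE_SIZE - offset;

		/* Same idea as the added "return -E2BIG" checks. */
		if (index >= max_nr_page)
			return -E2BIG;

		if (copy_len > len)
			copy_len = len;
		memcpy(&pages[index][offset], data, copy_len);
		data += copy_len;
		len -= copy_len;
		*cur_out += copy_len;
	}
	return 0;
}

int main(void)
{
	char input[3 * PAGE_SIZE];	/* more data than the pages can hold */
	unsigned long cur_out = 0;
	int ret;

	memset(input, 'x', sizeof(input));
	ret = copy_to_pages(input, sizeof(input), MAX_PAGES, &cur_out);
	printf("ret=%d, bytes copied=%lu\n", ret, cur_out);
	return 0;
}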