Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: erofs: compressed_pages should not be accessed again after freed

This patch resolves the following page use-after-free issue,
z_erofs_vle_unzip:
...
for (i = 0; i < nr_pages; ++i) {
...
z_erofs_onlinepage_endio(page); (1)
}

for (i = 0; i < clusterpages; ++i) {
page = compressed_pages[i];

if (page->mapping == mngda) (2)
continue;
/* recycle all individual staging pages */
(void)z_erofs_gather_if_stagingpage(page_pool, page); (3)
WRITE_ONCE(compressed_pages[i], NULL);
}
...

After (1) is executed, the page is freed and could then be reused; if
compressed_pages is scanned after that, it could fall into (2) or
(3) by mistake, and that could finally end up in a mess.

This patch aims to solve the above issue with as few changes as
possible in order to make the fix easier to backport.

Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
Cc: <stable@vger.kernel.org> # 4.19+
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Gao Xiang and committed by
Greg Kroah-Hartman
af692e11 11f27765

+30 -32
+21 -19
drivers/staging/erofs/unzip_vle.c
··· 986 986 if (llen > grp->llen) 987 987 llen = grp->llen; 988 988 989 - err = z_erofs_vle_unzip_fast_percpu(compressed_pages, 990 - clusterpages, pages, llen, work->pageofs, 991 - z_erofs_onlinepage_endio); 989 + err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages, 990 + pages, llen, work->pageofs); 992 991 if (err != -ENOTSUPP) 993 - goto out_percpu; 992 + goto out; 994 993 995 994 if (sparsemem_pages >= nr_pages) 996 995 goto skip_allocpage; ··· 1010 1011 erofs_vunmap(vout, nr_pages); 1011 1012 1012 1013 out: 1013 - for (i = 0; i < nr_pages; ++i) { 1014 - page = pages[i]; 1015 - DBG_BUGON(!page->mapping); 1016 - 1017 - /* recycle all individual staging pages */ 1018 - if (z_erofs_gather_if_stagingpage(page_pool, page)) 1019 - continue; 1020 - 1021 - if (unlikely(err < 0)) 1022 - SetPageError(page); 1023 - 1024 - z_erofs_onlinepage_endio(page); 1025 - } 1026 - 1027 - out_percpu: 1014 + /* must handle all compressed pages before endding pages */ 1028 1015 for (i = 0; i < clusterpages; ++i) { 1029 1016 page = compressed_pages[i]; 1030 1017 ··· 1022 1037 (void)z_erofs_gather_if_stagingpage(page_pool, page); 1023 1038 1024 1039 WRITE_ONCE(compressed_pages[i], NULL); 1040 + } 1041 + 1042 + for (i = 0; i < nr_pages; ++i) { 1043 + page = pages[i]; 1044 + if (!page) 1045 + continue; 1046 + 1047 + DBG_BUGON(!page->mapping); 1048 + 1049 + /* recycle all individual staging pages */ 1050 + if (z_erofs_gather_if_stagingpage(page_pool, page)) 1051 + continue; 1052 + 1053 + if (unlikely(err < 0)) 1054 + SetPageError(page); 1055 + 1056 + z_erofs_onlinepage_endio(page); 1025 1057 } 1026 1058 1027 1059 if (pages == z_pagemap_global)
+1 -2
drivers/staging/erofs/unzip_vle.h
··· 218 218 int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, 219 219 unsigned int clusterpages, 220 220 struct page **pages, unsigned int outlen, 221 - unsigned short pageofs, 222 - void (*endio)(struct page *)); 221 + unsigned short pageofs); 223 222 int z_erofs_vle_unzip_vmap(struct page **compressed_pages, 224 223 unsigned int clusterpages, 225 224 void *vaddr, unsigned int llen,
+8 -11
drivers/staging/erofs/unzip_vle_lz4.c
··· 125 125 unsigned int clusterpages, 126 126 struct page **pages, 127 127 unsigned int outlen, 128 - unsigned short pageofs, 129 - void (*endio)(struct page *)) 128 + unsigned short pageofs) 130 129 { 131 130 void *vin, *vout; 132 131 unsigned int nr_pages, i, j; ··· 147 148 ret = z_erofs_unzip_lz4(vin, vout + pageofs, 148 149 clusterpages * PAGE_SIZE, outlen); 149 150 150 - if (ret >= 0) { 151 - outlen = ret; 152 - ret = 0; 153 - } 151 + if (ret < 0) 152 + goto out; 153 + ret = 0; 154 154 155 155 for (i = 0; i < nr_pages; ++i) { 156 156 j = min((unsigned int)PAGE_SIZE - pageofs, outlen); 157 157 158 158 if (pages[i]) { 159 - if (ret < 0) { 160 - SetPageError(pages[i]); 161 - } else if (clusterpages == 1 && 162 - pages[i] == compressed_pages[0]) { 159 + if (clusterpages == 1 && 160 + pages[i] == compressed_pages[0]) { 163 161 memcpy(vin + pageofs, vout + pageofs, j); 164 162 } else { 165 163 void *dst = kmap_atomic(pages[i]); ··· 164 168 memcpy(dst + pageofs, vout + pageofs, j); 165 169 kunmap_atomic(dst); 166 170 } 167 - endio(pages[i]); 168 171 } 169 172 vout += PAGE_SIZE; 170 173 outlen -= j; 171 174 pageofs = 0; 172 175 } 176 + 177 + out: 173 178 preempt_enable(); 174 179 175 180 if (clusterpages == 1)