Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'erofs-for-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
"Updates for folio conversions for compressed inodes: While large folio
support for compressed data could work now, it remains disabled since
the stress test could hang due to page migration in a few hours after
enabling it. I need more time to investigate further before enabling
this feature.

Additionally, clean up stream decompressors and tracepoints for
simplicity.

Summary:

- More folio conversions for compressed inodes

- Stream decompressor (LZMA/DEFLATE/ZSTD) cleanups

- Minor tracepoint cleanup"

* tag 'erofs-for-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
erofs: silence uninitialized variable warning in z_erofs_scan_folio()
erofs: avoid refcounting short-lived pages
erofs: get rid of z_erofs_map_blocks_iter_* tracepoints
erofs: tidy up stream decompressors
erofs: refine z_erofs_{init,exit}_subsystem()
erofs: move each decompressor to its own source file
erofs: tidy up `struct z_erofs_bvec`
erofs: teach z_erofs_scan_folios() to handle multi-page folios
erofs: convert z_erofs_read_fragment() to folios
erofs: convert z_erofs_pcluster_readmore() to folios

+494 -644
+30 -31
fs/erofs/compress.h
··· 24 24 void *data, int size); 25 25 int (*decompress)(struct z_erofs_decompress_req *rq, 26 26 struct page **pagepool); 27 + int (*init)(void); 28 + void (*exit)(void); 27 29 char *name; 28 30 }; 29 31 ··· 54 52 */ 55 53 56 54 /* 57 - * short-lived pages are pages directly from buddy system with specific 58 - * page->private (no need to set PagePrivate since these are non-LRU / 59 - * non-movable pages and bypass reclaim / migration code). 55 + * Currently, short-lived pages are pages directly from buddy system 56 + * with specific page->private (Z_EROFS_SHORTLIVED_PAGE). 57 + * In the future world of Memdescs, it should be type 0 (Misc) memory 58 + * which type can be checked with a new helper. 60 59 */ 61 60 static inline bool z_erofs_is_shortlived_page(struct page *page) 62 61 { 63 - if (page->private != Z_EROFS_SHORTLIVED_PAGE) 64 - return false; 65 - 66 - DBG_BUGON(page->mapping); 67 - return true; 62 + return page->private == Z_EROFS_SHORTLIVED_PAGE; 68 63 } 69 64 70 65 static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, ··· 69 70 { 70 71 if (!z_erofs_is_shortlived_page(page)) 71 72 return false; 72 - 73 - /* short-lived pages should not be used by others at the same time */ 74 - if (page_ref_count(page) > 1) { 75 - put_page(page); 76 - } else { 77 - /* follow the pcluster rule above. */ 78 - erofs_pagepool_add(pagepool, page); 79 - } 73 + erofs_pagepool_add(pagepool, page); 80 74 return true; 81 75 } 82 76 77 + extern const struct z_erofs_decompressor z_erofs_lzma_decomp; 78 + extern const struct z_erofs_decompressor z_erofs_deflate_decomp; 79 + extern const struct z_erofs_decompressor z_erofs_zstd_decomp; 80 + extern const struct z_erofs_decompressor *z_erofs_decomp[]; 81 + 82 + struct z_erofs_stream_dctx { 83 + struct z_erofs_decompress_req *rq; 84 + unsigned int inpages, outpages; /* # of {en,de}coded pages */ 85 + int no, ni; /* the current {en,de}coded page # */ 86 + 87 + unsigned int avail_out; /* remaining bytes in the decoded buffer */ 88 + unsigned int inbuf_pos, inbuf_sz; 89 + /* current status of the encoded buffer */ 90 + u8 *kin, *kout; /* buffer mapped pointers */ 91 + void *bounce; /* bounce buffer for inplace I/Os */ 92 + bool bounced; /* is the bounce buffer used now? */ 93 + }; 94 + 95 + int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst, 96 + void **src, struct page **pgpl); 83 97 int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, 84 98 unsigned int padbufsize); 85 - extern const struct z_erofs_decompressor erofs_decompressors[]; 86 - 87 - /* prototypes for specific algorithms */ 88 - int z_erofs_load_lzma_config(struct super_block *sb, 89 - struct erofs_super_block *dsb, void *data, int size); 90 - int z_erofs_load_deflate_config(struct super_block *sb, 91 - struct erofs_super_block *dsb, void *data, int size); 92 - int z_erofs_load_zstd_config(struct super_block *sb, 93 - struct erofs_super_block *dsb, void *data, int size); 94 - int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, 95 - struct page **pagepool); 96 - int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, 97 - struct page **pagepool); 98 - int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, 99 - struct page **pgpl); 99 + int __init z_erofs_init_decompressor(void); 100 + void z_erofs_exit_decompressor(void); 100 101 #endif
+122 -26
fs/erofs/decompressor.c
··· 2 2 /* 3 3 * Copyright (C) 2019 HUAWEI, Inc. 4 4 * https://www.huawei.com/ 5 + * Copyright (C) 2024 Alibaba Cloud 5 6 */ 6 7 #include "compress.h" 7 8 #include <linux/lz4.h> ··· 110 109 111 110 if (top) { 112 111 victim = availables[--top]; 113 - get_page(victim); 114 112 } else { 115 113 victim = __erofs_allocpage(pagepool, rq->gfp, true); 116 114 if (!victim) ··· 371 371 return 0; 372 372 } 373 373 374 - const struct z_erofs_decompressor erofs_decompressors[] = { 375 - [Z_EROFS_COMPRESSION_SHIFTED] = { 374 + int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst, 375 + void **src, struct page **pgpl) 376 + { 377 + struct z_erofs_decompress_req *rq = dctx->rq; 378 + struct super_block *sb = rq->sb; 379 + struct page **pgo, *tmppage; 380 + unsigned int j; 381 + 382 + if (!dctx->avail_out) { 383 + if (++dctx->no >= dctx->outpages || !rq->outputsize) { 384 + erofs_err(sb, "insufficient space for decompressed data"); 385 + return -EFSCORRUPTED; 386 + } 387 + 388 + if (dctx->kout) 389 + kunmap_local(dctx->kout); 390 + dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out); 391 + rq->outputsize -= dctx->avail_out; 392 + pgo = &rq->out[dctx->no]; 393 + if (!*pgo && rq->fillgaps) { /* deduped */ 394 + *pgo = erofs_allocpage(pgpl, rq->gfp); 395 + if (!*pgo) { 396 + dctx->kout = NULL; 397 + return -ENOMEM; 398 + } 399 + set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE); 400 + } 401 + if (*pgo) { 402 + dctx->kout = kmap_local_page(*pgo); 403 + *dst = dctx->kout + rq->pageofs_out; 404 + } else { 405 + *dst = dctx->kout = NULL; 406 + } 407 + rq->pageofs_out = 0; 408 + } 409 + 410 + if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) { 411 + if (++dctx->ni >= dctx->inpages) { 412 + erofs_err(sb, "invalid compressed data"); 413 + return -EFSCORRUPTED; 414 + } 415 + if (dctx->kout) /* unlike kmap(), take care of the orders */ 416 + kunmap_local(dctx->kout); 417 + kunmap_local(dctx->kin); 418 + 419 + dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE); 420 + rq->inputsize -= dctx->inbuf_sz; 421 + dctx->kin = kmap_local_page(rq->in[dctx->ni]); 422 + *src = dctx->kin; 423 + dctx->bounced = false; 424 + if (dctx->kout) { 425 + j = (u8 *)*dst - dctx->kout; 426 + dctx->kout = kmap_local_page(rq->out[dctx->no]); 427 + *dst = dctx->kout + j; 428 + } 429 + dctx->inbuf_pos = 0; 430 + } 431 + 432 + /* 433 + * Handle overlapping: Use the given bounce buffer if the input data is 434 + * under processing; Or utilize short-lived pages from the on-stack page 435 + * pool, where pages are shared among the same request. Note that only 436 + * a few inplace I/O pages need to be doubled. 437 + */ 438 + if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) { 439 + memcpy(dctx->bounce, *src, dctx->inbuf_sz); 440 + *src = dctx->bounce; 441 + dctx->bounced = true; 442 + } 443 + 444 + for (j = dctx->ni + 1; j < dctx->inpages; ++j) { 445 + if (rq->out[dctx->no] != rq->in[j]) 446 + continue; 447 + tmppage = erofs_allocpage(pgpl, rq->gfp); 448 + if (!tmppage) 449 + return -ENOMEM; 450 + set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); 451 + copy_highpage(tmppage, rq->in[j]); 452 + rq->in[j] = tmppage; 453 + } 454 + return 0; 455 + } 456 + 457 + const struct z_erofs_decompressor *z_erofs_decomp[] = { 458 + [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) { 376 459 .decompress = z_erofs_transform_plain, 377 460 .name = "shifted" 378 461 }, 379 - [Z_EROFS_COMPRESSION_INTERLACED] = { 462 + [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) { 380 463 .decompress = z_erofs_transform_plain, 381 464 .name = "interlaced" 382 465 }, 383 - [Z_EROFS_COMPRESSION_LZ4] = { 466 + [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) { 384 467 .config = z_erofs_load_lz4_config, 385 468 .decompress = z_erofs_lz4_decompress, 469 + .init = z_erofs_gbuf_init, 470 + .exit = z_erofs_gbuf_exit, 386 471 .name = "lz4" 387 472 }, 388 473 #ifdef CONFIG_EROFS_FS_ZIP_LZMA 389 - [Z_EROFS_COMPRESSION_LZMA] = { 390 - .config = z_erofs_load_lzma_config, 391 - .decompress = z_erofs_lzma_decompress, 392 - .name = "lzma" 393 - }, 474 + [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp, 394 475 #endif 395 476 #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE 396 - [Z_EROFS_COMPRESSION_DEFLATE] = { 397 - .config = z_erofs_load_deflate_config, 398 - .decompress = z_erofs_deflate_decompress, 399 - .name = "deflate" 400 - }, 477 + [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp, 401 478 #endif 402 479 #ifdef CONFIG_EROFS_FS_ZIP_ZSTD 403 - [Z_EROFS_COMPRESSION_ZSTD] = { 404 - .config = z_erofs_load_zstd_config, 405 - .decompress = z_erofs_zstd_decompress, 406 - .name = "zstd" 407 - }, 480 + [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp, 408 481 #endif 409 482 }; 410 483 ··· 505 432 offset = EROFS_SUPER_OFFSET + sbi->sb_size; 506 433 alg = 0; 507 434 for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { 435 + const struct z_erofs_decompressor *dec = z_erofs_decomp[alg]; 508 436 void *data; 509 437 510 438 if (!(algs & 1)) ··· 517 443 break; 518 444 } 519 445 520 - if (alg >= ARRAY_SIZE(erofs_decompressors) || 521 - !erofs_decompressors[alg].config) { 446 + if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) { 447 + ret = dec->config(sb, dsb, data, size); 448 + } else { 522 449 erofs_err(sb, "algorithm %d isn't enabled on this kernel", 523 450 alg); 524 451 ret = -EOPNOTSUPP; 525 - } else { 526 - ret = erofs_decompressors[alg].config(sb, 527 - dsb, data, size); 528 452 } 529 - 530 453 kfree(data); 531 454 if (ret) 532 455 break; 533 456 } 534 457 erofs_put_metabuf(&buf); 535 458 return ret; 459 + } 460 + 461 + int __init z_erofs_init_decompressor(void) 462 + { 463 + int i, err; 464 + 465 + for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) { 466 + err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0; 467 + if (err) { 468 + while (--i) 469 + if (z_erofs_decomp[i]) 470 + z_erofs_decomp[i]->exit(); 471 + return err; 472 + } 473 + } 474 + return 0; 475 + } 476 + 477 + void z_erofs_exit_decompressor(void) 478 + { 479 + int i; 480 + 481 + for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) 482 + if (z_erofs_decomp[i]) 483 + z_erofs_decomp[i]->exit(); 536 484 }
+44 -105
fs/erofs/decompressor_deflate.c
··· 15 15 16 16 module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444); 17 17 18 - void z_erofs_deflate_exit(void) 18 + static void z_erofs_deflate_exit(void) 19 19 { 20 20 /* there should be no running fs instance */ 21 21 while (z_erofs_deflate_avail_strms) { ··· 41 41 } 42 42 } 43 43 44 - int __init z_erofs_deflate_init(void) 44 + static int __init z_erofs_deflate_init(void) 45 45 { 46 46 /* by default, use # of possible CPUs instead */ 47 47 if (!z_erofs_deflate_nstrms) ··· 49 49 return 0; 50 50 } 51 51 52 - int z_erofs_load_deflate_config(struct super_block *sb, 52 + static int z_erofs_load_deflate_config(struct super_block *sb, 53 53 struct erofs_super_block *dsb, void *data, int size) 54 54 { 55 55 struct z_erofs_deflate_cfgs *dfl = data; ··· 97 97 return -ENOMEM; 98 98 } 99 99 100 - int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, 101 - struct page **pgpl) 100 + static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, 101 + struct page **pgpl) 102 102 { 103 - const unsigned int nrpages_out = 104 - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; 105 - const unsigned int nrpages_in = 106 - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; 107 103 struct super_block *sb = rq->sb; 108 - unsigned int insz, outsz, pofs; 104 + struct z_erofs_stream_dctx dctx = { 105 + .rq = rq, 106 + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, 107 + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) 108 + >> PAGE_SHIFT, 109 + .no = -1, .ni = 0, 110 + }; 109 111 struct z_erofs_deflate *strm; 110 - u8 *kin, *kout = NULL; 111 - bool bounced = false; 112 - int no = -1, ni = 0, j = 0, zerr, err; 112 + int zerr, err; 113 113 114 114 /* 1. get the exact DEFLATE compressed size */ 115 - kin = kmap_local_page(*rq->in); 116 - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, 117 - min_t(unsigned int, rq->inputsize, 118 - sb->s_blocksize - rq->pageofs_in)); 115 + dctx.kin = kmap_local_page(*rq->in); 116 + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, 117 + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); 119 118 if (err) { 120 - kunmap_local(kin); 119 + kunmap_local(dctx.kin); 121 120 return err; 122 121 } 123 122 ··· 133 134 spin_unlock(&z_erofs_deflate_lock); 134 135 135 136 /* 3. multi-call decompress */ 136 - insz = rq->inputsize; 137 - outsz = rq->outputsize; 138 137 zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS); 139 138 if (zerr != Z_OK) { 140 139 err = -EIO; 141 140 goto failed_zinit; 142 141 } 143 142 144 - pofs = rq->pageofs_out; 145 - strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); 146 - insz -= strm->z.avail_in; 147 - strm->z.next_in = kin + rq->pageofs_in; 143 + rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */ 144 + strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); 145 + rq->inputsize -= strm->z.avail_in; 146 + strm->z.next_in = dctx.kin + rq->pageofs_in; 148 147 strm->z.avail_out = 0; 148 + dctx.bounce = strm->bounce; 149 149 150 150 while (1) { 151 - if (!strm->z.avail_out) { 152 - if (++no >= nrpages_out || !outsz) { 153 - erofs_err(sb, "insufficient space for decompressed data"); 154 - err = -EFSCORRUPTED; 155 - break; 156 - } 157 - 158 - if (kout) 159 - kunmap_local(kout); 160 - strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs); 161 - outsz -= strm->z.avail_out; 162 - if (!rq->out[no]) { 163 - rq->out[no] = erofs_allocpage(pgpl, rq->gfp); 164 - if (!rq->out[no]) { 165 - kout = NULL; 166 - err = -ENOMEM; 167 - break; 168 - } 169 - set_page_private(rq->out[no], 170 - Z_EROFS_SHORTLIVED_PAGE); 171 - } 172 - kout = kmap_local_page(rq->out[no]); 173 - strm->z.next_out = kout + pofs; 174 - pofs = 0; 175 - } 176 - 177 - if (!strm->z.avail_in && insz) { 178 - if (++ni >= nrpages_in) { 179 - erofs_err(sb, "invalid compressed data"); 180 - err = -EFSCORRUPTED; 181 - break; 182 - } 183 - 184 - if (kout) { /* unlike kmap(), take care of the orders */ 185 - j = strm->z.next_out - kout; 186 - kunmap_local(kout); 187 - } 188 - kunmap_local(kin); 189 - strm->z.avail_in = min_t(u32, insz, PAGE_SIZE); 190 - insz -= strm->z.avail_in; 191 - kin = kmap_local_page(rq->in[ni]); 192 - strm->z.next_in = kin; 193 - bounced = false; 194 - if (kout) { 195 - kout = kmap_local_page(rq->out[no]); 196 - strm->z.next_out = kout + j; 197 - } 198 - } 199 - 200 - /* 201 - * Handle overlapping: Use bounced buffer if the compressed 202 - * data is under processing; Or use short-lived pages from the 203 - * on-stack pagepool where pages share among the same request 204 - * and not _all_ inplace I/O pages are needed to be doubled. 205 - */ 206 - if (!bounced && rq->out[no] == rq->in[ni]) { 207 - memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in); 208 - strm->z.next_in = strm->bounce; 209 - bounced = true; 210 - } 211 - 212 - for (j = ni + 1; j < nrpages_in; ++j) { 213 - struct page *tmppage; 214 - 215 - if (rq->out[no] != rq->in[j]) 216 - continue; 217 - tmppage = erofs_allocpage(pgpl, rq->gfp); 218 - if (!tmppage) { 219 - err = -ENOMEM; 220 - goto failed; 221 - } 222 - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); 223 - copy_highpage(tmppage, rq->in[j]); 224 - rq->in[j] = tmppage; 225 - } 151 + dctx.avail_out = strm->z.avail_out; 152 + dctx.inbuf_sz = strm->z.avail_in; 153 + err = z_erofs_stream_switch_bufs(&dctx, 154 + (void **)&strm->z.next_out, 155 + (void **)&strm->z.next_in, pgpl); 156 + if (err) 157 + break; 158 + strm->z.avail_out = dctx.avail_out; 159 + strm->z.avail_in = dctx.inbuf_sz; 226 160 227 161 zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH); 228 - if (zerr != Z_OK || !(outsz + strm->z.avail_out)) { 162 + if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) { 229 163 if (zerr == Z_OK && rq->partial_decoding) 230 164 break; 231 - if (zerr == Z_STREAM_END && !outsz) 165 + if (zerr == Z_STREAM_END && !rq->outputsize) 232 166 break; 233 167 erofs_err(sb, "failed to decompress %d in[%u] out[%u]", 234 168 zerr, rq->inputsize, rq->outputsize); ··· 169 237 break; 170 238 } 171 239 } 172 - failed: 173 240 if (zlib_inflateEnd(&strm->z) != Z_OK && !err) 174 241 err = -EIO; 175 - if (kout) 176 - kunmap_local(kout); 242 + if (dctx.kout) 243 + kunmap_local(dctx.kout); 177 244 failed_zinit: 178 - kunmap_local(kin); 245 + kunmap_local(dctx.kin); 179 246 /* 4. push back DEFLATE stream context to the global list */ 180 247 spin_lock(&z_erofs_deflate_lock); 181 248 strm->next = z_erofs_deflate_head; ··· 183 252 wake_up(&z_erofs_deflate_wq); 184 253 return err; 185 254 } 255 + 256 + const struct z_erofs_decompressor z_erofs_deflate_decomp = { 257 + .config = z_erofs_load_deflate_config, 258 + .decompress = z_erofs_deflate_decompress, 259 + .init = z_erofs_deflate_init, 260 + .exit = z_erofs_deflate_exit, 261 + .name = "deflate", 262 + };
+56 -110
fs/erofs/decompressor_lzma.c
··· 5 5 struct z_erofs_lzma { 6 6 struct z_erofs_lzma *next; 7 7 struct xz_dec_microlzma *state; 8 - struct xz_buf buf; 9 8 u8 bounce[PAGE_SIZE]; 10 9 }; 11 10 ··· 17 18 18 19 module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444); 19 20 20 - void z_erofs_lzma_exit(void) 21 + static void z_erofs_lzma_exit(void) 21 22 { 22 23 /* there should be no running fs instance */ 23 24 while (z_erofs_lzma_avail_strms) { ··· 45 46 } 46 47 } 47 48 48 - int __init z_erofs_lzma_init(void) 49 + static int __init z_erofs_lzma_init(void) 49 50 { 50 51 unsigned int i; 51 52 ··· 69 70 return 0; 70 71 } 71 72 72 - int z_erofs_load_lzma_config(struct super_block *sb, 73 + static int z_erofs_load_lzma_config(struct super_block *sb, 73 74 struct erofs_super_block *dsb, void *data, int size) 74 75 { 75 76 static DEFINE_MUTEX(lzma_resize_mutex); ··· 146 147 return err; 147 148 } 148 149 149 - int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, 150 - struct page **pgpl) 150 + static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, 151 + struct page **pgpl) 151 152 { 152 - const unsigned int nrpages_out = 153 - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; 154 - const unsigned int nrpages_in = 155 - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; 156 - unsigned int inlen, outlen, pageofs; 153 + struct super_block *sb = rq->sb; 154 + struct z_erofs_stream_dctx dctx = { 155 + .rq = rq, 156 + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, 157 + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) 158 + >> PAGE_SHIFT, 159 + .no = -1, .ni = 0, 160 + }; 161 + struct xz_buf buf = {}; 157 162 struct z_erofs_lzma *strm; 158 - u8 *kin; 159 - bool bounced = false; 160 - int no, ni, j, err = 0; 163 + enum xz_ret xz_err; 164 + int err; 161 165 162 166 /* 1. get the exact LZMA compressed size */ 163 - kin = kmap(*rq->in); 164 - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, 165 - min_t(unsigned int, rq->inputsize, 166 - rq->sb->s_blocksize - rq->pageofs_in)); 167 + dctx.kin = kmap_local_page(*rq->in); 168 + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, 169 + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); 167 170 if (err) { 168 - kunmap(*rq->in); 171 + kunmap_local(dctx.kin); 169 172 return err; 170 173 } 171 174 ··· 184 183 spin_unlock(&z_erofs_lzma_lock); 185 184 186 185 /* 3. multi-call decompress */ 187 - inlen = rq->inputsize; 188 - outlen = rq->outputsize; 189 - xz_dec_microlzma_reset(strm->state, inlen, outlen, 186 + xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize, 190 187 !rq->partial_decoding); 191 - pageofs = rq->pageofs_out; 192 - strm->buf.in = kin + rq->pageofs_in; 193 - strm->buf.in_pos = 0; 194 - strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in); 195 - inlen -= strm->buf.in_size; 196 - strm->buf.out = NULL; 197 - strm->buf.out_pos = 0; 198 - strm->buf.out_size = 0; 188 + buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); 189 + rq->inputsize -= buf.in_size; 190 + buf.in = dctx.kin + rq->pageofs_in, 191 + dctx.bounce = strm->bounce; 192 + do { 193 + dctx.avail_out = buf.out_size - buf.out_pos; 194 + dctx.inbuf_sz = buf.in_size; 195 + dctx.inbuf_pos = buf.in_pos; 196 + err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out, 197 + (void **)&buf.in, pgpl); 198 + if (err) 199 + break; 199 200 200 - for (ni = 0, no = -1;;) { 201 - enum xz_ret xz_err; 202 - 203 - if (strm->buf.out_pos == strm->buf.out_size) { 204 - if (strm->buf.out) { 205 - kunmap(rq->out[no]); 206 - strm->buf.out = NULL; 207 - } 208 - 209 - if (++no >= nrpages_out || !outlen) { 210 - erofs_err(rq->sb, "decompressed buf out of bound"); 211 - err = -EFSCORRUPTED; 212 - break; 213 - } 214 - strm->buf.out_pos = 0; 215 - strm->buf.out_size = min_t(u32, outlen, 216 - PAGE_SIZE - pageofs); 217 - outlen -= strm->buf.out_size; 218 - if (!rq->out[no] && rq->fillgaps) { /* deduped */ 219 - rq->out[no] = erofs_allocpage(pgpl, rq->gfp); 220 - if (!rq->out[no]) { 221 - err = -ENOMEM; 222 - break; 223 - } 224 - set_page_private(rq->out[no], 225 - Z_EROFS_SHORTLIVED_PAGE); 226 - } 227 - if (rq->out[no]) 228 - strm->buf.out = kmap(rq->out[no]) + pageofs; 229 - pageofs = 0; 230 - } else if (strm->buf.in_pos == strm->buf.in_size) { 231 - kunmap(rq->in[ni]); 232 - 233 - if (++ni >= nrpages_in || !inlen) { 234 - erofs_err(rq->sb, "compressed buf out of bound"); 235 - err = -EFSCORRUPTED; 236 - break; 237 - } 238 - strm->buf.in_pos = 0; 239 - strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE); 240 - inlen -= strm->buf.in_size; 241 - kin = kmap(rq->in[ni]); 242 - strm->buf.in = kin; 243 - bounced = false; 201 + if (buf.out_size == buf.out_pos) { 202 + buf.out_size = dctx.avail_out; 203 + buf.out_pos = 0; 244 204 } 205 + buf.in_size = dctx.inbuf_sz; 206 + buf.in_pos = dctx.inbuf_pos; 245 207 246 - /* 247 - * Handle overlapping: Use bounced buffer if the compressed 248 - * data is under processing; Otherwise, Use short-lived pages 249 - * from the on-stack pagepool where pages share with the same 250 - * request. 251 - */ 252 - if (!bounced && rq->out[no] == rq->in[ni]) { 253 - memcpy(strm->bounce, strm->buf.in, strm->buf.in_size); 254 - strm->buf.in = strm->bounce; 255 - bounced = true; 256 - } 257 - for (j = ni + 1; j < nrpages_in; ++j) { 258 - struct page *tmppage; 259 - 260 - if (rq->out[no] != rq->in[j]) 261 - continue; 262 - tmppage = erofs_allocpage(pgpl, rq->gfp); 263 - if (!tmppage) { 264 - err = -ENOMEM; 265 - goto failed; 266 - } 267 - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); 268 - copy_highpage(tmppage, rq->in[j]); 269 - rq->in[j] = tmppage; 270 - } 271 - xz_err = xz_dec_microlzma_run(strm->state, &strm->buf); 272 - DBG_BUGON(strm->buf.out_pos > strm->buf.out_size); 273 - DBG_BUGON(strm->buf.in_pos > strm->buf.in_size); 208 + xz_err = xz_dec_microlzma_run(strm->state, &buf); 209 + DBG_BUGON(buf.out_pos > buf.out_size); 210 + DBG_BUGON(buf.in_pos > buf.in_size); 274 211 275 212 if (xz_err != XZ_OK) { 276 - if (xz_err == XZ_STREAM_END && !outlen) 213 + if (xz_err == XZ_STREAM_END && !rq->outputsize) 277 214 break; 278 - erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]", 215 + erofs_err(sb, "failed to decompress %d in[%u] out[%u]", 279 216 xz_err, rq->inputsize, rq->outputsize); 280 217 err = -EFSCORRUPTED; 281 218 break; 282 219 } 283 - } 284 - failed: 285 - if (no < nrpages_out && strm->buf.out) 286 - kunmap(rq->out[no]); 287 - if (ni < nrpages_in) 288 - kunmap(rq->in[ni]); 220 + } while (1); 221 + 222 + if (dctx.kout) 223 + kunmap_local(dctx.kout); 224 + kunmap_local(dctx.kin); 289 225 /* 4. push back LZMA stream context to the global list */ 290 226 spin_lock(&z_erofs_lzma_lock); 291 227 strm->next = z_erofs_lzma_head; ··· 231 293 wake_up(&z_erofs_lzma_wq); 232 294 return err; 233 295 } 296 + 297 + const struct z_erofs_decompressor z_erofs_lzma_decomp = { 298 + .config = z_erofs_load_lzma_config, 299 + .decompress = z_erofs_lzma_decompress, 300 + .init = z_erofs_lzma_init, 301 + .exit = z_erofs_lzma_exit, 302 + .name = "lzma" 303 + };
+50 -104
fs/erofs/decompressor_zstd.c
··· 34 34 return strm; 35 35 } 36 36 37 - void z_erofs_zstd_exit(void) 37 + static void z_erofs_zstd_exit(void) 38 38 { 39 39 while (z_erofs_zstd_avail_strms) { 40 40 struct z_erofs_zstd *strm, *n; ··· 49 49 } 50 50 } 51 51 52 - int __init z_erofs_zstd_init(void) 52 + static int __init z_erofs_zstd_init(void) 53 53 { 54 54 /* by default, use # of possible CPUs instead */ 55 55 if (!z_erofs_zstd_nstrms) ··· 72 72 return 0; 73 73 } 74 74 75 - int z_erofs_load_zstd_config(struct super_block *sb, 75 + static int z_erofs_load_zstd_config(struct super_block *sb, 76 76 struct erofs_super_block *dsb, void *data, int size) 77 77 { 78 78 static DEFINE_MUTEX(zstd_resize_mutex); ··· 135 135 return strm ? -ENOMEM : 0; 136 136 } 137 137 138 - int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, 139 - struct page **pgpl) 138 + static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, 139 + struct page **pgpl) 140 140 { 141 - const unsigned int nrpages_out = 142 - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; 143 - const unsigned int nrpages_in = 144 - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; 145 - zstd_dstream *stream; 146 141 struct super_block *sb = rq->sb; 147 - unsigned int insz, outsz, pofs; 148 - struct z_erofs_zstd *strm; 142 + struct z_erofs_stream_dctx dctx = { 143 + .rq = rq, 144 + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, 145 + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) 146 + >> PAGE_SHIFT, 147 + .no = -1, .ni = 0, 148 + }; 149 149 zstd_in_buffer in_buf = { NULL, 0, 0 }; 150 150 zstd_out_buffer out_buf = { NULL, 0, 0 }; 151 - u8 *kin, *kout = NULL; 152 - bool bounced = false; 153 - int no = -1, ni = 0, j = 0, zerr, err; 151 + struct z_erofs_zstd *strm; 152 + zstd_dstream *stream; 153 + int zerr, err; 154 154 155 155 /* 1. get the exact compressed size */ 156 - kin = kmap_local_page(*rq->in); 157 - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, 158 - min_t(unsigned int, rq->inputsize, 159 - sb->s_blocksize - rq->pageofs_in)); 156 + dctx.kin = kmap_local_page(*rq->in); 157 + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, 158 + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); 160 159 if (err) { 161 - kunmap_local(kin); 160 + kunmap_local(dctx.kin); 162 161 return err; 163 162 } 164 163 ··· 165 166 strm = z_erofs_isolate_strms(false); 166 167 167 168 /* 3. multi-call decompress */ 168 - insz = rq->inputsize; 169 - outsz = rq->outputsize; 170 169 stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz); 171 170 if (!stream) { 172 171 err = -EIO; 173 172 goto failed_zinit; 174 173 } 175 174 176 - pofs = rq->pageofs_out; 177 - in_buf.size = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); 178 - insz -= in_buf.size; 179 - in_buf.src = kin + rq->pageofs_in; 175 + rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */ 176 + in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in); 177 + rq->inputsize -= in_buf.size; 178 + in_buf.src = dctx.kin + rq->pageofs_in; 179 + dctx.bounce = strm->bounce; 180 + 180 181 do { 182 + dctx.avail_out = out_buf.size - out_buf.pos; 183 + dctx.inbuf_sz = in_buf.size; 184 + dctx.inbuf_pos = in_buf.pos; 185 + err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst, 186 + (void **)&in_buf.src, pgpl); 187 + if (err) 188 + break; 189 + 181 190 if (out_buf.size == out_buf.pos) { 182 - if (++no >= nrpages_out || !outsz) { 183 - erofs_err(sb, "insufficient space for decompressed data"); 184 - err = -EFSCORRUPTED; 185 - break; 186 - } 187 - 188 - if (kout) 189 - kunmap_local(kout); 190 - out_buf.size = min_t(u32, outsz, PAGE_SIZE - pofs); 191 - outsz -= out_buf.size; 192 - if (!rq->out[no]) { 193 - rq->out[no] = erofs_allocpage(pgpl, rq->gfp); 194 - if (!rq->out[no]) { 195 - kout = NULL; 196 - err = -ENOMEM; 197 - break; 198 - } 199 - set_page_private(rq->out[no], 200 - Z_EROFS_SHORTLIVED_PAGE); 201 - } 202 - kout = kmap_local_page(rq->out[no]); 203 - out_buf.dst = kout + pofs; 191 + out_buf.size = dctx.avail_out; 204 192 out_buf.pos = 0; 205 - pofs = 0; 206 193 } 194 + in_buf.size = dctx.inbuf_sz; 195 + in_buf.pos = dctx.inbuf_pos; 207 196 208 - if (in_buf.size == in_buf.pos && insz) { 209 - if (++ni >= nrpages_in) { 210 - erofs_err(sb, "invalid compressed data"); 211 - err = -EFSCORRUPTED; 212 - break; 213 - } 214 - 215 - if (kout) /* unlike kmap(), take care of the orders */ 216 - kunmap_local(kout); 217 - kunmap_local(kin); 218 - in_buf.size = min_t(u32, insz, PAGE_SIZE); 219 - insz -= in_buf.size; 220 - kin = kmap_local_page(rq->in[ni]); 221 - in_buf.src = kin; 222 - in_buf.pos = 0; 223 - bounced = false; 224 - if (kout) { 225 - j = (u8 *)out_buf.dst - kout; 226 - kout = kmap_local_page(rq->out[no]); 227 - out_buf.dst = kout + j; 228 - } 229 - } 230 - 231 - /* 232 - * Handle overlapping: Use bounced buffer if the compressed 233 - * data is under processing; Or use short-lived pages from the 234 - * on-stack pagepool where pages share among the same request 235 - * and not _all_ inplace I/O pages are needed to be doubled. 236 - */ 237 - if (!bounced && rq->out[no] == rq->in[ni]) { 238 - memcpy(strm->bounce, in_buf.src, in_buf.size); 239 - in_buf.src = strm->bounce; 240 - bounced = true; 241 - } 242 - 243 - for (j = ni + 1; j < nrpages_in; ++j) { 244 - struct page *tmppage; 245 - 246 - if (rq->out[no] != rq->in[j]) 247 - continue; 248 - tmppage = erofs_allocpage(pgpl, rq->gfp); 249 - if (!tmppage) { 250 - err = -ENOMEM; 251 - break; 252 - } 253 - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); 254 - copy_highpage(tmppage, rq->in[j]); 255 - rq->in[j] = tmppage; 256 - } 257 197 zerr = zstd_decompress_stream(stream, &out_buf, &in_buf); 258 - if (zstd_is_error(zerr) || (!zerr && outsz)) { 198 + if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) { 259 199 erofs_err(sb, "failed to decompress in[%u] out[%u]: %s", 260 200 rq->inputsize, rq->outputsize, 261 201 zerr ? zstd_get_error_name(zerr) : "unexpected end of stream"); 262 202 err = -EFSCORRUPTED; 263 203 break; 264 204 } 265 - } while (outsz || out_buf.pos < out_buf.size); 266 - failed: 267 - if (kout) 268 - kunmap_local(kout); 205 + } while (rq->outputsize || out_buf.pos < out_buf.size); 206 + 207 + if (dctx.kout) 208 + kunmap_local(dctx.kout); 269 209 failed_zinit: 270 - kunmap_local(kin); 210 + kunmap_local(dctx.kin); 271 211 /* 4. push back ZSTD stream context to the global list */ 272 212 spin_lock(&z_erofs_zstd_lock); 273 213 strm->next = z_erofs_zstd_head; ··· 215 277 wake_up(&z_erofs_zstd_wq); 216 278 return err; 217 279 } 280 + 281 + const struct z_erofs_decompressor z_erofs_zstd_decomp = { 282 + .config = z_erofs_load_zstd_config, 283 + .decompress = z_erofs_zstd_decompress, 284 + .init = z_erofs_zstd_init, 285 + .exit = z_erofs_zstd_exit, 286 + .name = "zstd", 287 + };
+9 -39
fs/erofs/internal.h
··· 312 312 return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK; 313 313 } 314 314 315 - /* 316 - * Different from grab_cache_page_nowait(), reclaiming is never triggered 317 - * when allocating new pages. 318 - */ 319 - static inline 320 - struct page *erofs_grab_cache_page_nowait(struct address_space *mapping, 321 - pgoff_t index) 315 + /* reclaiming is never triggered when allocating new folios. */ 316 + static inline struct folio *erofs_grab_folio_nowait(struct address_space *as, 317 + pgoff_t index) 322 318 { 323 - return pagecache_get_page(mapping, index, 319 + return __filemap_get_folio(as, index, 324 320 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, 325 - readahead_gfp_mask(mapping) & ~__GFP_RECLAIM); 321 + readahead_gfp_mask(as) & ~__GFP_RECLAIM); 326 322 } 327 323 328 324 /* Has a disk mapping */ ··· 454 458 void erofs_shrinker_unregister(struct super_block *sb); 455 459 int __init erofs_init_shrinker(void); 456 460 void erofs_exit_shrinker(void); 457 - int __init z_erofs_init_zip_subsystem(void); 458 - void z_erofs_exit_zip_subsystem(void); 461 + int __init z_erofs_init_subsystem(void); 462 + void z_erofs_exit_subsystem(void); 459 463 int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, 460 464 struct erofs_workgroup *egrp); 461 465 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, ··· 472 476 static inline void erofs_shrinker_unregister(struct super_block *sb) {} 473 477 static inline int erofs_init_shrinker(void) { return 0; } 474 478 static inline void erofs_exit_shrinker(void) {} 475 - static inline int z_erofs_init_zip_subsystem(void) { return 0; } 476 - static inline void z_erofs_exit_zip_subsystem(void) {} 477 - static inline int z_erofs_gbuf_init(void) { return 0; } 478 - static inline void z_erofs_gbuf_exit(void) {} 479 + static inline int z_erofs_init_subsystem(void) { return 0; } 480 + static inline void z_erofs_exit_subsystem(void) {} 479 481 static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } 480 482 #endif /* !CONFIG_EROFS_FS_ZIP */ 481 - 482 - #ifdef CONFIG_EROFS_FS_ZIP_LZMA 483 - int __init z_erofs_lzma_init(void); 484 - void z_erofs_lzma_exit(void); 485 - #else 486 - static inline int z_erofs_lzma_init(void) { return 0; } 487 - static inline int z_erofs_lzma_exit(void) { return 0; } 488 - #endif /* !CONFIG_EROFS_FS_ZIP_LZMA */ 489 - 490 - #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE 491 - int __init z_erofs_deflate_init(void); 492 - void z_erofs_deflate_exit(void); 493 - #else 494 - static inline int z_erofs_deflate_init(void) { return 0; } 495 - static inline int z_erofs_deflate_exit(void) { return 0; } 496 - #endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */ 497 - 498 - #ifdef CONFIG_EROFS_FS_ZIP_ZSTD 499 - int __init z_erofs_zstd_init(void); 500 - void z_erofs_zstd_exit(void); 501 - #else 502 - static inline int z_erofs_zstd_init(void) { return 0; } 503 - static inline int z_erofs_zstd_exit(void) { return 0; } 504 - #endif /* !CONFIG_EROFS_FS_ZIP_ZSTD */ 505 483 506 484 #ifdef CONFIG_EROFS_FS_ONDEMAND 507 485 int erofs_fscache_register_fs(struct super_block *sb);
+3 -31
fs/erofs/super.c
··· 849 849 if (err) 850 850 goto shrinker_err; 851 851 852 - err = z_erofs_lzma_init(); 853 - if (err) 854 - goto lzma_err; 855 - 856 - err = z_erofs_deflate_init(); 857 - if (err) 858 - goto deflate_err; 859 - 860 - err = z_erofs_zstd_init(); 861 - if (err) 862 - goto zstd_err; 863 - 864 - err = z_erofs_gbuf_init(); 865 - if (err) 866 - goto gbuf_err; 867 - 868 - err = z_erofs_init_zip_subsystem(); 852 + err = z_erofs_init_subsystem(); 869 853 if (err) 870 854 goto zip_err; 871 855 ··· 866 882 fs_err: 867 883 erofs_exit_sysfs(); 868 884 sysfs_err: 869 - z_erofs_exit_zip_subsystem(); 885 + z_erofs_exit_subsystem(); 870 886 zip_err: 871 - z_erofs_gbuf_exit(); 872 - gbuf_err: 873 - z_erofs_zstd_exit(); 874 - zstd_err: 875 - z_erofs_deflate_exit(); 876 - deflate_err: 877 - z_erofs_lzma_exit(); 878 - lzma_err: 879 887 erofs_exit_shrinker(); 880 888 shrinker_err: 881 889 kmem_cache_destroy(erofs_inode_cachep); ··· 882 906 rcu_barrier(); 883 907 884 908 erofs_exit_sysfs(); 885 - z_erofs_exit_zip_subsystem(); 886 - z_erofs_zstd_exit(); 887 - z_erofs_deflate_exit(); 888 - z_erofs_lzma_exit(); 909 + z_erofs_exit_subsystem(); 889 910 erofs_exit_shrinker(); 890 911 kmem_cache_destroy(erofs_inode_cachep); 891 - z_erofs_gbuf_exit(); 892 912 } 893 913 894 914 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
+175 -167
fs/erofs/zdata.c
··· 19 19 typedef void *z_erofs_next_pcluster_t; 20 20 21 21 struct z_erofs_bvec { 22 - union { 23 - struct page *page; 24 - struct folio *folio; 25 - }; 22 + struct page *page; 26 23 int offset; 27 24 unsigned int end; 28 25 }; ··· 446 449 static inline void erofs_cpu_hotplug_destroy(void) {} 447 450 #endif 448 451 449 - void z_erofs_exit_zip_subsystem(void) 452 + void z_erofs_exit_subsystem(void) 450 453 { 451 454 erofs_cpu_hotplug_destroy(); 452 455 erofs_destroy_percpu_workers(); 453 456 destroy_workqueue(z_erofs_workqueue); 454 457 z_erofs_destroy_pcluster_pool(); 458 + z_erofs_exit_decompressor(); 455 459 } 456 460 457 - int __init z_erofs_init_zip_subsystem(void) 461 + int __init z_erofs_init_subsystem(void) 458 462 { 459 - int err = z_erofs_create_pcluster_pool(); 463 + int err = z_erofs_init_decompressor(); 460 464 461 465 if (err) 462 - goto out_error_pcluster_pool; 466 + goto err_decompressor; 467 + 468 + err = z_erofs_create_pcluster_pool(); 469 + if (err) 470 + goto err_pcluster_pool; 463 471 464 472 z_erofs_workqueue = alloc_workqueue("erofs_worker", 465 473 WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus()); 466 474 if (!z_erofs_workqueue) { 467 475 err = -ENOMEM; 468 - goto out_error_workqueue_init; 476 + goto err_workqueue_init; 469 477 } 470 478 471 479 err = erofs_init_percpu_workers(); 472 480 if (err) 473 - goto out_error_pcpu_worker; 481 + goto err_pcpu_worker; 474 482 475 483 err = erofs_cpu_hotplug_init(); 476 484 if (err < 0) 477 - goto out_error_cpuhp_init; 485 + goto err_cpuhp_init; 478 486 return err; 479 487 480 - out_error_cpuhp_init: 488 + err_cpuhp_init: 481 489 erofs_destroy_percpu_workers(); 482 - out_error_pcpu_worker: 490 + err_pcpu_worker: 483 491 destroy_workqueue(z_erofs_workqueue); 484 - out_error_workqueue_init: 492 + err_workqueue_init: 485 493 z_erofs_destroy_pcluster_pool(); 486 - out_error_pcluster_pool: 494 + err_pcluster_pool: 495 + z_erofs_exit_decompressor(); 496 + err_decompressor: 487 497 return err; 488 498 } 489 499 
··· 621 617 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; 622 618 } 623 619 624 - /* called by erofs_shrinker to get rid of all cached compressed bvecs */ 620 + /* (erofs_shrinker) disconnect cached encoded data with pclusters */ 625 621 int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, 626 622 struct erofs_workgroup *grp) 627 623 { 628 624 struct z_erofs_pcluster *const pcl = 629 625 container_of(grp, struct z_erofs_pcluster, obj); 630 626 unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 627 + struct folio *folio; 631 628 int i; 632 629 633 630 DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); 634 - /* There is no actice user since the pcluster is now freezed */ 631 + /* Each cached folio contains one page unless bs > ps is supported */ 635 632 for (i = 0; i < pclusterpages; ++i) { 636 - struct folio *folio = pcl->compressed_bvecs[i].folio; 633 + if (pcl->compressed_bvecs[i].page) { 634 + folio = page_folio(pcl->compressed_bvecs[i].page); 635 + /* Avoid reclaiming or migrating this folio */ 636 + if (!folio_trylock(folio)) 637 + return -EBUSY; 637 638 638 - if (!folio) 639 - continue; 640 - 641 - /* Avoid reclaiming or migrating this folio */ 642 - if (!folio_trylock(folio)) 643 - return -EBUSY; 644 - 645 - if (!erofs_folio_is_managed(sbi, folio)) 646 - continue; 647 - pcl->compressed_bvecs[i].folio = NULL; 648 - folio_detach_private(folio); 649 - folio_unlock(folio); 639 + if (!erofs_folio_is_managed(sbi, folio)) 640 + continue; 641 + pcl->compressed_bvecs[i].page = NULL; 642 + folio_detach_private(folio); 643 + folio_unlock(folio); 644 + } 650 645 } 651 646 return 0; 652 647 } ··· 653 650 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) 654 651 { 655 652 struct z_erofs_pcluster *pcl = folio_get_private(folio); 656 - unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 653 + struct z_erofs_bvec *bvec = pcl->compressed_bvecs; 654 + struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl); 657 655 bool ret; 658 - 
int i; 659 656 660 657 if (!folio_test_private(folio)) 661 658 return true; ··· 664 661 spin_lock(&pcl->obj.lockref.lock); 665 662 if (pcl->obj.lockref.count <= 0) { 666 663 DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); 667 - for (i = 0; i < pclusterpages; ++i) { 668 - if (pcl->compressed_bvecs[i].folio == folio) { 669 - pcl->compressed_bvecs[i].folio = NULL; 664 + for (; bvec < end; ++bvec) { 665 + if (bvec->page && page_folio(bvec->page) == folio) { 666 + bvec->page = NULL; 670 667 folio_detach_private(folio); 671 668 ret = true; 672 669 break; ··· 928 925 fe->pcl = NULL; 929 926 } 930 927 931 - static int z_erofs_read_fragment(struct super_block *sb, struct page *page, 928 + static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, 932 929 unsigned int cur, unsigned int end, erofs_off_t pos) 933 930 { 934 931 struct inode *packed_inode = EROFS_SB(sb)->packed_inode; ··· 941 938 942 939 buf.mapping = packed_inode->i_mapping; 943 940 for (; cur < end; cur += cnt, pos += cnt) { 944 - cnt = min_t(unsigned int, end - cur, 945 - sb->s_blocksize - erofs_blkoff(sb, pos)); 941 + cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos)); 946 942 src = erofs_bread(&buf, pos, EROFS_KMAP); 947 943 if (IS_ERR(src)) { 948 944 erofs_put_metabuf(&buf); 949 945 return PTR_ERR(src); 950 946 } 951 - memcpy_to_page(page, cur, src, cnt); 947 + memcpy_to_folio(folio, cur, src, cnt); 952 948 } 953 949 erofs_put_metabuf(&buf); 954 950 return 0; 955 951 } 956 952 957 - static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe, 953 + static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, 958 954 struct folio *folio, bool ra) 959 955 { 960 - struct inode *const inode = fe->inode; 961 - struct erofs_map_blocks *const map = &fe->map; 956 + struct inode *const inode = f->inode; 957 + struct erofs_map_blocks *const map = &f->map; 962 958 const loff_t offset = folio_pos(folio); 963 - const unsigned int bs = i_blocksize(inode), fs = 
folio_size(folio); 964 - bool tight = true, exclusive; 965 - unsigned int cur, end, len, split; 959 + const unsigned int bs = i_blocksize(inode); 960 + unsigned int end = folio_size(folio), split = 0, cur, pgs; 961 + bool tight, excl; 966 962 int err = 0; 967 963 964 + tight = (bs == PAGE_SIZE); 968 965 z_erofs_onlinefolio_init(folio); 969 - split = 0; 970 - end = fs; 971 - repeat: 972 - if (offset + end - 1 < map->m_la || 973 - offset + end - 1 >= map->m_la + map->m_llen) { 974 - z_erofs_pcluster_end(fe); 975 - map->m_la = offset + end - 1; 976 - map->m_llen = 0; 977 - err = z_erofs_map_blocks_iter(inode, map, 0); 978 - if (err) 979 - goto out; 980 - } 966 + do { 967 + if (offset + end - 1 < map->m_la || 968 + offset + end - 1 >= map->m_la + map->m_llen) { 969 + z_erofs_pcluster_end(f); 970 + map->m_la = offset + end - 1; 971 + map->m_llen = 0; 972 + err = z_erofs_map_blocks_iter(inode, map, 0); 973 + if (err) 974 + break; 975 + } 981 976 982 - cur = offset > map->m_la ? 0 : map->m_la - offset; 983 - /* bump split parts first to avoid several separate cases */ 984 - ++split; 977 + cur = offset > map->m_la ? 
0 : map->m_la - offset; 978 + pgs = round_down(cur, PAGE_SIZE); 979 + /* bump split parts first to avoid several separate cases */ 980 + ++split; 985 981 986 - if (!(map->m_flags & EROFS_MAP_MAPPED)) { 987 - folio_zero_segment(folio, cur, end); 988 - tight = false; 989 - goto next_part; 990 - } 982 + if (!(map->m_flags & EROFS_MAP_MAPPED)) { 983 + folio_zero_segment(folio, cur, end); 984 + tight = false; 985 + } else if (map->m_flags & EROFS_MAP_FRAGMENT) { 986 + erofs_off_t fpos = offset + cur - map->m_la; 991 987 992 - if (map->m_flags & EROFS_MAP_FRAGMENT) { 993 - erofs_off_t fpos = offset + cur - map->m_la; 988 + err = z_erofs_read_fragment(inode->i_sb, folio, cur, 989 + cur + min(map->m_llen - fpos, end - cur), 990 + EROFS_I(inode)->z_fragmentoff + fpos); 991 + if (err) 992 + break; 993 + tight = false; 994 + } else { 995 + if (!f->pcl) { 996 + err = z_erofs_pcluster_begin(f); 997 + if (err) 998 + break; 999 + f->pcl->besteffort |= !ra; 1000 + } 994 1001 995 - len = min_t(unsigned int, map->m_llen - fpos, end - cur); 996 - err = z_erofs_read_fragment(inode->i_sb, &folio->page, cur, 997 - cur + len, EROFS_I(inode)->z_fragmentoff + fpos); 998 - if (err) 999 - goto out; 1000 - tight = false; 1001 - goto next_part; 1002 - } 1002 + pgs = round_down(end - 1, PAGE_SIZE); 1003 + /* 1004 + * Ensure this partial page belongs to this submit chain 1005 + * rather than other concurrent submit chains or 1006 + * noio(bypass) chains since those chains are handled 1007 + * asynchronously thus it cannot be used for inplace I/O 1008 + * or bvpage (should be processed in the strict order.) 
1009 + */ 1010 + tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED); 1011 + excl = false; 1012 + if (cur <= pgs) { 1013 + excl = (split <= 1) || tight; 1014 + cur = pgs; 1015 + } 1003 1016 1004 - if (!fe->pcl) { 1005 - err = z_erofs_pcluster_begin(fe); 1006 - if (err) 1007 - goto out; 1008 - fe->pcl->besteffort |= !ra; 1009 - } 1017 + err = z_erofs_attach_page(f, &((struct z_erofs_bvec) { 1018 + .page = folio_page(folio, pgs >> PAGE_SHIFT), 1019 + .offset = offset + pgs - map->m_la, 1020 + .end = end - pgs, }), excl); 1021 + if (err) 1022 + break; 1010 1023 1011 - /* 1012 - * Ensure the current partial folio belongs to this submit chain rather 1013 - * than other concurrent submit chains or the noio(bypass) chain since 1014 - * those chains are handled asynchronously thus the folio cannot be used 1015 - * for inplace I/O or bvpage (should be processed in a strict order.) 1016 - */ 1017 - tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); 1018 - exclusive = (!cur && ((split <= 1) || (tight && bs == fs))); 1019 - if (cur) 1020 - tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); 1021 - 1022 - err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { 1023 - .page = &folio->page, 1024 - .offset = offset - map->m_la, 1025 - .end = end, 1026 - }), exclusive); 1027 - if (err) 1028 - goto out; 1029 - 1030 - z_erofs_onlinefolio_split(folio); 1031 - if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) 1032 - fe->pcl->multibases = true; 1033 - if (fe->pcl->length < offset + end - map->m_la) { 1034 - fe->pcl->length = offset + end - map->m_la; 1035 - fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; 1036 - } 1037 - if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && 1038 - !(map->m_flags & EROFS_MAP_PARTIAL_REF) && 1039 - fe->pcl->length == map->m_llen) 1040 - fe->pcl->partial = false; 1041 - next_part: 1042 - /* shorten the remaining extent to update progress */ 1043 - map->m_llen = offset + cur - map->m_la; 1044 - map->m_flags &= ~EROFS_MAP_FULL_MAPPED; 1045 - 1046 - end = cur; 
1047 - if (end > 0) 1048 - goto repeat; 1049 - 1050 - out: 1024 + z_erofs_onlinefolio_split(folio); 1025 + if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) 1026 + f->pcl->multibases = true; 1027 + if (f->pcl->length < offset + end - map->m_la) { 1028 + f->pcl->length = offset + end - map->m_la; 1029 + f->pcl->pageofs_out = map->m_la & ~PAGE_MASK; 1030 + } 1031 + if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && 1032 + !(map->m_flags & EROFS_MAP_PARTIAL_REF) && 1033 + f->pcl->length == map->m_llen) 1034 + f->pcl->partial = false; 1035 + } 1036 + /* shorten the remaining extent to update progress */ 1037 + map->m_llen = offset + cur - map->m_la; 1038 + map->m_flags &= ~EROFS_MAP_FULL_MAPPED; 1039 + if (cur <= pgs) { 1040 + split = cur < pgs; 1041 + tight = (bs == PAGE_SIZE); 1042 + } 1043 + } while ((end = cur) > 0); 1051 1044 z_erofs_onlinefolio_end(folio, err); 1052 1045 return err; 1053 1046 } ··· 1065 1066 1066 1067 static bool z_erofs_page_is_invalidated(struct page *page) 1067 1068 { 1068 - return !page->mapping && !z_erofs_is_shortlived_page(page); 1069 + return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page); 1069 1070 } 1070 1071 1071 1072 struct z_erofs_decompress_backend { ··· 1220 1221 struct z_erofs_pcluster *pcl = be->pcl; 1221 1222 unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 1222 1223 const struct z_erofs_decompressor *decomp = 1223 - &erofs_decompressors[pcl->algorithmformat]; 1224 - int i, err2; 1224 + z_erofs_decomp[pcl->algorithmformat]; 1225 + int i, j, jtop, err2; 1225 1226 struct page *page; 1226 1227 bool overlapped; 1227 1228 ··· 1279 1280 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); 1280 1281 put_page(page); 1281 1282 } else { 1283 + /* managed folios are still left in compressed_bvecs[] */ 1282 1284 for (i = 0; i < pclusterpages; ++i) { 1283 - /* consider shortlived pages added when decompressing */ 1284 1285 page = be->compressed_pages[i]; 1285 - 1286 1286 if (!page || 1287 1287 erofs_folio_is_managed(sbi, 
page_folio(page))) 1288 1288 continue; ··· 1292 1294 if (be->compressed_pages < be->onstack_pages || 1293 1295 be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) 1294 1296 kvfree(be->compressed_pages); 1295 - z_erofs_fill_other_copies(be, err); 1296 1297 1298 + jtop = 0; 1299 + z_erofs_fill_other_copies(be, err); 1297 1300 for (i = 0; i < be->nr_pages; ++i) { 1298 1301 page = be->decompressed_pages[i]; 1299 1302 if (!page) 1300 1303 continue; 1301 1304 1302 1305 DBG_BUGON(z_erofs_page_is_invalidated(page)); 1303 - 1304 - /* recycle all individual short-lived pages */ 1305 - if (z_erofs_put_shortlivedpage(be->pagepool, page)) 1306 + if (!z_erofs_is_shortlived_page(page)) { 1307 + z_erofs_onlinefolio_end(page_folio(page), err); 1306 1308 continue; 1307 - z_erofs_onlinefolio_end(page_folio(page), err); 1309 + } 1310 + if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) { 1311 + erofs_pagepool_add(be->pagepool, page); 1312 + continue; 1313 + } 1314 + for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j) 1315 + ; 1316 + if (j >= jtop) /* this bounce page is newly detected */ 1317 + be->decompressed_pages[jtop++] = page; 1308 1318 } 1309 - 1319 + while (jtop) 1320 + erofs_pagepool_add(be->pagepool, 1321 + be->decompressed_pages[--jtop]); 1310 1322 if (be->decompressed_pages != be->onstack_pages) 1311 1323 kvfree(be->decompressed_pages); 1312 1324 ··· 1427 1419 bool tocache = false; 1428 1420 struct z_erofs_bvec zbv; 1429 1421 struct address_space *mapping; 1430 - struct page *page; 1422 + struct folio *folio; 1431 1423 int bs = i_blocksize(f->inode); 1432 1424 1433 1425 /* Except for inplace folios, the entire folio can be used for I/Os */ ··· 1437 1429 spin_lock(&pcl->obj.lockref.lock); 1438 1430 zbv = pcl->compressed_bvecs[nr]; 1439 1431 spin_unlock(&pcl->obj.lockref.lock); 1440 - if (!zbv.folio) 1432 + if (!zbv.page) 1441 1433 goto out_allocfolio; 1442 1434 1443 - bvec->bv_page = &zbv.folio->page; 1435 + bvec->bv_page = zbv.page; 1444 1436 
DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page)); 1437 + 1438 + folio = page_folio(zbv.page); 1445 1439 /* 1446 1440 * Handle preallocated cached folios. We tried to allocate such folios 1447 1441 * without triggering direct reclaim. If allocation failed, inplace 1448 1442 * file-backed folios will be used instead. 1449 1443 */ 1450 - if (zbv.folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { 1451 - zbv.folio->private = 0; 1444 + if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { 1445 + folio->private = 0; 1452 1446 tocache = true; 1453 1447 goto out_tocache; 1454 1448 } 1455 1449 1456 - mapping = READ_ONCE(zbv.folio->mapping); 1450 + mapping = READ_ONCE(folio->mapping); 1457 1451 /* 1458 1452 * File-backed folios for inplace I/Os are all locked steady, 1459 1453 * therefore it is impossible for `mapping` to be NULL. ··· 1467 1457 return; 1468 1458 } 1469 1459 1470 - folio_lock(zbv.folio); 1471 - if (zbv.folio->mapping == mc) { 1460 + folio_lock(folio); 1461 + if (folio->mapping == mc) { 1472 1462 /* 1473 1463 * The cached folio is still in managed cache but without 1474 1464 * a valid `->private` pcluster hint. Let's reconnect them. 1475 1465 */ 1476 - if (!folio_test_private(zbv.folio)) { 1477 - folio_attach_private(zbv.folio, pcl); 1466 + if (!folio_test_private(folio)) { 1467 + folio_attach_private(folio, pcl); 1478 1468 /* compressed_bvecs[] already takes a ref before */ 1479 - folio_put(zbv.folio); 1469 + folio_put(folio); 1480 1470 } 1481 1471 1482 1472 /* no need to submit if it is already up-to-date */ 1483 - if (folio_test_uptodate(zbv.folio)) { 1484 - folio_unlock(zbv.folio); 1473 + if (folio_test_uptodate(folio)) { 1474 + folio_unlock(folio); 1485 1475 bvec->bv_page = NULL; 1486 1476 } 1487 1477 return; ··· 1491 1481 * It has been truncated, so it's unsafe to reuse this one. Let's 1492 1482 * allocate a new page for compressed data. 
1493 1483 */ 1494 - DBG_BUGON(zbv.folio->mapping); 1484 + DBG_BUGON(folio->mapping); 1495 1485 tocache = true; 1496 - folio_unlock(zbv.folio); 1497 - folio_put(zbv.folio); 1486 + folio_unlock(folio); 1487 + folio_put(folio); 1498 1488 out_allocfolio: 1499 - page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); 1489 + zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); 1500 1490 spin_lock(&pcl->obj.lockref.lock); 1501 - if (pcl->compressed_bvecs[nr].folio) { 1502 - erofs_pagepool_add(&f->pagepool, page); 1491 + if (pcl->compressed_bvecs[nr].page) { 1492 + erofs_pagepool_add(&f->pagepool, zbv.page); 1503 1493 spin_unlock(&pcl->obj.lockref.lock); 1504 1494 cond_resched(); 1505 1495 goto repeat; 1506 1496 } 1507 - pcl->compressed_bvecs[nr].folio = zbv.folio = page_folio(page); 1497 + bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page; 1498 + folio = page_folio(zbv.page); 1499 + /* first mark it as a temporary shortlived folio (now 1 ref) */ 1500 + folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; 1508 1501 spin_unlock(&pcl->obj.lockref.lock); 1509 - bvec->bv_page = page; 1510 1502 out_tocache: 1511 1503 if (!tocache || bs != PAGE_SIZE || 1512 - filemap_add_folio(mc, zbv.folio, pcl->obj.index + nr, gfp)) { 1513 - /* turn into a temporary shortlived folio (1 ref) */ 1514 - zbv.folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; 1504 + filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) 1515 1505 return; 1516 - } 1517 - folio_attach_private(zbv.folio, pcl); 1506 + folio_attach_private(folio, pcl); 1518 1507 /* drop a refcount added by allocpage (then 2 refs in total here) */ 1519 - folio_put(zbv.folio); 1508 + folio_put(folio); 1520 1509 } 1521 1510 1522 1511 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, ··· 1776 1767 end = round_up(end, PAGE_SIZE); 1777 1768 } else { 1778 1769 end = round_up(map->m_la, PAGE_SIZE); 1779 - 1780 1770 if (!map->m_llen) 1781 1771 return; 1782 1772 } ··· 1783 1775 cur = map->m_la + 
map->m_llen - 1; 1784 1776 while ((cur >= end) && (cur < i_size_read(inode))) { 1785 1777 pgoff_t index = cur >> PAGE_SHIFT; 1786 - struct page *page; 1778 + struct folio *folio; 1787 1779 1788 - page = erofs_grab_cache_page_nowait(inode->i_mapping, index); 1789 - if (page) { 1790 - if (PageUptodate(page)) 1791 - unlock_page(page); 1780 + folio = erofs_grab_folio_nowait(inode->i_mapping, index); 1781 + if (!IS_ERR_OR_NULL(folio)) { 1782 + if (folio_test_uptodate(folio)) 1783 + folio_unlock(folio); 1792 1784 else 1793 - z_erofs_scan_folio(f, page_folio(page), !!rac); 1794 - put_page(page); 1785 + z_erofs_scan_folio(f, folio, !!rac); 1786 + folio_put(folio); 1795 1787 } 1796 1788 1797 1789 if (cur < PAGE_SIZE)
+2 -2
fs/erofs/zmap.c
··· 686 686 struct erofs_inode *const vi = EROFS_I(inode); 687 687 int err = 0; 688 688 689 - trace_z_erofs_map_blocks_iter_enter(inode, map, flags); 689 + trace_erofs_map_blocks_enter(inode, map, flags); 690 690 691 691 /* when trying to read beyond EOF, leave it unmapped */ 692 692 if (map->m_la >= inode->i_size) { ··· 713 713 out: 714 714 if (err) 715 715 map->m_llen = 0; 716 - trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); 716 + trace_erofs_map_blocks_exit(inode, map, flags, err); 717 717 return err; 718 718 } 719 719
+3 -29
include/trace/events/erofs.h
··· 143 143 __entry->raw) 144 144 ); 145 145 146 - DECLARE_EVENT_CLASS(erofs__map_blocks_enter, 146 + TRACE_EVENT(erofs_map_blocks_enter, 147 + 147 148 TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 148 149 unsigned int flags), 149 150 ··· 172 171 __entry->flags ? show_map_flags(__entry->flags) : "NULL") 173 172 ); 174 173 175 - DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter, 176 - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 177 - unsigned flags), 174 + TRACE_EVENT(erofs_map_blocks_exit, 178 175 179 - TP_ARGS(inode, map, flags) 180 - ); 181 - 182 - DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, 183 - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 184 - unsigned int flags), 185 - 186 - TP_ARGS(inode, map, flags) 187 - ); 188 - 189 - DECLARE_EVENT_CLASS(erofs__map_blocks_exit, 190 176 TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 191 177 unsigned int flags, int ret), 192 178 ··· 209 221 __entry->flags ? show_map_flags(__entry->flags) : "NULL", 210 222 __entry->la, __entry->pa, __entry->llen, __entry->plen, 211 223 show_mflags(__entry->mflags), __entry->ret) 212 - ); 213 - 214 - DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit, 215 - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 216 - unsigned flags, int ret), 217 - 218 - TP_ARGS(inode, map, flags, ret) 219 - ); 220 - 221 - DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, 222 - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 223 - unsigned int flags, int ret), 224 - 225 - TP_ARGS(inode, map, flags, ret) 226 224 ); 227 225 228 226 TRACE_EVENT(erofs_destroy_inode,