Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 update from Ted Ts'o:
"Fixed regressions (two stability regressions and a performance
regression) introduced during the 3.10-rc1 merge window.

Also included is a bug fix related to allocating blocks after
resizing an ext3 file system when using the ext4 file system driver"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
jbd,jbd2: fix oops in jbd2_journal_put_journal_head()
ext4: revert "ext4: use io_end for multiple bios"
ext4: limit group search loop for non-extent files
ext4: fix fio regression

+120 -147
+2 -6
fs/ext4/ext4.h
··· 209 209 ssize_t size; /* size of the extent */ 210 210 struct kiocb *iocb; /* iocb struct for AIO */ 211 211 int result; /* error value for AIO */ 212 - atomic_t count; /* reference counter */ 213 212 } ext4_io_end_t; 214 213 215 214 struct ext4_io_submit { ··· 2650 2651 2651 2652 /* page-io.c */ 2652 2653 extern int __init ext4_init_pageio(void); 2654 + extern void ext4_add_complete_io(ext4_io_end_t *io_end); 2653 2655 extern void ext4_exit_pageio(void); 2654 2656 extern void ext4_ioend_shutdown(struct inode *); 2657 + extern void ext4_free_io_end(ext4_io_end_t *io); 2655 2658 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); 2656 - extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end); 2657 - extern int ext4_put_io_end(ext4_io_end_t *io_end); 2658 - extern void ext4_put_io_end_defer(ext4_io_end_t *io_end); 2659 - extern void ext4_io_submit_init(struct ext4_io_submit *io, 2660 - struct writeback_control *wbc); 2661 2659 extern void ext4_end_io_work(struct work_struct *work); 2662 2660 extern void ext4_io_submit(struct ext4_io_submit *io); 2663 2661 extern int ext4_bio_write_page(struct ext4_io_submit *io,
+5 -4
fs/ext4/extents.c
··· 3642 3642 { 3643 3643 struct extent_status es; 3644 3644 3645 - ext4_es_find_delayed_extent(inode, lblk_start, &es); 3645 + ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); 3646 3646 if (es.es_len == 0) 3647 3647 return 0; /* there is no delay extent in this tree */ 3648 3648 else if (es.es_lblk <= lblk_start && ··· 4608 4608 struct extent_status es; 4609 4609 ext4_lblk_t block, next_del; 4610 4610 4611 - ext4_es_find_delayed_extent(inode, newes->es_lblk, &es); 4612 - 4613 4611 if (newes->es_pblk == 0) { 4612 + ext4_es_find_delayed_extent_range(inode, newes->es_lblk, 4613 + newes->es_lblk + newes->es_len - 1, &es); 4614 + 4614 4615 /* 4615 4616 * No extent in extent-tree contains block @newes->es_pblk, 4616 4617 * then the block may stay in 1)a hole or 2)delayed-extent. ··· 4631 4630 } 4632 4631 4633 4632 block = newes->es_lblk + newes->es_len; 4634 - ext4_es_find_delayed_extent(inode, block, &es); 4633 + ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); 4635 4634 if (es.es_len == 0) 4636 4635 next_del = EXT_MAX_BLOCKS; 4637 4636 else
+12 -5
fs/ext4/extents_status.c
··· 232 232 } 233 233 234 234 /* 235 - * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk 236 - * if it exists, otherwise, the next extent after @es->lblk. 235 + * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering 236 + * @es->lblk if it exists, otherwise, the next extent after @es->lblk. 237 237 * 238 238 * @inode: the inode which owns delayed extents 239 239 * @lblk: the offset where we start to search 240 + * @end: the offset where we stop to search 240 241 * @es: delayed extent that we found 241 242 */ 242 - void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 243 + void ext4_es_find_delayed_extent_range(struct inode *inode, 244 + ext4_lblk_t lblk, ext4_lblk_t end, 243 245 struct extent_status *es) 244 246 { 245 247 struct ext4_es_tree *tree = NULL; ··· 249 247 struct rb_node *node; 250 248 251 249 BUG_ON(es == NULL); 252 - trace_ext4_es_find_delayed_extent_enter(inode, lblk); 250 + BUG_ON(end < lblk); 251 + trace_ext4_es_find_delayed_extent_range_enter(inode, lblk); 253 252 254 253 read_lock(&EXT4_I(inode)->i_es_lock); 255 254 tree = &EXT4_I(inode)->i_es_tree; ··· 273 270 if (es1 && !ext4_es_is_delayed(es1)) { 274 271 while ((node = rb_next(&es1->rb_node)) != NULL) { 275 272 es1 = rb_entry(node, struct extent_status, rb_node); 273 + if (es1->es_lblk > end) { 274 + es1 = NULL; 275 + break; 276 + } 276 277 if (ext4_es_is_delayed(es1)) 277 278 break; 278 279 } ··· 292 285 read_unlock(&EXT4_I(inode)->i_es_lock); 293 286 294 287 ext4_es_lru_add(inode); 295 - trace_ext4_es_find_delayed_extent_exit(inode, es); 288 + trace_ext4_es_find_delayed_extent_range_exit(inode, es); 296 289 } 297 290 298 291 static struct extent_status *
+2 -1
fs/ext4/extents_status.h
··· 62 62 unsigned long long status); 63 63 extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 64 64 ext4_lblk_t len); 65 - extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 65 + extern void ext4_es_find_delayed_extent_range(struct inode *inode, 66 + ext4_lblk_t lblk, ext4_lblk_t end, 66 67 struct extent_status *es); 67 68 extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, 68 69 struct extent_status *es);
+2 -2
fs/ext4/file.c
··· 465 465 * If there is a delay extent at this offset, 466 466 * it will be as a data. 467 467 */ 468 - ext4_es_find_delayed_extent(inode, last, &es); 468 + ext4_es_find_delayed_extent_range(inode, last, last, &es); 469 469 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 470 470 if (last != start) 471 471 dataoff = last << blkbits; ··· 548 548 * If there is a delay extent at this offset, 549 549 * we will skip this extent. 550 550 */ 551 - ext4_es_find_delayed_extent(inode, last, &es); 551 + ext4_es_find_delayed_extent_range(inode, last, last, &es); 552 552 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 553 553 last = es.es_lblk + es.es_len; 554 554 holeoff = last << blkbits;
+39 -48
fs/ext4/inode.c
··· 1488 1488 struct ext4_io_submit io_submit; 1489 1489 1490 1490 BUG_ON(mpd->next_page <= mpd->first_page); 1491 - ext4_io_submit_init(&io_submit, mpd->wbc); 1492 - io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 1493 - if (!io_submit.io_end) 1494 - return -ENOMEM; 1491 + memset(&io_submit, 0, sizeof(io_submit)); 1495 1492 /* 1496 1493 * We need to start from the first_page to the next_page - 1 1497 1494 * to make sure we also write the mapped dirty buffer_heads. ··· 1576 1579 pagevec_release(&pvec); 1577 1580 } 1578 1581 ext4_io_submit(&io_submit); 1579 - /* Drop io_end reference we got from init */ 1580 - ext4_put_io_end_defer(io_submit.io_end); 1581 1582 return ret; 1582 1583 } 1583 1584 ··· 2234 2239 */ 2235 2240 return __ext4_journalled_writepage(page, len); 2236 2241 2237 - ext4_io_submit_init(&io_submit, wbc); 2238 - io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 2239 - if (!io_submit.io_end) { 2240 - redirty_page_for_writepage(wbc, page); 2241 - return -ENOMEM; 2242 - } 2242 + memset(&io_submit, 0, sizeof(io_submit)); 2243 2243 ret = ext4_bio_write_page(&io_submit, page, len, wbc); 2244 2244 ext4_io_submit(&io_submit); 2245 - /* Drop io_end reference we got from init */ 2246 - ext4_put_io_end_defer(io_submit.io_end); 2247 2245 return ret; 2248 2246 } 2249 2247 ··· 3067 3079 struct inode *inode = file_inode(iocb->ki_filp); 3068 3080 ext4_io_end_t *io_end = iocb->private; 3069 3081 3070 - /* if not async direct IO just return */ 3071 - if (!io_end) { 3072 - inode_dio_done(inode); 3073 - if (is_async) 3074 - aio_complete(iocb, ret, 0); 3075 - return; 3076 - } 3082 + /* if not async direct IO or dio with 0 bytes write, just return */ 3083 + if (!io_end || !size) 3084 + goto out; 3077 3085 3078 3086 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3079 3087 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", ··· 3077 3093 size); 3078 3094 3079 3095 iocb->private = NULL; 3096 + 3097 + /* if not aio dio with unwritten extents, just free io and return */ 
3098 + if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 3099 + ext4_free_io_end(io_end); 3100 + out: 3101 + inode_dio_done(inode); 3102 + if (is_async) 3103 + aio_complete(iocb, ret, 0); 3104 + return; 3105 + } 3106 + 3080 3107 io_end->offset = offset; 3081 3108 io_end->size = size; 3082 3109 if (is_async) { 3083 3110 io_end->iocb = iocb; 3084 3111 io_end->result = ret; 3085 3112 } 3086 - ext4_put_io_end_defer(io_end); 3113 + 3114 + ext4_add_complete_io(io_end); 3087 3115 } 3088 3116 3089 3117 /* ··· 3129 3133 get_block_t *get_block_func = NULL; 3130 3134 int dio_flags = 0; 3131 3135 loff_t final_size = offset + count; 3132 - ext4_io_end_t *io_end = NULL; 3133 3136 3134 3137 /* Use the old path for reads and writes beyond i_size. */ 3135 3138 if (rw != WRITE || final_size > inode->i_size) ··· 3167 3172 iocb->private = NULL; 3168 3173 ext4_inode_aio_set(inode, NULL); 3169 3174 if (!is_sync_kiocb(iocb)) { 3170 - io_end = ext4_init_io_end(inode, GFP_NOFS); 3175 + ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); 3171 3176 if (!io_end) { 3172 3177 ret = -ENOMEM; 3173 3178 goto retake_lock; 3174 3179 } 3175 3180 io_end->flag |= EXT4_IO_END_DIRECT; 3176 - /* 3177 - * Grab reference for DIO. Will be dropped in ext4_end_io_dio() 3178 - */ 3179 - iocb->private = ext4_get_io_end(io_end); 3181 + iocb->private = io_end; 3180 3182 /* 3181 3183 * we save the io structure for current async direct 3182 3184 * IO, so that later ext4_map_blocks() could flag the ··· 3197 3205 NULL, 3198 3206 dio_flags); 3199 3207 3200 - /* 3201 - * Put our reference to io_end. This can free the io_end structure e.g. 3202 - * in sync IO case or in case of error. It can even perform extent 3203 - * conversion if all bios we submitted finished before we got here. 3204 - * Note that in that case iocb->private can be already set to NULL 3205 - * here. 
3206 - */ 3207 - if (io_end) { 3208 + if (iocb->private) 3208 3209 ext4_inode_aio_set(inode, NULL); 3209 - ext4_put_io_end(io_end); 3210 - /* 3211 - * In case of error or no write ext4_end_io_dio() was not 3212 - * called so we have to put iocb's reference. 3213 - */ 3214 - if (ret <= 0 && ret != -EIOCBQUEUED) { 3215 - WARN_ON(iocb->private != io_end); 3216 - ext4_put_io_end(io_end); 3217 - iocb->private = NULL; 3218 - } 3219 - } 3220 - if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 3210 + /* 3211 + * The io_end structure takes a reference to the inode, that 3212 + * structure needs to be destroyed and the reference to the 3213 + * inode need to be dropped, when IO is complete, even with 0 3214 + * byte write, or failed. 3215 + * 3216 + * In the successful AIO DIO case, the io_end structure will 3217 + * be destroyed and the reference to the inode will be dropped 3218 + * after the end_io call back function is called. 3219 + * 3220 + * In the case there is 0 byte write, or error case, since VFS 3221 + * direct IO won't invoke the end_io call back function, we 3222 + * need to free the end_io structure here. 3223 + */ 3224 + if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 3225 + ext4_free_io_end(iocb->private); 3226 + iocb->private = NULL; 3227 + } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 3221 3228 EXT4_STATE_DIO_UNWRITTEN)) { 3222 3229 int err; 3223 3230 /*
+5 -1
fs/ext4/mballoc.c
··· 2105 2105 group = ac->ac_g_ex.fe_group; 2106 2106 2107 2107 for (i = 0; i < ngroups; group++, i++) { 2108 - if (group == ngroups) 2108 + /* 2109 + * Artificially restricted ngroups for non-extent 2110 + * files makes group > ngroups possible on first loop. 2111 + */ 2112 + if (group >= ngroups) 2109 2113 group = 0; 2110 2114 2111 2115 /* This now checks without needing the buddy page */
+45 -76
fs/ext4/page-io.c
··· 62 62 cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); 63 63 } 64 64 65 - static void ext4_release_io_end(ext4_io_end_t *io_end) 65 + void ext4_free_io_end(ext4_io_end_t *io) 66 66 { 67 - BUG_ON(!list_empty(&io_end->list)); 68 - BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); 67 + BUG_ON(!io); 68 + BUG_ON(!list_empty(&io->list)); 69 + BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN); 69 70 70 - if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count)) 71 - wake_up_all(ext4_ioend_wq(io_end->inode)); 72 - if (io_end->flag & EXT4_IO_END_DIRECT) 73 - inode_dio_done(io_end->inode); 74 - if (io_end->iocb) 75 - aio_complete(io_end->iocb, io_end->result, 0); 76 - kmem_cache_free(io_end_cachep, io_end); 77 - } 78 - 79 - static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end) 80 - { 81 - struct inode *inode = io_end->inode; 82 - 83 - io_end->flag &= ~EXT4_IO_END_UNWRITTEN; 84 - /* Wake up anyone waiting on unwritten extent conversion */ 85 - if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) 86 - wake_up_all(ext4_ioend_wq(inode)); 71 + if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count)) 72 + wake_up_all(ext4_ioend_wq(io->inode)); 73 + kmem_cache_free(io_end_cachep, io); 87 74 } 88 75 89 76 /* check a range of space and convert unwritten extents to written. */ ··· 93 106 "(inode %lu, offset %llu, size %zd, error %d)", 94 107 inode->i_ino, offset, size, ret); 95 108 } 96 - ext4_clear_io_unwritten_flag(io); 97 - ext4_release_io_end(io); 109 + /* Wake up anyone waiting on unwritten extent conversion */ 110 + if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) 111 + wake_up_all(ext4_ioend_wq(inode)); 112 + if (io->flag & EXT4_IO_END_DIRECT) 113 + inode_dio_done(inode); 114 + if (io->iocb) 115 + aio_complete(io->iocb, io->result, 0); 98 116 return ret; 99 117 } 100 118 ··· 130 138 } 131 139 132 140 /* Add the io_end to per-inode completed end_io list. 
*/ 133 - static void ext4_add_complete_io(ext4_io_end_t *io_end) 141 + void ext4_add_complete_io(ext4_io_end_t *io_end) 134 142 { 135 143 struct ext4_inode_info *ei = EXT4_I(io_end->inode); 136 144 struct workqueue_struct *wq; ··· 167 175 err = ext4_end_io(io); 168 176 if (unlikely(!ret && err)) 169 177 ret = err; 178 + io->flag &= ~EXT4_IO_END_UNWRITTEN; 179 + ext4_free_io_end(io); 170 180 } 171 181 return ret; 172 182 } ··· 200 206 atomic_inc(&EXT4_I(inode)->i_ioend_count); 201 207 io->inode = inode; 202 208 INIT_LIST_HEAD(&io->list); 203 - atomic_set(&io->count, 1); 204 209 } 205 210 return io; 206 - } 207 - 208 - void ext4_put_io_end_defer(ext4_io_end_t *io_end) 209 - { 210 - if (atomic_dec_and_test(&io_end->count)) { 211 - if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) { 212 - ext4_release_io_end(io_end); 213 - return; 214 - } 215 - ext4_add_complete_io(io_end); 216 - } 217 - } 218 - 219 - int ext4_put_io_end(ext4_io_end_t *io_end) 220 - { 221 - int err = 0; 222 - 223 - if (atomic_dec_and_test(&io_end->count)) { 224 - if (io_end->flag & EXT4_IO_END_UNWRITTEN) { 225 - err = ext4_convert_unwritten_extents(io_end->inode, 226 - io_end->offset, io_end->size); 227 - ext4_clear_io_unwritten_flag(io_end); 228 - } 229 - ext4_release_io_end(io_end); 230 - } 231 - return err; 232 - } 233 - 234 - ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) 235 - { 236 - atomic_inc(&io_end->count); 237 - return io_end; 238 211 } 239 212 240 213 /* ··· 286 325 bi_sector >> (inode->i_blkbits - 9)); 287 326 } 288 327 289 - ext4_put_io_end_defer(io_end); 328 + if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 329 + ext4_free_io_end(io_end); 330 + return; 331 + } 332 + 333 + ext4_add_complete_io(io_end); 290 334 } 291 335 292 336 void ext4_io_submit(struct ext4_io_submit *io) ··· 305 339 bio_put(io->io_bio); 306 340 } 307 341 io->io_bio = NULL; 308 - } 309 - 310 - void ext4_io_submit_init(struct ext4_io_submit *io, 311 - struct writeback_control *wbc) 312 - { 313 - 
io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); 314 - io->io_bio = NULL; 342 + io->io_op = 0; 315 343 io->io_end = NULL; 316 344 } 317 345 318 - static int io_submit_init_bio(struct ext4_io_submit *io, 319 - struct buffer_head *bh) 346 + static int io_submit_init(struct ext4_io_submit *io, 347 + struct inode *inode, 348 + struct writeback_control *wbc, 349 + struct buffer_head *bh) 320 350 { 351 + ext4_io_end_t *io_end; 352 + struct page *page = bh->b_page; 321 353 int nvecs = bio_get_nr_vecs(bh->b_bdev); 322 354 struct bio *bio; 323 355 356 + io_end = ext4_init_io_end(inode, GFP_NOFS); 357 + if (!io_end) 358 + return -ENOMEM; 324 359 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); 325 360 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 326 361 bio->bi_bdev = bh->b_bdev; 362 + bio->bi_private = io->io_end = io_end; 327 363 bio->bi_end_io = ext4_end_bio; 328 - bio->bi_private = ext4_get_io_end(io->io_end); 329 - if (!io->io_end->size) 330 - io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT) 331 - + bh_offset(bh); 364 + 365 + io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); 366 + 332 367 io->io_bio = bio; 368 + io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? 
WRITE_SYNC : WRITE); 333 369 io->io_next_block = bh->b_blocknr; 334 370 return 0; 335 371 } 336 372 337 373 static int io_submit_add_bh(struct ext4_io_submit *io, 338 374 struct inode *inode, 375 + struct writeback_control *wbc, 339 376 struct buffer_head *bh) 340 377 { 341 378 ext4_io_end_t *io_end; ··· 349 380 ext4_io_submit(io); 350 381 } 351 382 if (io->io_bio == NULL) { 352 - ret = io_submit_init_bio(io, bh); 383 + ret = io_submit_init(io, inode, wbc, bh); 353 384 if (ret) 354 385 return ret; 355 386 } 356 - ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); 357 - if (ret != bh->b_size) 358 - goto submit_and_retry; 359 387 io_end = io->io_end; 360 388 if (test_clear_buffer_uninit(bh)) 361 389 ext4_set_io_unwritten_flag(inode, io_end); 362 - io_end->size += bh->b_size; 390 + io->io_end->size += bh->b_size; 363 391 io->io_next_block++; 392 + ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); 393 + if (ret != bh->b_size) 394 + goto submit_and_retry; 364 395 return 0; 365 396 } 366 397 ··· 432 463 do { 433 464 if (!buffer_async_write(bh)) 434 465 continue; 435 - ret = io_submit_add_bh(io, inode, bh); 466 + ret = io_submit_add_bh(io, inode, wbc, bh); 436 467 if (ret) { 437 468 /* 438 469 * We only get here on ENOMEM. Not much else
+6 -2
include/linux/journal-head.h
··· 30 30 31 31 /* 32 32 * Journalling list for this buffer [jbd_lock_bh_state()] 33 + * NOTE: We *cannot* combine this with b_modified into a bitfield 34 + * as gcc would then (which the C standard allows but which is 35 + * very unuseful) make 64-bit accesses to the bitfield and clobber 36 + * b_jcount if its update races with bitfield modification. 33 37 */ 34 - unsigned b_jlist:4; 38 + unsigned b_jlist; 35 39 36 40 /* 37 41 * This flag signals the buffer has been modified by 38 42 * the currently running transaction 39 43 * [jbd_lock_bh_state()] 40 44 */ 41 - unsigned b_modified:1; 45 + unsigned b_modified; 42 46 43 47 /* 44 48 * Copy of the buffer data frozen for writing to the log.
+2 -2
include/trace/events/ext4.h
··· 2139 2139 __entry->lblk, __entry->len) 2140 2140 ); 2141 2141 2142 - TRACE_EVENT(ext4_es_find_delayed_extent_enter, 2142 + TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, 2143 2143 TP_PROTO(struct inode *inode, ext4_lblk_t lblk), 2144 2144 2145 2145 TP_ARGS(inode, lblk), ··· 2161 2161 (unsigned long) __entry->ino, __entry->lblk) 2162 2162 ); 2163 2163 2164 - TRACE_EVENT(ext4_es_find_delayed_extent_exit, 2164 + TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, 2165 2165 TP_PROTO(struct inode *inode, struct extent_status *es), 2166 2166 2167 2167 TP_ARGS(inode, es),