Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (43 commits)
ext4: fix a BUG in mb_mark_used during trim.
ext4: unused variables cleanup in fs/ext4/extents.c
ext4: remove redundant set_buffer_mapped() in ext4_da_get_block_prep()
ext4: add more tracepoints and use dev_t in the trace buffer
ext4: don't kfree uninitialized s_group_info members
ext4: add missing space in printk's in __ext4_grp_locked_error()
ext4: add FITRIM to compat_ioctl.
ext4: handle errors in ext4_clear_blocks()
ext4: unify the ext4_handle_release_buffer() api
ext4: handle errors in ext4_rename
jbd2: add COW fields to struct jbd2_journal_handle
jbd2: add the b_cow_tid field to journal_head struct
ext4: Initialize fsync transaction ids in ext4_new_inode()
ext4: Use single thread to perform DIO unwritten conversion
ext4: optimize ext4_bio_write_page() when no extent conversion is needed
ext4: skip orphan cleanup if fs has unknown ROCOMPAT features
ext4: use the nblocks arg to ext4_truncate_restart_trans()
ext4: fix missing iput of root inode for some mount error paths
ext4: make FIEMAP and delayed allocation play well together
ext4: suppress verbose debugging information if malloc-debug is off
...

Fix up conflicts in fs/ext4/super.c due to workqueue changes

+1306 -598
+10 -3
Documentation/ABI/testing/sysfs-fs-ext4
··· 48 48 will have its blocks allocated out of its own unique 49 49 preallocation pool. 50 50 51 - What: /sys/fs/ext4/<disk>/inode_readahead 51 + What: /sys/fs/ext4/<disk>/inode_readahead_blks 52 52 Date: March 2008 53 53 Contact: "Theodore Ts'o" <tytso@mit.edu> 54 54 Description: ··· 85 85 Contact: "Theodore Ts'o" <tytso@mit.edu> 86 86 Description: 87 87 Tuning parameter which (if non-zero) controls the goal 88 - inode used by the inode allocator in p0reference to 89 - all other allocation hueristics. This is intended for 88 + inode used by the inode allocator in preference to 89 + all other allocation heuristics. This is intended for 90 90 debugging use only, and should be 0 on production 91 91 systems. 92 + 93 + What: /sys/fs/ext4/<disk>/max_writeback_mb_bump 94 + Date: September 2009 95 + Contact: "Theodore Ts'o" <tytso@mit.edu> 96 + Description: 97 + The maximum number of megabytes the writeback code will 98 + try to write out before moving on to another inode.
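The tunables documented above are plain text files under sysfs. As a minimal sketch of how an application might read one of them (the device name "sda1" is illustrative; substitute the block device backing your ext4 mount):

/*
 * Minimal sketch: read an ext4 sysfs tunable from userspace.
 * The path assumes an ext4 filesystem on /dev/sda1.
 */
#include <stdio.h>

int main(void)
{
	unsigned long val;
	FILE *f = fopen("/sys/fs/ext4/sda1/inode_readahead_blks", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu", &val) == 1)
		printf("inode_readahead_blks = %lu\n", val);
	fclose(f);
	return 0;
}

Writing a new value back works the same way for the tunables that are not read-only.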
+206 -1
Documentation/filesystems/ext4.txt
··· 367 367 minimizes the impact on the system performance 368 368 while the file system's inode table is being initialized. 369 369 370 - discard Controls whether ext4 should issue discard/TRIM 370 + discard Controls whether ext4 should issue discard/TRIM 371 371 nodiscard(*) commands to the underlying block device when 372 372 blocks are freed. This is useful for SSD devices 373 373 and sparse/thinly-provisioned LUNs, but it is off 374 374 by default until sufficient testing has been done. 375 + 376 + nouid32 Disables 32-bit UIDs and GIDs. This is for 377 + interoperability with older kernels which only 378 + store and expect 16-bit values. 379 + 380 + resize Allows resizing the filesystem to the end of the last 381 + existing block group; further resizing has to be done 382 + with resize2fs, either online or offline. It can only 383 + be used in conjunction with remount. 384 + 385 + block_validity This option enables/disables the in-kernel 386 + noblock_validity facility for tracking filesystem metadata blocks 387 + within internal data structures. This allows the multi- 388 + block allocator and other routines to quickly locate 389 + extents which might overlap with filesystem metadata 390 + blocks. This option is intended for debugging 391 + purposes and, since it negatively affects 392 + performance, it is off by default. 393 + 394 + dioread_lock Controls whether or not ext4 should use DIO read 395 + dioread_nolock locking. If the dioread_nolock option is specified, 396 + ext4 will allocate an uninitialized extent before buffer 397 + write and convert the extent to initialized after the IO 398 + completes. This approach allows the ext4 code to avoid 399 + using the inode mutex, which improves scalability on 400 + high-speed storage. However, this does not work with the 401 + nobh option, and the mount will fail. Nor does it work 402 + with data journaling; the dioread_nolock option will be 403 + ignored with a kernel warning. Note that the dioread_nolock 404 + code path is only used for extent-based files. 405 + Because of these restrictions it is off by 406 + default (i.e. dioread_lock). 407 + 408 + i_version Enable 64-bit inode version support. This option is 409 + off by default. 375 410 376 411 Data Mode 377 412 ========= ··· 434 399 needs to be read from and written to disk at the same time where it 435 400 outperforms all other modes. Currently ext4 does not have delayed 436 401 allocation support if this data journalling mode is selected. 402 + 403 + /proc entries 404 + ============= 405 + 406 + Information about mounted ext4 file systems can be found in 407 + /proc/fs/ext4. Each mounted filesystem will have a directory in 408 + /proc/fs/ext4 based on its device name (i.e., /proc/fs/ext4/hdc or 409 + /proc/fs/ext4/dm-0). The files in each per-device directory are shown 410 + in the table below. 411 + 412 + Files in /proc/fs/ext4/<devname> 413 + .............................................................................. 414 + File Content 415 + mb_groups details of multiblock allocator buddy cache of free blocks 416 + .............................................................................. 417 + 418 + /sys entries 419 + ============ 420 + 421 + Information about mounted ext4 file systems can be found in 422 + /sys/fs/ext4. Each mounted filesystem will have a directory in 423 + /sys/fs/ext4 based on its device name (i.e., /sys/fs/ext4/hdc or 424 + /sys/fs/ext4/dm-0). The files in each per-device directory are shown 425 + in the table below. 
426 + 427 + Files in /sys/fs/ext4/<devname> 428 + (see also Documentation/ABI/testing/sysfs-fs-ext4) 429 + .............................................................................. 430 + File Content 431 + 432 + delayed_allocation_blocks This file is read-only and shows the number of 433 + blocks that are dirty in the page cache, but 434 + which do not have their location in the 435 + filesystem allocated yet. 436 + 437 + inode_goal Tuning parameter which (if non-zero) controls 438 + the goal inode used by the inode allocator in 439 + preference to all other allocation heuristics. 440 + This is intended for debugging use only, and 441 + should be 0 on production systems. 442 + 443 + inode_readahead_blks Tuning parameter which controls the maximum 444 + number of inode table blocks that ext4's inode 445 + table readahead algorithm will pre-read into 446 + the buffer cache 447 + 448 + lifetime_write_kbytes This file is read-only and shows the number of 449 + kilobytes of data that have been written to this 450 + filesystem since it was created. 451 + 452 + max_writeback_mb_bump The maximum number of megabytes the writeback 453 + code will try to write out before moving on to 454 + another inode. 455 + 456 + mb_group_prealloc The multiblock allocator will round up allocation 457 + requests to a multiple of this tuning parameter if 458 + the stripe size is not set in the ext4 superblock 459 + 460 + mb_max_to_scan The maximum number of extents the multiblock 461 + allocator will search to find the best extent 462 + 463 + mb_min_to_scan The minimum number of extents the multiblock 464 + allocator will search to find the best extent 465 + 466 + mb_order2_req Tuning parameter which controls the minimum size 467 + for requests (as a power of 2) where the buddy 468 + cache is used 469 + 470 + mb_stats Controls whether the multiblock allocator should 471 + collect statistics, which are shown during the 472 + unmount. 1 means to collect statistics, 0 means 473 + not to collect statistics 474 + 475 + mb_stream_req Files which have fewer blocks than this tunable 476 + parameter will have their blocks allocated out 477 + of a block group specific preallocation pool, so 478 + that small files are packed closely together. 479 + Each large file will have its blocks allocated 480 + out of its own unique preallocation pool. 481 + 482 + session_write_kbytes This file is read-only and shows the number of 483 + kilobytes of data that have been written to this 484 + filesystem since it was mounted. 485 + .............................................................................. 486 + 487 + Ioctls 488 + ====== 489 + 490 + There is some Ext4 specific functionality which can be accessed by applications 491 + through the system call interfaces. The list of all Ext4 specific ioctls is 492 + shown in the table below. 493 + 494 + Table of Ext4 specific ioctls 495 + .............................................................................. 496 + Ioctl Description 497 + EXT4_IOC_GETFLAGS Get additional attributes associated with the inode. 498 + The ioctl argument is an integer bitfield, with 499 + bit values described in ext4.h. This ioctl is an 500 + alias for FS_IOC_GETFLAGS. 501 + 502 + EXT4_IOC_SETFLAGS Set additional attributes associated with the inode. 503 + The ioctl argument is an integer bitfield, with 504 + bit values described in ext4.h. This ioctl is an 505 + alias for FS_IOC_SETFLAGS. 
506 + 507 + EXT4_IOC_GETVERSION 508 + EXT4_IOC_GETVERSION_OLD 509 + Get the inode i_generation number stored for 510 + each inode. The i_generation number is normally 511 + changed only when a new inode is created, and it is 512 + particularly useful for network filesystems. The 513 + '_OLD' version of this ioctl is an alias for 514 + FS_IOC_GETVERSION. 515 + 516 + EXT4_IOC_SETVERSION 517 + EXT4_IOC_SETVERSION_OLD 518 + Set the inode i_generation number stored for 519 + each inode. The '_OLD' version of this ioctl 520 + is an alias for FS_IOC_SETVERSION. 521 + 522 + EXT4_IOC_GROUP_EXTEND This ioctl has the same purpose as the resize 523 + mount option. It allows resizing the filesystem 524 + to the end of the last existing block group; 525 + further resizing has to be done with resize2fs, 526 + either online or offline. The argument points 527 + to the unsigned long number representing the 528 + filesystem's new block count. 529 + 530 + EXT4_IOC_MOVE_EXT Move the block extents from orig_fd (the one 531 + this ioctl is pointing to) to the donor_fd (the 532 + one specified in the move_extent structure passed 533 + as an argument to this ioctl). Then, exchange 534 + inode metadata between orig_fd and donor_fd. 535 + This is especially useful for online 536 + defragmentation, because the allocator has the 537 + opportunity to allocate the moved blocks better, 538 + ideally into one contiguous extent. 539 + 540 + EXT4_IOC_GROUP_ADD Add a new group descriptor to an existing or 541 + new group descriptor block. The new group 542 + descriptor is described by the ext4_new_group_input 543 + structure, which is passed as an argument to 544 + this ioctl. This is especially useful in 545 + conjunction with EXT4_IOC_GROUP_EXTEND, 546 + which allows online resize of the filesystem 547 + to the end of the last existing block group. 548 + Those two ioctls combined are used by the userspace 549 + online resize tool (e.g. resize2fs). 550 + 551 + EXT4_IOC_MIGRATE This ioctl operates on the filesystem itself. 552 + It converts (migrates) an ext3 indirect-block-mapped 553 + inode to an ext4 extent-mapped inode by walking 554 + through the indirect block mapping of the original 555 + inode and converting contiguous block ranges 556 + into ext4 extents of the temporary inode. Then, 557 + the inodes are swapped. This ioctl might help when 558 + migrating from an ext3 to an ext4 filesystem; however, 559 + the suggestion is to create a fresh ext4 filesystem 560 + and copy the data from a backup. Note that the 561 + filesystem has to support extents for this ioctl 562 + to work. 563 + 564 + EXT4_IOC_ALLOC_DA_BLKS Force all of the delayed allocated blocks to be 565 + allocated to preserve application-expected ext3 566 + behaviour. Note that this will also start 567 + triggering a write of the data blocks, but this 568 + behaviour may change in the future as it is 569 + not necessary and has been done this way only 570 + for the sake of simplicity. 571 + .............................................................................. 437 572 438 573 References 439 574 ==========
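Since the table above notes that EXT4_IOC_GETFLAGS is an alias for FS_IOC_GETFLAGS, a caller can use the generic name from <linux/fs.h>. A minimal sketch follows; it assumes the kernel's convention of copying the flag bits out as an int, which is what ext4's handler does with put_user():

/*
 * Minimal sketch: query a file's attribute flags via
 * FS_IOC_GETFLAGS (EXT4_IOC_GETFLAGS is an alias for it).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd, flags;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}
	printf("flags = 0x%x%s\n", flags,
	       (flags & FS_EXTENT_FL) ? " (extent-mapped)" : "");
	close(fd);
	return 0;
}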
+3
fs/ext4/balloc.c
··· 21 21 #include "ext4_jbd2.h" 22 22 #include "mballoc.h" 23 23 24 + #include <trace/events/ext4.h> 25 + 24 26 /* 25 27 * balloc.c contains the blocks allocation and deallocation routines 26 28 */ ··· 344 342 * We do it here so the bitmap uptodate bit 345 343 * get set with buffer lock held. 346 344 */ 345 + trace_ext4_read_block_bitmap_load(sb, block_group); 347 346 set_bitmap_uptodate(bh); 348 347 if (bh_submit_read(bh) < 0) { 349 348 put_bh(bh);
-7
fs/ext4/ext4_jbd2.h
··· 202 202 return 1; 203 203 } 204 204 205 - static inline void ext4_journal_release_buffer(handle_t *handle, 206 - struct buffer_head *bh) 207 - { 208 - if (ext4_handle_valid(handle)) 209 - jbd2_journal_release_buffer(handle, bh); 210 - } 211 - 212 205 static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks) 213 206 { 214 207 return ext4_journal_start_sb(inode->i_sb, nblocks);
+165 -48
fs/ext4/extents.c
··· 44 44 #include "ext4_jbd2.h" 45 45 #include "ext4_extents.h" 46 46 47 + #include <trace/events/ext4.h> 48 + 47 49 static int ext4_ext_truncate_extend_restart(handle_t *handle, 48 50 struct inode *inode, 49 51 int needed) ··· 666 664 if (unlikely(!bh)) 667 665 goto err; 668 666 if (!bh_uptodate_or_lock(bh)) { 667 + trace_ext4_ext_load_extent(inode, block, 668 + path[ppos].p_block); 669 669 if (bh_submit_read(bh) < 0) { 670 670 put_bh(bh); 671 671 goto err; ··· 1038 1034 for (i = 0; i < depth; i++) { 1039 1035 if (!ablocks[i]) 1040 1036 continue; 1041 - ext4_free_blocks(handle, inode, 0, ablocks[i], 1, 1037 + ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1042 1038 EXT4_FREE_BLOCKS_METADATA); 1043 1039 } 1044 1040 } ··· 2063 2059 if (err) 2064 2060 return err; 2065 2061 ext_debug("index is empty, remove it, free block %llu\n", leaf); 2066 - ext4_free_blocks(handle, inode, 0, leaf, 1, 2062 + ext4_free_blocks(handle, inode, NULL, leaf, 1, 2067 2063 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2068 2064 return err; 2069 2065 } ··· 2160 2156 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2161 2157 start = ext4_ext_pblock(ex) + ee_len - num; 2162 2158 ext_debug("free last %u blocks starting %llu\n", num, start); 2163 - ext4_free_blocks(handle, inode, 0, start, num, flags); 2159 + ext4_free_blocks(handle, inode, NULL, start, num, flags); 2164 2160 } else if (from == le32_to_cpu(ex->ee_block) 2165 2161 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2166 2162 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", ··· 3112 3108 { 3113 3109 int i, depth; 3114 3110 struct ext4_extent_header *eh; 3115 - struct ext4_extent *ex, *last_ex; 3111 + struct ext4_extent *last_ex; 3116 3112 3117 3113 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 3118 3114 return 0; 3119 3115 3120 3116 depth = ext_depth(inode); 3121 3117 eh = path[depth].p_hdr; 3122 - ex = path[depth].p_ext; 3123 3118 3124 3119 if (unlikely(!eh->eh_entries)) { 3125 3120 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and " ··· 3298 3295 struct ext4_map_blocks *map, int flags) 3299 3296 { 3300 3297 struct ext4_ext_path *path = NULL; 3301 - struct ext4_extent_header *eh; 3302 3298 struct ext4_extent newex, *ex; 3303 - ext4_fsblk_t newblock; 3299 + ext4_fsblk_t newblock = 0; 3304 3300 int err = 0, depth, ret; 3305 3301 unsigned int allocated = 0; 3306 3302 struct ext4_allocation_request ar; ··· 3307 3305 3308 3306 ext_debug("blocks %u/%u requested for inode %lu\n", 3309 3307 map->m_lblk, map->m_len, inode->i_ino); 3308 + trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3310 3309 3311 3310 /* check in cache */ 3312 3311 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { ··· 3355 3352 err = -EIO; 3356 3353 goto out2; 3357 3354 } 3358 - eh = path[depth].p_hdr; 3359 3355 3360 3356 ex = path[depth].p_ext; 3361 3357 if (ex) { ··· 3487 3485 /* not a good idea to call discard here directly, 3488 3486 * but otherwise we'd need to call it every free() */ 3489 3487 ext4_discard_preallocations(inode); 3490 - ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex), 3488 + ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), 3491 3489 ext4_ext_get_actual_len(&newex), 0); 3492 3490 goto out2; 3493 3491 } ··· 3527 3525 ext4_ext_drop_refs(path); 3528 3526 kfree(path); 3529 3527 } 3528 + trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, 3529 + newblock, map->m_len, err ? err : allocated); 3530 3530 return err ? 
err : allocated; 3531 3531 } 3532 3532 ··· 3662 3658 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3663 3659 return -EOPNOTSUPP; 3664 3660 3661 + trace_ext4_fallocate_enter(inode, offset, len, mode); 3665 3662 map.m_lblk = offset >> blkbits; 3666 3663 /* 3667 3664 * We can't just convert len to max_blocks because ··· 3678 3673 ret = inode_newsize_ok(inode, (len + offset)); 3679 3674 if (ret) { 3680 3675 mutex_unlock(&inode->i_mutex); 3676 + trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 3681 3677 return ret; 3682 3678 } 3683 3679 retry: ··· 3723 3717 goto retry; 3724 3718 } 3725 3719 mutex_unlock(&inode->i_mutex); 3720 + trace_ext4_fallocate_exit(inode, offset, max_blocks, 3721 + ret > 0 ? ret2 : ret); 3726 3722 return ret > 0 ? ret2 : ret; 3727 3723 } 3728 3724 ··· 3783 3775 } 3784 3776 return ret > 0 ? ret2 : ret; 3785 3777 } 3778 + 3786 3779 /* 3787 3780 * Callback function called for each extent to gather FIEMAP information. 3788 3781 */ ··· 3791 3782 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3792 3783 void *data) 3793 3784 { 3794 - struct fiemap_extent_info *fieinfo = data; 3795 - unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 3796 3785 __u64 logical; 3797 3786 __u64 physical; 3798 3787 __u64 length; 3788 + loff_t size; 3799 3789 __u32 flags = 0; 3800 - int error; 3790 + int ret = 0; 3791 + struct fiemap_extent_info *fieinfo = data; 3792 + unsigned char blksize_bits; 3801 3793 3802 - logical = (__u64)newex->ec_block << blksize_bits; 3794 + blksize_bits = inode->i_sb->s_blocksize_bits; 3795 + logical = (__u64)newex->ec_block << blksize_bits; 3803 3796 3804 3797 if (newex->ec_start == 0) { 3805 - pgoff_t offset; 3806 - struct page *page; 3798 + /* 3799 + * No extent in extent-tree contains block @newex->ec_start, 3800 + * then the block may stay in 1)a hole or 2)delayed-extent. 3801 + * 3802 + * Holes or delayed-extents are processed as follows. 3803 + * 1. lookup dirty pages with specified range in pagecache. 3804 + * If no page is got, then there is no delayed-extent and 3805 + * return with EXT_CONTINUE. 3806 + * 2. find the 1st mapped buffer, 3807 + * 3. check if the mapped buffer is both in the request range 3808 + * and a delayed buffer. If not, there is no delayed-extent, 3809 + * then return. 3810 + * 4. a delayed-extent is found, the extent will be collected. 3811 + */ 3812 + ext4_lblk_t end = 0; 3813 + pgoff_t last_offset; 3814 + pgoff_t offset; 3815 + pgoff_t index; 3816 + struct page **pages = NULL; 3807 3817 struct buffer_head *bh = NULL; 3818 + struct buffer_head *head = NULL; 3819 + unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *); 3820 + 3821 + pages = kmalloc(PAGE_SIZE, GFP_KERNEL); 3822 + if (pages == NULL) 3823 + return -ENOMEM; 3808 3824 3809 3825 offset = logical >> PAGE_SHIFT; 3810 - page = find_get_page(inode->i_mapping, offset); 3811 - if (!page || !page_has_buffers(page)) 3812 - return EXT_CONTINUE; 3826 + repeat: 3827 + last_offset = offset; 3828 + head = NULL; 3829 + ret = find_get_pages_tag(inode->i_mapping, &offset, 3830 + PAGECACHE_TAG_DIRTY, nr_pages, pages); 3813 3831 3814 - bh = page_buffers(page); 3832 + if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 3833 + /* First time, try to find a mapped buffer. */ 3834 + if (ret == 0) { 3835 + out: 3836 + for (index = 0; index < ret; index++) 3837 + page_cache_release(pages[index]); 3838 + /* just a hole. */ 3839 + kfree(pages); 3840 + return EXT_CONTINUE; 3841 + } 3815 3842 3816 - if (!bh) 3817 - return EXT_CONTINUE; 3843 + /* Try to find the 1st mapped buffer. 
*/ 3844 + end = ((__u64)pages[0]->index << PAGE_SHIFT) >> 3845 + blksize_bits; 3846 + if (!page_has_buffers(pages[0])) 3847 + goto out; 3848 + head = page_buffers(pages[0]); 3849 + if (!head) 3850 + goto out; 3818 3851 3819 - if (buffer_delay(bh)) { 3820 - flags |= FIEMAP_EXTENT_DELALLOC; 3821 - page_cache_release(page); 3852 + bh = head; 3853 + do { 3854 + if (buffer_mapped(bh)) { 3855 + /* get the 1st mapped buffer. */ 3856 + if (end > newex->ec_block + 3857 + newex->ec_len) 3858 + /* The buffer is out of 3859 + * the request range. 3860 + */ 3861 + goto out; 3862 + goto found_mapped_buffer; 3863 + } 3864 + bh = bh->b_this_page; 3865 + end++; 3866 + } while (bh != head); 3867 + 3868 + /* No mapped buffer found. */ 3869 + goto out; 3822 3870 } else { 3823 - page_cache_release(page); 3824 - return EXT_CONTINUE; 3871 + /*Find contiguous delayed buffers. */ 3872 + if (ret > 0 && pages[0]->index == last_offset) 3873 + head = page_buffers(pages[0]); 3874 + bh = head; 3825 3875 } 3876 + 3877 + found_mapped_buffer: 3878 + if (bh != NULL && buffer_delay(bh)) { 3879 + /* 1st or contiguous delayed buffer found. */ 3880 + if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 3881 + /* 3882 + * 1st delayed buffer found, record 3883 + * the start of extent. 3884 + */ 3885 + flags |= FIEMAP_EXTENT_DELALLOC; 3886 + newex->ec_block = end; 3887 + logical = (__u64)end << blksize_bits; 3888 + } 3889 + /* Find contiguous delayed buffers. */ 3890 + do { 3891 + if (!buffer_delay(bh)) 3892 + goto found_delayed_extent; 3893 + bh = bh->b_this_page; 3894 + end++; 3895 + } while (bh != head); 3896 + 3897 + for (index = 1; index < ret; index++) { 3898 + if (!page_has_buffers(pages[index])) { 3899 + bh = NULL; 3900 + break; 3901 + } 3902 + head = page_buffers(pages[index]); 3903 + if (!head) { 3904 + bh = NULL; 3905 + break; 3906 + } 3907 + if (pages[index]->index != 3908 + pages[0]->index + index) { 3909 + /* Blocks are not contiguous. */ 3910 + bh = NULL; 3911 + break; 3912 + } 3913 + bh = head; 3914 + do { 3915 + if (!buffer_delay(bh)) 3916 + /* Delayed-extent ends. */ 3917 + goto found_delayed_extent; 3918 + bh = bh->b_this_page; 3919 + end++; 3920 + } while (bh != head); 3921 + } 3922 + } else if (!(flags & FIEMAP_EXTENT_DELALLOC)) 3923 + /* a hole found. */ 3924 + goto out; 3925 + 3926 + found_delayed_extent: 3927 + newex->ec_len = min(end - newex->ec_block, 3928 + (ext4_lblk_t)EXT_INIT_MAX_LEN); 3929 + if (ret == nr_pages && bh != NULL && 3930 + newex->ec_len < EXT_INIT_MAX_LEN && 3931 + buffer_delay(bh)) { 3932 + /* Have not collected an extent and continue. */ 3933 + for (index = 0; index < ret; index++) 3934 + page_cache_release(pages[index]); 3935 + goto repeat; 3936 + } 3937 + 3938 + for (index = 0; index < ret; index++) 3939 + page_cache_release(pages[index]); 3940 + kfree(pages); 3826 3941 } 3827 3942 3828 3943 physical = (__u64)newex->ec_start << blksize_bits; ··· 3955 3822 if (ex && ext4_ext_is_uninitialized(ex)) 3956 3823 flags |= FIEMAP_EXTENT_UNWRITTEN; 3957 3824 3958 - /* 3959 - * If this extent reaches EXT_MAX_BLOCK, it must be last. 3960 - * 3961 - * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK, 3962 - * this also indicates no more allocated blocks. 
3963 - * 3964 - * XXX this might miss a single-block extent at EXT_MAX_BLOCK 3965 - */ 3966 - if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK || 3967 - newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) { 3968 - loff_t size = i_size_read(inode); 3969 - loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb); 3970 - 3825 + size = i_size_read(inode); 3826 + if (logical + length >= size) 3971 3827 flags |= FIEMAP_EXTENT_LAST; 3972 - if ((flags & FIEMAP_EXTENT_DELALLOC) && 3973 - logical+length > size) 3974 - length = (size - logical + bs - 1) & ~(bs-1); 3975 - } 3976 3828 3977 - error = fiemap_fill_next_extent(fieinfo, logical, physical, 3829 + ret = fiemap_fill_next_extent(fieinfo, logical, physical, 3978 3830 length, flags); 3979 - if (error < 0) 3980 - return error; 3981 - if (error == 1) 3831 + if (ret < 0) 3832 + return ret; 3833 + if (ret == 1) 3982 3834 return EXT_BREAK; 3983 - 3984 3835 return EXT_CONTINUE; 3985 3836 } 3986 3837
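The reworked fiemap callback above services the FS_IOC_FIEMAP ioctl; with this change, dirty delayed-allocation ranges are reported as extents carrying FIEMAP_EXTENT_DELALLOC instead of appearing as holes. A minimal userspace sketch that walks a file's extents (the 32-extent buffer size is arbitrary; a real tool would loop until it sees FIEMAP_EXTENT_LAST):

/*
 * Minimal sketch: list a file's extents with FS_IOC_FIEMAP.
 * fm_flags is left at 0 (no FIEMAP_FLAG_SYNC) so that delayed
 * allocation extents are reported rather than flushed first.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

#define NUM_EXTENTS 32	/* arbitrary for this sketch */

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	fm = calloc(1, sizeof(*fm) + NUM_EXTENTS * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_extent_count = NUM_EXTENTS;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu phys %llu len %llu flags 0x%x%s\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags,
		       (fm->fm_extents[i].fe_flags & FIEMAP_EXTENT_DELALLOC) ?
		       " (delalloc)" : "");
	free(fm);
	close(fd);
	return 0;
}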
+9 -5
fs/ext4/fsync.c
··· 164 164 165 165 J_ASSERT(ext4_journal_current_handle() == NULL); 166 166 167 - trace_ext4_sync_file(file, datasync); 167 + trace_ext4_sync_file_enter(file, datasync); 168 168 169 169 if (inode->i_sb->s_flags & MS_RDONLY) 170 170 return 0; 171 171 172 172 ret = ext4_flush_completed_IO(inode); 173 173 if (ret < 0) 174 - return ret; 174 + goto out; 175 175 176 176 if (!journal) { 177 177 ret = generic_file_fsync(file, datasync); 178 178 if (!ret && !list_empty(&inode->i_dentry)) 179 179 ext4_sync_parent(inode); 180 - return ret; 180 + goto out; 181 181 } 182 182 183 183 /* ··· 194 194 * (they were dirtied by commit). But that's OK - the blocks are 195 195 * safe in-journal, which is all fsync() needs to ensure. 196 196 */ 197 - if (ext4_should_journal_data(inode)) 198 - return ext4_force_commit(inode->i_sb); 197 + if (ext4_should_journal_data(inode)) { 198 + ret = ext4_force_commit(inode->i_sb); 199 + goto out; 200 + } 199 201 200 202 commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; 201 203 if (jbd2_log_start_commit(journal, commit_tid)) { ··· 217 215 ret = jbd2_log_wait_commit(journal, commit_tid); 218 216 } else if (journal->j_flags & JBD2_BARRIER) 219 217 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 218 + out: 219 + trace_ext4_sync_file_exit(inode, ret); 220 220 return ret; 221 221 }
+7 -1
fs/ext4/ialloc.c
··· 152 152 * We do it here so the bitmap uptodate bit 153 153 * get set with buffer lock held. 154 154 */ 155 + trace_ext4_load_inode_bitmap(sb, block_group); 155 156 set_bitmap_uptodate(bh); 156 157 if (bh_submit_read(bh) < 0) { 157 158 put_bh(bh); ··· 650 649 *group = parent_group + flex_size; 651 650 if (*group > ngroups) 652 651 *group = 0; 653 - return find_group_orlov(sb, parent, group, mode, 0); 652 + return find_group_orlov(sb, parent, group, mode, NULL); 654 653 } 655 654 656 655 /* ··· 1053 1052 ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); 1054 1053 ext4_ext_tree_init(handle, inode); 1055 1054 } 1055 + } 1056 + 1057 + if (ext4_handle_valid(handle)) { 1058 + ei->i_sync_tid = handle->h_transaction->t_tid; 1059 + ei->i_datasync_tid = handle->h_transaction->t_tid; 1056 1060 } 1057 1061 1058 1062 err = ext4_mark_inode_dirty(handle, inode);
+175 -233
fs/ext4/inode.c
··· 173 173 BUG_ON(EXT4_JOURNAL(inode) == NULL); 174 174 jbd_debug(2, "restarting handle %p\n", handle); 175 175 up_write(&EXT4_I(inode)->i_data_sem); 176 - ret = ext4_journal_restart(handle, blocks_for_truncate(inode)); 176 + ret = ext4_journal_restart(handle, nblocks); 177 177 down_write(&EXT4_I(inode)->i_data_sem); 178 178 ext4_discard_preallocations(inode); 179 179 ··· 720 720 return ret; 721 721 failed_out: 722 722 for (i = 0; i < index; i++) 723 - ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0); 723 + ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); 724 724 return ret; 725 725 } 726 726 ··· 823 823 return err; 824 824 failed: 825 825 /* Allocation failed, free what we already allocated */ 826 - ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0); 826 + ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0); 827 827 for (i = 1; i <= n ; i++) { 828 828 /* 829 829 * branch[i].bh is newly allocated, so there is no 830 830 * need to revoke the block, which is why we don't 831 831 * need to set EXT4_FREE_BLOCKS_METADATA. 832 832 */ 833 - ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 833 + ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 834 834 EXT4_FREE_BLOCKS_FORGET); 835 835 } 836 836 for (i = n+1; i < indirect_blks; i++) 837 - ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0); 837 + ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); 838 838 839 - ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0); 839 + ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0); 840 840 841 841 return err; 842 842 } ··· 924 924 ext4_free_blocks(handle, inode, where[i].bh, 0, 1, 925 925 EXT4_FREE_BLOCKS_FORGET); 926 926 } 927 - ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key), 927 + ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), 928 928 blks, 0); 929 929 930 930 return err; ··· 973 973 int count = 0; 974 974 ext4_fsblk_t first_block = 0; 975 975 976 + trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 976 977 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); 977 978 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); 978 979 depth = ext4_block_to_path(inode, map->m_lblk, offsets, ··· 1059 1058 partial--; 1060 1059 } 1061 1060 out: 1061 + trace_ext4_ind_map_blocks_exit(inode, map->m_lblk, 1062 + map->m_pblk, map->m_len, err); 1062 1063 return err; 1063 1064 } 1064 1065 ··· 2063 2060 if (nr_pages == 0) 2064 2061 break; 2065 2062 for (i = 0; i < nr_pages; i++) { 2066 - int commit_write = 0, redirty_page = 0; 2063 + int commit_write = 0, skip_page = 0; 2067 2064 struct page *page = pvec.pages[i]; 2068 2065 2069 2066 index = page->index; ··· 2089 2086 * If the page does not have buffers (for 2090 2087 * whatever reason), try to create them using 2091 2088 * __block_write_begin. If this fails, 2092 - * redirty the page and move on. 2089 + * skip the page and move on. 
2093 2090 */ 2094 2091 if (!page_has_buffers(page)) { 2095 2092 if (__block_write_begin(page, 0, len, 2096 2093 noalloc_get_block_write)) { 2097 - redirty_page: 2098 - redirty_page_for_writepage(mpd->wbc, 2099 - page); 2094 + skip_page: 2100 2095 unlock_page(page); 2101 2096 continue; 2102 2097 } ··· 2105 2104 block_start = 0; 2106 2105 do { 2107 2106 if (!bh) 2108 - goto redirty_page; 2107 + goto skip_page; 2109 2108 if (map && (cur_logical >= map->m_lblk) && 2110 2109 (cur_logical <= (map->m_lblk + 2111 2110 (map->m_len - 1)))) { ··· 2121 2120 clear_buffer_unwritten(bh); 2122 2121 } 2123 2122 2124 - /* redirty page if block allocation undone */ 2123 + /* skip page if block allocation undone */ 2125 2124 if (buffer_delay(bh) || buffer_unwritten(bh)) 2126 - redirty_page = 1; 2125 + skip_page = 1; 2127 2126 bh = bh->b_this_page; 2128 2127 block_start += bh->b_size; 2129 2128 cur_logical++; 2130 2129 pblock++; 2131 2130 } while (bh != page_bufs); 2132 2131 2133 - if (redirty_page) 2134 - goto redirty_page; 2132 + if (skip_page) 2133 + goto skip_page; 2135 2134 2136 2135 if (commit_write) 2137 2136 /* mark the buffer_heads as dirty & uptodate */ 2138 2137 block_commit_write(page, 0, len); 2139 2138 2139 + clear_page_dirty_for_io(page); 2140 2140 /* 2141 2141 * Delalloc doesn't support data journalling, 2142 2142 * but eventually maybe we'll lift this ··· 2167 2165 return ret; 2168 2166 } 2169 2167 2170 - static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2171 - sector_t logical, long blk_cnt) 2168 + static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 2172 2169 { 2173 2170 int nr_pages, i; 2174 2171 pgoff_t index, end; ··· 2175 2174 struct inode *inode = mpd->inode; 2176 2175 struct address_space *mapping = inode->i_mapping; 2177 2176 2178 - index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2179 - end = (logical + blk_cnt - 1) >> 2180 - (PAGE_CACHE_SHIFT - inode->i_blkbits); 2177 + index = mpd->first_page; 2178 + end = mpd->next_page - 1; 2181 2179 while (index <= end) { 2182 2180 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2183 2181 if (nr_pages == 0) ··· 2279 2279 err = blks; 2280 2280 /* 2281 2281 * If get block returns EAGAIN or ENOSPC and there 2282 - * appears to be free blocks we will call 2283 - * ext4_writepage() for all of the pages which will 2284 - * just redirty the pages. 2282 + * appears to be free blocks we will just let 2283 + * mpage_da_submit_io() unlock all of the pages. 2285 2284 */ 2286 2285 if (err == -EAGAIN) 2287 2286 goto submit_io; ··· 2311 2312 ext4_print_free_blocks(mpd->inode); 2312 2313 } 2313 2314 /* invalidate all the pages */ 2314 - ext4_da_block_invalidatepages(mpd, next, 2315 - mpd->b_size >> mpd->inode->i_blkbits); 2315 + ext4_da_block_invalidatepages(mpd); 2316 + 2317 + /* Mark this page range as having been completed */ 2318 + mpd->io_done = 1; 2316 2319 return; 2317 2320 } 2318 2321 BUG_ON(blks == 0); ··· 2439 2438 } 2440 2439 2441 2440 /* 2442 - * __mpage_da_writepage - finds extent of pages and blocks 2443 - * 2444 - * @page: page to consider 2445 - * @wbc: not used, we just follow rules 2446 - * @data: context 2447 - * 2448 - * The function finds extents of pages and scan them for all blocks. 
2449 - */ 2450 - static int __mpage_da_writepage(struct page *page, 2451 - struct writeback_control *wbc, 2452 - struct mpage_da_data *mpd) 2453 - { 2454 - struct inode *inode = mpd->inode; 2455 - struct buffer_head *bh, *head; 2456 - sector_t logical; 2457 - 2458 - /* 2459 - * Can we merge this page to current extent? 2460 - */ 2461 - if (mpd->next_page != page->index) { 2462 - /* 2463 - * Nope, we can't. So, we map non-allocated blocks 2464 - * and start IO on them 2465 - */ 2466 - if (mpd->next_page != mpd->first_page) { 2467 - mpage_da_map_and_submit(mpd); 2468 - /* 2469 - * skip rest of the page in the page_vec 2470 - */ 2471 - redirty_page_for_writepage(wbc, page); 2472 - unlock_page(page); 2473 - return MPAGE_DA_EXTENT_TAIL; 2474 - } 2475 - 2476 - /* 2477 - * Start next extent of pages ... 2478 - */ 2479 - mpd->first_page = page->index; 2480 - 2481 - /* 2482 - * ... and blocks 2483 - */ 2484 - mpd->b_size = 0; 2485 - mpd->b_state = 0; 2486 - mpd->b_blocknr = 0; 2487 - } 2488 - 2489 - mpd->next_page = page->index + 1; 2490 - logical = (sector_t) page->index << 2491 - (PAGE_CACHE_SHIFT - inode->i_blkbits); 2492 - 2493 - if (!page_has_buffers(page)) { 2494 - mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE, 2495 - (1 << BH_Dirty) | (1 << BH_Uptodate)); 2496 - if (mpd->io_done) 2497 - return MPAGE_DA_EXTENT_TAIL; 2498 - } else { 2499 - /* 2500 - * Page with regular buffer heads, just add all dirty ones 2501 - */ 2502 - head = page_buffers(page); 2503 - bh = head; 2504 - do { 2505 - BUG_ON(buffer_locked(bh)); 2506 - /* 2507 - * We need to try to allocate 2508 - * unmapped blocks in the same page. 2509 - * Otherwise we won't make progress 2510 - * with the page in ext4_writepage 2511 - */ 2512 - if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2513 - mpage_add_bh_to_extent(mpd, logical, 2514 - bh->b_size, 2515 - bh->b_state); 2516 - if (mpd->io_done) 2517 - return MPAGE_DA_EXTENT_TAIL; 2518 - } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2519 - /* 2520 - * mapped dirty buffer. We need to update 2521 - * the b_state because we look at 2522 - * b_state in mpage_da_map_blocks. We don't 2523 - * update b_size because if we find an 2524 - * unmapped buffer_head later we need to 2525 - * use the b_state flag of that buffer_head. 2526 - */ 2527 - if (mpd->b_size == 0) 2528 - mpd->b_state = bh->b_state & BH_FLAGS; 2529 - } 2530 - logical++; 2531 - } while ((bh = bh->b_this_page) != head); 2532 - } 2533 - 2534 - return 0; 2535 - } 2536 - 2537 - /* 2538 2441 * This is a special get_blocks_t callback which is used by 2539 2442 * ext4_da_write_begin(). It will either return mapped block or 2540 2443 * reserve space for a single block. ··· 2502 2597 * for partial write. 2503 2598 */ 2504 2599 set_buffer_new(bh); 2505 - set_buffer_mapped(bh); 2506 2600 } 2507 2601 return 0; 2508 2602 } ··· 2715 2811 2716 2812 /* 2717 2813 * write_cache_pages_da - walk the list of dirty pages of the given 2718 - * address space and call the callback function (which usually writes 2719 - * the pages). 2720 - * 2721 - * This is a forked version of write_cache_pages(). Differences: 2722 - * Range cyclic is ignored. 2723 - * no_nrwrite_index_update is always presumed true 2814 + * address space and accumulate pages that need writing, and call 2815 + * mpage_da_map_and_submit to map a single contiguous memory region 2816 + * and then write them. 
2724 2817 */ 2725 2818 static int write_cache_pages_da(struct address_space *mapping, 2726 2819 struct writeback_control *wbc, 2727 2820 struct mpage_da_data *mpd, 2728 2821 pgoff_t *done_index) 2729 2822 { 2730 - int ret = 0; 2731 - int done = 0; 2732 - struct pagevec pvec; 2733 - unsigned nr_pages; 2734 - pgoff_t index; 2735 - pgoff_t end; /* Inclusive */ 2736 - long nr_to_write = wbc->nr_to_write; 2737 - int tag; 2823 + struct buffer_head *bh, *head; 2824 + struct inode *inode = mapping->host; 2825 + struct pagevec pvec; 2826 + unsigned int nr_pages; 2827 + sector_t logical; 2828 + pgoff_t index, end; 2829 + long nr_to_write = wbc->nr_to_write; 2830 + int i, tag, ret = 0; 2738 2831 2832 + memset(mpd, 0, sizeof(struct mpage_da_data)); 2833 + mpd->wbc = wbc; 2834 + mpd->inode = inode; 2739 2835 pagevec_init(&pvec, 0); 2740 2836 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2741 2837 end = wbc->range_end >> PAGE_CACHE_SHIFT; ··· 2746 2842 tag = PAGECACHE_TAG_DIRTY; 2747 2843 2748 2844 *done_index = index; 2749 - while (!done && (index <= end)) { 2750 - int i; 2751 - 2845 + while (index <= end) { 2752 2846 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2753 2847 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2754 2848 if (nr_pages == 0) 2755 - break; 2849 + return 0; 2756 2850 2757 2851 for (i = 0; i < nr_pages; i++) { 2758 2852 struct page *page = pvec.pages[i]; ··· 2762 2860 * mapping. However, page->index will not change 2763 2861 * because we have a reference on the page. 2764 2862 */ 2765 - if (page->index > end) { 2766 - done = 1; 2767 - break; 2768 - } 2863 + if (page->index > end) 2864 + goto out; 2769 2865 2770 2866 *done_index = page->index + 1; 2867 + 2868 + /* 2869 + * If we can't merge this page, and we have 2870 + * accumulated an contiguous region, write it 2871 + */ 2872 + if ((mpd->next_page != page->index) && 2873 + (mpd->next_page != mpd->first_page)) { 2874 + mpage_da_map_and_submit(mpd); 2875 + goto ret_extent_tail; 2876 + } 2771 2877 2772 2878 lock_page(page); 2773 2879 2774 2880 /* 2775 - * Page truncated or invalidated. We can freely skip it 2776 - * then, even for data integrity operations: the page 2777 - * has disappeared concurrently, so there could be no 2778 - * real expectation of this data interity operation 2779 - * even if there is now a new, dirty page at the same 2780 - * pagecache address. 
2881 + * If the page is no longer dirty, or its 2882 + * mapping no longer corresponds to inode we 2883 + * are writing (which means it has been 2884 + * truncated or invalidated), or the page is 2885 + * already under writeback and we are not 2886 + * doing a data integrity writeback, skip the page 2781 2887 */ 2782 - if (unlikely(page->mapping != mapping)) { 2783 - continue_unlock: 2888 + if (!PageDirty(page) || 2889 + (PageWriteback(page) && 2890 + (wbc->sync_mode == WB_SYNC_NONE)) || 2891 + unlikely(page->mapping != mapping)) { 2784 2892 unlock_page(page); 2785 2893 continue; 2786 2894 } 2787 2895 2788 - if (!PageDirty(page)) { 2789 - /* someone wrote it for us */ 2790 - goto continue_unlock; 2791 - } 2792 - 2793 - if (PageWriteback(page)) { 2794 - if (wbc->sync_mode != WB_SYNC_NONE) 2795 - wait_on_page_writeback(page); 2796 - else 2797 - goto continue_unlock; 2798 - } 2896 + if (PageWriteback(page)) 2897 + wait_on_page_writeback(page); 2799 2898 2800 2899 BUG_ON(PageWriteback(page)); 2801 - if (!clear_page_dirty_for_io(page)) 2802 - goto continue_unlock; 2803 2900 2804 - ret = __mpage_da_writepage(page, wbc, mpd); 2805 - if (unlikely(ret)) { 2806 - if (ret == AOP_WRITEPAGE_ACTIVATE) { 2807 - unlock_page(page); 2808 - ret = 0; 2809 - } else { 2810 - done = 1; 2811 - break; 2812 - } 2901 + if (mpd->next_page != page->index) 2902 + mpd->first_page = page->index; 2903 + mpd->next_page = page->index + 1; 2904 + logical = (sector_t) page->index << 2905 + (PAGE_CACHE_SHIFT - inode->i_blkbits); 2906 + 2907 + if (!page_has_buffers(page)) { 2908 + mpage_add_bh_to_extent(mpd, logical, 2909 + PAGE_CACHE_SIZE, 2910 + (1 << BH_Dirty) | (1 << BH_Uptodate)); 2911 + if (mpd->io_done) 2912 + goto ret_extent_tail; 2913 + } else { 2914 + /* 2915 + * Page with regular buffer heads, 2916 + * just add all dirty ones 2917 + */ 2918 + head = page_buffers(page); 2919 + bh = head; 2920 + do { 2921 + BUG_ON(buffer_locked(bh)); 2922 + /* 2923 + * We need to try to allocate 2924 + * unmapped blocks in the same page. 2925 + * Otherwise we won't make progress 2926 + * with the page in ext4_writepage 2927 + */ 2928 + if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2929 + mpage_add_bh_to_extent(mpd, logical, 2930 + bh->b_size, 2931 + bh->b_state); 2932 + if (mpd->io_done) 2933 + goto ret_extent_tail; 2934 + } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2935 + /* 2936 + * mapped dirty buffer. We need 2937 + * to update the b_state 2938 + * because we look at b_state 2939 + * in mpage_da_map_blocks. We 2940 + * don't update b_size because 2941 + * if we find an unmapped 2942 + * buffer_head later we need to 2943 + * use the b_state flag of that 2944 + * buffer_head. 2945 + */ 2946 + if (mpd->b_size == 0) 2947 + mpd->b_state = bh->b_state & BH_FLAGS; 2948 + } 2949 + logical++; 2950 + } while ((bh = bh->b_this_page) != head); 2813 2951 } 2814 2952 2815 2953 if (nr_to_write > 0) { 2816 2954 nr_to_write--; 2817 2955 if (nr_to_write == 0 && 2818 - wbc->sync_mode == WB_SYNC_NONE) { 2956 + wbc->sync_mode == WB_SYNC_NONE) 2819 2957 /* 2820 2958 * We stop writing back only if we are 2821 2959 * not doing integrity sync. In case of ··· 2866 2924 * pages, but have not synced all of the 2867 2925 * old dirty pages. 
2868 2926 */ 2869 - done = 1; 2870 - break; 2871 - } 2927 + goto out; 2872 2928 } 2873 2929 } 2874 2930 pagevec_release(&pvec); 2875 2931 cond_resched(); 2876 2932 } 2933 + return 0; 2934 + ret_extent_tail: 2935 + ret = MPAGE_DA_EXTENT_TAIL; 2936 + out: 2937 + pagevec_release(&pvec); 2938 + cond_resched(); 2877 2939 return ret; 2878 2940 } 2879 2941 ··· 2891 2945 struct mpage_da_data mpd; 2892 2946 struct inode *inode = mapping->host; 2893 2947 int pages_written = 0; 2894 - long pages_skipped; 2895 2948 unsigned int max_pages; 2896 2949 int range_cyclic, cycled = 1, io_done = 0; 2897 2950 int needed_blocks, ret = 0; ··· 2973 3028 wbc->nr_to_write = desired_nr_to_write; 2974 3029 } 2975 3030 2976 - mpd.wbc = wbc; 2977 - mpd.inode = mapping->host; 2978 - 2979 - pages_skipped = wbc->pages_skipped; 2980 - 2981 3031 retry: 2982 3032 if (wbc->sync_mode == WB_SYNC_ALL) 2983 3033 tag_pages_for_writeback(mapping, index, end); ··· 2999 3059 } 3000 3060 3001 3061 /* 3002 - * Now call __mpage_da_writepage to find the next 3062 + * Now call write_cache_pages_da() to find the next 3003 3063 * contiguous region of logical blocks that need 3004 - * blocks to be allocated by ext4. We don't actually 3005 - * submit the blocks for I/O here, even though 3006 - * write_cache_pages thinks it will, and will set the 3007 - * pages as clean for write before calling 3008 - * __mpage_da_writepage(). 3064 + * blocks to be allocated by ext4 and submit them. 3009 3065 */ 3010 - mpd.b_size = 0; 3011 - mpd.b_state = 0; 3012 - mpd.b_blocknr = 0; 3013 - mpd.first_page = 0; 3014 - mpd.next_page = 0; 3015 - mpd.io_done = 0; 3016 - mpd.pages_written = 0; 3017 - mpd.retval = 0; 3018 3066 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index); 3019 3067 /* 3020 3068 * If we have a contiguous extent of pages and we ··· 3024 3096 * and try again 3025 3097 */ 3026 3098 jbd2_journal_force_commit_nested(sbi->s_journal); 3027 - wbc->pages_skipped = pages_skipped; 3028 3099 ret = 0; 3029 3100 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 3030 3101 /* ··· 3031 3104 * rest of the pages 3032 3105 */ 3033 3106 pages_written += mpd.pages_written; 3034 - wbc->pages_skipped = pages_skipped; 3035 3107 ret = 0; 3036 3108 io_done = 1; 3037 3109 } else if (wbc->nr_to_write) ··· 3048 3122 wbc->range_end = mapping->writeback_index - 1; 3049 3123 goto retry; 3050 3124 } 3051 - if (pages_skipped != wbc->pages_skipped) 3052 - ext4_msg(inode->i_sb, KERN_CRIT, 3053 - "This should not happen leaving %s " 3054 - "with nr_to_write = %ld ret = %d", 3055 - __func__, wbc->nr_to_write, ret); 3056 3125 3057 3126 /* Update index */ 3058 3127 wbc->range_cyclic = range_cyclic; ··· 3381 3460 3382 3461 static int ext4_readpage(struct file *file, struct page *page) 3383 3462 { 3463 + trace_ext4_readpage(page); 3384 3464 return mpage_readpage(page, ext4_get_block); 3385 3465 } 3386 3466 ··· 3416 3494 { 3417 3495 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3418 3496 3497 + trace_ext4_invalidatepage(page, offset); 3498 + 3419 3499 /* 3420 3500 * free any io_end structure allocated for buffers to be discarded 3421 3501 */ ··· 3438 3514 static int ext4_releasepage(struct page *page, gfp_t wait) 3439 3515 { 3440 3516 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3517 + 3518 + trace_ext4_releasepage(page); 3441 3519 3442 3520 WARN_ON(PageChecked(page)); 3443 3521 if (!page_has_buffers(page)) ··· 3799 3873 { 3800 3874 struct file *file = iocb->ki_filp; 3801 3875 struct inode *inode = file->f_mapping->host; 3876 + ssize_t ret; 3802 3877 3878 + 
trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 3803 3879 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3804 - return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 3805 - 3806 - return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3880 + ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 3881 + else 3882 + ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3883 + trace_ext4_direct_IO_exit(inode, offset, 3884 + iov_length(iov, nr_segs), rw, ret); 3885 + return ret; 3807 3886 } 3808 3887 3809 3888 /* ··· 4104 4173 * 4105 4174 * We release `count' blocks on disk, but (last - first) may be greater 4106 4175 * than `count' because there can be holes in there. 4176 + * 4177 + * Return 0 on success, 1 on invalid block range 4178 + * and < 0 on fatal error. 4107 4179 */ 4108 4180 static int ext4_clear_blocks(handle_t *handle, struct inode *inode, 4109 4181 struct buffer_head *bh, ··· 4133 4199 if (bh) { 4134 4200 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 4135 4201 err = ext4_handle_dirty_metadata(handle, inode, bh); 4136 - if (unlikely(err)) { 4137 - ext4_std_error(inode->i_sb, err); 4138 - return 1; 4139 - } 4202 + if (unlikely(err)) 4203 + goto out_err; 4140 4204 } 4141 4205 err = ext4_mark_inode_dirty(handle, inode); 4142 - if (unlikely(err)) { 4143 - ext4_std_error(inode->i_sb, err); 4144 - return 1; 4145 - } 4206 + if (unlikely(err)) 4207 + goto out_err; 4146 4208 err = ext4_truncate_restart_trans(handle, inode, 4147 4209 blocks_for_truncate(inode)); 4148 - if (unlikely(err)) { 4149 - ext4_std_error(inode->i_sb, err); 4150 - return 1; 4151 - } 4210 + if (unlikely(err)) 4211 + goto out_err; 4152 4212 if (bh) { 4153 4213 BUFFER_TRACE(bh, "retaking write access"); 4154 - ext4_journal_get_write_access(handle, bh); 4214 + err = ext4_journal_get_write_access(handle, bh); 4215 + if (unlikely(err)) 4216 + goto out_err; 4155 4217 } 4156 4218 } 4157 4219 4158 4220 for (p = first; p < last; p++) 4159 4221 *p = 0; 4160 4222 4161 - ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); 4223 + ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags); 4162 4224 return 0; 4225 + out_err: 4226 + ext4_std_error(inode->i_sb, err); 4227 + return err; 4163 4228 } 4164 4229 4165 4230 /** ··· 4192 4259 ext4_fsblk_t nr; /* Current block # */ 4193 4260 __le32 *p; /* Pointer into inode/ind 4194 4261 for current block */ 4195 - int err; 4262 + int err = 0; 4196 4263 4197 4264 if (this_bh) { /* For indirect block */ 4198 4265 BUFFER_TRACE(this_bh, "get_write_access"); ··· 4214 4281 } else if (nr == block_to_free + count) { 4215 4282 count++; 4216 4283 } else { 4217 - if (ext4_clear_blocks(handle, inode, this_bh, 4218 - block_to_free, count, 4219 - block_to_free_p, p)) 4284 + err = ext4_clear_blocks(handle, inode, this_bh, 4285 + block_to_free, count, 4286 + block_to_free_p, p); 4287 + if (err) 4220 4288 break; 4221 4289 block_to_free = nr; 4222 4290 block_to_free_p = p; ··· 4226 4292 } 4227 4293 } 4228 4294 4229 - if (count > 0) 4230 - ext4_clear_blocks(handle, inode, this_bh, block_to_free, 4231 - count, block_to_free_p, p); 4295 + if (!err && count > 0) 4296 + err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, 4297 + count, block_to_free_p, p); 4298 + if (err < 0) 4299 + /* fatal error */ 4300 + return; 4232 4301 4233 4302 if (this_bh) { 4234 4303 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); ··· 4349 4412 * transaction where the data blocks are 4350 4413 * actually freed. 
4351 4414 */ 4352 - ext4_free_blocks(handle, inode, 0, nr, 1, 4415 + ext4_free_blocks(handle, inode, NULL, nr, 1, 4353 4416 EXT4_FREE_BLOCKS_METADATA| 4354 4417 EXT4_FREE_BLOCKS_FORGET); 4355 4418 ··· 4433 4496 ext4_lblk_t last_block; 4434 4497 unsigned blocksize = inode->i_sb->s_blocksize; 4435 4498 4499 + trace_ext4_truncate_enter(inode); 4500 + 4436 4501 if (!ext4_can_truncate(inode)) 4437 4502 return; 4438 4503 ··· 4445 4506 4446 4507 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4447 4508 ext4_ext_truncate(inode); 4509 + trace_ext4_truncate_exit(inode); 4448 4510 return; 4449 4511 } 4450 4512 ··· 4575 4635 ext4_orphan_del(handle, inode); 4576 4636 4577 4637 ext4_journal_stop(handle); 4638 + trace_ext4_truncate_exit(inode); 4578 4639 } 4579 4640 4580 4641 /* ··· 4707 4766 * has in-inode xattrs, or we don't have this inode in memory. 4708 4767 * Read the block from disk. 4709 4768 */ 4769 + trace_ext4_load_inode(inode); 4710 4770 get_bh(bh); 4711 4771 bh->b_end_io = end_buffer_read_sync; 4712 4772 submit_bh(READ_META, bh); ··· 4813 4871 return inode; 4814 4872 4815 4873 ei = EXT4_I(inode); 4816 - iloc.bh = 0; 4874 + iloc.bh = NULL; 4817 4875 4818 4876 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4819 4877 if (ret < 0)
+7
fs/ext4/ioctl.c
··· 334 334 case FITRIM: 335 335 { 336 336 struct super_block *sb = inode->i_sb; 337 + struct request_queue *q = bdev_get_queue(sb->s_bdev); 337 338 struct fstrim_range range; 338 339 int ret = 0; 339 340 340 341 if (!capable(CAP_SYS_ADMIN)) 341 342 return -EPERM; 342 343 344 + if (!blk_queue_discard(q)) 345 + return -EOPNOTSUPP; 346 + 343 347 if (copy_from_user(&range, (struct fstrim_range *)arg, 344 348 sizeof(range))) 345 349 return -EFAULT; 346 350 351 + range.minlen = max((unsigned int)range.minlen, 352 + q->limits.discard_granularity); 347 353 ret = ext4_trim_fs(sb, &range); 348 354 if (ret < 0) 349 355 return ret; ··· 427 421 return err; 428 422 } 429 423 case EXT4_IOC_MOVE_EXT: 424 + case FITRIM: 430 425 break; 431 426 default: 432 427 return -ENOIOCTLCMD;
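With the hunk above, FITRIM now fails early with EOPNOTSUPP on devices that do not support discard, and minlen is clamped up to the device's discard granularity. A minimal sketch of a userspace caller (requires CAP_SYS_ADMIN; the kernel copies the range back with the number of bytes it actually trimmed):

/*
 * Minimal sketch: trim all free space on a mounted ext4 filesystem.
 */
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mount-point>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.len = ULLONG_MAX;		/* whole filesystem */
	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		close(fd);
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}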
+23 -11
fs/ext4/mballoc.c
··· 432 432 } 433 433 434 434 /* at order 0 we see each particular block */ 435 - *max = 1 << (e4b->bd_blkbits + 3); 436 - if (order == 0) 435 + if (order == 0) { 436 + *max = 1 << (e4b->bd_blkbits + 3); 437 437 return EXT4_MB_BITMAP(e4b); 438 + } 438 439 439 440 bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 440 441 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; ··· 617 616 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 618 617 619 618 grp = ext4_get_group_info(sb, e4b->bd_group); 620 - buddy = mb_find_buddy(e4b, 0, &max); 621 619 list_for_each(cur, &grp->bb_prealloc_list) { 622 620 ext4_group_t groupnr; 623 621 struct ext4_prealloc_space *pa; ··· 635 635 #define mb_check_buddy(e4b) 636 636 #endif 637 637 638 - /* FIXME!! need more doc */ 638 + /* 639 + * Divide blocks started from @first with length @len into 640 + * smaller chunks with power of 2 blocks. 641 + * Clear the bits in bitmap which the blocks of the chunk(s) covered, 642 + * then increase bb_counters[] for corresponded chunk size. 643 + */ 639 644 static void ext4_mb_mark_free_simple(struct super_block *sb, 640 645 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 641 646 struct ext4_group_info *grp) ··· 2386 2381 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte 2387 2382 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem. 2388 2383 * So a two level scheme suffices for now. */ 2389 - sbi->s_group_info = kmalloc(array_size, GFP_KERNEL); 2384 + sbi->s_group_info = kzalloc(array_size, GFP_KERNEL); 2390 2385 if (sbi->s_group_info == NULL) { 2391 2386 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n"); 2392 2387 return -ENOMEM; ··· 3213 3208 cur_distance = abs(goal_block - cpa->pa_pstart); 3214 3209 new_distance = abs(goal_block - pa->pa_pstart); 3215 3210 3216 - if (cur_distance < new_distance) 3211 + if (cur_distance <= new_distance) 3217 3212 return cpa; 3218 3213 3219 3214 /* drop the previous reference */ ··· 3912 3907 struct super_block *sb = ac->ac_sb; 3913 3908 ext4_group_t ngroups, i; 3914 3909 3915 - if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 3910 + if (!mb_enable_debug || 3911 + (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) 3916 3912 return; 3917 3913 3918 3914 printk(KERN_ERR "EXT4-fs: Can't allocate:" ··· 4759 4753 * bitmap. Then issue a TRIM command on this extent and free the extent in 4760 4754 * the group buddy bitmap. This is done until whole group is scanned. 4761 4755 */ 4762 - ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, 4756 + static ext4_grpblk_t 4757 + ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, 4763 4758 ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) 4764 4759 { 4765 4760 void *bitmap; ··· 4870 4863 break; 4871 4864 } 4872 4865 4873 - if (len >= EXT4_BLOCKS_PER_GROUP(sb)) 4874 - len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block); 4875 - else 4866 + /* 4867 + * For all the groups except the last one, last block will 4868 + * always be EXT4_BLOCKS_PER_GROUP(sb), so we only need to 4869 + * change it for the last group in which case start + 4870 + * len < EXT4_BLOCKS_PER_GROUP(sb). 4871 + */ 4872 + if (first_block + len < EXT4_BLOCKS_PER_GROUP(sb)) 4876 4873 last_block = first_block + len; 4874 + len -= last_block - first_block; 4877 4875 4878 4876 if (e4b.bd_info->bb_free >= minlen) { 4879 4877 cnt = ext4_trim_all_free(sb, &e4b, first_block,
+1 -1
fs/ext4/mballoc.h
··· 169 169 /* original request */ 170 170 struct ext4_free_extent ac_o_ex; 171 171 172 - /* goal request (after normalization) */ 172 + /* goal request (normalized ac_o_ex) */ 173 173 struct ext4_free_extent ac_g_ex; 174 174 175 175 /* the best found extent */
+5 -5
fs/ext4/migrate.c
··· 263 263 for (i = 0; i < max_entries; i++) { 264 264 if (tmp_idata[i]) { 265 265 extend_credit_for_blkdel(handle, inode); 266 - ext4_free_blocks(handle, inode, 0, 266 + ext4_free_blocks(handle, inode, NULL, 267 267 le32_to_cpu(tmp_idata[i]), 1, 268 268 EXT4_FREE_BLOCKS_METADATA | 269 269 EXT4_FREE_BLOCKS_FORGET); ··· 271 271 } 272 272 put_bh(bh); 273 273 extend_credit_for_blkdel(handle, inode); 274 - ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1, 274 + ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1, 275 275 EXT4_FREE_BLOCKS_METADATA | 276 276 EXT4_FREE_BLOCKS_FORGET); 277 277 return 0; ··· 302 302 } 303 303 put_bh(bh); 304 304 extend_credit_for_blkdel(handle, inode); 305 - ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1, 305 + ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1, 306 306 EXT4_FREE_BLOCKS_METADATA | 307 307 EXT4_FREE_BLOCKS_FORGET); 308 308 return 0; ··· 315 315 /* ei->i_data[EXT4_IND_BLOCK] */ 316 316 if (i_data[0]) { 317 317 extend_credit_for_blkdel(handle, inode); 318 - ext4_free_blocks(handle, inode, 0, 318 + ext4_free_blocks(handle, inode, NULL, 319 319 le32_to_cpu(i_data[0]), 1, 320 320 EXT4_FREE_BLOCKS_METADATA | 321 321 EXT4_FREE_BLOCKS_FORGET); ··· 428 428 } 429 429 put_bh(bh); 430 430 extend_credit_for_blkdel(handle, inode); 431 - ext4_free_blocks(handle, inode, 0, block, 1, 431 + ext4_free_blocks(handle, inode, NULL, block, 1, 432 432 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 433 433 return retval; 434 434 }
+10 -3
fs/ext4/namei.c
··· 40 40 #include "xattr.h" 41 41 #include "acl.h" 42 42 43 + #include <trace/events/ext4.h> 43 44 /* 44 45 * define how far ahead to read directories while searching them. 45 46 */ ··· 2184 2183 struct ext4_dir_entry_2 *de; 2185 2184 handle_t *handle; 2186 2185 2186 + trace_ext4_unlink_enter(dir, dentry); 2187 2187 /* Initialize quotas before so that eventual writes go 2188 2188 * in separate transaction */ 2189 2189 dquot_initialize(dir); ··· 2230 2228 end_unlink: 2231 2229 ext4_journal_stop(handle); 2232 2230 brelse(bh); 2231 + trace_ext4_unlink_exit(dentry, retval); 2233 2232 return retval; 2234 2233 } 2235 2234 ··· 2405 2402 if (!new_inode && new_dir != old_dir && 2406 2403 EXT4_DIR_LINK_MAX(new_dir)) 2407 2404 goto end_rename; 2405 + BUFFER_TRACE(dir_bh, "get_write_access"); 2406 + retval = ext4_journal_get_write_access(handle, dir_bh); 2407 + if (retval) 2408 + goto end_rename; 2408 2409 } 2409 2410 if (!new_bh) { 2410 2411 retval = ext4_add_entry(handle, new_dentry, old_inode); ··· 2416 2409 goto end_rename; 2417 2410 } else { 2418 2411 BUFFER_TRACE(new_bh, "get write access"); 2419 - ext4_journal_get_write_access(handle, new_bh); 2412 + retval = ext4_journal_get_write_access(handle, new_bh); 2413 + if (retval) 2414 + goto end_rename; 2420 2415 new_de->inode = cpu_to_le32(old_inode->i_ino); 2421 2416 if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb, 2422 2417 EXT4_FEATURE_INCOMPAT_FILETYPE)) ··· 2479 2470 old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir); 2480 2471 ext4_update_dx_flag(old_dir); 2481 2472 if (dir_bh) { 2482 - BUFFER_TRACE(dir_bh, "get_write_access"); 2483 - ext4_journal_get_write_access(handle, dir_bh); 2484 2473 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) = 2485 2474 cpu_to_le32(new_dir->i_ino); 2486 2475 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
+9 -4
fs/ext4/page-io.c
··· 259 259 bi_sector >> (inode->i_blkbits - 9)); 260 260 } 261 261 262 + if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 263 + ext4_free_io_end(io_end); 264 + return; 265 + } 266 + 262 267 /* Add the io_end to per-inode completed io list*/ 263 268 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 264 269 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); ··· 284 279 BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP)); 285 280 bio_put(io->io_bio); 286 281 } 287 - io->io_bio = 0; 282 + io->io_bio = NULL; 288 283 io->io_op = 0; 289 - io->io_end = 0; 284 + io->io_end = NULL; 290 285 } 291 286 292 287 static int io_submit_init(struct ext4_io_submit *io, ··· 385 380 386 381 BUG_ON(!PageLocked(page)); 387 382 BUG_ON(PageWriteback(page)); 388 - set_page_writeback(page); 389 - ClearPageError(page); 390 383 391 384 io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS); 392 385 if (!io_page) { ··· 395 392 io_page->p_page = page; 396 393 atomic_set(&io_page->p_count, 1); 397 394 get_page(page); 395 + set_page_writeback(page); 396 + ClearPageError(page); 398 397 399 398 for (bh = head = page_buffers(page), block_start = 0; 400 399 bh != head || !block_start;
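Two distinct fixes sit in the page-io.c hunks: completions that need no unwritten-extent conversion now free their io_end immediately instead of queueing it, and set_page_writeback()/ClearPageError() move after the io_page allocation, so the ENOMEM path no longer leaves the page flagged as under writeback. A sketch of the corrected ordering (the error path here is paraphrased, not copied from this diff):

    io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
    if (!io_page) {
            /* paraphrased error path: back out before any page
             * state was touched */
            set_page_dirty(page);
            unlock_page(page);
            return -ENOMEM;
    }
    io_page->p_page = page;
    atomic_set(&io_page->p_count, 1);
    get_page(page);
    set_page_writeback(page);   /* only after all failure points */
    ClearPageError(page);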
+6 -6
fs/ext4/resize.c
··· 230 230 } 231 231 232 232 /* Zero out all of the reserved backup group descriptor table blocks */ 233 - ext4_debug("clear inode table blocks %#04llx -> %#04llx\n", 233 + ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", 234 234 block, sbi->s_itb_per_group); 235 235 err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb, 236 236 GFP_NOFS); ··· 248 248 249 249 /* Zero out all of the inode table blocks */ 250 250 block = input->inode_table; 251 - ext4_debug("clear inode table blocks %#04llx -> %#04llx\n", 251 + ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", 252 252 block, sbi->s_itb_per_group); 253 253 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS); 254 254 if (err) ··· 499 499 return err; 500 500 501 501 exit_inode: 502 - /* ext4_journal_release_buffer(handle, iloc.bh); */ 502 + /* ext4_handle_release_buffer(handle, iloc.bh); */ 503 503 brelse(iloc.bh); 504 504 exit_dindj: 505 - /* ext4_journal_release_buffer(handle, dind); */ 505 + /* ext4_handle_release_buffer(handle, dind); */ 506 506 exit_sbh: 507 - /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */ 507 + /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */ 508 508 exit_dind: 509 509 brelse(dind); 510 510 exit_bh: ··· 586 586 /* 587 587 int j; 588 588 for (j = 0; j < i; j++) 589 - ext4_journal_release_buffer(handle, primary[j]); 589 + ext4_handle_release_buffer(handle, primary[j]); 590 590 */ 591 591 goto exit_bh; 592 592 }
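The resize.c edits track two cleanups from elsewhere in the series: the commented-out release calls are renamed to ext4_handle_release_buffer(), and the debug format drops to %#04lx because s_itb_per_group is an unsigned long. A sketch of the unified wrapper, assuming the form the series gives it in fs/ext4/ext4_jbd2.h:

    static inline void ext4_handle_release_buffer(handle_t *handle,
                                                  struct buffer_head *bh)
    {
            /* degrades to a no-op in no-journal mode, where the
             * handle is not a live jbd2 handle */
            if (ext4_handle_valid(handle))
                    jbd2_journal_release_buffer(handle, bh);
    }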
+27 -21
fs/ext4/super.c
··· 54 54 55 55 static struct proc_dir_entry *ext4_proc_root; 56 56 static struct kset *ext4_kset; 57 - struct ext4_lazy_init *ext4_li_info; 58 - struct mutex ext4_li_mtx; 59 - struct ext4_features *ext4_feat; 57 + static struct ext4_lazy_init *ext4_li_info; 58 + static struct mutex ext4_li_mtx; 59 + static struct ext4_features *ext4_feat; 60 60 61 61 static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 62 62 unsigned long journal_devnum); ··· 75 75 static int ext4_freeze(struct super_block *sb); 76 76 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, 77 77 const char *dev_name, void *data); 78 + static int ext4_feature_set_ok(struct super_block *sb, int readonly); 78 79 static void ext4_destroy_lazyinit_thread(void); 79 80 static void ext4_unregister_li_request(struct super_block *sb); 80 81 static void ext4_clear_request_list(void); ··· 595 594 596 595 vaf.fmt = fmt; 597 596 vaf.va = &args; 598 - printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u", 597 + printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", 599 598 sb->s_id, function, line, grp); 600 599 if (ino) 601 600 printk(KERN_CONT "inode %lu: ", ino); ··· 998 997 if (test_opt(sb, OLDALLOC)) 999 998 seq_puts(seq, ",oldalloc"); 1000 999 #ifdef CONFIG_EXT4_FS_XATTR 1001 - if (test_opt(sb, XATTR_USER) && 1002 - !(def_mount_opts & EXT4_DEFM_XATTR_USER)) 1000 + if (test_opt(sb, XATTR_USER)) 1003 1001 seq_puts(seq, ",user_xattr"); 1004 - if (!test_opt(sb, XATTR_USER) && 1005 - (def_mount_opts & EXT4_DEFM_XATTR_USER)) { 1002 + if (!test_opt(sb, XATTR_USER)) 1006 1003 seq_puts(seq, ",nouser_xattr"); 1007 - } 1008 1004 #endif 1009 1005 #ifdef CONFIG_EXT4_FS_POSIX_ACL 1010 1006 if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL)) ··· 1039 1041 !(def_mount_opts & EXT4_DEFM_NODELALLOC)) 1040 1042 seq_puts(seq, ",nodelalloc"); 1041 1043 1042 - if (test_opt(sb, MBLK_IO_SUBMIT)) 1043 - seq_puts(seq, ",mblk_io_submit"); 1044 + if (!test_opt(sb, MBLK_IO_SUBMIT)) 1045 + seq_puts(seq, ",nomblk_io_submit"); 1044 1046 if (sbi->s_stripe) 1045 1047 seq_printf(seq, ",stripe=%lu", sbi->s_stripe); 1046 1048 /* ··· 1449 1451 * Initialize args struct so we know whether arg was 1450 1452 * found; some options take optional arguments. 
1451 1453 */ 1452 - args[0].to = args[0].from = 0; 1454 + args[0].to = args[0].from = NULL; 1453 1455 token = match_token(p, tokens, args); 1454 1456 switch (token) { 1455 1457 case Opt_bsd_df: ··· 1769 1771 return 0; 1770 1772 if (option < 0 || option > (1 << 30)) 1771 1773 return 0; 1772 - if (!is_power_of_2(option)) { 1774 + if (option && !is_power_of_2(option)) { 1773 1775 ext4_msg(sb, KERN_ERR, 1774 1776 "EXT4-fs: inode_readahead_blks" 1775 1777 " must be a power of 2"); ··· 2118 2120 return; 2119 2121 } 2120 2122 2123 + /* Check if feature set would not allow a r/w mount */ 2124 + if (!ext4_feature_set_ok(sb, 0)) { 2125 + ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " 2126 + "unknown ROCOMPAT features"); 2127 + return; 2128 + } 2129 + 2121 2130 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 2122 2131 if (es->s_last_orphan) 2123 2132 jbd_debug(1, "Errors on filesystem, " ··· 2417 2412 if (parse_strtoul(buf, 0x40000000, &t)) 2418 2413 return -EINVAL; 2419 2414 2420 - if (!is_power_of_2(t)) 2415 + if (t && !is_power_of_2(t)) 2421 2416 return -EINVAL; 2422 2417 2423 2418 sbi->s_inode_readahead_blks = t; ··· 3100 3095 } 3101 3096 if (def_mount_opts & EXT4_DEFM_UID16) 3102 3097 set_opt(sb, NO_UID32); 3098 + /* xattr user namespace & acls are now defaulted on */ 3103 3099 #ifdef CONFIG_EXT4_FS_XATTR 3104 - if (def_mount_opts & EXT4_DEFM_XATTR_USER) 3105 - set_opt(sb, XATTR_USER); 3100 + set_opt(sb, XATTR_USER); 3106 3101 #endif 3107 3102 #ifdef CONFIG_EXT4_FS_POSIX_ACL 3108 - if (def_mount_opts & EXT4_DEFM_ACL) 3109 - set_opt(sb, POSIX_ACL); 3103 + set_opt(sb, POSIX_ACL); 3110 3104 #endif 3105 + set_opt(sb, MBLK_IO_SUBMIT); 3111 3106 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 3112 3107 set_opt(sb, JOURNAL_DATA); 3113 3108 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) ··· 3521 3516 * concurrency isn't really necessary. Limit it to 1. 3522 3517 */ 3523 3518 EXT4_SB(sb)->dio_unwritten_wq = 3524 - alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1); 3519 + alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 3525 3520 if (!EXT4_SB(sb)->dio_unwritten_wq) { 3526 3521 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); 3527 3522 goto failed_mount_wq; ··· 3536 3531 if (IS_ERR(root)) { 3537 3532 ext4_msg(sb, KERN_ERR, "get root inode failed"); 3538 3533 ret = PTR_ERR(root); 3534 + root = NULL; 3539 3535 goto failed_mount4; 3540 3536 } 3541 3537 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 3542 - iput(root); 3543 3538 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 3544 3539 goto failed_mount4; 3545 3540 } 3546 3541 sb->s_root = d_alloc_root(root); 3547 3542 if (!sb->s_root) { 3548 3543 ext4_msg(sb, KERN_ERR, "get root dentry failed"); 3549 - iput(root); 3550 3544 ret = -ENOMEM; 3551 3545 goto failed_mount4; 3552 3546 } ··· 3661 3657 goto failed_mount; 3662 3658 3663 3659 failed_mount4: 3660 + iput(root); 3661 + sb->s_root = NULL; 3664 3662 ext4_msg(sb, KERN_ERR, "mount failed"); 3665 3663 destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); 3666 3664 failed_mount_wq:
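The super.c changes bundle several fixes: the lazy-init globals become static, orphan cleanup is skipped when unknown ROCOMPAT features are present, the root inode is now iput exactly once on the mount error path, and both the mount option and the sysfs knob for inode_readahead_blks accept 0 again. That last one is subtle: is_power_of_2(0) is false, so the old check rejected 0 even though 0 is the documented way to disable inode readahead. A hypothetical helper capturing the corrected rule (illustration only, not in the patch):

    #include <linux/log2.h>

    /* hypothetical: 0 disables readahead and is valid; anything else
     * must be a power of two no larger than 2^30 */
    static bool inode_readahead_blks_valid(unsigned long v)
    {
            return v == 0 || (is_power_of_2(v) && v <= (1 << 30));
    }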
+2 -2
fs/ext4/xattr.c
··· 735 735 int offset = (char *)s->here - bs->bh->b_data; 736 736 737 737 unlock_buffer(bs->bh); 738 - jbd2_journal_release_buffer(handle, bs->bh); 738 + ext4_handle_release_buffer(handle, bs->bh); 739 739 if (ce) { 740 740 mb_cache_entry_release(ce); 741 741 ce = NULL; ··· 833 833 new_bh = sb_getblk(sb, block); 834 834 if (!new_bh) { 835 835 getblk_failed: 836 - ext4_free_blocks(handle, inode, 0, block, 1, 836 + ext4_free_blocks(handle, inode, NULL, block, 1, 837 837 EXT4_FREE_BLOCKS_METADATA); 838 838 error = -EIO; 839 839 goto cleanup;
+25 -3
include/linux/jbd2.h
··· 432 432 int h_err; 433 433 434 434 /* Flags [no locking] */ 435 - unsigned int h_sync: 1; /* sync-on-close */ 436 - unsigned int h_jdata: 1; /* force data journaling */ 437 - unsigned int h_aborted: 1; /* fatal error on handle */ 435 + unsigned int h_sync:1; /* sync-on-close */ 436 + unsigned int h_jdata:1; /* force data journaling */ 437 + unsigned int h_aborted:1; /* fatal error on handle */ 438 + unsigned int h_cowing:1; /* COWing block to snapshot */ 439 + 440 + /* Number of buffers requested by user: 441 + * (before adding the COW credits factor) */ 442 + unsigned int h_base_credits:14; 443 + 444 + /* Number of buffers the user is allowed to dirty: 445 + * (counts only buffers dirtied when !h_cowing) */ 446 + unsigned int h_user_credits:14; 447 + 438 448 439 449 #ifdef CONFIG_DEBUG_LOCK_ALLOC 440 450 struct lockdep_map h_lockdep_map; 451 + #endif 452 + 453 + #ifdef CONFIG_JBD2_DEBUG 454 + /* COW debugging counters: */ 455 + unsigned int h_cow_moved; /* blocks moved to snapshot */ 456 + unsigned int h_cow_copied; /* blocks copied to snapshot */ 457 + unsigned int h_cow_ok_jh; /* blocks already COWed during current 458 + transaction */ 459 + unsigned int h_cow_ok_bitmap; /* blocks not set in COW bitmap */ 460 + unsigned int h_cow_ok_mapped;/* blocks already mapped in snapshot */ 461 + unsigned int h_cow_bitmaps; /* COW bitmaps created */ 462 + unsigned int h_cow_excluded; /* blocks set in exclude bitmap */ 441 463 #endif 442 464 }; 443 465
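The handle_s additions stake out space for the then out-of-tree ext4 snapshots work: an h_cowing flag plus base and user credit counters. The packing is deliberate: four 1-bit flags and two 14-bit counters total 4 + 14 + 14 = 32 bits, one word of bitfield storage on common ABIs. A hypothetical compile-time check of that arithmetic (not in the patch):

    #include <linux/kernel.h>

    /* hypothetical: the COW flags and credit counters share a single
     * 32-bit bitfield word with the existing handle flags */
    static inline void jbd2_handle_bits_check(void)
    {
            BUILD_BUG_ON(4 * 1 + 14 + 14 > 32);
    }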
+7
include/linux/journal-head.h
··· 41 41 unsigned b_modified; 42 42 43 43 /* 44 + * This field tracks the last transaction id in which this buffer 45 + * has been cowed 46 + * [jbd_lock_bh_state()] 47 + */ 48 + unsigned b_cow_tid; 49 + 50 + /* 44 51 * Copy of the buffer data frozen for writing to the log. 45 52 * [jbd_lock_bh_state()] 46 53 */
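b_cow_tid pairs with the handle fields above: remembering the tid of the last transaction that COWed a buffer lets the snapshot code copy each block at most once per transaction. A hypothetical sketch of the check, inferred from the field's comment (the consumer lived out of tree):

    /* all access under jbd_lock_bh_state(), per the field's comment */
    if (jh->b_cow_tid == handle->h_transaction->t_tid)
            return 0;  /* already COWed in this transaction */
    /* ... copy the buffer's block into the snapshot file ... */
    jh->b_cow_tid = handle->h_transaction->t_tid;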
+578 -197
include/trace/events/ext4.h
··· 21 21 TP_ARGS(inode), 22 22 23 23 TP_STRUCT__entry( 24 - __field( int, dev_major ) 25 - __field( int, dev_minor ) 24 + __field( dev_t, dev ) 26 25 __field( ino_t, ino ) 27 26 __field( umode_t, mode ) 28 27 __field( uid_t, uid ) ··· 30 31 ), 31 32 32 33 TP_fast_assign( 33 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 34 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 34 + __entry->dev = inode->i_sb->s_dev; 35 35 __entry->ino = inode->i_ino; 36 36 __entry->mode = inode->i_mode; 37 37 __entry->uid = inode->i_uid; ··· 39 41 ), 40 42 41 43 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", 42 - __entry->dev_major, __entry->dev_minor, 43 - (unsigned long) __entry->ino, __entry->mode, 44 - __entry->uid, __entry->gid, 44 + MAJOR(__entry->dev), MINOR(__entry->dev), 45 + (unsigned long) __entry->ino, 46 + __entry->mode, __entry->uid, __entry->gid, 45 47 (unsigned long long) __entry->blocks) 46 48 ); 47 49 ··· 51 53 TP_ARGS(dir, mode), 52 54 53 55 TP_STRUCT__entry( 54 - __field( int, dev_major ) 55 - __field( int, dev_minor ) 56 + __field( dev_t, dev ) 56 57 __field( ino_t, dir ) 57 58 __field( umode_t, mode ) 58 59 ), 59 60 60 61 TP_fast_assign( 61 - __entry->dev_major = MAJOR(dir->i_sb->s_dev); 62 - __entry->dev_minor = MINOR(dir->i_sb->s_dev); 62 + __entry->dev = dir->i_sb->s_dev; 63 63 __entry->dir = dir->i_ino; 64 64 __entry->mode = mode; 65 65 ), 66 66 67 67 TP_printk("dev %d,%d dir %lu mode 0%o", 68 - __entry->dev_major, __entry->dev_minor, 68 + MAJOR(__entry->dev), MINOR(__entry->dev), 69 69 (unsigned long) __entry->dir, __entry->mode) 70 70 ); 71 71 ··· 73 77 TP_ARGS(inode, dir, mode), 74 78 75 79 TP_STRUCT__entry( 76 - __field( int, dev_major ) 77 - __field( int, dev_minor ) 80 + __field( dev_t, dev ) 78 81 __field( ino_t, ino ) 79 82 __field( ino_t, dir ) 80 83 __field( umode_t, mode ) 81 84 ), 82 85 83 86 TP_fast_assign( 84 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 85 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 87 + __entry->dev = inode->i_sb->s_dev; 86 88 __entry->ino = inode->i_ino; 87 89 __entry->dir = dir->i_ino; 88 90 __entry->mode = mode; 89 91 ), 90 92 91 93 TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", 92 - __entry->dev_major, __entry->dev_minor, 94 + MAJOR(__entry->dev), MINOR(__entry->dev), 93 95 (unsigned long) __entry->ino, 94 96 (unsigned long) __entry->dir, __entry->mode) 95 97 ); ··· 98 104 TP_ARGS(inode), 99 105 100 106 TP_STRUCT__entry( 101 - __field( int, dev_major ) 102 - __field( int, dev_minor ) 107 + __field( dev_t, dev ) 103 108 __field( ino_t, ino ) 104 109 __field( int, nlink ) 105 110 ), 106 111 107 112 TP_fast_assign( 108 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 109 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 113 + __entry->dev = inode->i_sb->s_dev; 110 114 __entry->ino = inode->i_ino; 111 115 __entry->nlink = inode->i_nlink; 112 116 ), 113 117 114 118 TP_printk("dev %d,%d ino %lu nlink %d", 115 - __entry->dev_major, __entry->dev_minor, 119 + MAJOR(__entry->dev), MINOR(__entry->dev), 116 120 (unsigned long) __entry->ino, __entry->nlink) 117 121 ); 118 122 ··· 120 128 TP_ARGS(inode, drop), 121 129 122 130 TP_STRUCT__entry( 123 - __field( int, dev_major ) 124 - __field( int, dev_minor ) 131 + __field( dev_t, dev ) 125 132 __field( ino_t, ino ) 126 133 __field( int, drop ) 127 134 ), 128 135 129 136 TP_fast_assign( 130 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 131 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 137 + __entry->dev = inode->i_sb->s_dev; 132 138 __entry->ino = inode->i_ino; 133 139 
__entry->drop = drop; 134 140 ), 135 141 136 142 TP_printk("dev %d,%d ino %lu drop %d", 137 - __entry->dev_major, __entry->dev_minor, 143 + MAJOR(__entry->dev), MINOR(__entry->dev), 138 144 (unsigned long) __entry->ino, __entry->drop) 139 145 ); 140 146 ··· 142 152 TP_ARGS(inode, IP), 143 153 144 154 TP_STRUCT__entry( 145 - __field( int, dev_major ) 146 - __field( int, dev_minor ) 155 + __field( dev_t, dev ) 147 156 __field( ino_t, ino ) 148 157 __field(unsigned long, ip ) 149 158 ), 150 159 151 160 TP_fast_assign( 152 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 153 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 161 + __entry->dev = inode->i_sb->s_dev; 154 162 __entry->ino = inode->i_ino; 155 163 __entry->ip = IP; 156 164 ), 157 165 158 166 TP_printk("dev %d,%d ino %lu caller %pF", 159 - __entry->dev_major, __entry->dev_minor, 167 + MAJOR(__entry->dev), MINOR(__entry->dev), 160 168 (unsigned long) __entry->ino, (void *)__entry->ip) 161 169 ); 162 170 ··· 164 176 TP_ARGS(inode, new_size), 165 177 166 178 TP_STRUCT__entry( 167 - __field( int, dev_major ) 168 - __field( int, dev_minor ) 179 + __field( dev_t, dev ) 169 180 __field( ino_t, ino ) 170 181 __field( loff_t, new_size ) 171 182 ), 172 183 173 184 TP_fast_assign( 174 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 175 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 185 + __entry->dev = inode->i_sb->s_dev; 176 186 __entry->ino = inode->i_ino; 177 187 __entry->new_size = new_size; 178 188 ), 179 189 180 190 TP_printk("dev %d,%d ino %lu new_size %lld", 181 - __entry->dev_major, __entry->dev_minor, 191 + MAJOR(__entry->dev), MINOR(__entry->dev), 182 192 (unsigned long) __entry->ino, 183 193 (long long) __entry->new_size) 184 194 ); ··· 189 203 TP_ARGS(inode, pos, len, flags), 190 204 191 205 TP_STRUCT__entry( 192 - __field( int, dev_major ) 193 - __field( int, dev_minor ) 206 + __field( dev_t, dev ) 194 207 __field( ino_t, ino ) 195 208 __field( loff_t, pos ) 196 209 __field( unsigned int, len ) ··· 197 212 ), 198 213 199 214 TP_fast_assign( 200 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 201 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 215 + __entry->dev = inode->i_sb->s_dev; 202 216 __entry->ino = inode->i_ino; 203 217 __entry->pos = pos; 204 218 __entry->len = len; ··· 205 221 ), 206 222 207 223 TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", 208 - __entry->dev_major, __entry->dev_minor, 224 + MAJOR(__entry->dev), MINOR(__entry->dev), 209 225 (unsigned long) __entry->ino, 210 226 __entry->pos, __entry->len, __entry->flags) 211 227 ); ··· 233 249 TP_ARGS(inode, pos, len, copied), 234 250 235 251 TP_STRUCT__entry( 236 - __field( int, dev_major ) 237 - __field( int, dev_minor ) 252 + __field( dev_t, dev ) 238 253 __field( ino_t, ino ) 239 254 __field( loff_t, pos ) 240 255 __field( unsigned int, len ) ··· 241 258 ), 242 259 243 260 TP_fast_assign( 244 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 245 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 261 + __entry->dev = inode->i_sb->s_dev; 246 262 __entry->ino = inode->i_ino; 247 263 __entry->pos = pos; 248 264 __entry->len = len; ··· 249 267 ), 250 268 251 269 TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", 252 - __entry->dev_major, __entry->dev_minor, 253 - (unsigned long) __entry->ino, __entry->pos, 254 - __entry->len, __entry->copied) 270 + MAJOR(__entry->dev), MINOR(__entry->dev), 271 + (unsigned long) __entry->ino, 272 + __entry->pos, __entry->len, __entry->copied) 255 273 ); 256 274 257 275 DEFINE_EVENT(ext4__write_end, 
ext4_ordered_write_end, ··· 292 310 TP_ARGS(inode, page), 293 311 294 312 TP_STRUCT__entry( 295 - __field( int, dev_major ) 296 - __field( int, dev_minor ) 313 + __field( dev_t, dev ) 297 314 __field( ino_t, ino ) 298 315 __field( pgoff_t, index ) 299 316 300 317 ), 301 318 302 319 TP_fast_assign( 303 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 304 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 320 + __entry->dev = inode->i_sb->s_dev; 305 321 __entry->ino = inode->i_ino; 306 322 __entry->index = page->index; 307 323 ), 308 324 309 325 TP_printk("dev %d,%d ino %lu page_index %lu", 310 - __entry->dev_major, __entry->dev_minor, 326 + MAJOR(__entry->dev), MINOR(__entry->dev), 311 327 (unsigned long) __entry->ino, __entry->index) 312 328 ); 313 329 ··· 315 335 TP_ARGS(inode, wbc), 316 336 317 337 TP_STRUCT__entry( 318 - __field( int, dev_major ) 319 - __field( int, dev_minor ) 338 + __field( dev_t, dev ) 320 339 __field( ino_t, ino ) 321 340 __field( long, nr_to_write ) 322 341 __field( long, pages_skipped ) 323 342 __field( loff_t, range_start ) 324 343 __field( loff_t, range_end ) 344 + __field( int, sync_mode ) 325 345 __field( char, for_kupdate ) 326 - __field( char, for_reclaim ) 327 346 __field( char, range_cyclic ) 328 347 __field( pgoff_t, writeback_index ) 329 348 ), 330 349 331 350 TP_fast_assign( 332 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 333 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 351 + __entry->dev = inode->i_sb->s_dev; 334 352 __entry->ino = inode->i_ino; 335 353 __entry->nr_to_write = wbc->nr_to_write; 336 354 __entry->pages_skipped = wbc->pages_skipped; 337 355 __entry->range_start = wbc->range_start; 338 356 __entry->range_end = wbc->range_end; 357 + __entry->sync_mode = wbc->sync_mode; 339 358 __entry->for_kupdate = wbc->for_kupdate; 340 - __entry->for_reclaim = wbc->for_reclaim; 341 359 __entry->range_cyclic = wbc->range_cyclic; 342 360 __entry->writeback_index = inode->i_mapping->writeback_index; 343 361 ), 344 362 345 363 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " 346 - "range_start %llu range_end %llu " 347 - "for_kupdate %d for_reclaim %d " 348 - "range_cyclic %d writeback_index %lu", 349 - __entry->dev_major, __entry->dev_minor, 364 + "range_start %llu range_end %llu sync_mode %d" 365 + "for_kupdate %d range_cyclic %d writeback_index %lu", 366 + MAJOR(__entry->dev), MINOR(__entry->dev), 350 367 (unsigned long) __entry->ino, __entry->nr_to_write, 351 368 __entry->pages_skipped, __entry->range_start, 352 - __entry->range_end, 353 - __entry->for_kupdate, __entry->for_reclaim, 354 - __entry->range_cyclic, 369 + __entry->range_end, __entry->sync_mode, 370 + __entry->for_kupdate, __entry->range_cyclic, 355 371 (unsigned long) __entry->writeback_index) 356 372 ); 357 373 ··· 357 381 TP_ARGS(inode, mpd), 358 382 359 383 TP_STRUCT__entry( 360 - __field( int, dev_major ) 361 - __field( int, dev_minor ) 384 + __field( dev_t, dev ) 362 385 __field( ino_t, ino ) 363 386 __field( __u64, b_blocknr ) 364 387 __field( __u32, b_size ) ··· 365 390 __field( unsigned long, first_page ) 366 391 __field( int, io_done ) 367 392 __field( int, pages_written ) 393 + __field( int, sync_mode ) 368 394 ), 369 395 370 396 TP_fast_assign( 371 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 372 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 397 + __entry->dev = inode->i_sb->s_dev; 373 398 __entry->ino = inode->i_ino; 374 399 __entry->b_blocknr = mpd->b_blocknr; 375 400 __entry->b_size = mpd->b_size; ··· 377 402 __entry->first_page = 
mpd->first_page; 378 403 __entry->io_done = mpd->io_done; 379 404 __entry->pages_written = mpd->pages_written; 405 + __entry->sync_mode = mpd->wbc->sync_mode; 380 406 ), 381 407 382 - TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d", 383 - __entry->dev_major, __entry->dev_minor, 408 + TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x " 409 + "first_page %lu io_done %d pages_written %d sync_mode %d", 410 + MAJOR(__entry->dev), MINOR(__entry->dev), 384 411 (unsigned long) __entry->ino, 385 412 __entry->b_blocknr, __entry->b_size, 386 413 __entry->b_state, __entry->first_page, 387 - __entry->io_done, __entry->pages_written) 414 + __entry->io_done, __entry->pages_written, 415 + __entry->sync_mode 416 + ) 388 417 ); 389 418 390 419 TRACE_EVENT(ext4_da_writepages_result, ··· 398 419 TP_ARGS(inode, wbc, ret, pages_written), 399 420 400 421 TP_STRUCT__entry( 401 - __field( int, dev_major ) 402 - __field( int, dev_minor ) 422 + __field( dev_t, dev ) 403 423 __field( ino_t, ino ) 404 424 __field( int, ret ) 405 425 __field( int, pages_written ) 406 426 __field( long, pages_skipped ) 427 + __field( int, sync_mode ) 407 428 __field( char, more_io ) 408 429 __field( pgoff_t, writeback_index ) 409 430 ), 410 431 411 432 TP_fast_assign( 412 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 413 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 433 + __entry->dev = inode->i_sb->s_dev; 414 434 __entry->ino = inode->i_ino; 415 435 __entry->ret = ret; 416 436 __entry->pages_written = pages_written; 417 437 __entry->pages_skipped = wbc->pages_skipped; 438 + __entry->sync_mode = wbc->sync_mode; 418 439 __entry->more_io = wbc->more_io; 419 440 __entry->writeback_index = inode->i_mapping->writeback_index; 420 441 ), 421 442 422 - TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu", 423 - __entry->dev_major, __entry->dev_minor, 443 + TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld " 444 + " more_io %d sync_mode %d writeback_index %lu", 445 + MAJOR(__entry->dev), MINOR(__entry->dev), 424 446 (unsigned long) __entry->ino, __entry->ret, 425 447 __entry->pages_written, __entry->pages_skipped, 426 - __entry->more_io, 448 + __entry->more_io, __entry->sync_mode, 427 449 (unsigned long) __entry->writeback_index) 450 + ); 451 + 452 + DECLARE_EVENT_CLASS(ext4__page_op, 453 + TP_PROTO(struct page *page), 454 + 455 + TP_ARGS(page), 456 + 457 + TP_STRUCT__entry( 458 + __field( pgoff_t, index ) 459 + __field( ino_t, ino ) 460 + __field( dev_t, dev ) 461 + 462 + ), 463 + 464 + TP_fast_assign( 465 + __entry->index = page->index; 466 + __entry->ino = page->mapping->host->i_ino; 467 + __entry->dev = page->mapping->host->i_sb->s_dev; 468 + ), 469 + 470 + TP_printk("dev %d,%d ino %lu page_index %lu", 471 + MAJOR(__entry->dev), MINOR(__entry->dev), 472 + (unsigned long) __entry->ino, 473 + __entry->index) 474 + ); 475 + 476 + DEFINE_EVENT(ext4__page_op, ext4_readpage, 477 + 478 + TP_PROTO(struct page *page), 479 + 480 + TP_ARGS(page) 481 + ); 482 + 483 + DEFINE_EVENT(ext4__page_op, ext4_releasepage, 484 + 485 + TP_PROTO(struct page *page), 486 + 487 + TP_ARGS(page) 488 + ); 489 + 490 + TRACE_EVENT(ext4_invalidatepage, 491 + TP_PROTO(struct page *page, unsigned long offset), 492 + 493 + TP_ARGS(page, offset), 494 + 495 + TP_STRUCT__entry( 496 + __field( pgoff_t, index ) 497 + __field( unsigned long, offset ) 498 + __field( ino_t, ino ) 499 + __field( dev_t, dev ) 500 + 501 + ), 502 
+ 503 + TP_fast_assign( 504 + __entry->index = page->index; 505 + __entry->offset = offset; 506 + __entry->ino = page->mapping->host->i_ino; 507 + __entry->dev = page->mapping->host->i_sb->s_dev; 508 + ), 509 + 510 + TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", 511 + MAJOR(__entry->dev), MINOR(__entry->dev), 512 + (unsigned long) __entry->ino, 513 + __entry->index, __entry->offset) 428 514 ); 429 515 430 516 TRACE_EVENT(ext4_discard_blocks, ··· 499 455 TP_ARGS(sb, blk, count), 500 456 501 457 TP_STRUCT__entry( 502 - __field( int, dev_major ) 503 - __field( int, dev_minor ) 458 + __field( dev_t, dev ) 504 459 __field( __u64, blk ) 505 460 __field( __u64, count ) 506 461 507 462 ), 508 463 509 464 TP_fast_assign( 510 - __entry->dev_major = MAJOR(sb->s_dev); 511 - __entry->dev_minor = MINOR(sb->s_dev); 465 + __entry->dev = sb->s_dev; 512 466 __entry->blk = blk; 513 467 __entry->count = count; 514 468 ), 515 469 516 470 TP_printk("dev %d,%d blk %llu count %llu", 517 - __entry->dev_major, __entry->dev_minor, 471 + MAJOR(__entry->dev), MINOR(__entry->dev), 518 472 __entry->blk, __entry->count) 519 473 ); 520 474 ··· 523 481 TP_ARGS(ac, pa), 524 482 525 483 TP_STRUCT__entry( 526 - __field( int, dev_major ) 527 - __field( int, dev_minor ) 484 + __field( dev_t, dev ) 528 485 __field( ino_t, ino ) 529 486 __field( __u64, pa_pstart ) 530 487 __field( __u32, pa_len ) ··· 532 491 ), 533 492 534 493 TP_fast_assign( 535 - __entry->dev_major = MAJOR(ac->ac_sb->s_dev); 536 - __entry->dev_minor = MINOR(ac->ac_sb->s_dev); 494 + __entry->dev = ac->ac_sb->s_dev; 537 495 __entry->ino = ac->ac_inode->i_ino; 538 496 __entry->pa_pstart = pa->pa_pstart; 539 497 __entry->pa_len = pa->pa_len; ··· 540 500 ), 541 501 542 502 TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu", 543 - __entry->dev_major, __entry->dev_minor, 544 - (unsigned long) __entry->ino, __entry->pa_pstart, 545 - __entry->pa_len, __entry->pa_lstart) 503 + MAJOR(__entry->dev), MINOR(__entry->dev), 504 + (unsigned long) __entry->ino, 505 + __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) 546 506 ); 547 507 548 508 DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa, ··· 570 530 TP_ARGS(sb, inode, pa, block, count), 571 531 572 532 TP_STRUCT__entry( 573 - __field( int, dev_major ) 574 - __field( int, dev_minor ) 533 + __field( dev_t, dev ) 575 534 __field( ino_t, ino ) 576 535 __field( __u64, block ) 577 536 __field( __u32, count ) ··· 578 539 ), 579 540 580 541 TP_fast_assign( 581 - __entry->dev_major = MAJOR(sb->s_dev); 582 - __entry->dev_minor = MINOR(sb->s_dev); 542 + __entry->dev = sb->s_dev; 583 543 __entry->ino = inode->i_ino; 584 544 __entry->block = block; 585 545 __entry->count = count; 586 546 ), 587 547 588 548 TP_printk("dev %d,%d ino %lu block %llu count %u", 589 - __entry->dev_major, __entry->dev_minor, 590 - (unsigned long) __entry->ino, __entry->block, __entry->count) 549 + MAJOR(__entry->dev), MINOR(__entry->dev), 550 + (unsigned long) __entry->ino, 551 + __entry->block, __entry->count) 591 552 ); 592 553 593 554 TRACE_EVENT(ext4_mb_release_group_pa, ··· 597 558 TP_ARGS(sb, pa), 598 559 599 560 TP_STRUCT__entry( 600 - __field( int, dev_major ) 601 - __field( int, dev_minor ) 561 + __field( dev_t, dev ) 602 562 __field( __u64, pa_pstart ) 603 563 __field( __u32, pa_len ) 604 564 605 565 ), 606 566 607 567 TP_fast_assign( 608 - __entry->dev_major = MAJOR(sb->s_dev); 609 - __entry->dev_minor = MINOR(sb->s_dev); 568 + __entry->dev = sb->s_dev; 610 569 __entry->pa_pstart = pa->pa_pstart; 611 570 __entry->pa_len 
= pa->pa_len; 612 571 ), 613 572 614 573 TP_printk("dev %d,%d pstart %llu len %u", 615 - __entry->dev_major, __entry->dev_minor, 574 + MAJOR(__entry->dev), MINOR(__entry->dev), 616 575 __entry->pa_pstart, __entry->pa_len) 617 576 ); 618 577 ··· 620 583 TP_ARGS(inode), 621 584 622 585 TP_STRUCT__entry( 623 - __field( int, dev_major ) 624 - __field( int, dev_minor ) 586 + __field( dev_t, dev ) 625 587 __field( ino_t, ino ) 626 588 627 589 ), 628 590 629 591 TP_fast_assign( 630 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 631 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 592 + __entry->dev = inode->i_sb->s_dev; 632 593 __entry->ino = inode->i_ino; 633 594 ), 634 595 635 596 TP_printk("dev %d,%d ino %lu", 636 - __entry->dev_major, __entry->dev_minor, 597 + MAJOR(__entry->dev), MINOR(__entry->dev), 637 598 (unsigned long) __entry->ino) 638 599 ); 639 600 ··· 641 606 TP_ARGS(sb, needed), 642 607 643 608 TP_STRUCT__entry( 644 - __field( int, dev_major ) 645 - __field( int, dev_minor ) 609 + __field( dev_t, dev ) 646 610 __field( int, needed ) 647 611 648 612 ), 649 613 650 614 TP_fast_assign( 651 - __entry->dev_major = MAJOR(sb->s_dev); 652 - __entry->dev_minor = MINOR(sb->s_dev); 615 + __entry->dev = sb->s_dev; 653 616 __entry->needed = needed; 654 617 ), 655 618 656 619 TP_printk("dev %d,%d needed %d", 657 - __entry->dev_major, __entry->dev_minor, __entry->needed) 620 + MAJOR(__entry->dev), MINOR(__entry->dev), 621 + __entry->needed) 658 622 ); 659 623 660 624 TRACE_EVENT(ext4_request_blocks, ··· 662 628 TP_ARGS(ar), 663 629 664 630 TP_STRUCT__entry( 665 - __field( int, dev_major ) 666 - __field( int, dev_minor ) 631 + __field( dev_t, dev ) 667 632 __field( ino_t, ino ) 668 633 __field( unsigned int, flags ) 669 634 __field( unsigned int, len ) ··· 675 642 ), 676 643 677 644 TP_fast_assign( 678 - __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); 679 - __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev); 645 + __entry->dev = ar->inode->i_sb->s_dev; 680 646 __entry->ino = ar->inode->i_ino; 681 647 __entry->flags = ar->flags; 682 648 __entry->len = ar->len; ··· 687 655 __entry->pright = ar->pright; 688 656 ), 689 657 690 - TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", 691 - __entry->dev_major, __entry->dev_minor, 658 + TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu " 659 + "lleft %llu lright %llu pleft %llu pright %llu ", 660 + MAJOR(__entry->dev), MINOR(__entry->dev), 692 661 (unsigned long) __entry->ino, 693 662 __entry->flags, __entry->len, 694 663 (unsigned long long) __entry->logical, ··· 706 673 TP_ARGS(ar, block), 707 674 708 675 TP_STRUCT__entry( 709 - __field( int, dev_major ) 710 - __field( int, dev_minor ) 676 + __field( dev_t, dev ) 711 677 __field( ino_t, ino ) 712 678 __field( __u64, block ) 713 679 __field( unsigned int, flags ) ··· 720 688 ), 721 689 722 690 TP_fast_assign( 723 - __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); 724 - __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev); 691 + __entry->dev = ar->inode->i_sb->s_dev; 725 692 __entry->ino = ar->inode->i_ino; 726 693 __entry->block = block; 727 694 __entry->flags = ar->flags; ··· 733 702 __entry->pright = ar->pright; 734 703 ), 735 704 736 - TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", 737 - __entry->dev_major, __entry->dev_minor, 738 - (unsigned long) __entry->ino, __entry->flags, 739 - __entry->len, __entry->block, 705 + TP_printk("dev %d,%d 
ino %lu flags %u len %u block %llu lblk %llu " 706 + "goal %llu lleft %llu lright %llu pleft %llu pright %llu", 707 + MAJOR(__entry->dev), MINOR(__entry->dev), 708 + (unsigned long) __entry->ino, 709 + __entry->flags, __entry->len, __entry->block, 740 710 (unsigned long long) __entry->logical, 741 711 (unsigned long long) __entry->goal, 742 712 (unsigned long long) __entry->lleft, ··· 753 721 TP_ARGS(inode, block, count, flags), 754 722 755 723 TP_STRUCT__entry( 756 - __field( int, dev_major ) 757 - __field( int, dev_minor ) 724 + __field( dev_t, dev ) 758 725 __field( ino_t, ino ) 759 726 __field( umode_t, mode ) 760 727 __field( __u64, block ) ··· 762 731 ), 763 732 764 733 TP_fast_assign( 765 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 766 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 734 + __entry->dev = inode->i_sb->s_dev; 767 735 __entry->ino = inode->i_ino; 768 736 __entry->mode = inode->i_mode; 769 737 __entry->block = block; ··· 771 741 ), 772 742 773 743 TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d", 774 - __entry->dev_major, __entry->dev_minor, 744 + MAJOR(__entry->dev), MINOR(__entry->dev), 775 745 (unsigned long) __entry->ino, 776 746 __entry->mode, __entry->block, __entry->count, 777 747 __entry->flags) 778 748 ); 779 749 780 - TRACE_EVENT(ext4_sync_file, 750 + TRACE_EVENT(ext4_sync_file_enter, 781 751 TP_PROTO(struct file *file, int datasync), 782 752 783 753 TP_ARGS(file, datasync), 784 754 785 755 TP_STRUCT__entry( 786 - __field( int, dev_major ) 787 - __field( int, dev_minor ) 756 + __field( dev_t, dev ) 788 757 __field( ino_t, ino ) 789 758 __field( ino_t, parent ) 790 759 __field( int, datasync ) ··· 792 763 TP_fast_assign( 793 764 struct dentry *dentry = file->f_path.dentry; 794 765 795 - __entry->dev_major = MAJOR(dentry->d_inode->i_sb->s_dev); 796 - __entry->dev_minor = MINOR(dentry->d_inode->i_sb->s_dev); 766 + __entry->dev = dentry->d_inode->i_sb->s_dev; 797 767 __entry->ino = dentry->d_inode->i_ino; 798 768 __entry->datasync = datasync; 799 769 __entry->parent = dentry->d_parent->d_inode->i_ino; 800 770 ), 801 771 802 772 TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", 803 - __entry->dev_major, __entry->dev_minor, 773 + MAJOR(__entry->dev), MINOR(__entry->dev), 804 774 (unsigned long) __entry->ino, 805 775 (unsigned long) __entry->parent, __entry->datasync) 776 + ); 777 + 778 + TRACE_EVENT(ext4_sync_file_exit, 779 + TP_PROTO(struct inode *inode, int ret), 780 + 781 + TP_ARGS(inode, ret), 782 + 783 + TP_STRUCT__entry( 784 + __field( int, ret ) 785 + __field( ino_t, ino ) 786 + __field( dev_t, dev ) 787 + ), 788 + 789 + TP_fast_assign( 790 + __entry->ret = ret; 791 + __entry->ino = inode->i_ino; 792 + __entry->dev = inode->i_sb->s_dev; 793 + ), 794 + 795 + TP_printk("dev %d,%d ino %ld ret %d", 796 + MAJOR(__entry->dev), MINOR(__entry->dev), 797 + (unsigned long) __entry->ino, 798 + __entry->ret) 806 799 ); 807 800 808 801 TRACE_EVENT(ext4_sync_fs, ··· 833 782 TP_ARGS(sb, wait), 834 783 835 784 TP_STRUCT__entry( 836 - __field( int, dev_major ) 837 - __field( int, dev_minor ) 785 + __field( dev_t, dev ) 838 786 __field( int, wait ) 839 787 840 788 ), 841 789 842 790 TP_fast_assign( 843 - __entry->dev_major = MAJOR(sb->s_dev); 844 - __entry->dev_minor = MINOR(sb->s_dev); 791 + __entry->dev = sb->s_dev; 845 792 __entry->wait = wait; 846 793 ), 847 794 848 - TP_printk("dev %d,%d wait %d", __entry->dev_major, 849 - __entry->dev_minor, __entry->wait) 795 + TP_printk("dev %d,%d wait %d", 796 + MAJOR(__entry->dev), MINOR(__entry->dev), 
797 + __entry->wait) 850 798 ); 851 799 852 800 TRACE_EVENT(ext4_alloc_da_blocks, ··· 854 804 TP_ARGS(inode), 855 805 856 806 TP_STRUCT__entry( 857 - __field( int, dev_major ) 858 - __field( int, dev_minor ) 807 + __field( dev_t, dev ) 859 808 __field( ino_t, ino ) 860 809 __field( unsigned int, data_blocks ) 861 810 __field( unsigned int, meta_blocks ) 862 811 ), 863 812 864 813 TP_fast_assign( 865 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 866 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 814 + __entry->dev = inode->i_sb->s_dev; 867 815 __entry->ino = inode->i_ino; 868 816 __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks; 869 817 __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; 870 818 ), 871 819 872 820 TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u", 873 - __entry->dev_major, __entry->dev_minor, 821 + MAJOR(__entry->dev), MINOR(__entry->dev), 874 822 (unsigned long) __entry->ino, 875 823 __entry->data_blocks, __entry->meta_blocks) 876 824 ); ··· 879 831 TP_ARGS(ac), 880 832 881 833 TP_STRUCT__entry( 882 - __field( int, dev_major ) 883 - __field( int, dev_minor ) 834 + __field( dev_t, dev ) 884 835 __field( ino_t, ino ) 885 836 __field( __u16, found ) 886 837 __field( __u16, groups ) ··· 902 855 ), 903 856 904 857 TP_fast_assign( 905 - __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev); 906 - __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev); 858 + __entry->dev = ac->ac_inode->i_sb->s_dev; 907 859 __entry->ino = ac->ac_inode->i_ino; 908 860 __entry->found = ac->ac_found; 909 861 __entry->flags = ac->ac_flags; ··· 927 881 TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " 928 882 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x " 929 883 "tail %u broken %u", 930 - __entry->dev_major, __entry->dev_minor, 884 + MAJOR(__entry->dev), MINOR(__entry->dev), 931 885 (unsigned long) __entry->ino, 932 886 __entry->orig_group, __entry->orig_start, 933 887 __entry->orig_len, __entry->orig_logical, ··· 946 900 TP_ARGS(ac), 947 901 948 902 TP_STRUCT__entry( 949 - __field( int, dev_major ) 950 - __field( int, dev_minor ) 903 + __field( dev_t, dev ) 951 904 __field( ino_t, ino ) 952 905 __field( __u32, orig_logical ) 953 906 __field( int, orig_start ) ··· 959 914 ), 960 915 961 916 TP_fast_assign( 962 - __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev); 963 - __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev); 917 + __entry->dev = ac->ac_inode->i_sb->s_dev; 964 918 __entry->ino = ac->ac_inode->i_ino; 965 919 __entry->orig_logical = ac->ac_o_ex.fe_logical; 966 920 __entry->orig_start = ac->ac_o_ex.fe_start; ··· 972 928 ), 973 929 974 930 TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u", 975 - __entry->dev_major, __entry->dev_minor, 931 + MAJOR(__entry->dev), MINOR(__entry->dev), 976 932 (unsigned long) __entry->ino, 977 933 __entry->orig_group, __entry->orig_start, 978 934 __entry->orig_len, __entry->orig_logical, ··· 990 946 TP_ARGS(sb, inode, group, start, len), 991 947 992 948 TP_STRUCT__entry( 993 - __field( int, dev_major ) 994 - __field( int, dev_minor ) 949 + __field( dev_t, dev ) 995 950 __field( ino_t, ino ) 996 951 __field( int, result_start ) 997 952 __field( __u32, result_group ) ··· 998 955 ), 999 956 1000 957 TP_fast_assign( 1001 - __entry->dev_major = MAJOR(sb->s_dev); 1002 - __entry->dev_minor = MINOR(sb->s_dev); 958 + __entry->dev = sb->s_dev; 1003 959 __entry->ino = inode ? 
inode->i_ino : 0; 1004 960 __entry->result_start = start; 1005 961 __entry->result_group = group; ··· 1006 964 ), 1007 965 1008 966 TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", 1009 - __entry->dev_major, __entry->dev_minor, 967 + MAJOR(__entry->dev), MINOR(__entry->dev), 1010 968 (unsigned long) __entry->ino, 1011 969 __entry->result_group, __entry->result_start, 1012 970 __entry->result_len) ··· 1040 998 TP_ARGS(inode, is_metadata, block), 1041 999 1042 1000 TP_STRUCT__entry( 1043 - __field( int, dev_major ) 1044 - __field( int, dev_minor ) 1001 + __field( dev_t, dev ) 1045 1002 __field( ino_t, ino ) 1046 1003 __field( umode_t, mode ) 1047 1004 __field( int, is_metadata ) ··· 1048 1007 ), 1049 1008 1050 1009 TP_fast_assign( 1051 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1052 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 1010 + __entry->dev = inode->i_sb->s_dev; 1053 1011 __entry->ino = inode->i_ino; 1054 1012 __entry->mode = inode->i_mode; 1055 1013 __entry->is_metadata = is_metadata; ··· 1056 1016 ), 1057 1017 1058 1018 TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu", 1059 - __entry->dev_major, __entry->dev_minor, 1060 - (unsigned long) __entry->ino, __entry->mode, 1061 - __entry->is_metadata, __entry->block) 1019 + MAJOR(__entry->dev), MINOR(__entry->dev), 1020 + (unsigned long) __entry->ino, 1021 + __entry->mode, __entry->is_metadata, __entry->block) 1062 1022 ); 1063 1023 1064 1024 TRACE_EVENT(ext4_da_update_reserve_space, ··· 1067 1027 TP_ARGS(inode, used_blocks), 1068 1028 1069 1029 TP_STRUCT__entry( 1070 - __field( int, dev_major ) 1071 - __field( int, dev_minor ) 1030 + __field( dev_t, dev ) 1072 1031 __field( ino_t, ino ) 1073 1032 __field( umode_t, mode ) 1074 1033 __field( __u64, i_blocks ) ··· 1078 1039 ), 1079 1040 1080 1041 TP_fast_assign( 1081 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1082 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 1042 + __entry->dev = inode->i_sb->s_dev; 1083 1043 __entry->ino = inode->i_ino; 1084 1044 __entry->mode = inode->i_mode; 1085 1045 __entry->i_blocks = inode->i_blocks; ··· 1088 1050 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; 1089 1051 ), 1090 1052 1091 - TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", 1092 - __entry->dev_major, __entry->dev_minor, 1093 - (unsigned long) __entry->ino, __entry->mode, 1094 - (unsigned long long) __entry->i_blocks, 1053 + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d " 1054 + "reserved_data_blocks %d reserved_meta_blocks %d " 1055 + "allocated_meta_blocks %d", 1056 + MAJOR(__entry->dev), MINOR(__entry->dev), 1057 + (unsigned long) __entry->ino, 1058 + __entry->mode, (unsigned long long) __entry->i_blocks, 1095 1059 __entry->used_blocks, __entry->reserved_data_blocks, 1096 1060 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 1097 1061 ); ··· 1104 1064 TP_ARGS(inode, md_needed), 1105 1065 1106 1066 TP_STRUCT__entry( 1107 - __field( int, dev_major ) 1108 - __field( int, dev_minor ) 1067 + __field( dev_t, dev ) 1109 1068 __field( ino_t, ino ) 1110 1069 __field( umode_t, mode ) 1111 1070 __field( __u64, i_blocks ) ··· 1114 1075 ), 1115 1076 1116 1077 TP_fast_assign( 1117 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1118 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 1078 + __entry->dev = inode->i_sb->s_dev; 1119 1079 __entry->ino = inode->i_ino; 1120 1080 __entry->mode = inode->i_mode; 1121 1081 
__entry->i_blocks = inode->i_blocks; ··· 1123 1085 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; 1124 1086 ), 1125 1087 1126 - TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d", 1127 - __entry->dev_major, __entry->dev_minor, 1088 + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d " 1089 + "reserved_data_blocks %d reserved_meta_blocks %d", 1090 + MAJOR(__entry->dev), MINOR(__entry->dev), 1128 1091 (unsigned long) __entry->ino, 1129 1092 __entry->mode, (unsigned long long) __entry->i_blocks, 1130 1093 __entry->md_needed, __entry->reserved_data_blocks, ··· 1138 1099 TP_ARGS(inode, freed_blocks), 1139 1100 1140 1101 TP_STRUCT__entry( 1141 - __field( int, dev_major ) 1142 - __field( int, dev_minor ) 1102 + __field( dev_t, dev ) 1143 1103 __field( ino_t, ino ) 1144 1104 __field( umode_t, mode ) 1145 1105 __field( __u64, i_blocks ) ··· 1149 1111 ), 1150 1112 1151 1113 TP_fast_assign( 1152 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1153 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 1114 + __entry->dev = inode->i_sb->s_dev; 1154 1115 __entry->ino = inode->i_ino; 1155 1116 __entry->mode = inode->i_mode; 1156 1117 __entry->i_blocks = inode->i_blocks; ··· 1159 1122 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; 1160 1123 ), 1161 1124 1162 - TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", 1163 - __entry->dev_major, __entry->dev_minor, 1125 + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d " 1126 + "reserved_data_blocks %d reserved_meta_blocks %d " 1127 + "allocated_meta_blocks %d", 1128 + MAJOR(__entry->dev), MINOR(__entry->dev), 1164 1129 (unsigned long) __entry->ino, 1165 1130 __entry->mode, (unsigned long long) __entry->i_blocks, 1166 1131 __entry->freed_blocks, __entry->reserved_data_blocks, ··· 1175 1136 TP_ARGS(sb, group), 1176 1137 1177 1138 TP_STRUCT__entry( 1178 - __field( int, dev_major ) 1179 - __field( int, dev_minor ) 1139 + __field( dev_t, dev ) 1180 1140 __field( __u32, group ) 1181 1141 1182 1142 ), 1183 1143 1184 1144 TP_fast_assign( 1185 - __entry->dev_major = MAJOR(sb->s_dev); 1186 - __entry->dev_minor = MINOR(sb->s_dev); 1145 + __entry->dev = sb->s_dev; 1187 1146 __entry->group = group; 1188 1147 ), 1189 1148 1190 1149 TP_printk("dev %d,%d group %u", 1191 - __entry->dev_major, __entry->dev_minor, __entry->group) 1150 + MAJOR(__entry->dev), MINOR(__entry->dev), 1151 + __entry->group) 1192 1152 ); 1193 1153 1194 1154 DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load, ··· 1202 1164 TP_PROTO(struct super_block *sb, unsigned long group), 1203 1165 1204 1166 TP_ARGS(sb, group) 1167 + ); 1168 + 1169 + DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load, 1170 + 1171 + TP_PROTO(struct super_block *sb, unsigned long group), 1172 + 1173 + TP_ARGS(sb, group) 1174 + ); 1175 + 1176 + DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, 1177 + 1178 + TP_PROTO(struct super_block *sb, unsigned long group), 1179 + 1180 + TP_ARGS(sb, group) 1181 + ); 1182 + 1183 + TRACE_EVENT(ext4_direct_IO_enter, 1184 + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), 1185 + 1186 + TP_ARGS(inode, offset, len, rw), 1187 + 1188 + TP_STRUCT__entry( 1189 + __field( ino_t, ino ) 1190 + __field( dev_t, dev ) 1191 + __field( loff_t, pos ) 1192 + __field( unsigned long, len ) 1193 + __field( int, rw ) 1194 + ), 1195 + 1196 + 
TP_fast_assign( 1197 + __entry->ino = inode->i_ino; 1198 + __entry->dev = inode->i_sb->s_dev; 1199 + __entry->pos = offset; 1200 + __entry->len = len; 1201 + __entry->rw = rw; 1202 + ), 1203 + 1204 + TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", 1205 + MAJOR(__entry->dev), MINOR(__entry->dev), 1206 + (unsigned long) __entry->ino, 1207 + (unsigned long long) __entry->pos, __entry->len, __entry->rw) 1208 + ); 1209 + 1210 + TRACE_EVENT(ext4_direct_IO_exit, 1211 + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret), 1212 + 1213 + TP_ARGS(inode, offset, len, rw, ret), 1214 + 1215 + TP_STRUCT__entry( 1216 + __field( ino_t, ino ) 1217 + __field( dev_t, dev ) 1218 + __field( loff_t, pos ) 1219 + __field( unsigned long, len ) 1220 + __field( int, rw ) 1221 + __field( int, ret ) 1222 + ), 1223 + 1224 + TP_fast_assign( 1225 + __entry->ino = inode->i_ino; 1226 + __entry->dev = inode->i_sb->s_dev; 1227 + __entry->pos = offset; 1228 + __entry->len = len; 1229 + __entry->rw = rw; 1230 + __entry->ret = ret; 1231 + ), 1232 + 1233 + TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", 1234 + MAJOR(__entry->dev), MINOR(__entry->dev), 1235 + (unsigned long) __entry->ino, 1236 + (unsigned long long) __entry->pos, __entry->len, 1237 + __entry->rw, __entry->ret) 1238 + ); 1239 + 1240 + TRACE_EVENT(ext4_fallocate_enter, 1241 + TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), 1242 + 1243 + TP_ARGS(inode, offset, len, mode), 1244 + 1245 + TP_STRUCT__entry( 1246 + __field( ino_t, ino ) 1247 + __field( dev_t, dev ) 1248 + __field( loff_t, pos ) 1249 + __field( loff_t, len ) 1250 + __field( int, mode ) 1251 + ), 1252 + 1253 + TP_fast_assign( 1254 + __entry->ino = inode->i_ino; 1255 + __entry->dev = inode->i_sb->s_dev; 1256 + __entry->pos = offset; 1257 + __entry->len = len; 1258 + __entry->mode = mode; 1259 + ), 1260 + 1261 + TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d", 1262 + MAJOR(__entry->dev), MINOR(__entry->dev), 1263 + (unsigned long) __entry->ino, 1264 + (unsigned long long) __entry->pos, 1265 + (unsigned long long) __entry->len, __entry->mode) 1266 + ); 1267 + 1268 + TRACE_EVENT(ext4_fallocate_exit, 1269 + TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret), 1270 + 1271 + TP_ARGS(inode, offset, max_blocks, ret), 1272 + 1273 + TP_STRUCT__entry( 1274 + __field( ino_t, ino ) 1275 + __field( dev_t, dev ) 1276 + __field( loff_t, pos ) 1277 + __field( unsigned, blocks ) 1278 + __field( int, ret ) 1279 + ), 1280 + 1281 + TP_fast_assign( 1282 + __entry->ino = inode->i_ino; 1283 + __entry->dev = inode->i_sb->s_dev; 1284 + __entry->pos = offset; 1285 + __entry->blocks = max_blocks; 1286 + __entry->ret = ret; 1287 + ), 1288 + 1289 + TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d", 1290 + MAJOR(__entry->dev), MINOR(__entry->dev), 1291 + (unsigned long) __entry->ino, 1292 + (unsigned long long) __entry->pos, __entry->blocks, 1293 + __entry->ret) 1294 + ); 1295 + 1296 + TRACE_EVENT(ext4_unlink_enter, 1297 + TP_PROTO(struct inode *parent, struct dentry *dentry), 1298 + 1299 + TP_ARGS(parent, dentry), 1300 + 1301 + TP_STRUCT__entry( 1302 + __field( ino_t, parent ) 1303 + __field( ino_t, ino ) 1304 + __field( loff_t, size ) 1305 + __field( dev_t, dev ) 1306 + ), 1307 + 1308 + TP_fast_assign( 1309 + __entry->parent = parent->i_ino; 1310 + __entry->ino = dentry->d_inode->i_ino; 1311 + __entry->size = dentry->d_inode->i_size; 1312 + __entry->dev = dentry->d_inode->i_sb->s_dev; 1313 + ), 1314 + 1315 + TP_printk("dev %d,%d 
ino %ld size %lld parent %ld", 1316 + MAJOR(__entry->dev), MINOR(__entry->dev), 1317 + (unsigned long) __entry->ino, __entry->size, 1318 + (unsigned long) __entry->parent) 1319 + ); 1320 + 1321 + TRACE_EVENT(ext4_unlink_exit, 1322 + TP_PROTO(struct dentry *dentry, int ret), 1323 + 1324 + TP_ARGS(dentry, ret), 1325 + 1326 + TP_STRUCT__entry( 1327 + __field( ino_t, ino ) 1328 + __field( dev_t, dev ) 1329 + __field( int, ret ) 1330 + ), 1331 + 1332 + TP_fast_assign( 1333 + __entry->ino = dentry->d_inode->i_ino; 1334 + __entry->dev = dentry->d_inode->i_sb->s_dev; 1335 + __entry->ret = ret; 1336 + ), 1337 + 1338 + TP_printk("dev %d,%d ino %ld ret %d", 1339 + MAJOR(__entry->dev), MINOR(__entry->dev), 1340 + (unsigned long) __entry->ino, 1341 + __entry->ret) 1342 + ); 1343 + 1344 + DECLARE_EVENT_CLASS(ext4__truncate, 1345 + TP_PROTO(struct inode *inode), 1346 + 1347 + TP_ARGS(inode), 1348 + 1349 + TP_STRUCT__entry( 1350 + __field( ino_t, ino ) 1351 + __field( dev_t, dev ) 1352 + __field( blkcnt_t, blocks ) 1353 + ), 1354 + 1355 + TP_fast_assign( 1356 + __entry->ino = inode->i_ino; 1357 + __entry->dev = inode->i_sb->s_dev; 1358 + __entry->blocks = inode->i_blocks; 1359 + ), 1360 + 1361 + TP_printk("dev %d,%d ino %lu blocks %lu", 1362 + MAJOR(__entry->dev), MINOR(__entry->dev), 1363 + (unsigned long) __entry->ino, (unsigned long) __entry->blocks) 1364 + ); 1365 + 1366 + DEFINE_EVENT(ext4__truncate, ext4_truncate_enter, 1367 + 1368 + TP_PROTO(struct inode *inode), 1369 + 1370 + TP_ARGS(inode) 1371 + ); 1372 + 1373 + DEFINE_EVENT(ext4__truncate, ext4_truncate_exit, 1374 + 1375 + TP_PROTO(struct inode *inode), 1376 + 1377 + TP_ARGS(inode) 1378 + ); 1379 + 1380 + DECLARE_EVENT_CLASS(ext4__map_blocks_enter, 1381 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1382 + unsigned len, unsigned flags), 1383 + 1384 + TP_ARGS(inode, lblk, len, flags), 1385 + 1386 + TP_STRUCT__entry( 1387 + __field( ino_t, ino ) 1388 + __field( dev_t, dev ) 1389 + __field( ext4_lblk_t, lblk ) 1390 + __field( unsigned, len ) 1391 + __field( unsigned, flags ) 1392 + ), 1393 + 1394 + TP_fast_assign( 1395 + __entry->ino = inode->i_ino; 1396 + __entry->dev = inode->i_sb->s_dev; 1397 + __entry->lblk = lblk; 1398 + __entry->len = len; 1399 + __entry->flags = flags; 1400 + ), 1401 + 1402 + TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u", 1403 + MAJOR(__entry->dev), MINOR(__entry->dev), 1404 + (unsigned long) __entry->ino, 1405 + (unsigned) __entry->lblk, __entry->len, __entry->flags) 1406 + ); 1407 + 1408 + DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, 1409 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1410 + unsigned len, unsigned flags), 1411 + 1412 + TP_ARGS(inode, lblk, len, flags) 1413 + ); 1414 + 1415 + DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter, 1416 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1417 + unsigned len, unsigned flags), 1418 + 1419 + TP_ARGS(inode, lblk, len, flags) 1420 + ); 1421 + 1422 + DECLARE_EVENT_CLASS(ext4__map_blocks_exit, 1423 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1424 + ext4_fsblk_t pblk, unsigned len, int ret), 1425 + 1426 + TP_ARGS(inode, lblk, pblk, len, ret), 1427 + 1428 + TP_STRUCT__entry( 1429 + __field( ino_t, ino ) 1430 + __field( dev_t, dev ) 1431 + __field( ext4_lblk_t, lblk ) 1432 + __field( ext4_fsblk_t, pblk ) 1433 + __field( unsigned, len ) 1434 + __field( int, ret ) 1435 + ), 1436 + 1437 + TP_fast_assign( 1438 + __entry->ino = inode->i_ino; 1439 + __entry->dev = inode->i_sb->s_dev; 1440 + __entry->lblk = lblk; 1441 + 
__entry->pblk = pblk; 1442 + __entry->len = len; 1443 + __entry->ret = ret; 1444 + ), 1445 + 1446 + TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d", 1447 + MAJOR(__entry->dev), MINOR(__entry->dev), 1448 + (unsigned long) __entry->ino, 1449 + (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, 1450 + __entry->len, __entry->ret) 1451 + ); 1452 + 1453 + DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit, 1454 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1455 + ext4_fsblk_t pblk, unsigned len, int ret), 1456 + 1457 + TP_ARGS(inode, lblk, pblk, len, ret) 1458 + ); 1459 + 1460 + DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit, 1461 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1462 + ext4_fsblk_t pblk, unsigned len, int ret), 1463 + 1464 + TP_ARGS(inode, lblk, pblk, len, ret) 1465 + ); 1466 + 1467 + TRACE_EVENT(ext4_ext_load_extent, 1468 + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk), 1469 + 1470 + TP_ARGS(inode, lblk, pblk), 1471 + 1472 + TP_STRUCT__entry( 1473 + __field( ino_t, ino ) 1474 + __field( dev_t, dev ) 1475 + __field( ext4_lblk_t, lblk ) 1476 + __field( ext4_fsblk_t, pblk ) 1477 + ), 1478 + 1479 + TP_fast_assign( 1480 + __entry->ino = inode->i_ino; 1481 + __entry->dev = inode->i_sb->s_dev; 1482 + __entry->lblk = lblk; 1483 + __entry->pblk = pblk; 1484 + ), 1485 + 1486 + TP_printk("dev %d,%d ino %lu lblk %u pblk %llu", 1487 + MAJOR(__entry->dev), MINOR(__entry->dev), 1488 + (unsigned long) __entry->ino, 1489 + (unsigned) __entry->lblk, (unsigned long long) __entry->pblk) 1490 + ); 1491 + 1492 + TRACE_EVENT(ext4_load_inode, 1493 + TP_PROTO(struct inode *inode), 1494 + 1495 + TP_ARGS(inode), 1496 + 1497 + TP_STRUCT__entry( 1498 + __field( ino_t, ino ) 1499 + __field( dev_t, dev ) 1500 + ), 1501 + 1502 + TP_fast_assign( 1503 + __entry->ino = inode->i_ino; 1504 + __entry->dev = inode->i_sb->s_dev; 1505 + ), 1506 + 1507 + TP_printk("dev %d,%d ino %ld", 1508 + MAJOR(__entry->dev), MINOR(__entry->dev), 1509 + (unsigned long) __entry->ino) 1205 1510 ); 1206 1511 1207 1512 #endif /* _TRACE_EXT4_H */
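The bulk of the ext4 trace-header churn is mechanical: events that recorded separate dev_major/dev_minor ints now record a single dev_t and split it with MAJOR()/MINOR() only at print time, trimming each event's ring-buffer entry and matching how other subsystems encode devices. The skeleton every converted event follows (field lists and format strings vary per event):

    TP_STRUCT__entry(
            __field(dev_t, dev)
            __field(ino_t, ino)
    ),
    TP_fast_assign(
            __entry->dev = inode->i_sb->s_dev;
            __entry->ino = inode->i_ino;
    ),
    TP_printk("dev %d,%d ino %lu",
              MAJOR(__entry->dev), MINOR(__entry->dev),
              (unsigned long) __entry->ino)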
+31 -47
include/trace/events/jbd2.h
··· 17 17 TP_ARGS(journal, result), 18 18 19 19 TP_STRUCT__entry( 20 - __field( int, dev_major ) 21 - __field( int, dev_minor ) 20 + __field( dev_t, dev ) 22 21 __field( int, result ) 23 22 ), 24 23 25 24 TP_fast_assign( 26 - __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 27 - __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); 25 + __entry->dev = journal->j_fs_dev->bd_dev; 28 26 __entry->result = result; 29 27 ), 30 28 31 - TP_printk("dev %d,%d result %d", 32 - __entry->dev_major, __entry->dev_minor, __entry->result) 29 + TP_printk("dev %s result %d", 30 + jbd2_dev_to_name(__entry->dev), __entry->result) 33 31 ); 34 32 35 33 DECLARE_EVENT_CLASS(jbd2_commit, ··· 37 39 TP_ARGS(journal, commit_transaction), 38 40 39 41 TP_STRUCT__entry( 40 - __field( int, dev_major ) 41 - __field( int, dev_minor ) 42 + __field( dev_t, dev ) 42 43 __field( char, sync_commit ) 43 44 __field( int, transaction ) 44 45 ), 45 46 46 47 TP_fast_assign( 47 - __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 48 - __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); 48 + __entry->dev = journal->j_fs_dev->bd_dev; 49 49 __entry->sync_commit = commit_transaction->t_synchronous_commit; 50 50 __entry->transaction = commit_transaction->t_tid; 51 51 ), 52 52 53 - TP_printk("dev %d,%d transaction %d sync %d", 54 - __entry->dev_major, __entry->dev_minor, 55 - __entry->transaction, __entry->sync_commit) 53 + TP_printk("dev %s transaction %d sync %d", 54 + jbd2_dev_to_name(__entry->dev), __entry->transaction, 55 + __entry->sync_commit) 56 56 ); 57 57 58 58 DEFINE_EVENT(jbd2_commit, jbd2_start_commit, ··· 87 91 TP_ARGS(journal, commit_transaction), 88 92 89 93 TP_STRUCT__entry( 90 - __field( int, dev_major ) 91 - __field( int, dev_minor ) 94 + __field( dev_t, dev ) 92 95 __field( char, sync_commit ) 93 96 __field( int, transaction ) 94 97 __field( int, head ) 95 98 ), 96 99 97 100 TP_fast_assign( 98 - __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 99 - __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); 101 + __entry->dev = journal->j_fs_dev->bd_dev; 100 102 __entry->sync_commit = commit_transaction->t_synchronous_commit; 101 103 __entry->transaction = commit_transaction->t_tid; 102 104 __entry->head = journal->j_tail_sequence; 103 105 ), 104 106 105 - TP_printk("dev %d,%d transaction %d sync %d head %d", 106 - __entry->dev_major, __entry->dev_minor, 107 - __entry->transaction, __entry->sync_commit, __entry->head) 107 + TP_printk("dev %s transaction %d sync %d head %d", 108 + jbd2_dev_to_name(__entry->dev), __entry->transaction, 109 + __entry->sync_commit, __entry->head) 108 110 ); 109 111 110 112 TRACE_EVENT(jbd2_submit_inode_data, ··· 111 117 TP_ARGS(inode), 112 118 113 119 TP_STRUCT__entry( 114 - __field( int, dev_major ) 115 - __field( int, dev_minor ) 120 + __field( dev_t, dev ) 116 121 __field( ino_t, ino ) 117 122 ), 118 123 119 124 TP_fast_assign( 120 - __entry->dev_major = MAJOR(inode->i_sb->s_dev); 121 - __entry->dev_minor = MINOR(inode->i_sb->s_dev); 125 + __entry->dev = inode->i_sb->s_dev; 122 126 __entry->ino = inode->i_ino; 123 127 ), 124 128 125 - TP_printk("dev %d,%d ino %lu", 126 - __entry->dev_major, __entry->dev_minor, 127 - (unsigned long) __entry->ino) 129 + TP_printk("dev %s ino %lu", 130 + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino) 128 131 ); 129 132 130 133 TRACE_EVENT(jbd2_run_stats, ··· 131 140 TP_ARGS(dev, tid, stats), 132 141 133 142 TP_STRUCT__entry( 134 - __field( int, dev_major ) 135 - __field( int, dev_minor ) 143 + __field( dev_t, dev ) 136 144 
__field( unsigned long, tid ) 137 145 __field( unsigned long, wait ) 138 146 __field( unsigned long, running ) ··· 144 154 ), 145 155 146 156 TP_fast_assign( 147 - __entry->dev_major = MAJOR(dev); 148 - __entry->dev_minor = MINOR(dev); 157 + __entry->dev = dev; 149 158 __entry->tid = tid; 150 159 __entry->wait = stats->rs_wait; 151 160 __entry->running = stats->rs_running; ··· 156 167 __entry->blocks_logged = stats->rs_blocks_logged; 157 168 ), 158 169 159 - TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u " 170 + TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u " 160 171 "logging %u handle_count %u blocks %u blocks_logged %u", 161 - __entry->dev_major, __entry->dev_minor, __entry->tid, 172 + jbd2_dev_to_name(__entry->dev), __entry->tid, 162 173 jiffies_to_msecs(__entry->wait), 163 174 jiffies_to_msecs(__entry->running), 164 175 jiffies_to_msecs(__entry->locked), ··· 175 186 TP_ARGS(dev, tid, stats), 176 187 177 188 TP_STRUCT__entry( 178 - __field( int, dev_major ) 179 - __field( int, dev_minor ) 189 + __field( dev_t, dev ) 180 190 __field( unsigned long, tid ) 181 191 __field( unsigned long, chp_time ) 182 192 __field( __u32, forced_to_close ) ··· 184 196 ), 185 197 186 198 TP_fast_assign( 187 - __entry->dev_major = MAJOR(dev); 188 - __entry->dev_minor = MINOR(dev); 199 + __entry->dev = dev; 189 200 __entry->tid = tid; 190 201 __entry->chp_time = stats->cs_chp_time; 191 202 __entry->forced_to_close= stats->cs_forced_to_close; ··· 192 205 __entry->dropped = stats->cs_dropped; 193 206 ), 194 207 195 - TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " 208 + TP_printk("dev %s tid %lu chp_time %u forced_to_close %u " 196 209 "written %u dropped %u", 197 - __entry->dev_major, __entry->dev_minor, __entry->tid, 210 + jbd2_dev_to_name(__entry->dev), __entry->tid, 198 211 jiffies_to_msecs(__entry->chp_time), 199 212 __entry->forced_to_close, __entry->written, __entry->dropped) 200 213 ); ··· 207 220 TP_ARGS(journal, first_tid, block_nr, freed), 208 221 209 222 TP_STRUCT__entry( 210 - __field( int, dev_major ) 211 - __field( int, dev_minor ) 223 + __field( dev_t, dev ) 212 224 __field( tid_t, tail_sequence ) 213 225 __field( tid_t, first_tid ) 214 226 __field(unsigned long, block_nr ) ··· 215 229 ), 216 230 217 231 TP_fast_assign( 218 - __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 219 - __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); 232 + __entry->dev = journal->j_fs_dev->bd_dev; 220 233 __entry->tail_sequence = journal->j_tail_sequence; 221 234 __entry->first_tid = first_tid; 222 235 __entry->block_nr = block_nr; 223 236 __entry->freed = freed; 224 237 ), 225 238 226 - TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", 227 - __entry->dev_major, __entry->dev_minor, 228 - __entry->tail_sequence, __entry->first_tid, 229 - __entry->block_nr, __entry->freed) 239 + TP_printk("dev %s from %u to %u offset %lu freed %lu", 240 + jbd2_dev_to_name(__entry->dev), __entry->tail_sequence, 241 + __entry->first_tid, __entry->block_nr, __entry->freed) 230 242 ); 231 243 232 244 #endif /* _TRACE_JBD2_H */
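Note the asymmetry with the ext4 events: the jbd2 tracepoints instead format the device through jbd2_dev_to_name(), a jbd2 helper of this era that caches bdevname() results so the lookup stays cheap at print time. A sketch of a call, assuming the declaration then present in include/linux/jbd2.h:

    /* declaration assumed from the era's include/linux/jbd2.h */
    extern const char *jbd2_dev_to_name(dev_t device);

    printk(KERN_DEBUG "jbd2: journal on %s\n",
           jbd2_dev_to_name(journal->j_fs_dev->bd_dev));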