Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'f2fs-for-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
"This round looks fairly small comparing to the previous updates and
includes mostly minor bug fixes. Nevertheless, as we've still
interested in improving the stability, Chao added some debugging
methods to diagnoze subtle runtime inconsistency problem.

Enhancements:
- store all the corruption or failure reasons in superblock
- detect meta inode, summary info, and block address inconsistency
- increase the limit for reserve_root for low-end devices
- add the number of compressed IO in iostat

Bug fixes:
- DIO write fix for zoned devices
- do out-of-place writes for cold files
- fix some stat updates (FS_CP_DATA_IO, dirty page count)
- fix race condition on setting FI_NO_EXTENT flag
- fix data races when freezing super
- fix wrong continue condition check in GC
- do not allow ATGC for LFS mode

In addition, there are some code enhancements and clean-ups as usual"

* tag 'f2fs-for-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (32 commits)
f2fs: change to use atomic_t type form sbi.atomic_files
f2fs: account swapfile inodes
f2fs: allow direct read for zoned device
f2fs: support recording errors into superblock
f2fs: support recording stop_checkpoint reason into super_block
f2fs: remove the unnecessary check in f2fs_xattr_fiemap
f2fs: introduce cp_status sysfs entry
f2fs: fix to detect corrupted meta ino
f2fs: fix to account FS_CP_DATA_IO correctly
f2fs: code clean and fix a type error
f2fs: add "c_len" into trace_f2fs_update_extent_tree_range for compressed file
f2fs: fix to do sanity check on summary info
f2fs: port to vfs{g,u}id_t and associated helpers
f2fs: fix to do sanity check on destination blkaddr during recovery
f2fs: let FI_OPU_WRITE override FADVISE_COLD_BIT
f2fs: fix race condition on setting FI_NO_EXTENT flag
f2fs: remove redundant check in f2fs_sanity_check_cluster
f2fs: add static init_idisk_time function to reduce the code
f2fs: fix typo
f2fs: fix wrong dirty page count when race between mmap and fallocate.
...

+556 -209
+24
Documentation/ABI/testing/sysfs-fs-f2fs
··· 466 466 0x4000 SBI_IS_FREEZING freefs is in process 467 467 ====== ===================== ================================= 468 468 469 + What: /sys/fs/f2fs/<disk>/stat/cp_status 470 + Date: September 2022 471 + Contact: "Chao Yu" <chao.yu@oppo.com> 472 + Description: Show status of f2fs checkpoint in real time. 473 + 474 + =============================== ============================== 475 + cp flag value 476 + CP_UMOUNT_FLAG 0x00000001 477 + CP_ORPHAN_PRESENT_FLAG 0x00000002 478 + CP_COMPACT_SUM_FLAG 0x00000004 479 + CP_ERROR_FLAG 0x00000008 480 + CP_FSCK_FLAG 0x00000010 481 + CP_FASTBOOT_FLAG 0x00000020 482 + CP_CRC_RECOVERY_FLAG 0x00000040 483 + CP_NAT_BITS_FLAG 0x00000080 484 + CP_TRIMMED_FLAG 0x00000100 485 + CP_NOCRC_RECOVERY_FLAG 0x00000200 486 + CP_LARGE_NAT_BITMAP_FLAG 0x00000400 487 + CP_QUOTA_NEED_FSCK_FLAG 0x00000800 488 + CP_DISABLED_FLAG 0x00001000 489 + CP_DISABLED_QUICK_FLAG 0x00002000 490 + CP_RESIZEFS_FLAG 0x00004000 491 + =============================== ============================== 492 + 469 493 What: /sys/fs/f2fs/<disk>/ckpt_thread_ioprio 470 494 Date: January 2021 471 495 Contact: "Daeho Jeong" <daehojeong@google.com>
+1 -1
fs/f2fs/acl.c
··· 219 219 return error; 220 220 if (error == 0) 221 221 *acl = NULL; 222 - if (!in_group_p(i_gid_into_mnt(mnt_userns, inode)) && 222 + if (!vfsgid_in_group_p(i_gid_into_vfsgid(mnt_userns, inode)) && 223 223 !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID)) 224 224 mode &= ~S_ISGID; 225 225 *mode_p = mode;
+47 -18
fs/f2fs/checkpoint.c
··· 26 26 static struct kmem_cache *ino_entry_slab; 27 27 struct kmem_cache *f2fs_inode_entry_slab; 28 28 29 - void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) 29 + void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, 30 + unsigned char reason) 30 31 { 31 32 f2fs_build_fault_attr(sbi, 0, 0); 32 33 set_ckpt_flags(sbi, CP_ERROR_FLAG); 33 - if (!end_io) 34 + if (!end_io) { 34 35 f2fs_flush_merged_writes(sbi); 36 + 37 + f2fs_handle_stop(sbi, reason); 38 + } 35 39 } 36 40 37 41 /* ··· 93 89 return ERR_PTR(err); 94 90 } 95 91 96 - f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE); 92 + f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE); 97 93 98 94 lock_page(page); 99 95 if (unlikely(page->mapping != mapping)) { ··· 126 122 if (PTR_ERR(page) == -EIO && 127 123 ++count <= DEFAULT_RETRY_IO_COUNT) 128 124 goto retry; 129 - f2fs_stop_checkpoint(sbi, false); 125 + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE); 130 126 } 131 127 return page; 132 128 } ··· 144 140 unsigned int segno, offset; 145 141 bool exist; 146 142 147 - if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ) 143 + if (type == DATA_GENERIC) 148 144 return true; 149 145 150 146 segno = GET_SEGNO(sbi, blkaddr); ··· 152 148 se = get_seg_entry(sbi, segno); 153 149 154 150 exist = f2fs_test_bit(offset, se->cur_valid_map); 151 + if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) { 152 + f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", 153 + blkaddr, exist); 154 + set_sbi_flag(sbi, SBI_NEED_FSCK); 155 + return exist; 156 + } 157 + 155 158 if (!exist && type == DATA_GENERIC_ENHANCE) { 156 159 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", 157 160 blkaddr, exist); ··· 196 185 case DATA_GENERIC: 197 186 case DATA_GENERIC_ENHANCE: 198 187 case DATA_GENERIC_ENHANCE_READ: 188 + case DATA_GENERIC_ENHANCE_UPDATE: 199 189 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || 200 190 blkaddr < MAIN_BLKADDR(sbi))) { 201 191 f2fs_warn(sbi, 
"access invalid blkaddr:%u", ··· 288 276 f2fs_put_page(page, err ? 1 : 0); 289 277 290 278 if (!err) 291 - f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE); 279 + f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, 280 + F2FS_BLKSIZE); 292 281 } 293 282 out: 294 283 blk_finish_plug(&plug); ··· 461 448 462 449 if (!folio_test_uptodate(folio)) 463 450 folio_mark_uptodate(folio); 464 - if (!folio_test_dirty(folio)) { 465 - filemap_dirty_folio(mapping, folio); 451 + if (filemap_dirty_folio(mapping, folio)) { 466 452 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META); 467 453 set_page_private_reference(&folio->page); 468 454 return true; ··· 1065 1053 spin_unlock(&sbi->inode_lock[type]); 1066 1054 } 1067 1055 1068 - int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) 1056 + int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, 1057 + bool from_cp) 1069 1058 { 1070 1059 struct list_head *head; 1071 1060 struct inode *inode; ··· 1101 1088 if (inode) { 1102 1089 unsigned long cur_ino = inode->i_ino; 1103 1090 1104 - F2FS_I(inode)->cp_task = current; 1091 + if (from_cp) 1092 + F2FS_I(inode)->cp_task = current; 1093 + F2FS_I(inode)->wb_task = current; 1105 1094 1106 1095 filemap_fdatawrite(inode->i_mapping); 1107 1096 1108 - F2FS_I(inode)->cp_task = NULL; 1097 + F2FS_I(inode)->wb_task = NULL; 1098 + if (from_cp) 1099 + F2FS_I(inode)->cp_task = NULL; 1109 1100 1110 1101 iput(inode); 1111 1102 /* We need to give cpu to another writers. 
*/ ··· 1238 1221 /* write all the dirty dentry pages */ 1239 1222 if (get_pages(sbi, F2FS_DIRTY_DENTS)) { 1240 1223 f2fs_unlock_all(sbi); 1241 - err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); 1224 + err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true); 1242 1225 if (err) 1243 1226 return err; 1244 1227 cond_resched(); ··· 1909 1892 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi) 1910 1893 { 1911 1894 struct ckpt_req_control *cprc = &sbi->cprc_info; 1895 + struct task_struct *ckpt_task; 1912 1896 1913 - if (cprc->f2fs_issue_ckpt) { 1914 - struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt; 1897 + if (!cprc->f2fs_issue_ckpt) 1898 + return; 1915 1899 1916 - cprc->f2fs_issue_ckpt = NULL; 1917 - kthread_stop(ckpt_task); 1900 + ckpt_task = cprc->f2fs_issue_ckpt; 1901 + cprc->f2fs_issue_ckpt = NULL; 1902 + kthread_stop(ckpt_task); 1918 1903 1919 - flush_remained_ckpt_reqs(sbi, NULL); 1920 - } 1904 + f2fs_flush_ckpt_thread(sbi); 1905 + } 1906 + 1907 + void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi) 1908 + { 1909 + struct ckpt_req_control *cprc = &sbi->cprc_info; 1910 + 1911 + flush_remained_ckpt_reqs(sbi, NULL); 1912 + 1913 + /* Let's wait for the previous dispatched checkpoint. */ 1914 + while (atomic_read(&cprc->queued_ckpt)) 1915 + io_schedule_timeout(DEFAULT_IO_TIMEOUT); 1921 1916 } 1922 1917 1923 1918 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
+14 -18
fs/f2fs/compress.c
··· 762 762 763 763 if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) { 764 764 ret = -EFSCORRUPTED; 765 + f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION); 765 766 goto out_release; 766 767 } 767 768 ··· 913 912 reason = "[C|*|C|*]"; 914 913 goto out; 915 914 } 916 - if (compressed) { 917 - if (!__is_valid_data_blkaddr(blkaddr)) { 918 - if (!cluster_end) 919 - cluster_end = i; 920 - continue; 921 - } 922 - /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */ 923 - if (cluster_end) { 924 - reason = "[C|N|N|V]"; 925 - goto out; 926 - } 915 + if (!__is_valid_data_blkaddr(blkaddr)) { 916 + if (!cluster_end) 917 + cluster_end = i; 918 + continue; 919 + } 920 + /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */ 921 + if (cluster_end) { 922 + reason = "[C|N|N|V]"; 923 + goto out; 927 924 } 928 925 } 929 926 return false; ··· 951 952 952 953 if (f2fs_sanity_check_cluster(&dn)) { 953 954 ret = -EFSCORRUPTED; 955 + f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER); 954 956 goto fail; 955 957 } 956 958 ··· 1568 1568 if (!dic->cbuf) 1569 1569 return -ENOMEM; 1570 1570 1571 - if (cops->init_decompress_ctx) { 1572 - int ret = cops->init_decompress_ctx(dic); 1573 - 1574 - if (ret) 1575 - return ret; 1576 - } 1571 + if (cops->init_decompress_ctx) 1572 + return cops->init_decompress_ctx(dic); 1577 1573 1578 1574 return 0; 1579 1575 } ··· 1901 1905 1902 1906 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) 1903 1907 { 1904 - struct address_space *mapping = sbi->compress_inode->i_mapping; 1908 + struct address_space *mapping = COMPRESS_MAPPING(sbi); 1905 1909 struct folio_batch fbatch; 1906 1910 pgoff_t index = 0; 1907 1911 pgoff_t end = MAX_BLKADDR(sbi);
+37 -16
fs/f2fs/data.c
··· 335 335 mempool_free(page, sbi->write_io_dummy); 336 336 337 337 if (unlikely(bio->bi_status)) 338 - f2fs_stop_checkpoint(sbi, true); 338 + f2fs_stop_checkpoint(sbi, true, 339 + STOP_CP_REASON_WRITE_FAIL); 339 340 continue; 340 341 } 341 342 ··· 352 351 if (unlikely(bio->bi_status)) { 353 352 mapping_set_error(page->mapping, -EIO); 354 353 if (type == F2FS_WB_CP_DATA) 355 - f2fs_stop_checkpoint(sbi, true); 354 + f2fs_stop_checkpoint(sbi, true, 355 + STOP_CP_REASON_WRITE_FAIL); 356 356 } 357 357 358 358 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) && ··· 707 705 708 706 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, 709 707 fio->is_por ? META_POR : (__is_meta_io(fio) ? 710 - META_GENERIC : DATA_GENERIC_ENHANCE))) 708 + META_GENERIC : DATA_GENERIC_ENHANCE))) { 709 + f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR); 711 710 return -EFSCORRUPTED; 711 + } 712 712 713 713 trace_f2fs_submit_page_bio(page, fio); 714 714 ··· 729 725 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); 730 726 731 727 inc_page_count(fio->sbi, is_read_io(fio->op) ? 732 - __read_io_type(page): WB_DATA_TYPE(fio->page)); 728 + __read_io_type(page) : WB_DATA_TYPE(fio->page)); 733 729 734 730 __submit_bio(fio->sbi, bio, fio->type); 735 731 return 0; ··· 910 906 fio->encrypted_page : fio->page; 911 907 912 908 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, 913 - __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) 909 + __is_meta_io(fio) ? 
META_GENERIC : DATA_GENERIC)) { 910 + f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR); 914 911 return -EFSCORRUPTED; 912 + } 915 913 916 914 trace_f2fs_submit_page_bio(page, fio); 917 915 ··· 1091 1085 } 1092 1086 ClearPageError(page); 1093 1087 inc_page_count(sbi, F2FS_RD_DATA); 1094 - f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); 1088 + f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE); 1095 1089 __submit_bio(sbi, bio, DATA); 1096 1090 return 0; 1097 1091 } ··· 1223 1217 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, 1224 1218 DATA_GENERIC_ENHANCE_READ)) { 1225 1219 err = -EFSCORRUPTED; 1220 + f2fs_handle_error(F2FS_I_SB(inode), 1221 + ERROR_INVALID_BLKADDR); 1226 1222 goto put_err; 1227 1223 } 1228 1224 goto got_it; ··· 1245 1237 dn.data_blkaddr, 1246 1238 DATA_GENERIC_ENHANCE)) { 1247 1239 err = -EFSCORRUPTED; 1240 + f2fs_handle_error(F2FS_I_SB(inode), 1241 + ERROR_INVALID_BLKADDR); 1248 1242 goto put_err; 1249 1243 } 1250 1244 got_it: ··· 1560 1550 if (__is_valid_data_blkaddr(blkaddr) && 1561 1551 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { 1562 1552 err = -EFSCORRUPTED; 1553 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 1563 1554 goto sync_out; 1564 1555 } 1565 1556 ··· 1606 1595 (flag != F2FS_GET_BLOCK_FIEMAP || 1607 1596 IS_ENABLED(CONFIG_F2FS_CHECK_FS))) { 1608 1597 err = -EFSCORRUPTED; 1598 + f2fs_handle_error(sbi, 1599 + ERROR_CORRUPTED_CLUSTER); 1609 1600 goto sync_out; 1610 1601 } 1611 1602 if (flag == F2FS_GET_BLOCK_BMAP) { ··· 1831 1818 1832 1819 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); 1833 1820 trace_f2fs_fiemap(inode, 0, phys, len, flags, err); 1834 - if (err || err == 1) 1821 + if (err) 1835 1822 return err; 1836 1823 } 1837 1824 ··· 2089 2076 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, 2090 2077 DATA_GENERIC_ENHANCE_READ)) { 2091 2078 ret = -EFSCORRUPTED; 2079 + f2fs_handle_error(F2FS_I_SB(inode), 2080 + ERROR_INVALID_BLKADDR); 2092 2081 goto out; 2093 
2082 } 2094 2083 } else { ··· 2139 2124 goto submit_and_realloc; 2140 2125 2141 2126 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); 2142 - f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE); 2127 + f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO, 2128 + F2FS_BLKSIZE); 2143 2129 ClearPageError(page); 2144 2130 *last_block_in_bio = block_nr; 2145 2131 goto out; ··· 2288 2272 refcount_inc(&dic->refcnt); 2289 2273 2290 2274 inc_page_count(sbi, F2FS_RD_DATA); 2291 - f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); 2292 - f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE); 2275 + f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); 2293 2276 ClearPageError(page); 2294 2277 *last_block_in_bio = blkaddr; 2295 2278 } ··· 2560 2545 return true; 2561 2546 2562 2547 /* if this is cold file, we should overwrite to avoid fragmentation */ 2563 - if (file_is_cold(inode)) 2548 + if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE)) 2564 2549 return true; 2565 2550 2566 2551 return check_inplace_update_policy(inode, fio); ··· 2634 2619 fio->old_blkaddr = ei.blk + page->index - ei.fofs; 2635 2620 2636 2621 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, 2637 - DATA_GENERIC_ENHANCE)) 2622 + DATA_GENERIC_ENHANCE)) { 2623 + f2fs_handle_error(fio->sbi, 2624 + ERROR_INVALID_BLKADDR); 2638 2625 return -EFSCORRUPTED; 2626 + } 2639 2627 2640 2628 ipu_force = true; 2641 2629 fio->need_lock = LOCK_DONE; ··· 2666 2648 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, 2667 2649 DATA_GENERIC_ENHANCE)) { 2668 2650 err = -EFSCORRUPTED; 2651 + f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR); 2669 2652 goto out_writepage; 2670 2653 } 2671 2654 ··· 2877 2858 } 2878 2859 unlock_page(page); 2879 2860 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && 2880 - !F2FS_I(inode)->cp_task && allow_balance) 2861 + !F2FS_I(inode)->wb_task && allow_balance) 2881 2862 f2fs_balance_fs(sbi, need_balance_fs); 2882 2863 2883 2864 if 
(unlikely(f2fs_cp_error(sbi))) { ··· 3177 3158 struct writeback_control *wbc) 3178 3159 { 3179 3160 /* to avoid deadlock in path of data flush */ 3180 - if (F2FS_I(inode)->cp_task) 3161 + if (F2FS_I(inode)->wb_task) 3181 3162 return false; 3182 3163 3183 3164 if (!S_ISREG(inode->i_mode)) ··· 3580 3561 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 3581 3562 DATA_GENERIC_ENHANCE_READ)) { 3582 3563 err = -EFSCORRUPTED; 3564 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 3583 3565 goto fail; 3584 3566 } 3585 3567 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true); ··· 3719 3699 folio_mark_uptodate(folio); 3720 3700 BUG_ON(folio_test_swapcache(folio)); 3721 3701 3722 - if (!folio_test_dirty(folio)) { 3723 - filemap_dirty_folio(mapping, folio); 3702 + if (filemap_dirty_folio(mapping, folio)) { 3724 3703 f2fs_update_dirty_folio(inode, folio); 3725 3704 return true; 3726 3705 } ··· 3991 3972 if (ret < 0) 3992 3973 return ret; 3993 3974 3975 + stat_inc_swapfile_inode(inode); 3994 3976 set_inode_flag(inode, FI_PIN_FILE); 3995 3977 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3996 3978 return ret; ··· 4001 3981 { 4002 3982 struct inode *inode = file_inode(file); 4003 3983 3984 + stat_dec_swapfile_inode(inode); 4004 3985 clear_inode_flag(inode, FI_PIN_FILE); 4005 3986 } 4006 3987 #else
+7 -2
fs/f2fs/debug.c
··· 91 91 si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; 92 92 si->nquota_files = sbi->nquota_files; 93 93 si->ndirty_all = sbi->ndirty_inode[DIRTY_META]; 94 - si->aw_cnt = sbi->atomic_files; 94 + si->aw_cnt = atomic_read(&sbi->atomic_files); 95 95 si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt); 96 96 si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ); 97 97 si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE); ··· 135 135 si->inline_inode = atomic_read(&sbi->inline_inode); 136 136 si->inline_dir = atomic_read(&sbi->inline_dir); 137 137 si->compr_inode = atomic_read(&sbi->compr_inode); 138 + si->swapfile_inode = atomic_read(&sbi->swapfile_inode); 138 139 si->compr_blocks = atomic64_read(&sbi->compr_blocks); 139 140 si->append = sbi->im[APPEND_INO].ino_num; 140 141 si->update = sbi->im[UPDATE_INO].ino_num; ··· 348 347 349 348 seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n", 350 349 si->sbi->sb->s_bdev, i++, 351 - f2fs_readonly(si->sbi->sb) ? "RO": "RW", 350 + f2fs_readonly(si->sbi->sb) ? "RO" : "RW", 352 351 is_set_ckpt_flags(si->sbi, CP_DISABLED_FLAG) ? 353 352 "Disabled" : (f2fs_cp_error(si->sbi) ? "Error" : "Good")); 354 353 if (si->sbi->s_flag) { ··· 386 385 si->inline_dir); 387 386 seq_printf(s, " - Compressed Inode: %u, Blocks: %llu\n", 388 387 si->compr_inode, si->compr_blocks); 388 + seq_printf(s, " - Swapfile Inode: %u\n", 389 + si->swapfile_inode); 389 390 seq_printf(s, " - Orphan/Append/Update Inode: %u, %u, %u\n", 390 391 si->orphans, si->append, si->update); 391 392 seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n", ··· 610 607 atomic_set(&sbi->inline_dir, 0); 611 608 atomic_set(&sbi->compr_inode, 0); 612 609 atomic64_set(&sbi->compr_blocks, 0); 610 + atomic_set(&sbi->swapfile_inode, 0); 611 + atomic_set(&sbi->atomic_files, 0); 613 612 atomic_set(&sbi->inplace_count, 0); 614 613 for (i = META_CP; i < META_MAX; i++) 615 614 atomic_set(&sbi->meta_count[i], 0);
+1
fs/f2fs/dir.c
··· 1041 1041 __func__, le16_to_cpu(de->name_len)); 1042 1042 set_sbi_flag(sbi, SBI_NEED_FSCK); 1043 1043 err = -EFSCORRUPTED; 1044 + f2fs_handle_error(sbi, ERROR_CORRUPTED_DIRENT); 1044 1045 goto out; 1045 1046 } 1046 1047
+4 -5
fs/f2fs/extent_cache.c
··· 544 544 if (!et) 545 545 return; 546 546 547 - trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len); 547 + trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len, 0); 548 548 549 549 write_lock(&et->lock); 550 550 ··· 583 583 org_end = dei.fofs + dei.len; 584 584 f2fs_bug_on(sbi, pos >= org_end); 585 585 586 - if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) { 586 + if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) { 587 587 en->ei.len = pos - en->ei.fofs; 588 588 prev_en = en; 589 589 parts = 1; ··· 675 675 struct rb_node **insert_p = NULL, *insert_parent = NULL; 676 676 bool leftmost = false; 677 677 678 - trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen); 678 + trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen, c_len); 679 679 680 680 /* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */ 681 681 if (is_inode_flag_set(inode, FI_NO_EXTENT)) ··· 804 804 if (!f2fs_may_extent_tree(inode)) 805 805 return; 806 806 807 - set_inode_flag(inode, FI_NO_EXTENT); 808 - 809 807 write_lock(&et->lock); 808 + set_inode_flag(inode, FI_NO_EXTENT); 810 809 __free_extent_tree(sbi, et); 811 810 if (et->largest.len) { 812 811 et->largest.len = 0;
+39 -9
fs/f2fs/f2fs.h
··· 266 266 * condition of read on truncated area 267 267 * by extent_cache 268 268 */ 269 + DATA_GENERIC_ENHANCE_UPDATE, /* 270 + * strong check on range and segment 271 + * bitmap for update case 272 + */ 269 273 META_GENERIC, 270 274 }; 271 275 ··· 278 274 ORPHAN_INO, /* for orphan ino list */ 279 275 APPEND_INO, /* for append ino list */ 280 276 UPDATE_INO, /* for update ino list */ 281 - TRANS_DIR_INO, /* for trasactions dir ino list */ 277 + TRANS_DIR_INO, /* for transactions dir ino list */ 282 278 FLUSH_INO, /* for multiple device flushing */ 283 279 MAX_INO_ENTRY, /* max. list */ 284 280 }; ··· 786 782 unsigned int clevel; /* maximum level of given file name */ 787 783 struct task_struct *task; /* lookup and create consistency */ 788 784 struct task_struct *cp_task; /* separate cp/wb IO stats*/ 785 + struct task_struct *wb_task; /* indicate inode is in context of writeback */ 789 786 nid_t i_xattr_nid; /* node id that contains xattrs */ 790 787 loff_t last_disk_size; /* lastly written file size */ 791 788 spinlock_t i_size_lock; /* protect last_disk_size */ ··· 1163 1158 APP_BUFFERED_IO, /* app buffered write IOs */ 1164 1159 APP_WRITE_IO, /* app write IOs */ 1165 1160 APP_MAPPED_IO, /* app mapped IOs */ 1161 + APP_BUFFERED_CDATA_IO, /* app buffered write IOs on compressed file */ 1162 + APP_MAPPED_CDATA_IO, /* app mapped write IOs on compressed file */ 1166 1163 FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */ 1164 + FS_CDATA_IO, /* data IOs from kworker/fsync/reclaimer on compressed file */ 1167 1165 FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */ 1168 1166 FS_META_IO, /* meta IOs from kworker/reclaimer */ 1169 1167 FS_GC_DATA_IO, /* data IOs from forground gc */ ··· 1180 1172 APP_BUFFERED_READ_IO, /* app buffered read IOs */ 1181 1173 APP_READ_IO, /* app read IOs */ 1182 1174 APP_MAPPED_READ_IO, /* app mapped read IOs */ 1175 + APP_BUFFERED_CDATA_READ_IO, /* app buffered read IOs on compressed file */ 1176 + APP_MAPPED_CDATA_READ_IO, /* 
app mapped read IOs on compressed file */ 1183 1177 FS_DATA_READ_IO, /* data read IOs */ 1184 1178 FS_GDATA_READ_IO, /* data read IOs from background gc */ 1185 1179 FS_CDATA_READ_IO, /* compressed data read IOs */ ··· 1257 1247 DIR_INODE, /* for dirty dir inode */ 1258 1248 FILE_INODE, /* for dirty regular/symlink inode */ 1259 1249 DIRTY_META, /* for all dirtied inode metadata */ 1260 - ATOMIC_FILE, /* for all atomic files */ 1261 1250 NR_INODE_TYPE, 1262 1251 }; 1263 1252 ··· 1735 1726 unsigned int gc_mode; /* current GC state */ 1736 1727 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1737 1728 spinlock_t gc_urgent_high_lock; 1738 - bool gc_urgent_high_limited; /* indicates having limited trial count */ 1739 1729 unsigned int gc_urgent_high_remaining; /* remaining trial count for GC_URGENT_HIGH */ 1740 1730 1741 1731 /* for skip statistic */ 1742 - unsigned int atomic_files; /* # of opened atomic file */ 1743 1732 unsigned long long skipped_gc_rwsem; /* FG_GC only */ 1744 1733 1745 1734 /* threshold for gc trials on pinned files */ ··· 1768 1761 atomic_t inline_dir; /* # of inline_dentry inodes */ 1769 1762 atomic_t compr_inode; /* # of compressed inodes */ 1770 1763 atomic64_t compr_blocks; /* # of compressed blocks */ 1764 + atomic_t swapfile_inode; /* # of swapfile inodes */ 1765 + atomic_t atomic_files; /* # of opened atomic file */ 1771 1766 atomic_t max_aw_cnt; /* max # of atomic writes */ 1772 1767 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ 1773 1768 unsigned int other_skip_bggc; /* skip background gc for other reasons */ ··· 1814 1805 __u32 s_chksum_seed; 1815 1806 1816 1807 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1808 + 1809 + unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */ 1810 + spinlock_t error_lock; /* protect errors array */ 1811 + bool error_dirty; /* errors of sb is dirty */ 1817 1812 1818 1813 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 1819 
1814 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ ··· 2538 2525 2539 2526 if (__cp_payload(sbi) > 0) { 2540 2527 if (flag == NAT_BITMAP) 2541 - return &ckpt->sit_nat_version_bitmap; 2528 + return tmp_ptr; 2542 2529 else 2543 2530 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2544 2531 } else { ··· 3560 3547 int f2fs_quota_sync(struct super_block *sb, int type); 3561 3548 loff_t max_file_blocks(struct inode *inode); 3562 3549 void f2fs_quota_off_umount(struct super_block *sb); 3550 + void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason); 3551 + void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error); 3563 3552 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3564 3553 int f2fs_sync_fs(struct super_block *sb, int sync); 3565 3554 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); ··· 3721 3706 /* 3722 3707 * checkpoint.c 3723 3708 */ 3724 - void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); 3709 + void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, 3710 + unsigned char reason); 3711 + void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); 3725 3712 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3726 3713 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3727 3714 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); ··· 3753 3736 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3754 3737 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio); 3755 3738 void f2fs_remove_dirty_inode(struct inode *inode); 3756 - int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); 3739 + int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, 3740 + bool from_cp); 3757 3741 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); 3758 3742 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); 3759 3743 int f2fs_write_checkpoint(struct 
f2fs_sb_info *sbi, struct cp_control *cpc); ··· 3876 3858 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; 3877 3859 unsigned int cur_ckpt_time, peak_ckpt_time; 3878 3860 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 3879 - int compr_inode; 3861 + int compr_inode, swapfile_inode; 3880 3862 unsigned long long compr_blocks; 3881 3863 int aw_cnt, max_aw_cnt; 3882 3864 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; ··· 3965 3947 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3966 3948 #define stat_sub_compr_blocks(inode, blocks) \ 3967 3949 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3950 + #define stat_inc_swapfile_inode(inode) \ 3951 + (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode)) 3952 + #define stat_dec_swapfile_inode(inode) \ 3953 + (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode)) 3954 + #define stat_inc_atomic_inode(inode) \ 3955 + (atomic_inc(&F2FS_I_SB(inode)->atomic_files)) 3956 + #define stat_dec_atomic_inode(inode) \ 3957 + (atomic_dec(&F2FS_I_SB(inode)->atomic_files)) 3968 3958 #define stat_inc_meta_count(sbi, blkaddr) \ 3969 3959 do { \ 3970 3960 if (blkaddr < SIT_I(sbi)->sit_base_addr) \ ··· 3992 3966 (atomic_inc(&(sbi)->inplace_count)) 3993 3967 #define stat_update_max_atomic_write(inode) \ 3994 3968 do { \ 3995 - int cur = F2FS_I_SB(inode)->atomic_files; \ 3969 + int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \ 3996 3970 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 3997 3971 if (cur > max) \ 3998 3972 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ ··· 4057 4031 #define stat_dec_compr_inode(inode) do { } while (0) 4058 4032 #define stat_add_compr_blocks(inode, blocks) do { } while (0) 4059 4033 #define stat_sub_compr_blocks(inode, blocks) do { } while (0) 4034 + #define stat_inc_swapfile_inode(inode) do { } while (0) 4035 + #define stat_dec_swapfile_inode(inode) do { } while (0) 4036 + #define stat_inc_atomic_inode(inode) do { } while (0) 4037 + #define 
stat_dec_atomic_inode(inode) do { } while (0) 4060 4038 #define stat_update_max_atomic_write(inode) do { } while (0) 4061 4039 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) 4062 4040 #define stat_inc_seg_type(sbi, curseg) do { } while (0)
+36 -21
fs/f2fs/file.c
··· 43 43 44 44 ret = filemap_fault(vmf); 45 45 if (!ret) 46 - f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO, 47 - F2FS_BLKSIZE); 46 + f2fs_update_iostat(F2FS_I_SB(inode), inode, 47 + APP_MAPPED_READ_IO, F2FS_BLKSIZE); 48 48 49 49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret); 50 50 ··· 154 154 if (!PageUptodate(page)) 155 155 SetPageUptodate(page); 156 156 157 - f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE); 157 + f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE); 158 158 f2fs_update_time(sbi, REQ_TIME); 159 159 160 160 trace_f2fs_vm_page_mkwrite(page, DATA); ··· 822 822 /* disallow direct IO if any of devices has unaligned blksize */ 823 823 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) 824 824 return true; 825 - 825 + /* 826 + * for blkzoned device, fallback direct IO to buffered IO, so 827 + * all IOs can be serialized by log-structured write. 828 + */ 829 + if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE)) 830 + return true; 826 831 if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi)) 827 832 return true; 828 833 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) ··· 917 912 inode->i_ctime = attr->ia_ctime; 918 913 if (ia_valid & ATTR_MODE) { 919 914 umode_t mode = attr->ia_mode; 920 - kgid_t kgid = i_gid_into_mnt(mnt_userns, inode); 915 + vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode); 921 916 922 - if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID)) 917 + if (!vfsgid_in_group_p(vfsgid) && 918 + !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID)) 923 919 mode &= ~S_ISGID; 924 920 set_acl_inode(inode, mode); 925 921 } ··· 1202 1196 !f2fs_is_valid_blkaddr(sbi, *blkaddr, 1203 1197 DATA_GENERIC_ENHANCE)) { 1204 1198 f2fs_put_dnode(&dn); 1199 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 1205 1200 return -EFSCORRUPTED; 1206 1201 } 1207 1202 ··· 1487 1480 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr, 1488 1481 DATA_GENERIC_ENHANCE)) { 1489 1482 ret = 
-EFSCORRUPTED; 1483 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 1490 1484 break; 1491 1485 } 1492 1486 ··· 2097 2089 } 2098 2090 f2fs_i_size_write(fi->cow_inode, i_size_read(inode)); 2099 2091 2100 - spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 2101 - sbi->atomic_files++; 2102 - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 2092 + stat_inc_atomic_inode(inode); 2103 2093 2104 2094 set_inode_flag(inode, FI_ATOMIC_FILE); 2105 2095 set_inode_flag(fi->cow_inode, FI_COW_FILE); ··· 2191 2185 if (ret) { 2192 2186 if (ret == -EROFS) { 2193 2187 ret = 0; 2194 - f2fs_stop_checkpoint(sbi, false); 2188 + f2fs_stop_checkpoint(sbi, false, 2189 + STOP_CP_REASON_SHUTDOWN); 2195 2190 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2196 2191 trace_f2fs_shutdown(sbi, in, ret); 2197 2192 } ··· 2205 2198 ret = freeze_bdev(sb->s_bdev); 2206 2199 if (ret) 2207 2200 goto out; 2208 - f2fs_stop_checkpoint(sbi, false); 2201 + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); 2209 2202 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2210 2203 thaw_bdev(sb->s_bdev); 2211 2204 break; ··· 2214 2207 ret = f2fs_sync_fs(sb, 1); 2215 2208 if (ret) 2216 2209 goto out; 2217 - f2fs_stop_checkpoint(sbi, false); 2210 + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); 2218 2211 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2219 2212 break; 2220 2213 case F2FS_GOING_DOWN_NOSYNC: 2221 - f2fs_stop_checkpoint(sbi, false); 2214 + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); 2222 2215 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2223 2216 break; 2224 2217 case F2FS_GOING_DOWN_METAFLUSH: 2225 2218 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO); 2226 - f2fs_stop_checkpoint(sbi, false); 2219 + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); 2227 2220 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2228 2221 break; 2229 2222 case F2FS_GOING_DOWN_NEED_FSCK: ··· 3369 3362 if (!__is_valid_data_blkaddr(blkaddr)) 3370 3363 continue; 3371 3364 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, 3372 - DATA_GENERIC_ENHANCE))) 
3365 + DATA_GENERIC_ENHANCE))) { 3366 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 3373 3367 return -EFSCORRUPTED; 3368 + } 3374 3369 } 3375 3370 3376 3371 while (count) { ··· 3533 3524 if (!__is_valid_data_blkaddr(blkaddr)) 3534 3525 continue; 3535 3526 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, 3536 - DATA_GENERIC_ENHANCE))) 3527 + DATA_GENERIC_ENHANCE))) { 3528 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 3537 3529 return -EFSCORRUPTED; 3530 + } 3538 3531 } 3539 3532 3540 3533 while (count) { ··· 3808 3797 DATA_GENERIC_ENHANCE)) { 3809 3798 ret = -EFSCORRUPTED; 3810 3799 f2fs_put_dnode(&dn); 3800 + f2fs_handle_error(sbi, 3801 + ERROR_INVALID_BLKADDR); 3811 3802 goto out; 3812 3803 } 3813 3804 ··· 4266 4253 dec_page_count(sbi, F2FS_DIO_READ); 4267 4254 if (error) 4268 4255 return error; 4269 - f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, size); 4256 + f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size); 4270 4257 return 0; 4271 4258 } 4272 4259 ··· 4355 4342 } else { 4356 4343 ret = filemap_read(iocb, to, 0); 4357 4344 if (ret > 0) 4358 - f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret); 4345 + f2fs_update_iostat(F2FS_I_SB(inode), inode, 4346 + APP_BUFFERED_READ_IO, ret); 4359 4347 } 4360 4348 if (trace_f2fs_dataread_end_enabled()) 4361 4349 trace_f2fs_dataread_end(inode, pos, ret); ··· 4473 4459 4474 4460 if (ret > 0) { 4475 4461 iocb->ki_pos += ret; 4476 - f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_IO, ret); 4462 + f2fs_update_iostat(F2FS_I_SB(inode), inode, 4463 + APP_BUFFERED_IO, ret); 4477 4464 } 4478 4465 return ret; 4479 4466 } ··· 4487 4472 dec_page_count(sbi, F2FS_DIO_WRITE); 4488 4473 if (error) 4489 4474 return error; 4490 - f2fs_update_iostat(sbi, APP_DIRECT_IO, size); 4475 + f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size); 4491 4476 return 0; 4492 4477 } 4493 4478 ··· 4675 4660 skip_write_trace: 4676 4661 /* Do the actual write. */ 4677 4662 ret = dio ? 
4678 - f2fs_dio_write_iter(iocb, from, &may_need_sync): 4663 + f2fs_dio_write_iter(iocb, from, &may_need_sync) : 4679 4664 f2fs_buffered_write_iter(iocb, from); 4680 4665 4681 4666 if (trace_f2fs_datawrite_end_enabled())
+25 -15
fs/f2fs/gc.c
··· 74 74 75 75 if (time_to_inject(sbi, FAULT_CHECKPOINT)) { 76 76 f2fs_show_injection_info(sbi, FAULT_CHECKPOINT); 77 - f2fs_stop_checkpoint(sbi, false); 77 + f2fs_stop_checkpoint(sbi, false, 78 + STOP_CP_REASON_FAULT_INJECT); 78 79 } 79 80 80 81 if (!sb_start_write_trylock(sbi->sb)) { ··· 98 97 */ 99 98 if (sbi->gc_mode == GC_URGENT_HIGH) { 100 99 spin_lock(&sbi->gc_urgent_high_lock); 101 - if (sbi->gc_urgent_high_limited) { 102 - if (!sbi->gc_urgent_high_remaining) { 103 - sbi->gc_urgent_high_limited = false; 104 - spin_unlock(&sbi->gc_urgent_high_lock); 105 - sbi->gc_mode = GC_NORMAL; 106 - continue; 107 - } 100 + if (sbi->gc_urgent_high_remaining) { 108 101 sbi->gc_urgent_high_remaining--; 102 + if (!sbi->gc_urgent_high_remaining) 103 + sbi->gc_mode = GC_NORMAL; 109 104 } 110 105 spin_unlock(&sbi->gc_urgent_high_lock); 111 106 } ··· 1079 1082 { 1080 1083 struct page *node_page; 1081 1084 nid_t nid; 1082 - unsigned int ofs_in_node; 1085 + unsigned int ofs_in_node, max_addrs; 1083 1086 block_t source_blkaddr; 1084 1087 1085 1088 nid = le32_to_cpu(sum->nid); ··· 1102 1105 1103 1106 if (f2fs_check_nid_range(sbi, dni->ino)) { 1104 1107 f2fs_put_page(node_page, 1); 1108 + return false; 1109 + } 1110 + 1111 + max_addrs = IS_INODE(node_page) ? 
DEF_ADDRS_PER_INODE : 1112 + DEF_ADDRS_PER_BLOCK; 1113 + if (ofs_in_node >= max_addrs) { 1114 + f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u", 1115 + ofs_in_node, dni->ino, dni->nid, max_addrs); 1105 1116 return false; 1106 1117 } 1107 1118 ··· 1164 1159 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, 1165 1160 DATA_GENERIC_ENHANCE_READ))) { 1166 1161 err = -EFSCORRUPTED; 1162 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 1167 1163 goto put_page; 1168 1164 } 1169 1165 goto got_it; ··· 1183 1177 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, 1184 1178 DATA_GENERIC_ENHANCE))) { 1185 1179 err = -EFSCORRUPTED; 1180 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 1186 1181 goto put_page; 1187 1182 } 1188 1183 got_it: ··· 1213 1206 f2fs_put_page(fio.encrypted_page, 0); 1214 1207 f2fs_put_page(page, 1); 1215 1208 1216 - f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); 1217 - f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); 1209 + f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); 1210 + f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE); 1218 1211 1219 1212 return 0; 1220 1213 put_encrypted_page: ··· 1314 1307 goto up_out; 1315 1308 } 1316 1309 1317 - f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); 1318 - f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); 1310 + f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO, 1311 + F2FS_BLKSIZE); 1312 + f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO, 1313 + F2FS_BLKSIZE); 1319 1314 1320 1315 lock_page(mpage); 1321 1316 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || ··· 1369 1360 goto put_page_out; 1370 1361 } 1371 1362 1372 - f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE); 1363 + f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE); 1373 1364 1374 1365 f2fs_update_data_blkaddr(&dn, newaddr); 1375 1366 set_inode_flag(inode, FI_APPEND_WRITE); ··· 1715 1706 f2fs_err(sbi, "Inconsistent 
segment (%u) type [%d, %d] in SSA and SIT", 1716 1707 segno, type, GET_SUM_TYPE((&sum->footer))); 1717 1708 set_sbi_flag(sbi, SBI_NEED_FSCK); 1718 - f2fs_stop_checkpoint(sbi, false); 1709 + f2fs_stop_checkpoint(sbi, false, 1710 + STOP_CP_REASON_CORRUPTED_SUMMARY); 1719 1711 goto skip; 1720 1712 } 1721 1713
+6 -11
fs/f2fs/inline.c
··· 64 64 void f2fs_do_read_inline_data(struct page *page, struct page *ipage) 65 65 { 66 66 struct inode *inode = page->mapping->host; 67 - void *src_addr, *dst_addr; 68 67 69 68 if (PageUptodate(page)) 70 69 return; ··· 73 74 zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE); 74 75 75 76 /* Copy the whole inline data block */ 76 - src_addr = inline_data_addr(inode, ipage); 77 - dst_addr = kmap_atomic(page); 78 - memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode)); 79 - flush_dcache_page(page); 80 - kunmap_atomic(dst_addr); 77 + memcpy_to_page(page, 0, inline_data_addr(inode, ipage), 78 + MAX_INLINE_DATA(inode)); 81 79 if (!PageUptodate(page)) 82 80 SetPageUptodate(page); 83 81 } ··· 160 164 set_sbi_flag(fio.sbi, SBI_NEED_FSCK); 161 165 f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", 162 166 __func__, dn->inode->i_ino, dn->data_blkaddr); 167 + f2fs_handle_error(fio.sbi, ERROR_INVALID_BLKADDR); 163 168 return -EFSCORRUPTED; 164 169 } 165 170 ··· 243 246 244 247 int f2fs_write_inline_data(struct inode *inode, struct page *page) 245 248 { 246 - void *src_addr, *dst_addr; 247 249 struct dnode_of_data dn; 248 250 int err; 249 251 ··· 259 263 f2fs_bug_on(F2FS_I_SB(inode), page->index); 260 264 261 265 f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true); 262 - src_addr = kmap_atomic(page); 263 - dst_addr = inline_data_addr(inode, dn.inode_page); 264 - memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode)); 265 - kunmap_atomic(src_addr); 266 + memcpy_from_page(inline_data_addr(inode, dn.inode_page), 267 + page, 0, MAX_INLINE_DATA(inode)); 266 268 set_page_dirty(dn.inode_page); 267 269 268 270 f2fs_clear_page_cache_dirty_tag(page); ··· 413 419 set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); 414 420 f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", 415 421 __func__, dir->i_ino, dn.data_blkaddr); 422 + f2fs_handle_error(F2FS_P_SB(page), ERROR_INVALID_BLKADDR); 416 423 err = 
-EFSCORRUPTED; 417 424 goto out; 418 425 }
+35 -16
fs/f2fs/inode.c
··· 81 81 82 82 if (!__is_valid_data_blkaddr(addr)) 83 83 return 1; 84 - if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) 84 + if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) { 85 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 85 86 return -EFSCORRUPTED; 87 + } 86 88 return 0; 87 89 } 88 90 ··· 335 333 return true; 336 334 } 337 335 336 + static void init_idisk_time(struct inode *inode) 337 + { 338 + struct f2fs_inode_info *fi = F2FS_I(inode); 339 + 340 + fi->i_disk_time[0] = inode->i_atime; 341 + fi->i_disk_time[1] = inode->i_ctime; 342 + fi->i_disk_time[2] = inode->i_mtime; 343 + fi->i_disk_time[3] = fi->i_crtime; 344 + } 345 + 338 346 static int do_read_inode(struct inode *inode) 339 347 { 340 348 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); ··· 417 405 418 406 if (!sanity_check_inode(inode, node_page)) { 419 407 f2fs_put_page(node_page, 1); 408 + f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); 420 409 return -EFSCORRUPTED; 421 410 } 422 411 ··· 478 465 } 479 466 } 480 467 481 - fi->i_disk_time[0] = inode->i_atime; 482 - fi->i_disk_time[1] = inode->i_ctime; 483 - fi->i_disk_time[2] = inode->i_mtime; 484 - fi->i_disk_time[3] = fi->i_crtime; 468 + init_idisk_time(inode); 485 469 f2fs_put_page(node_page, 1); 486 470 487 471 stat_inc_inline_xattr(inode); ··· 488 478 stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks)); 489 479 490 480 return 0; 481 + } 482 + 483 + static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino) 484 + { 485 + return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) || 486 + ino == F2FS_COMPRESS_INO(sbi); 491 487 } 492 488 493 489 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) ··· 507 491 return ERR_PTR(-ENOMEM); 508 492 509 493 if (!(inode->i_state & I_NEW)) { 494 + if (is_meta_ino(sbi, ino)) { 495 + f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino); 496 + set_sbi_flag(sbi, SBI_NEED_FSCK); 497 + ret = -EFSCORRUPTED; 498 + trace_f2fs_iget_exit(inode, ret); 
499 + iput(inode); 500 + f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); 501 + return ERR_PTR(ret); 502 + } 503 + 510 504 trace_f2fs_iget(inode); 511 505 return inode; 512 506 } 513 - if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi)) 514 - goto make_now; 515 507 516 - #ifdef CONFIG_F2FS_FS_COMPRESSION 517 - if (ino == F2FS_COMPRESS_INO(sbi)) 508 + if (is_meta_ino(sbi, ino)) 518 509 goto make_now; 519 - #endif 520 510 521 511 ret = do_read_inode(inode); 522 512 if (ret) ··· 698 676 if (inode->i_nlink == 0) 699 677 clear_page_private_inline(node_page); 700 678 701 - F2FS_I(inode)->i_disk_time[0] = inode->i_atime; 702 - F2FS_I(inode)->i_disk_time[1] = inode->i_ctime; 703 - F2FS_I(inode)->i_disk_time[2] = inode->i_mtime; 704 - F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime; 705 - 679 + init_idisk_time(inode); 706 680 #ifdef CONFIG_F2FS_CHECK_FS 707 681 f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page); 708 682 #endif ··· 717 699 cond_resched(); 718 700 goto retry; 719 701 } else if (err != -ENOENT) { 720 - f2fs_stop_checkpoint(sbi, false); 702 + f2fs_stop_checkpoint(sbi, false, 703 + STOP_CP_REASON_UPDATE_INODE); 721 704 } 722 705 return; 723 706 }
+53 -21
fs/f2fs/iostat.c
··· 31 31 32 32 /* print app write IOs */ 33 33 seq_puts(seq, "[WRITE]\n"); 34 - seq_printf(seq, "app buffered: %-16llu\n", 34 + seq_printf(seq, "app buffered data: %-16llu\n", 35 35 sbi->rw_iostat[APP_BUFFERED_IO]); 36 - seq_printf(seq, "app direct: %-16llu\n", 36 + seq_printf(seq, "app direct data: %-16llu\n", 37 37 sbi->rw_iostat[APP_DIRECT_IO]); 38 - seq_printf(seq, "app mapped: %-16llu\n", 38 + seq_printf(seq, "app mapped data: %-16llu\n", 39 39 sbi->rw_iostat[APP_MAPPED_IO]); 40 + seq_printf(seq, "app buffered cdata: %-16llu\n", 41 + sbi->rw_iostat[APP_BUFFERED_CDATA_IO]); 42 + seq_printf(seq, "app mapped cdata: %-16llu\n", 43 + sbi->rw_iostat[APP_MAPPED_CDATA_IO]); 40 44 41 45 /* print fs write IOs */ 42 - seq_printf(seq, "fs data: %-16llu\n", 46 + seq_printf(seq, "fs data: %-16llu\n", 43 47 sbi->rw_iostat[FS_DATA_IO]); 44 - seq_printf(seq, "fs node: %-16llu\n", 48 + seq_printf(seq, "fs cdata: %-16llu\n", 49 + sbi->rw_iostat[FS_CDATA_IO]); 50 + seq_printf(seq, "fs node: %-16llu\n", 45 51 sbi->rw_iostat[FS_NODE_IO]); 46 - seq_printf(seq, "fs meta: %-16llu\n", 52 + seq_printf(seq, "fs meta: %-16llu\n", 47 53 sbi->rw_iostat[FS_META_IO]); 48 - seq_printf(seq, "fs gc data: %-16llu\n", 54 + seq_printf(seq, "fs gc data: %-16llu\n", 49 55 sbi->rw_iostat[FS_GC_DATA_IO]); 50 - seq_printf(seq, "fs gc node: %-16llu\n", 56 + seq_printf(seq, "fs gc node: %-16llu\n", 51 57 sbi->rw_iostat[FS_GC_NODE_IO]); 52 - seq_printf(seq, "fs cp data: %-16llu\n", 58 + seq_printf(seq, "fs cp data: %-16llu\n", 53 59 sbi->rw_iostat[FS_CP_DATA_IO]); 54 - seq_printf(seq, "fs cp node: %-16llu\n", 60 + seq_printf(seq, "fs cp node: %-16llu\n", 55 61 sbi->rw_iostat[FS_CP_NODE_IO]); 56 - seq_printf(seq, "fs cp meta: %-16llu\n", 62 + seq_printf(seq, "fs cp meta: %-16llu\n", 57 63 sbi->rw_iostat[FS_CP_META_IO]); 58 64 59 65 /* print app read IOs */ 60 66 seq_puts(seq, "[READ]\n"); 61 - seq_printf(seq, "app buffered: %-16llu\n", 67 + seq_printf(seq, "app buffered data: %-16llu\n", 62 68 
sbi->rw_iostat[APP_BUFFERED_READ_IO]); 63 - seq_printf(seq, "app direct: %-16llu\n", 69 + seq_printf(seq, "app direct data: %-16llu\n", 64 70 sbi->rw_iostat[APP_DIRECT_READ_IO]); 65 - seq_printf(seq, "app mapped: %-16llu\n", 71 + seq_printf(seq, "app mapped data: %-16llu\n", 66 72 sbi->rw_iostat[APP_MAPPED_READ_IO]); 73 + seq_printf(seq, "app buffered cdata: %-16llu\n", 74 + sbi->rw_iostat[APP_BUFFERED_CDATA_READ_IO]); 75 + seq_printf(seq, "app mapped cdata: %-16llu\n", 76 + sbi->rw_iostat[APP_MAPPED_CDATA_READ_IO]); 67 77 68 78 /* print fs read IOs */ 69 - seq_printf(seq, "fs data: %-16llu\n", 79 + seq_printf(seq, "fs data: %-16llu\n", 70 80 sbi->rw_iostat[FS_DATA_READ_IO]); 71 - seq_printf(seq, "fs gc data: %-16llu\n", 81 + seq_printf(seq, "fs gc data: %-16llu\n", 72 82 sbi->rw_iostat[FS_GDATA_READ_IO]); 73 - seq_printf(seq, "fs compr_data: %-16llu\n", 83 + seq_printf(seq, "fs cdata: %-16llu\n", 74 84 sbi->rw_iostat[FS_CDATA_READ_IO]); 75 - seq_printf(seq, "fs node: %-16llu\n", 85 + seq_printf(seq, "fs node: %-16llu\n", 76 86 sbi->rw_iostat[FS_NODE_READ_IO]); 77 - seq_printf(seq, "fs meta: %-16llu\n", 87 + seq_printf(seq, "fs meta: %-16llu\n", 78 88 sbi->rw_iostat[FS_META_READ_IO]); 79 89 80 90 /* print other IOs */ 81 91 seq_puts(seq, "[OTHER]\n"); 82 - seq_printf(seq, "fs discard: %-16llu\n", 92 + seq_printf(seq, "fs discard: %-16llu\n", 83 93 sbi->rw_iostat[FS_DISCARD]); 84 94 85 95 return 0; ··· 169 159 spin_unlock_irq(&sbi->iostat_lat_lock); 170 160 } 171 161 172 - void f2fs_update_iostat(struct f2fs_sb_info *sbi, 162 + void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode, 173 163 enum iostat_type type, unsigned long long io_bytes) 174 164 { 175 165 unsigned long flags; ··· 185 175 186 176 if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO) 187 177 sbi->rw_iostat[APP_READ_IO] += io_bytes; 178 + 179 + #ifdef CONFIG_F2FS_FS_COMPRESSION 180 + if (inode && f2fs_compressed_file(inode)) { 181 + if (type == APP_BUFFERED_IO) 182 + 
sbi->rw_iostat[APP_BUFFERED_CDATA_IO] += io_bytes; 183 + 184 + if (type == APP_BUFFERED_READ_IO) 185 + sbi->rw_iostat[APP_BUFFERED_CDATA_READ_IO] += io_bytes; 186 + 187 + if (type == APP_MAPPED_READ_IO) 188 + sbi->rw_iostat[APP_MAPPED_CDATA_READ_IO] += io_bytes; 189 + 190 + if (type == APP_MAPPED_IO) 191 + sbi->rw_iostat[APP_MAPPED_CDATA_IO] += io_bytes; 192 + 193 + if (type == FS_DATA_READ_IO) 194 + sbi->rw_iostat[FS_CDATA_READ_IO] += io_bytes; 195 + 196 + if (type == FS_DATA_IO) 197 + sbi->rw_iostat[FS_CDATA_IO] += io_bytes; 198 + } 199 + #endif 188 200 189 201 spin_unlock_irqrestore(&sbi->iostat_lock, flags); 190 202
+2 -2
fs/f2fs/iostat.h
··· 31 31 extern int __maybe_unused iostat_info_seq_show(struct seq_file *seq, 32 32 void *offset); 33 33 extern void f2fs_reset_iostat(struct f2fs_sb_info *sbi); 34 - extern void f2fs_update_iostat(struct f2fs_sb_info *sbi, 34 + extern void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode, 35 35 enum iostat_type type, unsigned long long io_bytes); 36 36 37 37 struct bio_iostat_ctx { ··· 65 65 extern int f2fs_init_iostat(struct f2fs_sb_info *sbi); 66 66 extern void f2fs_destroy_iostat(struct f2fs_sb_info *sbi); 67 67 #else 68 - static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, 68 + static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode, 69 69 enum iostat_type type, unsigned long long io_bytes) {} 70 70 static inline void iostat_update_and_unbind_ctx(struct bio *bio, int rw) {} 71 71 static inline void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
+5 -4
fs/f2fs/node.c
··· 36 36 set_sbi_flag(sbi, SBI_NEED_FSCK); 37 37 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", 38 38 __func__, nid); 39 + f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); 39 40 return -EFSCORRUPTED; 40 41 } 41 42 return 0; ··· 586 585 ne = nat_in_journal(journal, i); 587 586 node_info_from_raw_nat(ni, &ne); 588 587 } 589 - up_read(&curseg->journal_rwsem); 588 + up_read(&curseg->journal_rwsem); 590 589 if (i >= 0) { 591 590 f2fs_up_read(&nm_i->nat_tree_lock); 592 591 goto cache; ··· 1296 1295 if (unlikely(new_ni.blk_addr != NULL_ADDR)) { 1297 1296 err = -EFSCORRUPTED; 1298 1297 set_sbi_flag(sbi, SBI_NEED_FSCK); 1298 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 1299 1299 goto fail; 1300 1300 } 1301 1301 #endif ··· 1371 1369 err = f2fs_submit_page_bio(&fio); 1372 1370 1373 1371 if (!err) 1374 - f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE); 1372 + f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE); 1375 1373 1376 1374 return err; 1377 1375 } ··· 2149 2147 if (IS_INODE(&folio->page)) 2150 2148 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page); 2151 2149 #endif 2152 - if (!folio_test_dirty(folio)) { 2153 - filemap_dirty_folio(mapping, folio); 2150 + if (filemap_dirty_folio(mapping, folio)) { 2154 2151 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES); 2155 2152 set_page_private_reference(&folio->page); 2156 2153 return true;
+26 -3
fs/f2fs/recovery.c
··· 474 474 struct dnode_of_data tdn = *dn; 475 475 nid_t ino, nid; 476 476 struct inode *inode; 477 - unsigned int offset; 477 + unsigned int offset, ofs_in_node, max_addrs; 478 478 block_t bidx; 479 479 int i; 480 480 ··· 501 501 got_it: 502 502 /* Use the locked dnode page and inode */ 503 503 nid = le32_to_cpu(sum.nid); 504 + ofs_in_node = le16_to_cpu(sum.ofs_in_node); 505 + 506 + max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode); 507 + if (ofs_in_node >= max_addrs) { 508 + f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u", 509 + ofs_in_node, dn->inode->i_ino, nid, max_addrs); 510 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUMMARY); 511 + return -EFSCORRUPTED; 512 + } 513 + 504 514 if (dn->inode->i_ino == nid) { 505 515 tdn.nid = nid; 506 516 if (!dn->inode_page_locked) 507 517 lock_page(dn->inode_page); 508 518 tdn.node_page = dn->inode_page; 509 - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); 519 + tdn.ofs_in_node = ofs_in_node; 510 520 goto truncate_out; 511 521 } else if (dn->nid == nid) { 512 - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); 522 + tdn.ofs_in_node = ofs_in_node; 513 523 goto truncate_out; 514 524 } 515 525 ··· 638 628 inode->i_ino, ofs_of_node(dn.node_page), 639 629 ofs_of_node(page)); 640 630 err = -EFSCORRUPTED; 631 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER); 641 632 goto err; 642 633 } 643 634 ··· 651 640 if (__is_valid_data_blkaddr(src) && 652 641 !f2fs_is_valid_blkaddr(sbi, src, META_POR)) { 653 642 err = -EFSCORRUPTED; 643 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 654 644 goto err; 655 645 } 656 646 657 647 if (__is_valid_data_blkaddr(dest) && 658 648 !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) { 659 649 err = -EFSCORRUPTED; 650 + f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 660 651 goto err; 661 652 } 662 653 ··· 708 695 memalloc_retry_wait(GFP_NOFS); 709 696 goto retry_prev; 710 697 } 698 + goto err; 699 + } 700 + 701 + if (f2fs_is_valid_blkaddr(sbi, dest, 702 + 
DATA_GENERIC_ENHANCE_UPDATE)) { 703 + f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u", 704 + dest, inode->i_ino, dn.ofs_in_node); 705 + err = -EFSCORRUPTED; 706 + f2fs_handle_error(sbi, 707 + ERROR_INVALID_BLKADDR); 711 708 goto err; 712 709 } 713 710
+23 -14
fs/f2fs/segment.c
··· 187 187 188 188 void f2fs_abort_atomic_write(struct inode *inode, bool clean) 189 189 { 190 - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 191 190 struct f2fs_inode_info *fi = F2FS_I(inode); 192 191 193 192 if (!f2fs_is_atomic_file(inode)) ··· 199 200 fi->cow_inode = NULL; 200 201 release_atomic_write_cnt(inode); 201 202 clear_inode_flag(inode, FI_ATOMIC_FILE); 202 - 203 - spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 204 - sbi->atomic_files--; 205 - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 203 + stat_dec_atomic_inode(inode); 206 204 } 207 205 208 206 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index, ··· 308 312 DATA_GENERIC_ENHANCE)) { 309 313 f2fs_put_dnode(&dn); 310 314 ret = -EFSCORRUPTED; 315 + f2fs_handle_error(sbi, 316 + ERROR_INVALID_BLKADDR); 311 317 goto out; 312 318 } 313 319 ··· 374 376 { 375 377 if (time_to_inject(sbi, FAULT_CHECKPOINT)) { 376 378 f2fs_show_injection_info(sbi, FAULT_CHECKPOINT); 377 - f2fs_stop_checkpoint(sbi, false); 379 + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT); 378 380 } 379 381 380 382 /* balance_fs_bg is able to be pending */ ··· 474 476 mutex_lock(&sbi->flush_lock); 475 477 476 478 blk_start_plug(&plug); 477 - f2fs_sync_dirty_inodes(sbi, FILE_INODE); 479 + f2fs_sync_dirty_inodes(sbi, FILE_INODE, false); 478 480 blk_finish_plug(&plug); 479 481 480 482 mutex_unlock(&sbi->flush_lock); 481 483 } 482 - f2fs_sync_fs(sbi->sb, true); 484 + f2fs_sync_fs(sbi->sb, 1); 483 485 stat_inc_bg_cp_count(sbi->stat_info); 484 486 } 485 487 ··· 692 694 } while (ret && --count); 693 695 694 696 if (ret) { 695 - f2fs_stop_checkpoint(sbi, false); 697 + f2fs_stop_checkpoint(sbi, false, 698 + STOP_CP_REASON_FLUSH_FAIL); 696 699 break; 697 700 } 698 701 ··· 1170 1171 1171 1172 atomic_inc(&dcc->issued_discard); 1172 1173 1173 - f2fs_update_iostat(sbi, FS_DISCARD, 1); 1174 + f2fs_update_iostat(sbi, NULL, FS_DISCARD, 1); 1174 1175 1175 1176 lstart += len; 1176 1177 start += len; ··· 3387 3388 
f2fs_submit_page_write(&fio); 3388 3389 3389 3390 stat_inc_meta_count(sbi, page->index); 3390 - f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE); 3391 + f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); 3391 3392 } 3392 3393 3393 3394 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio) ··· 3397 3398 set_summary(&sum, nid, 0, 0); 3398 3399 do_write_page(&sum, fio); 3399 3400 3400 - f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); 3401 + f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); 3401 3402 } 3402 3403 3403 3404 void f2fs_outplace_write_data(struct dnode_of_data *dn, ··· 3411 3412 do_write_page(&sum, fio); 3412 3413 f2fs_update_data_blkaddr(dn, fio->new_blkaddr); 3413 3414 3414 - f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE); 3415 + f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); 3415 3416 } 3416 3417 3417 3418 int f2fs_inplace_write_data(struct f2fs_io_info *fio) ··· 3431 3432 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", 3432 3433 __func__, segno); 3433 3434 err = -EFSCORRUPTED; 3435 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 3434 3436 goto drop_bio; 3435 3437 } 3436 3438 ··· 3453 3453 if (!err) { 3454 3454 f2fs_update_device_state(fio->sbi, fio->ino, 3455 3455 fio->new_blkaddr, 1); 3456 - f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); 3456 + f2fs_update_iostat(fio->sbi, fio->page->mapping->host, 3457 + fio->io_type, F2FS_BLKSIZE); 3457 3458 } 3458 3459 3459 3460 return err; ··· 4380 4379 if (se->type >= NR_PERSISTENT_LOG) { 4381 4380 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", 4382 4381 se->type, start); 4382 + f2fs_handle_error(sbi, 4383 + ERROR_INCONSISTENT_SUM_TYPE); 4383 4384 return -EFSCORRUPTED; 4384 4385 } 4385 4386 ··· 4418 4415 f2fs_err(sbi, "Wrong journal entry on segno %u", 4419 4416 start); 4420 4417 err = -EFSCORRUPTED; 4418 + f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL); 4421 4419 break; 4422 4420 } 4423 4421 ··· 4438 4434 
f2fs_err(sbi, "Invalid segment type: %u, segno: %u", 4439 4435 se->type, start); 4440 4436 err = -EFSCORRUPTED; 4437 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 4441 4438 break; 4442 4439 } 4443 4440 ··· 4470 4465 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) { 4471 4466 f2fs_err(sbi, "SIT is corrupted node# %u vs %u", 4472 4467 sit_valid_blocks[NODE], valid_node_count(sbi)); 4468 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT); 4473 4469 return -EFSCORRUPTED; 4474 4470 } 4475 4471 ··· 4479 4473 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u", 4480 4474 sit_valid_blocks[DATA], sit_valid_blocks[NODE], 4481 4475 valid_user_blocks(sbi)); 4476 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT); 4482 4477 return -EFSCORRUPTED; 4483 4478 } 4484 4479 ··· 4630 4623 f2fs_err(sbi, 4631 4624 "Current segment has invalid alloc_type:%d", 4632 4625 curseg->alloc_type); 4626 + f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); 4633 4627 return -EFSCORRUPTED; 4634 4628 } 4635 4629 ··· 4648 4640 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u", 4649 4641 i, curseg->segno, curseg->alloc_type, 4650 4642 curseg->next_blkoff, blkofs); 4643 + f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); 4651 4644 return -EFSCORRUPTED; 4652 4645 } 4653 4646 }
+2
fs/f2fs/segment.h
··· 753 753 f2fs_err(sbi, "Mismatch valid blocks %d vs. %d", 754 754 GET_SIT_VBLOCKS(raw_sit), valid_blocks); 755 755 set_sbi_flag(sbi, SBI_NEED_FSCK); 756 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT); 756 757 return -EFSCORRUPTED; 757 758 } 758 759 ··· 768 767 f2fs_err(sbi, "Wrong valid blocks %d or segno %u", 769 768 GET_SIT_VBLOCKS(raw_sit), segno); 770 769 set_sbi_flag(sbi, SBI_NEED_FSCK); 770 + f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT); 771 771 return -EFSCORRUPTED; 772 772 } 773 773 return 0;
+82 -14
fs/f2fs/super.c
··· 301 301 302 302 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) 303 303 { 304 - block_t limit = min((sbi->user_block_count << 1) / 1000, 304 + block_t limit = min((sbi->user_block_count >> 3), 305 305 sbi->user_block_count - sbi->reserved_blocks); 306 306 307 - /* limit is 0.2% */ 307 + /* limit is 12.5% */ 308 308 if (test_opt(sbi, RESERVE_ROOT) && 309 309 F2FS_OPTION(sbi).root_reserved_blocks > limit) { 310 310 F2FS_OPTION(sbi).root_reserved_blocks = limit; ··· 1342 1342 return -EINVAL; 1343 1343 } 1344 1344 1345 + if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) { 1346 + f2fs_err(sbi, "LFS not compatible with ATGC"); 1347 + return -EINVAL; 1348 + } 1349 + 1345 1350 if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) { 1346 1351 f2fs_err(sbi, "Allow to mount readonly mode only"); 1347 1352 return -EROFS; ··· 1671 1666 if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) 1672 1667 return -EINVAL; 1673 1668 1674 - /* ensure no checkpoint required */ 1675 - if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list)) 1676 - return -EINVAL; 1669 + /* Let's flush checkpoints and stop the thread. */ 1670 + f2fs_flush_ckpt_thread(F2FS_SB(sb)); 1677 1671 1678 1672 /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */ 1679 1673 set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING); ··· 2185 2181 f2fs_up_write(&sbi->gc_lock); 2186 2182 2187 2183 f2fs_sync_fs(sbi->sb, 1); 2184 + 2185 + /* Let's ensure there's no pending checkpoint anymore */ 2186 + f2fs_flush_ckpt_thread(sbi); 2188 2187 } 2189 2188 2190 2189 static int f2fs_remount(struct super_block *sb, int *flags, char *data) ··· 2353 2346 f2fs_stop_ckpt_thread(sbi); 2354 2347 need_restart_ckpt = true; 2355 2348 } else { 2349 + /* Flush if the prevous checkpoint, if exists. 
*/ 2350 + f2fs_flush_ckpt_thread(sbi); 2351 + 2356 2352 err = f2fs_start_ckpt_thread(sbi); 2357 2353 if (err) { 2358 2354 f2fs_err(sbi, ··· 2475 2465 size_t toread; 2476 2466 loff_t i_size = i_size_read(inode); 2477 2467 struct page *page; 2478 - char *kaddr; 2479 2468 2480 2469 if (off > i_size) 2481 2470 return 0; ··· 2507 2498 return -EIO; 2508 2499 } 2509 2500 2510 - kaddr = kmap_atomic(page); 2511 - memcpy(data, kaddr + offset, tocopy); 2512 - kunmap_atomic(kaddr); 2501 + memcpy_from_page(data, page, offset, tocopy); 2513 2502 f2fs_put_page(page, 1); 2514 2503 2515 2504 offset = 0; ··· 2529 2522 size_t towrite = len; 2530 2523 struct page *page; 2531 2524 void *fsdata = NULL; 2532 - char *kaddr; 2533 2525 int err = 0; 2534 2526 int tocopy; 2535 2527 ··· 2547 2541 break; 2548 2542 } 2549 2543 2550 - kaddr = kmap_atomic(page); 2551 - memcpy(kaddr + offset, data, tocopy); 2552 - kunmap_atomic(kaddr); 2553 - flush_dcache_page(page); 2544 + memcpy_to_page(page, offset, data, tocopy); 2554 2545 2555 2546 a_ops->write_end(NULL, mapping, off, tocopy, tocopy, 2556 2547 page, fsdata); ··· 3846 3843 return err; 3847 3844 } 3848 3845 3846 + void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason) 3847 + { 3848 + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 3849 + int err; 3850 + 3851 + f2fs_down_write(&sbi->sb_lock); 3852 + 3853 + if (raw_super->s_stop_reason[reason] < ((1 << BITS_PER_BYTE) - 1)) 3854 + raw_super->s_stop_reason[reason]++; 3855 + 3856 + err = f2fs_commit_super(sbi, false); 3857 + if (err) 3858 + f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d", 3859 + reason, err); 3860 + f2fs_up_write(&sbi->sb_lock); 3861 + } 3862 + 3863 + static void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag) 3864 + { 3865 + spin_lock(&sbi->error_lock); 3866 + if (!test_bit(flag, (unsigned long *)sbi->errors)) { 3867 + set_bit(flag, (unsigned long *)sbi->errors); 3868 + sbi->error_dirty = true; 3869 + } 3870 + 
spin_unlock(&sbi->error_lock); 3871 + } 3872 + 3873 + static bool f2fs_update_errors(struct f2fs_sb_info *sbi) 3874 + { 3875 + bool need_update = false; 3876 + 3877 + spin_lock(&sbi->error_lock); 3878 + if (sbi->error_dirty) { 3879 + memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, 3880 + MAX_F2FS_ERRORS); 3881 + sbi->error_dirty = false; 3882 + need_update = true; 3883 + } 3884 + spin_unlock(&sbi->error_lock); 3885 + 3886 + return need_update; 3887 + } 3888 + 3889 + void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error) 3890 + { 3891 + int err; 3892 + 3893 + f2fs_save_errors(sbi, error); 3894 + 3895 + f2fs_down_write(&sbi->sb_lock); 3896 + 3897 + if (!f2fs_update_errors(sbi)) 3898 + goto out_unlock; 3899 + 3900 + err = f2fs_commit_super(sbi, false); 3901 + if (err) 3902 + f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d", 3903 + error, err); 3904 + out_unlock: 3905 + f2fs_up_write(&sbi->sb_lock); 3906 + } 3907 + 3849 3908 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) 3850 3909 { 3851 3910 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); ··· 4254 4189 f2fs_err(sbi, "Failed to initialize post read workqueue"); 4255 4190 goto free_devices; 4256 4191 } 4192 + 4193 + spin_lock_init(&sbi->error_lock); 4194 + memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS); 4257 4195 4258 4196 sbi->total_valid_node_count = 4259 4197 le32_to_cpu(sbi->ckpt->valid_node_count);
+8 -1
fs/f2fs/sysfs.c
··· 128 128 return sprintf(buf, "%lx\n", sbi->s_flag); 129 129 } 130 130 131 + static ssize_t cp_status_show(struct f2fs_attr *a, 132 + struct f2fs_sb_info *sbi, char *buf) 133 + { 134 + return sprintf(buf, "%x\n", le32_to_cpu(F2FS_CKPT(sbi)->ckpt_flags)); 135 + } 136 + 131 137 static ssize_t pending_discard_show(struct f2fs_attr *a, 132 138 struct f2fs_sb_info *sbi, char *buf) 133 139 { ··· 533 527 534 528 if (!strcmp(a->attr.name, "gc_urgent_high_remaining")) { 535 529 spin_lock(&sbi->gc_urgent_high_lock); 536 - sbi->gc_urgent_high_limited = t != 0; 537 530 sbi->gc_urgent_high_remaining = t; 538 531 spin_unlock(&sbi->gc_urgent_high_lock); 539 532 ··· 1035 1030 ATTRIBUTE_GROUPS(f2fs_feat); 1036 1031 1037 1032 F2FS_GENERAL_RO_ATTR(sb_status); 1033 + F2FS_GENERAL_RO_ATTR(cp_status); 1038 1034 static struct attribute *f2fs_stat_attrs[] = { 1039 1035 ATTR_LIST(sb_status), 1036 + ATTR_LIST(cp_status), 1040 1037 NULL, 1041 1038 }; 1042 1039 ATTRIBUTE_GROUPS(f2fs_stat);
+4 -8
fs/f2fs/verity.c
··· 47 47 size_t n = min_t(size_t, count, 48 48 PAGE_SIZE - offset_in_page(pos)); 49 49 struct page *page; 50 - void *addr; 51 50 52 51 page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT, 53 52 NULL); 54 53 if (IS_ERR(page)) 55 54 return PTR_ERR(page); 56 55 57 - addr = kmap_atomic(page); 58 - memcpy(buf, addr + offset_in_page(pos), n); 59 - kunmap_atomic(addr); 56 + memcpy_from_page(buf, page, offset_in_page(pos), n); 60 57 61 58 put_page(page); 62 59 ··· 82 85 PAGE_SIZE - offset_in_page(pos)); 83 86 struct page *page; 84 87 void *fsdata; 85 - void *addr; 86 88 int res; 87 89 88 90 res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata); 89 91 if (res) 90 92 return res; 91 93 92 - addr = kmap_atomic(page); 93 - memcpy(addr + offset_in_page(pos), buf, n); 94 - kunmap_atomic(addr); 94 + memcpy_to_page(page, offset_in_page(pos), buf, n); 95 95 96 96 res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata); 97 97 if (res < 0) ··· 240 246 if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes || 241 247 pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) { 242 248 f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr"); 249 + f2fs_handle_error(F2FS_I_SB(inode), 250 + ERROR_CORRUPTED_VERITY_XATTR); 243 251 return -EFSCORRUPTED; 244 252 } 245 253 if (buf_size) {
+8
fs/f2fs/xattr.c
··· 367 367 inode->i_ino); 368 368 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 369 369 err = -EFSCORRUPTED; 370 + f2fs_handle_error(F2FS_I_SB(inode), 371 + ERROR_CORRUPTED_XATTR); 370 372 goto out; 371 373 } 372 374 check: ··· 585 583 inode->i_ino); 586 584 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 587 585 error = -EFSCORRUPTED; 586 + f2fs_handle_error(F2FS_I_SB(inode), 587 + ERROR_CORRUPTED_XATTR); 588 588 goto cleanup; 589 589 } 590 590 ··· 662 658 inode->i_ino); 663 659 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 664 660 error = -EFSCORRUPTED; 661 + f2fs_handle_error(F2FS_I_SB(inode), 662 + ERROR_CORRUPTED_XATTR); 665 663 goto exit; 666 664 } 667 665 ··· 690 684 inode->i_ino, ENTRY_SIZE(last)); 691 685 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 692 686 error = -EFSCORRUPTED; 687 + f2fs_handle_error(F2FS_I_SB(inode), 688 + ERROR_CORRUPTED_XATTR); 693 689 goto exit; 694 690 } 695 691 last = XATTR_NEXT_ENTRY(last);
+39 -1
include/linux/f2fs_fs.h
··· 73 73 __le32 total_segments; 74 74 } __packed; 75 75 76 + /* reason of stop_checkpoint */ 77 + enum stop_cp_reason { 78 + STOP_CP_REASON_SHUTDOWN, 79 + STOP_CP_REASON_FAULT_INJECT, 80 + STOP_CP_REASON_META_PAGE, 81 + STOP_CP_REASON_WRITE_FAIL, 82 + STOP_CP_REASON_CORRUPTED_SUMMARY, 83 + STOP_CP_REASON_UPDATE_INODE, 84 + STOP_CP_REASON_FLUSH_FAIL, 85 + STOP_CP_REASON_MAX, 86 + }; 87 + 88 + #define MAX_STOP_REASON 32 89 + 90 + /* detail reason for EFSCORRUPTED */ 91 + enum f2fs_error { 92 + ERROR_CORRUPTED_CLUSTER, 93 + ERROR_FAIL_DECOMPRESSION, 94 + ERROR_INVALID_BLKADDR, 95 + ERROR_CORRUPTED_DIRENT, 96 + ERROR_CORRUPTED_INODE, 97 + ERROR_INCONSISTENT_SUMMARY, 98 + ERROR_INCONSISTENT_FOOTER, 99 + ERROR_INCONSISTENT_SUM_TYPE, 100 + ERROR_CORRUPTED_JOURNAL, 101 + ERROR_INCONSISTENT_NODE_COUNT, 102 + ERROR_INCONSISTENT_BLOCK_COUNT, 103 + ERROR_INVALID_CURSEG, 104 + ERROR_INCONSISTENT_SIT, 105 + ERROR_CORRUPTED_VERITY_XATTR, 106 + ERROR_CORRUPTED_XATTR, 107 + ERROR_MAX, 108 + }; 109 + 110 + #define MAX_F2FS_ERRORS 16 111 + 76 112 struct f2fs_super_block { 77 113 __le32 magic; /* Magic Number */ 78 114 __le16 major_ver; /* Major Version */ ··· 152 116 __u8 hot_ext_count; /* # of hot file extension */ 153 117 __le16 s_encoding; /* Filename charset encoding */ 154 118 __le16 s_encoding_flags; /* Filename charset encoding flags */ 155 - __u8 reserved[306]; /* valid reserved region */ 119 + __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */ 120 + __u8 s_errors[MAX_F2FS_ERRORS]; /* reason of image corrupts */ 121 + __u8 reserved[258]; /* valid reserved region */ 156 122 __le32 crc; /* checksum of superblock */ 157 123 } __packed; 158 124
+28 -9
include/trace/events/f2fs.h
··· 1578 1578 TRACE_EVENT(f2fs_update_extent_tree_range, 1579 1579 1580 1580 TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr, 1581 - unsigned int len), 1581 + unsigned int len, 1582 + unsigned int c_len), 1582 1583 1583 - TP_ARGS(inode, pgofs, blkaddr, len), 1584 + TP_ARGS(inode, pgofs, blkaddr, len, c_len), 1584 1585 1585 1586 TP_STRUCT__entry( 1586 1587 __field(dev_t, dev) ··· 1589 1588 __field(unsigned int, pgofs) 1590 1589 __field(u32, blk) 1591 1590 __field(unsigned int, len) 1591 + __field(unsigned int, c_len) 1592 1592 ), 1593 1593 1594 1594 TP_fast_assign( ··· 1598 1596 __entry->pgofs = pgofs; 1599 1597 __entry->blk = blkaddr; 1600 1598 __entry->len = len; 1599 + __entry->c_len = c_len; 1601 1600 ), 1602 1601 1603 1602 TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " 1604 - "blkaddr = %u, len = %u", 1603 + "blkaddr = %u, len = %u, " 1604 + "c_len = %u", 1605 1605 show_dev_ino(__entry), 1606 1606 __entry->pgofs, 1607 1607 __entry->blk, 1608 - __entry->len) 1608 + __entry->len, 1609 + __entry->c_len) 1609 1610 ); 1610 1611 1611 1612 TRACE_EVENT(f2fs_shrink_extent_tree, ··· 1828 1823 __field(unsigned long long, app_bio) 1829 1824 __field(unsigned long long, app_wio) 1830 1825 __field(unsigned long long, app_mio) 1826 + __field(unsigned long long, app_bcdio) 1827 + __field(unsigned long long, app_mcdio) 1831 1828 __field(unsigned long long, fs_dio) 1829 + __field(unsigned long long, fs_cdio) 1832 1830 __field(unsigned long long, fs_nio) 1833 1831 __field(unsigned long long, fs_mio) 1834 1832 __field(unsigned long long, fs_gc_dio) ··· 1843 1835 __field(unsigned long long, app_brio) 1844 1836 __field(unsigned long long, app_rio) 1845 1837 __field(unsigned long long, app_mrio) 1838 + __field(unsigned long long, app_bcrio) 1839 + __field(unsigned long long, app_mcrio) 1846 1840 __field(unsigned long long, fs_drio) 1847 1841 __field(unsigned long long, fs_gdrio) 1848 1842 __field(unsigned long long, fs_cdrio) ··· 1859 1849 __entry->app_bio = 
iostat[APP_BUFFERED_IO]; 1860 1850 __entry->app_wio = iostat[APP_WRITE_IO]; 1861 1851 __entry->app_mio = iostat[APP_MAPPED_IO]; 1852 + __entry->app_bcdio = iostat[APP_BUFFERED_CDATA_IO]; 1853 + __entry->app_mcdio = iostat[APP_MAPPED_CDATA_IO]; 1862 1854 __entry->fs_dio = iostat[FS_DATA_IO]; 1855 + __entry->fs_cdio = iostat[FS_CDATA_IO]; 1863 1856 __entry->fs_nio = iostat[FS_NODE_IO]; 1864 1857 __entry->fs_mio = iostat[FS_META_IO]; 1865 1858 __entry->fs_gc_dio = iostat[FS_GC_DATA_IO]; ··· 1874 1861 __entry->app_brio = iostat[APP_BUFFERED_READ_IO]; 1875 1862 __entry->app_rio = iostat[APP_READ_IO]; 1876 1863 __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; 1864 + __entry->app_bcrio = iostat[APP_BUFFERED_CDATA_READ_IO]; 1865 + __entry->app_mcrio = iostat[APP_MAPPED_CDATA_READ_IO]; 1877 1866 __entry->fs_drio = iostat[FS_DATA_READ_IO]; 1878 1867 __entry->fs_gdrio = iostat[FS_GDATA_READ_IO]; 1879 1868 __entry->fs_cdrio = iostat[FS_CDATA_READ_IO]; ··· 1885 1870 ), 1886 1871 1887 1872 TP_printk("dev = (%d,%d), " 1888 - "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], " 1889 - "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], " 1873 + "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, " 1874 + "compr(buffered=%llu, mapped=%llu)], " 1875 + "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu], " 1890 1876 "gc [data=%llu, node=%llu], " 1891 1877 "cp [data=%llu, node=%llu, meta=%llu], " 1892 1878 "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " 1893 - "fs [data=%llu, (gc_data=%llu, compr_data=%llu), " 1879 + "compr(buffered=%llu, mapped=%llu)], " 1880 + "fs [data=%llu, (gc_data=%llu, cdata=%llu), " 1894 1881 "node=%llu, meta=%llu]", 1895 1882 show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, 1896 - __entry->app_bio, __entry->app_mio, __entry->fs_dio, 1883 + __entry->app_bio, __entry->app_mio, __entry->app_bcdio, 1884 + __entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio, 1897 1885 __entry->fs_nio, __entry->fs_mio, 
__entry->fs_discard, 1898 1886 __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, 1899 1887 __entry->fs_cp_nio, __entry->fs_cp_mio, 1900 1888 __entry->app_rio, __entry->app_drio, __entry->app_brio, 1901 - __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio, 1889 + __entry->app_mrio, __entry->app_bcrio, __entry->app_mcrio, 1890 + __entry->fs_drio, __entry->fs_gdrio, 1902 1891 __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) 1903 1892 ); 1904 1893