Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

f2fs: move f2fs to use reader-unfair rwsems

f2fs rw_semaphores work better if writers can starve readers,
especially for the checkpoint thread, because writers are strictly
more important than reader threads. This prevents significant priority
inversion between low-priority readers that blocked while trying to
acquire the read lock and a second acquisition of the write lock that
might be blocking high priority work.

Signed-off-by: Tim Murray <timmurray@google.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

Authored by Tim Murray and committed by Jaegeuk Kim.
e4544b63 dd81e1c7

+342 -274
+17 -17
fs/f2fs/checkpoint.c
··· 351 351 goto skip_write; 352 352 353 353 /* if locked failed, cp will flush dirty pages instead */ 354 - if (!down_write_trylock(&sbi->cp_global_sem)) 354 + if (!f2fs_down_write_trylock(&sbi->cp_global_sem)) 355 355 goto skip_write; 356 356 357 357 trace_f2fs_writepages(mapping->host, wbc, META); 358 358 diff = nr_pages_to_write(sbi, META, wbc); 359 359 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); 360 - up_write(&sbi->cp_global_sem); 360 + f2fs_up_write(&sbi->cp_global_sem); 361 361 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); 362 362 return 0; 363 363 ··· 1159 1159 if (!is_journalled_quota(sbi)) 1160 1160 return false; 1161 1161 1162 - if (!down_write_trylock(&sbi->quota_sem)) 1162 + if (!f2fs_down_write_trylock(&sbi->quota_sem)) 1163 1163 return true; 1164 1164 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { 1165 1165 ret = false; ··· 1171 1171 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) { 1172 1172 ret = true; 1173 1173 } 1174 - up_write(&sbi->quota_sem); 1174 + f2fs_up_write(&sbi->quota_sem); 1175 1175 return ret; 1176 1176 } 1177 1177 ··· 1228 1228 * POR: we should ensure that there are no dirty node pages 1229 1229 * until finishing nat/sit flush. inode->i_blocks can be updated. 
1230 1230 */ 1231 - down_write(&sbi->node_change); 1231 + f2fs_down_write(&sbi->node_change); 1232 1232 1233 1233 if (get_pages(sbi, F2FS_DIRTY_IMETA)) { 1234 - up_write(&sbi->node_change); 1234 + f2fs_up_write(&sbi->node_change); 1235 1235 f2fs_unlock_all(sbi); 1236 1236 err = f2fs_sync_inode_meta(sbi); 1237 1237 if (err) ··· 1241 1241 } 1242 1242 1243 1243 retry_flush_nodes: 1244 - down_write(&sbi->node_write); 1244 + f2fs_down_write(&sbi->node_write); 1245 1245 1246 1246 if (get_pages(sbi, F2FS_DIRTY_NODES)) { 1247 - up_write(&sbi->node_write); 1247 + f2fs_up_write(&sbi->node_write); 1248 1248 atomic_inc(&sbi->wb_sync_req[NODE]); 1249 1249 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO); 1250 1250 atomic_dec(&sbi->wb_sync_req[NODE]); 1251 1251 if (err) { 1252 - up_write(&sbi->node_change); 1252 + f2fs_up_write(&sbi->node_change); 1253 1253 f2fs_unlock_all(sbi); 1254 1254 return err; 1255 1255 } ··· 1262 1262 * dirty node blocks and some checkpoint values by block allocation. 
1263 1263 */ 1264 1264 __prepare_cp_block(sbi); 1265 - up_write(&sbi->node_change); 1265 + f2fs_up_write(&sbi->node_change); 1266 1266 return err; 1267 1267 } 1268 1268 1269 1269 static void unblock_operations(struct f2fs_sb_info *sbi) 1270 1270 { 1271 - up_write(&sbi->node_write); 1271 + f2fs_up_write(&sbi->node_write); 1272 1272 f2fs_unlock_all(sbi); 1273 1273 } 1274 1274 ··· 1612 1612 f2fs_warn(sbi, "Start checkpoint disabled!"); 1613 1613 } 1614 1614 if (cpc->reason != CP_RESIZE) 1615 - down_write(&sbi->cp_global_sem); 1615 + f2fs_down_write(&sbi->cp_global_sem); 1616 1616 1617 1617 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && 1618 1618 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || ··· 1693 1693 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); 1694 1694 out: 1695 1695 if (cpc->reason != CP_RESIZE) 1696 - up_write(&sbi->cp_global_sem); 1696 + f2fs_up_write(&sbi->cp_global_sem); 1697 1697 return err; 1698 1698 } 1699 1699 ··· 1741 1741 struct cp_control cpc = { .reason = CP_SYNC, }; 1742 1742 int err; 1743 1743 1744 - down_write(&sbi->gc_lock); 1744 + f2fs_down_write(&sbi->gc_lock); 1745 1745 err = f2fs_write_checkpoint(sbi, &cpc); 1746 - up_write(&sbi->gc_lock); 1746 + f2fs_up_write(&sbi->gc_lock); 1747 1747 1748 1748 return err; 1749 1749 } ··· 1831 1831 if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) { 1832 1832 int ret; 1833 1833 1834 - down_write(&sbi->gc_lock); 1834 + f2fs_down_write(&sbi->gc_lock); 1835 1835 ret = f2fs_write_checkpoint(sbi, &cpc); 1836 - up_write(&sbi->gc_lock); 1836 + f2fs_up_write(&sbi->gc_lock); 1837 1837 1838 1838 return ret; 1839 1839 }
+3 -3
fs/f2fs/compress.c
··· 1267 1267 * checkpoint. This can only happen to quota writes which can cause 1268 1268 * the below discard race condition. 1269 1269 */ 1270 - down_read(&sbi->node_write); 1270 + f2fs_down_read(&sbi->node_write); 1271 1271 } else if (!f2fs_trylock_op(sbi)) { 1272 1272 goto out_free; 1273 1273 } ··· 1384 1384 1385 1385 f2fs_put_dnode(&dn); 1386 1386 if (IS_NOQUOTA(inode)) 1387 - up_read(&sbi->node_write); 1387 + f2fs_up_read(&sbi->node_write); 1388 1388 else 1389 1389 f2fs_unlock_op(sbi); 1390 1390 ··· 1410 1410 f2fs_put_dnode(&dn); 1411 1411 out_unlock_op: 1412 1412 if (IS_NOQUOTA(inode)) 1413 - up_read(&sbi->node_write); 1413 + f2fs_up_read(&sbi->node_write); 1414 1414 else 1415 1415 f2fs_unlock_op(sbi); 1416 1416 out_free:
+25 -25
fs/f2fs/data.c
··· 590 590 enum page_type btype = PAGE_TYPE_OF_BIO(type); 591 591 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; 592 592 593 - down_write(&io->io_rwsem); 593 + f2fs_down_write(&io->io_rwsem); 594 594 595 595 /* change META to META_FLUSH in the checkpoint procedure */ 596 596 if (type >= META_FLUSH) { ··· 601 601 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; 602 602 } 603 603 __submit_merged_bio(io); 604 - up_write(&io->io_rwsem); 604 + f2fs_up_write(&io->io_rwsem); 605 605 } 606 606 607 607 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, ··· 616 616 enum page_type btype = PAGE_TYPE_OF_BIO(type); 617 617 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; 618 618 619 - down_read(&io->io_rwsem); 619 + f2fs_down_read(&io->io_rwsem); 620 620 ret = __has_merged_page(io->bio, inode, page, ino); 621 - up_read(&io->io_rwsem); 621 + f2fs_up_read(&io->io_rwsem); 622 622 } 623 623 if (ret) 624 624 __f2fs_submit_merged_write(sbi, type, temp); ··· 742 742 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) 743 743 f2fs_bug_on(sbi, 1); 744 744 745 - down_write(&io->bio_list_lock); 745 + f2fs_down_write(&io->bio_list_lock); 746 746 list_add_tail(&be->list, &io->bio_list); 747 - up_write(&io->bio_list_lock); 747 + f2fs_up_write(&io->bio_list_lock); 748 748 } 749 749 750 750 static void del_bio_entry(struct bio_entry *be) ··· 766 766 struct list_head *head = &io->bio_list; 767 767 struct bio_entry *be; 768 768 769 - down_write(&io->bio_list_lock); 769 + f2fs_down_write(&io->bio_list_lock); 770 770 list_for_each_entry(be, head, list) { 771 771 if (be->bio != *bio) 772 772 continue; ··· 790 790 __submit_bio(sbi, *bio, DATA); 791 791 break; 792 792 } 793 - up_write(&io->bio_list_lock); 793 + f2fs_up_write(&io->bio_list_lock); 794 794 } 795 795 796 796 if (ret) { ··· 816 816 if (list_empty(head)) 817 817 continue; 818 818 819 - down_read(&io->bio_list_lock); 819 + f2fs_down_read(&io->bio_list_lock); 820 820 list_for_each_entry(be, head, list) { 821 821 
if (target) 822 822 found = (target == be->bio); ··· 826 826 if (found) 827 827 break; 828 828 } 829 - up_read(&io->bio_list_lock); 829 + f2fs_up_read(&io->bio_list_lock); 830 830 831 831 if (!found) 832 832 continue; 833 833 834 834 found = false; 835 835 836 - down_write(&io->bio_list_lock); 836 + f2fs_down_write(&io->bio_list_lock); 837 837 list_for_each_entry(be, head, list) { 838 838 if (target) 839 839 found = (target == be->bio); ··· 846 846 break; 847 847 } 848 848 } 849 - up_write(&io->bio_list_lock); 849 + f2fs_up_write(&io->bio_list_lock); 850 850 } 851 851 852 852 if (found) ··· 906 906 907 907 f2fs_bug_on(sbi, is_read_io(fio->op)); 908 908 909 - down_write(&io->io_rwsem); 909 + f2fs_down_write(&io->io_rwsem); 910 910 next: 911 911 if (fio->in_list) { 912 912 spin_lock(&io->io_lock); ··· 973 973 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || 974 974 !f2fs_is_checkpoint_ready(sbi)) 975 975 __submit_merged_bio(io); 976 - up_write(&io->io_rwsem); 976 + f2fs_up_write(&io->io_rwsem); 977 977 } 978 978 979 979 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, ··· 1383 1383 { 1384 1384 if (flag == F2FS_GET_BLOCK_PRE_AIO) { 1385 1385 if (lock) 1386 - down_read(&sbi->node_change); 1386 + f2fs_down_read(&sbi->node_change); 1387 1387 else 1388 - up_read(&sbi->node_change); 1388 + f2fs_up_read(&sbi->node_change); 1389 1389 } else { 1390 1390 if (lock) 1391 1391 f2fs_lock_op(sbi); ··· 2749 2749 * the below discard race condition. 
2750 2750 */ 2751 2751 if (IS_NOQUOTA(inode)) 2752 - down_read(&sbi->node_write); 2752 + f2fs_down_read(&sbi->node_write); 2753 2753 2754 2754 fio.need_lock = LOCK_DONE; 2755 2755 err = f2fs_do_write_data_page(&fio); 2756 2756 2757 2757 if (IS_NOQUOTA(inode)) 2758 - up_read(&sbi->node_write); 2758 + f2fs_up_read(&sbi->node_write); 2759 2759 2760 2760 goto done; 2761 2761 } ··· 3213 3213 3214 3214 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */ 3215 3215 if (to > i_size && !f2fs_verity_in_progress(inode)) { 3216 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3216 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3217 3217 filemap_invalidate_lock(inode->i_mapping); 3218 3218 3219 3219 truncate_pagecache(inode, i_size); 3220 3220 f2fs_truncate_blocks(inode, i_size, true); 3221 3221 3222 3222 filemap_invalidate_unlock(inode->i_mapping); 3223 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3223 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3224 3224 } 3225 3225 } 3226 3226 ··· 3721 3721 unsigned int end_sec = secidx + blkcnt / blk_per_sec; 3722 3722 int ret = 0; 3723 3723 3724 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3724 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3725 3725 filemap_invalidate_lock(inode->i_mapping); 3726 3726 3727 3727 set_inode_flag(inode, FI_ALIGNED_WRITE); 3728 3728 3729 3729 for (; secidx < end_sec; secidx++) { 3730 - down_write(&sbi->pin_sem); 3730 + f2fs_down_write(&sbi->pin_sem); 3731 3731 3732 3732 f2fs_lock_op(sbi); 3733 3733 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); ··· 3741 3741 3742 3742 page = f2fs_get_lock_data_page(inode, blkidx, true); 3743 3743 if (IS_ERR(page)) { 3744 - up_write(&sbi->pin_sem); 3744 + f2fs_up_write(&sbi->pin_sem); 3745 3745 ret = PTR_ERR(page); 3746 3746 goto done; 3747 3747 } ··· 3754 3754 3755 3755 ret = filemap_fdatawrite(inode->i_mapping); 3756 3756 3757 - up_write(&sbi->pin_sem); 3757 + f2fs_up_write(&sbi->pin_sem); 3758 3758 3759 3759 if 
(ret) 3760 3760 break; ··· 3765 3765 clear_inode_flag(inode, FI_ALIGNED_WRITE); 3766 3766 3767 3767 filemap_invalidate_unlock(inode->i_mapping); 3768 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3768 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3769 3769 3770 3770 return ret; 3771 3771 }
+6 -6
fs/f2fs/dir.c
··· 766 766 f2fs_wait_on_page_writeback(dentry_page, DATA, true, true); 767 767 768 768 if (inode) { 769 - down_write(&F2FS_I(inode)->i_sem); 769 + f2fs_down_write(&F2FS_I(inode)->i_sem); 770 770 page = f2fs_init_inode_metadata(inode, dir, fname, NULL); 771 771 if (IS_ERR(page)) { 772 772 err = PTR_ERR(page); ··· 793 793 f2fs_update_parent_metadata(dir, inode, current_depth); 794 794 fail: 795 795 if (inode) 796 - up_write(&F2FS_I(inode)->i_sem); 796 + f2fs_up_write(&F2FS_I(inode)->i_sem); 797 797 798 798 f2fs_put_page(dentry_page, 1); 799 799 ··· 858 858 struct page *page; 859 859 int err = 0; 860 860 861 - down_write(&F2FS_I(inode)->i_sem); 861 + f2fs_down_write(&F2FS_I(inode)->i_sem); 862 862 page = f2fs_init_inode_metadata(inode, dir, NULL, NULL); 863 863 if (IS_ERR(page)) { 864 864 err = PTR_ERR(page); ··· 869 869 clear_inode_flag(inode, FI_NEW_INODE); 870 870 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 871 871 fail: 872 - up_write(&F2FS_I(inode)->i_sem); 872 + f2fs_up_write(&F2FS_I(inode)->i_sem); 873 873 return err; 874 874 } 875 875 ··· 877 877 { 878 878 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); 879 879 880 - down_write(&F2FS_I(inode)->i_sem); 880 + f2fs_down_write(&F2FS_I(inode)->i_sem); 881 881 882 882 if (S_ISDIR(inode->i_mode)) 883 883 f2fs_i_links_write(dir, false); ··· 888 888 f2fs_i_links_write(inode, false); 889 889 f2fs_i_size_write(inode, 0); 890 890 } 891 - up_write(&F2FS_I(inode)->i_sem); 891 + f2fs_up_write(&F2FS_I(inode)->i_sem); 892 892 893 893 if (inode->i_nlink == 0) 894 894 f2fs_add_orphan_inode(inode);
+89 -21
fs/f2fs/f2fs.h
··· 123 123 124 124 #define COMPRESS_EXT_NUM 16 125 125 126 + /* 127 + * An implementation of an rwsem that is explicitly unfair to readers. This 128 + * prevents priority inversion when a low-priority reader acquires the read lock 129 + * while sleeping on the write lock but the write lock is needed by 130 + * higher-priority clients. 131 + */ 132 + 133 + struct f2fs_rwsem { 134 + struct rw_semaphore internal_rwsem; 135 + wait_queue_head_t read_waiters; 136 + }; 137 + 126 138 struct f2fs_mount_info { 127 139 unsigned int opt; 128 140 int write_io_size_bits; /* Write IO size bits */ ··· 764 752 765 753 /* Use below internally in f2fs*/ 766 754 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ 767 - struct rw_semaphore i_sem; /* protect fi info */ 755 + struct f2fs_rwsem i_sem; /* protect fi info */ 768 756 atomic_t dirty_pages; /* # of dirty pages */ 769 757 f2fs_hash_t chash; /* hash value of given file name */ 770 758 unsigned int clevel; /* maximum level of given file name */ ··· 789 777 struct extent_tree *extent_tree; /* cached extent_tree entry */ 790 778 791 779 /* avoid racing between foreground op and gc */ 792 - struct rw_semaphore i_gc_rwsem[2]; 793 - struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */ 780 + struct f2fs_rwsem i_gc_rwsem[2]; 781 + struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */ 794 782 795 783 int i_extra_isize; /* size of extra space located in i_addr */ 796 784 kprojid_t i_projid; /* id for project quota */ ··· 916 904 /* NAT cache management */ 917 905 struct radix_tree_root nat_root;/* root of the nat entry cache */ 918 906 struct radix_tree_root nat_set_root;/* root of the nat set cache */ 919 - struct rw_semaphore nat_tree_lock; /* protect nat entry tree */ 907 + struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */ 920 908 struct list_head nat_entries; /* cached nat entry list (clean) */ 921 909 spinlock_t nat_list_lock; /* 
protect clean nat entry list */ 922 910 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ ··· 1029 1017 struct dirty_seglist_info *dirty_info; /* dirty segment information */ 1030 1018 struct curseg_info *curseg_array; /* active segment information */ 1031 1019 1032 - struct rw_semaphore curseg_lock; /* for preventing curseg change */ 1020 + struct f2fs_rwsem curseg_lock; /* for preventing curseg change */ 1033 1021 1034 1022 block_t seg0_blkaddr; /* block address of 0'th segment */ 1035 1023 block_t main_blkaddr; /* start block address of main area */ ··· 1213 1201 struct bio *bio; /* bios to merge */ 1214 1202 sector_t last_block_in_bio; /* last block number */ 1215 1203 struct f2fs_io_info fio; /* store buffered io info. */ 1216 - struct rw_semaphore io_rwsem; /* blocking op for bio */ 1204 + struct f2fs_rwsem io_rwsem; /* blocking op for bio */ 1217 1205 spinlock_t io_lock; /* serialize DATA/NODE IOs */ 1218 1206 struct list_head io_list; /* track fios */ 1219 1207 struct list_head bio_list; /* bio entry list head */ 1220 - struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */ 1208 + struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */ 1221 1209 }; 1222 1210 1223 1211 #define FDEV(i) (sbi->devs[i]) ··· 1583 1571 struct super_block *sb; /* pointer to VFS super block */ 1584 1572 struct proc_dir_entry *s_proc; /* proc entry */ 1585 1573 struct f2fs_super_block *raw_super; /* raw super block pointer */ 1586 - struct rw_semaphore sb_lock; /* lock for raw super block */ 1574 + struct f2fs_rwsem sb_lock; /* lock for raw super block */ 1587 1575 int valid_super_block; /* valid super block no */ 1588 1576 unsigned long s_flag; /* flags for sbi */ 1589 1577 struct mutex writepages; /* mutex for writepages() */ ··· 1603 1591 /* for bio operations */ 1604 1592 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ 1605 1593 /* keep migration IO order for LFS mode */ 1606 - struct rw_semaphore 
io_order_lock; 1594 + struct f2fs_rwsem io_order_lock; 1607 1595 mempool_t *write_io_dummy; /* Dummy pages */ 1608 1596 1609 1597 /* for checkpoint */ ··· 1611 1599 int cur_cp_pack; /* remain current cp pack */ 1612 1600 spinlock_t cp_lock; /* for flag in ckpt */ 1613 1601 struct inode *meta_inode; /* cache meta blocks */ 1614 - struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */ 1615 - struct rw_semaphore cp_rwsem; /* blocking FS operations */ 1616 - struct rw_semaphore node_write; /* locking node writes */ 1617 - struct rw_semaphore node_change; /* locking node change */ 1602 + struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */ 1603 + struct f2fs_rwsem cp_rwsem; /* blocking FS operations */ 1604 + struct f2fs_rwsem node_write; /* locking node writes */ 1605 + struct f2fs_rwsem node_change; /* locking node change */ 1618 1606 wait_queue_head_t cp_wait; 1619 1607 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ 1620 1608 long interval_time[MAX_TIME]; /* to store thresholds */ ··· 1674 1662 block_t unusable_block_count; /* # of blocks saved by last cp */ 1675 1663 1676 1664 unsigned int nquota_files; /* # of quota sysfile */ 1677 - struct rw_semaphore quota_sem; /* blocking cp for flags */ 1665 + struct f2fs_rwsem quota_sem; /* blocking cp for flags */ 1678 1666 1679 1667 /* # of pages, see count_type */ 1680 1668 atomic_t nr_pages[NR_COUNT_TYPE]; ··· 1690 1678 struct f2fs_mount_info mount_opt; /* mount options */ 1691 1679 1692 1680 /* for cleaning operations */ 1693 - struct rw_semaphore gc_lock; /* 1681 + struct f2fs_rwsem gc_lock; /* 1694 1682 * semaphore for GC, avoid 1695 1683 * race between GC and GC or CP 1696 1684 */ ··· 1710 1698 1711 1699 /* threshold for gc trials on pinned files */ 1712 1700 u64 gc_pin_file_threshold; 1713 - struct rw_semaphore pin_sem; 1701 + struct f2fs_rwsem pin_sem; 1714 1702 1715 1703 /* maximum # of trials to find a victim segment for SSR and GC */ 1716 1704 unsigned int 
max_victim_search; ··· 2104 2092 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2105 2093 } 2106 2094 2095 + static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem) 2096 + { 2097 + init_rwsem(&sem->internal_rwsem); 2098 + init_waitqueue_head(&sem->read_waiters); 2099 + } 2100 + 2101 + static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) 2102 + { 2103 + return rwsem_is_locked(&sem->internal_rwsem); 2104 + } 2105 + 2106 + static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) 2107 + { 2108 + return rwsem_is_contended(&sem->internal_rwsem); 2109 + } 2110 + 2111 + static inline void f2fs_down_read(struct f2fs_rwsem *sem) 2112 + { 2113 + wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); 2114 + } 2115 + 2116 + static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) 2117 + { 2118 + return down_read_trylock(&sem->internal_rwsem); 2119 + } 2120 + 2121 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 2122 + static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) 2123 + { 2124 + down_read_nested(&sem->internal_rwsem, subclass); 2125 + } 2126 + #else 2127 + #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) 2128 + #endif 2129 + 2130 + static inline void f2fs_up_read(struct f2fs_rwsem *sem) 2131 + { 2132 + up_read(&sem->internal_rwsem); 2133 + } 2134 + 2135 + static inline void f2fs_down_write(struct f2fs_rwsem *sem) 2136 + { 2137 + down_write(&sem->internal_rwsem); 2138 + } 2139 + 2140 + static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) 2141 + { 2142 + return down_write_trylock(&sem->internal_rwsem); 2143 + } 2144 + 2145 + static inline void f2fs_up_write(struct f2fs_rwsem *sem) 2146 + { 2147 + up_write(&sem->internal_rwsem); 2148 + wake_up_all(&sem->read_waiters); 2149 + } 2150 + 2107 2151 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 2108 2152 { 2109 - down_read(&sbi->cp_rwsem); 2153 + f2fs_down_read(&sbi->cp_rwsem); 2110 2154 } 2111 2155 2112 2156 static inline int 
f2fs_trylock_op(struct f2fs_sb_info *sbi) ··· 2171 2103 f2fs_show_injection_info(sbi, FAULT_LOCK_OP); 2172 2104 return 0; 2173 2105 } 2174 - return down_read_trylock(&sbi->cp_rwsem); 2106 + return f2fs_down_read_trylock(&sbi->cp_rwsem); 2175 2107 } 2176 2108 2177 2109 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 2178 2110 { 2179 - up_read(&sbi->cp_rwsem); 2111 + f2fs_up_read(&sbi->cp_rwsem); 2180 2112 } 2181 2113 2182 2114 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 2183 2115 { 2184 - down_write(&sbi->cp_rwsem); 2116 + f2fs_down_write(&sbi->cp_rwsem); 2185 2117 } 2186 2118 2187 2119 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 2188 2120 { 2189 - up_write(&sbi->cp_rwsem); 2121 + f2fs_up_write(&sbi->cp_rwsem); 2190 2122 } 2191 2123 2192 2124 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
+56 -56
fs/f2fs/file.c
··· 237 237 struct f2fs_inode_info *fi = F2FS_I(inode); 238 238 nid_t pino; 239 239 240 - down_write(&fi->i_sem); 240 + f2fs_down_write(&fi->i_sem); 241 241 if (file_wrong_pino(inode) && inode->i_nlink == 1 && 242 242 get_parent_ino(inode, &pino)) { 243 243 f2fs_i_pino_write(inode, pino); 244 244 file_got_pino(inode); 245 245 } 246 - up_write(&fi->i_sem); 246 + f2fs_up_write(&fi->i_sem); 247 247 } 248 248 249 249 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, ··· 318 318 * Both of fdatasync() and fsync() are able to be recovered from 319 319 * sudden-power-off. 320 320 */ 321 - down_read(&F2FS_I(inode)->i_sem); 321 + f2fs_down_read(&F2FS_I(inode)->i_sem); 322 322 cp_reason = need_do_checkpoint(inode); 323 - up_read(&F2FS_I(inode)->i_sem); 323 + f2fs_up_read(&F2FS_I(inode)->i_sem); 324 324 325 325 if (cp_reason) { 326 326 /* all the dirty node pages should be flushed for POR */ ··· 958 958 return err; 959 959 } 960 960 961 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 961 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 962 962 filemap_invalidate_lock(inode->i_mapping); 963 963 964 964 truncate_setsize(inode, attr->ia_size); ··· 970 970 * larger than i_size. 
971 971 */ 972 972 filemap_invalidate_unlock(inode->i_mapping); 973 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 973 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 974 974 if (err) 975 975 return err; 976 976 ··· 1112 1112 blk_start = (loff_t)pg_start << PAGE_SHIFT; 1113 1113 blk_end = (loff_t)pg_end << PAGE_SHIFT; 1114 1114 1115 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1115 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1116 1116 filemap_invalidate_lock(inode->i_mapping); 1117 1117 1118 1118 truncate_pagecache_range(inode, blk_start, blk_end - 1); ··· 1122 1122 f2fs_unlock_op(sbi); 1123 1123 1124 1124 filemap_invalidate_unlock(inode->i_mapping); 1125 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1125 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1126 1126 } 1127 1127 } 1128 1128 ··· 1355 1355 f2fs_balance_fs(sbi, true); 1356 1356 1357 1357 /* avoid gc operation during block exchange */ 1358 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1358 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1359 1359 filemap_invalidate_lock(inode->i_mapping); 1360 1360 1361 1361 f2fs_lock_op(sbi); ··· 1365 1365 f2fs_unlock_op(sbi); 1366 1366 1367 1367 filemap_invalidate_unlock(inode->i_mapping); 1368 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1368 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1369 1369 return ret; 1370 1370 } 1371 1371 ··· 1500 1500 unsigned int end_offset; 1501 1501 pgoff_t end; 1502 1502 1503 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1503 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1504 1504 filemap_invalidate_lock(mapping); 1505 1505 1506 1506 truncate_pagecache_range(inode, ··· 1514 1514 if (ret) { 1515 1515 f2fs_unlock_op(sbi); 1516 1516 filemap_invalidate_unlock(mapping); 1517 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1517 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1518 1518 goto out; 1519 1519 } 1520 1520 ··· 1526 1526 1527 1527 f2fs_unlock_op(sbi); 1528 1528 
filemap_invalidate_unlock(mapping); 1529 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1529 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1530 1530 1531 1531 f2fs_balance_fs(sbi, dn.node_changed); 1532 1532 ··· 1600 1600 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 1601 1601 1602 1602 /* avoid gc operation during block exchange */ 1603 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1603 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1604 1604 filemap_invalidate_lock(mapping); 1605 1605 truncate_pagecache(inode, offset); 1606 1606 ··· 1618 1618 f2fs_unlock_op(sbi); 1619 1619 } 1620 1620 filemap_invalidate_unlock(mapping); 1621 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1621 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1622 1622 1623 1623 /* write out all moved pages, if possible */ 1624 1624 filemap_invalidate_lock(mapping); ··· 1674 1674 next_alloc: 1675 1675 if (has_not_enough_free_secs(sbi, 0, 1676 1676 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { 1677 - down_write(&sbi->gc_lock); 1677 + f2fs_down_write(&sbi->gc_lock); 1678 1678 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); 1679 1679 if (err && err != -ENODATA && err != -EAGAIN) 1680 1680 goto out_err; 1681 1681 } 1682 1682 1683 - down_write(&sbi->pin_sem); 1683 + f2fs_down_write(&sbi->pin_sem); 1684 1684 1685 1685 f2fs_lock_op(sbi); 1686 1686 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); ··· 1690 1690 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); 1691 1691 file_dont_truncate(inode); 1692 1692 1693 - up_write(&sbi->pin_sem); 1693 + f2fs_up_write(&sbi->pin_sem); 1694 1694 1695 1695 expanded += map.m_len; 1696 1696 sec_len -= map.m_len; ··· 2020 2020 if (ret) 2021 2021 goto out; 2022 2022 2023 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2023 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2024 2024 2025 2025 /* 2026 2026 * Should wait end_io to count F2FS_WB_CP_DATA correctly by ··· 2031 2031 inode->i_ino, 
get_dirty_pages(inode)); 2032 2032 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 2033 2033 if (ret) { 2034 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2034 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2035 2035 goto out; 2036 2036 } 2037 2037 ··· 2044 2044 /* add inode in inmem_list first and set atomic_file */ 2045 2045 set_inode_flag(inode, FI_ATOMIC_FILE); 2046 2046 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); 2047 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2047 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2048 2048 2049 2049 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2050 2050 F2FS_I(inode)->inmem_task = current; ··· 2351 2351 if (err) 2352 2352 return err; 2353 2353 2354 - down_write(&sbi->sb_lock); 2354 + f2fs_down_write(&sbi->sb_lock); 2355 2355 2356 2356 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) 2357 2357 goto got_it; ··· 2370 2370 16)) 2371 2371 err = -EFAULT; 2372 2372 out_err: 2373 - up_write(&sbi->sb_lock); 2373 + f2fs_up_write(&sbi->sb_lock); 2374 2374 mnt_drop_write_file(filp); 2375 2375 return err; 2376 2376 } ··· 2447 2447 return ret; 2448 2448 2449 2449 if (!sync) { 2450 - if (!down_write_trylock(&sbi->gc_lock)) { 2450 + if (!f2fs_down_write_trylock(&sbi->gc_lock)) { 2451 2451 ret = -EBUSY; 2452 2452 goto out; 2453 2453 } 2454 2454 } else { 2455 - down_write(&sbi->gc_lock); 2455 + f2fs_down_write(&sbi->gc_lock); 2456 2456 } 2457 2457 2458 2458 ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO); ··· 2483 2483 2484 2484 do_more: 2485 2485 if (!range->sync) { 2486 - if (!down_write_trylock(&sbi->gc_lock)) { 2486 + if (!f2fs_down_write_trylock(&sbi->gc_lock)) { 2487 2487 ret = -EBUSY; 2488 2488 goto out; 2489 2489 } 2490 2490 } else { 2491 - down_write(&sbi->gc_lock); 2491 + f2fs_down_write(&sbi->gc_lock); 2492 2492 } 2493 2493 2494 2494 ret = f2fs_gc(sbi, range->sync, true, false, ··· 2820 2820 2821 2821 f2fs_balance_fs(sbi, true); 2822 2822 2823 - 
down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2823 + f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2824 2824 if (src != dst) { 2825 2825 ret = -EBUSY; 2826 - if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) 2826 + if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) 2827 2827 goto out_src; 2828 2828 } 2829 2829 ··· 2841 2841 f2fs_unlock_op(sbi); 2842 2842 2843 2843 if (src != dst) 2844 - up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); 2844 + f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); 2845 2845 out_src: 2846 - up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2846 + f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2847 2847 out_unlock: 2848 2848 if (src != dst) 2849 2849 inode_unlock(dst); ··· 2938 2938 end_segno = min(start_segno + range.segments, dev_end_segno); 2939 2939 2940 2940 while (start_segno < end_segno) { 2941 - if (!down_write_trylock(&sbi->gc_lock)) { 2941 + if (!f2fs_down_write_trylock(&sbi->gc_lock)) { 2942 2942 ret = -EBUSY; 2943 2943 goto out; 2944 2944 } ··· 3215 3215 while (map.m_lblk < end) { 3216 3216 map.m_len = end - map.m_lblk; 3217 3217 3218 - down_write(&fi->i_gc_rwsem[WRITE]); 3218 + f2fs_down_write(&fi->i_gc_rwsem[WRITE]); 3219 3219 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE); 3220 - up_write(&fi->i_gc_rwsem[WRITE]); 3220 + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); 3221 3221 if (err) 3222 3222 return err; 3223 3223 ··· 3294 3294 if (!vbuf) 3295 3295 return -ENOMEM; 3296 3296 3297 - down_read(&sbi->sb_lock); 3297 + f2fs_down_read(&sbi->sb_lock); 3298 3298 count = utf16s_to_utf8s(sbi->raw_super->volume_name, 3299 3299 ARRAY_SIZE(sbi->raw_super->volume_name), 3300 3300 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); 3301 - up_read(&sbi->sb_lock); 3301 + f2fs_up_read(&sbi->sb_lock); 3302 3302 3303 3303 if (copy_to_user((char __user *)arg, vbuf, 3304 3304 min(FSLABEL_MAX, count))) ··· 3326 3326 if (err) 3327 3327 goto out; 3328 3328 3329 - down_write(&sbi->sb_lock); 3329 + f2fs_down_write(&sbi->sb_lock); 3330 3330 
3331 3331 memset(sbi->raw_super->volume_name, 0, 3332 3332 sizeof(sbi->raw_super->volume_name)); ··· 3336 3336 3337 3337 err = f2fs_commit_super(sbi, false); 3338 3338 3339 - up_write(&sbi->sb_lock); 3339 + f2fs_up_write(&sbi->sb_lock); 3340 3340 3341 3341 mnt_drop_write_file(filp); 3342 3342 out: ··· 3462 3462 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) 3463 3463 goto out; 3464 3464 3465 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3465 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3466 3466 filemap_invalidate_lock(inode->i_mapping); 3467 3467 3468 3468 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); ··· 3499 3499 } 3500 3500 3501 3501 filemap_invalidate_unlock(inode->i_mapping); 3502 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3502 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3503 3503 out: 3504 3504 inode_unlock(inode); 3505 3505 ··· 3615 3615 goto unlock_inode; 3616 3616 } 3617 3617 3618 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3618 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3619 3619 filemap_invalidate_lock(inode->i_mapping); 3620 3620 3621 3621 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); ··· 3652 3652 } 3653 3653 3654 3654 filemap_invalidate_unlock(inode->i_mapping); 3655 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3655 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3656 3656 3657 3657 if (ret >= 0) { 3658 3658 clear_inode_flag(inode, FI_COMPRESS_RELEASED); ··· 3770 3770 if (ret) 3771 3771 goto err; 3772 3772 3773 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3773 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3774 3774 filemap_invalidate_lock(mapping); 3775 3775 3776 3776 ret = filemap_write_and_wait_range(mapping, range.start, ··· 3859 3859 prev_block, len, range.flags); 3860 3860 out: 3861 3861 filemap_invalidate_unlock(mapping); 3862 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3862 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3863 3863 err: 3864 3864 
inode_unlock(inode); 3865 3865 file_end_write(filp); ··· 4291 4291 trace_f2fs_direct_IO_enter(inode, iocb, count, READ); 4292 4292 4293 4293 if (iocb->ki_flags & IOCB_NOWAIT) { 4294 - if (!down_read_trylock(&fi->i_gc_rwsem[READ])) { 4294 + if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) { 4295 4295 ret = -EAGAIN; 4296 4296 goto out; 4297 4297 } 4298 4298 } else { 4299 - down_read(&fi->i_gc_rwsem[READ]); 4299 + f2fs_down_read(&fi->i_gc_rwsem[READ]); 4300 4300 } 4301 4301 4302 4302 /* ··· 4315 4315 ret = iomap_dio_complete(dio); 4316 4316 } 4317 4317 4318 - up_read(&fi->i_gc_rwsem[READ]); 4318 + f2fs_up_read(&fi->i_gc_rwsem[READ]); 4319 4319 4320 4320 file_accessed(file); 4321 4321 out: ··· 4497 4497 goto out; 4498 4498 } 4499 4499 4500 - if (!down_read_trylock(&fi->i_gc_rwsem[WRITE])) { 4500 + if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) { 4501 4501 ret = -EAGAIN; 4502 4502 goto out; 4503 4503 } 4504 - if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) { 4505 - up_read(&fi->i_gc_rwsem[WRITE]); 4504 + if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) { 4505 + f2fs_up_read(&fi->i_gc_rwsem[WRITE]); 4506 4506 ret = -EAGAIN; 4507 4507 goto out; 4508 4508 } ··· 4511 4511 if (ret) 4512 4512 goto out; 4513 4513 4514 - down_read(&fi->i_gc_rwsem[WRITE]); 4514 + f2fs_down_read(&fi->i_gc_rwsem[WRITE]); 4515 4515 if (do_opu) 4516 - down_read(&fi->i_gc_rwsem[READ]); 4516 + f2fs_down_read(&fi->i_gc_rwsem[READ]); 4517 4517 } 4518 4518 if (whint_mode == WHINT_MODE_OFF) 4519 4519 iocb->ki_hint = WRITE_LIFE_NOT_SET; ··· 4542 4542 if (whint_mode == WHINT_MODE_OFF) 4543 4543 iocb->ki_hint = hint; 4544 4544 if (do_opu) 4545 - up_read(&fi->i_gc_rwsem[READ]); 4546 - up_read(&fi->i_gc_rwsem[WRITE]); 4545 + f2fs_up_read(&fi->i_gc_rwsem[READ]); 4546 + f2fs_up_read(&fi->i_gc_rwsem[WRITE]); 4547 4547 4548 4548 if (ret < 0) 4549 4549 goto out; ··· 4644 4644 4645 4645 /* Don't leave any preallocated blocks around past i_size. 
*/ 4646 4646 if (preallocated && i_size_read(inode) < target_size) { 4647 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 4647 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 4648 4648 filemap_invalidate_lock(inode->i_mapping); 4649 4649 if (!f2fs_truncate(inode)) 4650 4650 file_dont_truncate(inode); 4651 4651 filemap_invalidate_unlock(inode->i_mapping); 4652 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 4652 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 4653 4653 } else { 4654 4654 file_dont_truncate(inode); 4655 4655 }
+23 -23
fs/f2fs/gc.c
··· 105 105 spin_unlock(&sbi->gc_urgent_high_lock); 106 106 107 107 wait_ms = gc_th->urgent_sleep_time; 108 - down_write(&sbi->gc_lock); 108 + f2fs_down_write(&sbi->gc_lock); 109 109 goto do_gc; 110 110 } 111 111 112 112 if (foreground) { 113 - down_write(&sbi->gc_lock); 113 + f2fs_down_write(&sbi->gc_lock); 114 114 goto do_gc; 115 - } else if (!down_write_trylock(&sbi->gc_lock)) { 115 + } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) { 116 116 stat_other_skip_bggc_count(sbi); 117 117 goto next; 118 118 } 119 119 120 120 if (!is_idle(sbi, GC_TIME)) { 121 121 increase_sleep_time(gc_th, &wait_ms); 122 - up_write(&sbi->gc_lock); 122 + f2fs_up_write(&sbi->gc_lock); 123 123 stat_io_skip_bggc_count(sbi); 124 124 goto next; 125 125 } ··· 1230 1230 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; 1231 1231 1232 1232 if (lfs_mode) 1233 - down_write(&fio.sbi->io_order_lock); 1233 + f2fs_down_write(&fio.sbi->io_order_lock); 1234 1234 1235 1235 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), 1236 1236 fio.old_blkaddr, false); ··· 1316 1316 true, true, true); 1317 1317 up_out: 1318 1318 if (lfs_mode) 1319 - up_write(&fio.sbi->io_order_lock); 1319 + f2fs_up_write(&fio.sbi->io_order_lock); 1320 1320 put_out: 1321 1321 f2fs_put_dnode(&dn); 1322 1322 out: ··· 1475 1475 special_file(inode->i_mode)) 1476 1476 continue; 1477 1477 1478 - if (!down_write_trylock( 1478 + if (!f2fs_down_write_trylock( 1479 1479 &F2FS_I(inode)->i_gc_rwsem[WRITE])) { 1480 1480 iput(inode); 1481 1481 sbi->skipped_gc_rwsem++; ··· 1488 1488 if (f2fs_post_read_required(inode)) { 1489 1489 int err = ra_data_block(inode, start_bidx); 1490 1490 1491 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1491 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1492 1492 if (err) { 1493 1493 iput(inode); 1494 1494 continue; ··· 1499 1499 1500 1500 data_page = f2fs_get_read_data_page(inode, 1501 1501 start_bidx, REQ_RAHEAD, true); 1502 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1502 + 
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1503 1503 if (IS_ERR(data_page)) { 1504 1504 iput(inode); 1505 1505 continue; ··· 1518 1518 int err; 1519 1519 1520 1520 if (S_ISREG(inode->i_mode)) { 1521 - if (!down_write_trylock(&fi->i_gc_rwsem[READ])) { 1521 + if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) { 1522 1522 sbi->skipped_gc_rwsem++; 1523 1523 continue; 1524 1524 } 1525 - if (!down_write_trylock( 1525 + if (!f2fs_down_write_trylock( 1526 1526 &fi->i_gc_rwsem[WRITE])) { 1527 1527 sbi->skipped_gc_rwsem++; 1528 - up_write(&fi->i_gc_rwsem[READ]); 1528 + f2fs_up_write(&fi->i_gc_rwsem[READ]); 1529 1529 continue; 1530 1530 } 1531 1531 locked = true; ··· 1548 1548 submitted++; 1549 1549 1550 1550 if (locked) { 1551 - up_write(&fi->i_gc_rwsem[WRITE]); 1552 - up_write(&fi->i_gc_rwsem[READ]); 1551 + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); 1552 + f2fs_up_write(&fi->i_gc_rwsem[READ]); 1553 1553 } 1554 1554 1555 1555 stat_inc_data_blk_count(sbi, 1, gc_type); ··· 1807 1807 reserved_segments(sbi), 1808 1808 prefree_segments(sbi)); 1809 1809 1810 - up_write(&sbi->gc_lock); 1810 + f2fs_up_write(&sbi->gc_lock); 1811 1811 1812 1812 put_gc_inode(&gc_list); 1813 1813 ··· 1936 1936 long long block_count; 1937 1937 int segs = secs * sbi->segs_per_sec; 1938 1938 1939 - down_write(&sbi->sb_lock); 1939 + f2fs_down_write(&sbi->sb_lock); 1940 1940 1941 1941 section_count = le32_to_cpu(raw_sb->section_count); 1942 1942 segment_count = le32_to_cpu(raw_sb->segment_count); ··· 1957 1957 cpu_to_le32(dev_segs + segs); 1958 1958 } 1959 1959 1960 - up_write(&sbi->sb_lock); 1960 + f2fs_up_write(&sbi->sb_lock); 1961 1961 } 1962 1962 1963 1963 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) ··· 2031 2031 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); 2032 2032 2033 2033 /* stop other GC */ 2034 - if (!down_write_trylock(&sbi->gc_lock)) 2034 + if (!f2fs_down_write_trylock(&sbi->gc_lock)) 2035 2035 return -EAGAIN; 2036 2036 2037 2037 /* stop CP to protect MAIN_SEC in 
free_segment_range */ ··· 2051 2051 2052 2052 out_unlock: 2053 2053 f2fs_unlock_op(sbi); 2054 - up_write(&sbi->gc_lock); 2054 + f2fs_up_write(&sbi->gc_lock); 2055 2055 if (err) 2056 2056 return err; 2057 2057 2058 2058 set_sbi_flag(sbi, SBI_IS_RESIZEFS); 2059 2059 2060 2060 freeze_super(sbi->sb); 2061 - down_write(&sbi->gc_lock); 2062 - down_write(&sbi->cp_global_sem); 2061 + f2fs_down_write(&sbi->gc_lock); 2062 + f2fs_down_write(&sbi->cp_global_sem); 2063 2063 2064 2064 spin_lock(&sbi->stat_lock); 2065 2065 if (shrunk_blocks + valid_user_blocks(sbi) + ··· 2104 2104 spin_unlock(&sbi->stat_lock); 2105 2105 } 2106 2106 out_err: 2107 - up_write(&sbi->cp_global_sem); 2108 - up_write(&sbi->gc_lock); 2107 + f2fs_up_write(&sbi->cp_global_sem); 2108 + f2fs_up_write(&sbi->gc_lock); 2109 2109 thaw_super(sbi->sb); 2110 2110 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); 2111 2111 return err;
+2 -2
fs/f2fs/inline.c
··· 629 629 } 630 630 631 631 if (inode) { 632 - down_write(&F2FS_I(inode)->i_sem); 632 + f2fs_down_write(&F2FS_I(inode)->i_sem); 633 633 page = f2fs_init_inode_metadata(inode, dir, fname, ipage); 634 634 if (IS_ERR(page)) { 635 635 err = PTR_ERR(page); ··· 658 658 f2fs_update_parent_metadata(dir, inode, 0); 659 659 fail: 660 660 if (inode) 661 - up_write(&F2FS_I(inode)->i_sem); 661 + f2fs_up_write(&F2FS_I(inode)->i_sem); 662 662 out: 663 663 f2fs_put_page(ipage, 1); 664 664 return err;
+17 -17
fs/f2fs/namei.c
··· 196 196 __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; 197 197 int i, cold_count, hot_count; 198 198 199 - down_read(&sbi->sb_lock); 199 + f2fs_down_read(&sbi->sb_lock); 200 200 201 201 cold_count = le32_to_cpu(sbi->raw_super->extension_count); 202 202 hot_count = sbi->raw_super->hot_ext_count; ··· 206 206 break; 207 207 } 208 208 209 - up_read(&sbi->sb_lock); 209 + f2fs_up_read(&sbi->sb_lock); 210 210 211 211 if (i == cold_count + hot_count) 212 212 return; ··· 299 299 (!ext_cnt && !noext_cnt)) 300 300 return; 301 301 302 - down_read(&sbi->sb_lock); 302 + f2fs_down_read(&sbi->sb_lock); 303 303 304 304 cold_count = le32_to_cpu(sbi->raw_super->extension_count); 305 305 hot_count = sbi->raw_super->hot_ext_count; 306 306 307 307 for (i = cold_count; i < cold_count + hot_count; i++) { 308 308 if (is_extension_exist(name, extlist[i], false)) { 309 - up_read(&sbi->sb_lock); 309 + f2fs_up_read(&sbi->sb_lock); 310 310 return; 311 311 } 312 312 } 313 313 314 - up_read(&sbi->sb_lock); 314 + f2fs_up_read(&sbi->sb_lock); 315 315 316 316 for (i = 0; i < noext_cnt; i++) { 317 317 if (is_extension_exist(name, noext[i], false)) { ··· 1023 1023 new_page = NULL; 1024 1024 1025 1025 new_inode->i_ctime = current_time(new_inode); 1026 - down_write(&F2FS_I(new_inode)->i_sem); 1026 + f2fs_down_write(&F2FS_I(new_inode)->i_sem); 1027 1027 if (old_dir_entry) 1028 1028 f2fs_i_links_write(new_inode, false); 1029 1029 f2fs_i_links_write(new_inode, false); 1030 - up_write(&F2FS_I(new_inode)->i_sem); 1030 + f2fs_up_write(&F2FS_I(new_inode)->i_sem); 1031 1031 1032 1032 if (!new_inode->i_nlink) 1033 1033 f2fs_add_orphan_inode(new_inode); ··· 1048 1048 f2fs_i_links_write(new_dir, true); 1049 1049 } 1050 1050 1051 - down_write(&F2FS_I(old_inode)->i_sem); 1051 + f2fs_down_write(&F2FS_I(old_inode)->i_sem); 1052 1052 if (!old_dir_entry || whiteout) 1053 1053 file_lost_pino(old_inode); 1054 1054 else 1055 1055 /* adjust dir's i_pino to pass fsck check */ 1056 1056 
f2fs_i_pino_write(old_inode, new_dir->i_ino); 1057 - up_write(&F2FS_I(old_inode)->i_sem); 1057 + f2fs_up_write(&F2FS_I(old_inode)->i_sem); 1058 1058 1059 1059 old_inode->i_ctime = current_time(old_inode); 1060 1060 f2fs_mark_inode_dirty_sync(old_inode, false); ··· 1214 1214 /* update directory entry info of old dir inode */ 1215 1215 f2fs_set_link(old_dir, old_entry, old_page, new_inode); 1216 1216 1217 - down_write(&F2FS_I(old_inode)->i_sem); 1217 + f2fs_down_write(&F2FS_I(old_inode)->i_sem); 1218 1218 if (!old_dir_entry) 1219 1219 file_lost_pino(old_inode); 1220 1220 else 1221 1221 /* adjust dir's i_pino to pass fsck check */ 1222 1222 f2fs_i_pino_write(old_inode, new_dir->i_ino); 1223 - up_write(&F2FS_I(old_inode)->i_sem); 1223 + f2fs_up_write(&F2FS_I(old_inode)->i_sem); 1224 1224 1225 1225 old_dir->i_ctime = current_time(old_dir); 1226 1226 if (old_nlink) { 1227 - down_write(&F2FS_I(old_dir)->i_sem); 1227 + f2fs_down_write(&F2FS_I(old_dir)->i_sem); 1228 1228 f2fs_i_links_write(old_dir, old_nlink > 0); 1229 - up_write(&F2FS_I(old_dir)->i_sem); 1229 + f2fs_up_write(&F2FS_I(old_dir)->i_sem); 1230 1230 } 1231 1231 f2fs_mark_inode_dirty_sync(old_dir, false); 1232 1232 1233 1233 /* update directory entry info of new dir inode */ 1234 1234 f2fs_set_link(new_dir, new_entry, new_page, old_inode); 1235 1235 1236 - down_write(&F2FS_I(new_inode)->i_sem); 1236 + f2fs_down_write(&F2FS_I(new_inode)->i_sem); 1237 1237 if (!new_dir_entry) 1238 1238 file_lost_pino(new_inode); 1239 1239 else 1240 1240 /* adjust dir's i_pino to pass fsck check */ 1241 1241 f2fs_i_pino_write(new_inode, old_dir->i_ino); 1242 - up_write(&F2FS_I(new_inode)->i_sem); 1242 + f2fs_up_write(&F2FS_I(new_inode)->i_sem); 1243 1243 1244 1244 new_dir->i_ctime = current_time(new_dir); 1245 1245 if (new_nlink) { 1246 - down_write(&F2FS_I(new_dir)->i_sem); 1246 + f2fs_down_write(&F2FS_I(new_dir)->i_sem); 1247 1247 f2fs_i_links_write(new_dir, new_nlink > 0); 1248 - up_write(&F2FS_I(new_dir)->i_sem); 1248 + 
f2fs_up_write(&F2FS_I(new_dir)->i_sem); 1249 1249 } 1250 1250 f2fs_mark_inode_dirty_sync(new_dir, false); 1251 1251
+42 -42
fs/f2fs/node.c
··· 382 382 struct nat_entry *e; 383 383 bool need = false; 384 384 385 - down_read(&nm_i->nat_tree_lock); 385 + f2fs_down_read(&nm_i->nat_tree_lock); 386 386 e = __lookup_nat_cache(nm_i, nid); 387 387 if (e) { 388 388 if (!get_nat_flag(e, IS_CHECKPOINTED) && 389 389 !get_nat_flag(e, HAS_FSYNCED_INODE)) 390 390 need = true; 391 391 } 392 - up_read(&nm_i->nat_tree_lock); 392 + f2fs_up_read(&nm_i->nat_tree_lock); 393 393 return need; 394 394 } 395 395 ··· 399 399 struct nat_entry *e; 400 400 bool is_cp = true; 401 401 402 - down_read(&nm_i->nat_tree_lock); 402 + f2fs_down_read(&nm_i->nat_tree_lock); 403 403 e = __lookup_nat_cache(nm_i, nid); 404 404 if (e && !get_nat_flag(e, IS_CHECKPOINTED)) 405 405 is_cp = false; 406 - up_read(&nm_i->nat_tree_lock); 406 + f2fs_up_read(&nm_i->nat_tree_lock); 407 407 return is_cp; 408 408 } 409 409 ··· 413 413 struct nat_entry *e; 414 414 bool need_update = true; 415 415 416 - down_read(&nm_i->nat_tree_lock); 416 + f2fs_down_read(&nm_i->nat_tree_lock); 417 417 e = __lookup_nat_cache(nm_i, ino); 418 418 if (e && get_nat_flag(e, HAS_LAST_FSYNC) && 419 419 (get_nat_flag(e, IS_CHECKPOINTED) || 420 420 get_nat_flag(e, HAS_FSYNCED_INODE))) 421 421 need_update = false; 422 - up_read(&nm_i->nat_tree_lock); 422 + f2fs_up_read(&nm_i->nat_tree_lock); 423 423 return need_update; 424 424 } 425 425 ··· 431 431 struct nat_entry *new, *e; 432 432 433 433 /* Let's mitigate lock contention of nat_tree_lock during checkpoint */ 434 - if (rwsem_is_locked(&sbi->cp_global_sem)) 434 + if (f2fs_rwsem_is_locked(&sbi->cp_global_sem)) 435 435 return; 436 436 437 437 new = __alloc_nat_entry(sbi, nid, false); 438 438 if (!new) 439 439 return; 440 440 441 - down_write(&nm_i->nat_tree_lock); 441 + f2fs_down_write(&nm_i->nat_tree_lock); 442 442 e = __lookup_nat_cache(nm_i, nid); 443 443 if (!e) 444 444 e = __init_nat_entry(nm_i, new, ne, false); ··· 447 447 nat_get_blkaddr(e) != 448 448 le32_to_cpu(ne->block_addr) || 449 449 nat_get_version(e) != ne->version); 450 
- up_write(&nm_i->nat_tree_lock); 450 + f2fs_up_write(&nm_i->nat_tree_lock); 451 451 if (e != new) 452 452 __free_nat_entry(new); 453 453 } ··· 459 459 struct nat_entry *e; 460 460 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true); 461 461 462 - down_write(&nm_i->nat_tree_lock); 462 + f2fs_down_write(&nm_i->nat_tree_lock); 463 463 e = __lookup_nat_cache(nm_i, ni->nid); 464 464 if (!e) { 465 465 e = __init_nat_entry(nm_i, new, NULL, true); ··· 508 508 set_nat_flag(e, HAS_FSYNCED_INODE, true); 509 509 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); 510 510 } 511 - up_write(&nm_i->nat_tree_lock); 511 + f2fs_up_write(&nm_i->nat_tree_lock); 512 512 } 513 513 514 514 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) ··· 516 516 struct f2fs_nm_info *nm_i = NM_I(sbi); 517 517 int nr = nr_shrink; 518 518 519 - if (!down_write_trylock(&nm_i->nat_tree_lock)) 519 + if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock)) 520 520 return 0; 521 521 522 522 spin_lock(&nm_i->nat_list_lock); ··· 538 538 } 539 539 spin_unlock(&nm_i->nat_list_lock); 540 540 541 - up_write(&nm_i->nat_tree_lock); 541 + f2fs_up_write(&nm_i->nat_tree_lock); 542 542 return nr - nr_shrink; 543 543 } 544 544 ··· 560 560 ni->nid = nid; 561 561 retry: 562 562 /* Check nat cache */ 563 - down_read(&nm_i->nat_tree_lock); 563 + f2fs_down_read(&nm_i->nat_tree_lock); 564 564 e = __lookup_nat_cache(nm_i, nid); 565 565 if (e) { 566 566 ni->ino = nat_get_ino(e); 567 567 ni->blk_addr = nat_get_blkaddr(e); 568 568 ni->version = nat_get_version(e); 569 - up_read(&nm_i->nat_tree_lock); 569 + f2fs_up_read(&nm_i->nat_tree_lock); 570 570 return 0; 571 571 } 572 572 ··· 576 576 * nat_tree_lock. Therefore, we should retry, if we failed to grab here 577 577 * while not bothering checkpoint. 
578 578 */ 579 - if (!rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { 579 + if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { 580 580 down_read(&curseg->journal_rwsem); 581 - } else if (rwsem_is_contended(&nm_i->nat_tree_lock) || 581 + } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) || 582 582 !down_read_trylock(&curseg->journal_rwsem)) { 583 - up_read(&nm_i->nat_tree_lock); 583 + f2fs_up_read(&nm_i->nat_tree_lock); 584 584 goto retry; 585 585 } 586 586 ··· 589 589 ne = nat_in_journal(journal, i); 590 590 node_info_from_raw_nat(ni, &ne); 591 591 } 592 - up_read(&curseg->journal_rwsem); 592 + up_read(&curseg->journal_rwsem); 593 593 if (i >= 0) { 594 - up_read(&nm_i->nat_tree_lock); 594 + f2fs_up_read(&nm_i->nat_tree_lock); 595 595 goto cache; 596 596 } 597 597 598 598 /* Fill node_info from nat page */ 599 599 index = current_nat_addr(sbi, nid); 600 - up_read(&nm_i->nat_tree_lock); 600 + f2fs_up_read(&nm_i->nat_tree_lock); 601 601 602 602 page = f2fs_get_meta_page(sbi, index); 603 603 if (IS_ERR(page)) ··· 1609 1609 goto redirty_out; 1610 1610 1611 1611 if (wbc->for_reclaim) { 1612 - if (!down_read_trylock(&sbi->node_write)) 1612 + if (!f2fs_down_read_trylock(&sbi->node_write)) 1613 1613 goto redirty_out; 1614 1614 } else { 1615 - down_read(&sbi->node_write); 1615 + f2fs_down_read(&sbi->node_write); 1616 1616 } 1617 1617 1618 1618 /* This page is already truncated */ 1619 1619 if (unlikely(ni.blk_addr == NULL_ADDR)) { 1620 1620 ClearPageUptodate(page); 1621 1621 dec_page_count(sbi, F2FS_DIRTY_NODES); 1622 - up_read(&sbi->node_write); 1622 + f2fs_up_read(&sbi->node_write); 1623 1623 unlock_page(page); 1624 1624 return 0; 1625 1625 } ··· 1627 1627 if (__is_valid_data_blkaddr(ni.blk_addr) && 1628 1628 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, 1629 1629 DATA_GENERIC_ENHANCE)) { 1630 - up_read(&sbi->node_write); 1630 + f2fs_up_read(&sbi->node_write); 1631 1631 goto redirty_out; 1632 1632 } 1633 1633 ··· 1648 1648 
f2fs_do_write_node_page(nid, &fio); 1649 1649 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); 1650 1650 dec_page_count(sbi, F2FS_DIRTY_NODES); 1651 - up_read(&sbi->node_write); 1651 + f2fs_up_read(&sbi->node_write); 1652 1652 1653 1653 if (wbc->for_reclaim) { 1654 1654 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); ··· 2225 2225 unsigned int i; 2226 2226 bool ret = true; 2227 2227 2228 - down_read(&nm_i->nat_tree_lock); 2228 + f2fs_down_read(&nm_i->nat_tree_lock); 2229 2229 for (i = 0; i < nm_i->nat_blocks; i++) { 2230 2230 if (!test_bit_le(i, nm_i->nat_block_bitmap)) { 2231 2231 ret = false; 2232 2232 break; 2233 2233 } 2234 2234 } 2235 - up_read(&nm_i->nat_tree_lock); 2235 + f2fs_up_read(&nm_i->nat_tree_lock); 2236 2236 2237 2237 return ret; 2238 2238 } ··· 2415 2415 unsigned int i, idx; 2416 2416 nid_t nid; 2417 2417 2418 - down_read(&nm_i->nat_tree_lock); 2418 + f2fs_down_read(&nm_i->nat_tree_lock); 2419 2419 2420 2420 for (i = 0; i < nm_i->nat_blocks; i++) { 2421 2421 if (!test_bit_le(i, nm_i->nat_block_bitmap)) ··· 2438 2438 out: 2439 2439 scan_curseg_cache(sbi); 2440 2440 2441 - up_read(&nm_i->nat_tree_lock); 2441 + f2fs_up_read(&nm_i->nat_tree_lock); 2442 2442 } 2443 2443 2444 2444 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, ··· 2473 2473 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, 2474 2474 META_NAT, true); 2475 2475 2476 - down_read(&nm_i->nat_tree_lock); 2476 + f2fs_down_read(&nm_i->nat_tree_lock); 2477 2477 2478 2478 while (1) { 2479 2479 if (!test_bit_le(NAT_BLOCK_OFFSET(nid), ··· 2488 2488 } 2489 2489 2490 2490 if (ret) { 2491 - up_read(&nm_i->nat_tree_lock); 2491 + f2fs_up_read(&nm_i->nat_tree_lock); 2492 2492 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); 2493 2493 return ret; 2494 2494 } ··· 2508 2508 /* find free nids from current sum_pages */ 2509 2509 scan_curseg_cache(sbi); 2510 2510 2511 - up_read(&nm_i->nat_tree_lock); 2511 + f2fs_up_read(&nm_i->nat_tree_lock); 2512 2512 2513 
2513 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), 2514 2514 nm_i->ra_nid_pages, META_NAT, false); ··· 2953 2953 struct f2fs_nm_info *nm_i = NM_I(sbi); 2954 2954 unsigned int nat_ofs; 2955 2955 2956 - down_read(&nm_i->nat_tree_lock); 2956 + f2fs_down_read(&nm_i->nat_tree_lock); 2957 2957 2958 2958 for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { 2959 2959 unsigned int valid = 0, nid_ofs = 0; ··· 2973 2973 __update_nat_bits(nm_i, nat_ofs, valid); 2974 2974 } 2975 2975 2976 - up_read(&nm_i->nat_tree_lock); 2976 + f2fs_up_read(&nm_i->nat_tree_lock); 2977 2977 } 2978 2978 2979 2979 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, ··· 3071 3071 * nat_cnt[DIRTY_NAT]. 3072 3072 */ 3073 3073 if (cpc->reason & CP_UMOUNT) { 3074 - down_write(&nm_i->nat_tree_lock); 3074 + f2fs_down_write(&nm_i->nat_tree_lock); 3075 3075 remove_nats_in_journal(sbi); 3076 - up_write(&nm_i->nat_tree_lock); 3076 + f2fs_up_write(&nm_i->nat_tree_lock); 3077 3077 } 3078 3078 3079 3079 if (!nm_i->nat_cnt[DIRTY_NAT]) 3080 3080 return 0; 3081 3081 3082 - down_write(&nm_i->nat_tree_lock); 3082 + f2fs_down_write(&nm_i->nat_tree_lock); 3083 3083 3084 3084 /* 3085 3085 * if there are no enough space in journal to store dirty nat ··· 3108 3108 break; 3109 3109 } 3110 3110 3111 - up_write(&nm_i->nat_tree_lock); 3111 + f2fs_up_write(&nm_i->nat_tree_lock); 3112 3112 /* Allow dirty nats by node block allocation in write_begin */ 3113 3113 3114 3114 return err; ··· 3228 3228 3229 3229 mutex_init(&nm_i->build_lock); 3230 3230 spin_lock_init(&nm_i->nid_list_lock); 3231 - init_rwsem(&nm_i->nat_tree_lock); 3231 + init_f2fs_rwsem(&nm_i->nat_tree_lock); 3232 3232 3233 3233 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 3234 3234 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); ··· 3334 3334 spin_unlock(&nm_i->nid_list_lock); 3335 3335 3336 3336 /* destroy nat cache */ 3337 - down_write(&nm_i->nat_tree_lock); 3337 + f2fs_down_write(&nm_i->nat_tree_lock); 3338 3338 
while ((found = __gang_lookup_nat_cache(nm_i, 3339 3339 nid, NATVEC_SIZE, natvec))) { 3340 3340 unsigned idx; ··· 3364 3364 kmem_cache_free(nat_entry_set_slab, setvec[idx]); 3365 3365 } 3366 3366 } 3367 - up_write(&nm_i->nat_tree_lock); 3367 + f2fs_up_write(&nm_i->nat_tree_lock); 3368 3368 3369 3369 kvfree(nm_i->nat_block_bitmap); 3370 3370 if (nm_i->free_nid_bitmap) {
+2 -2
fs/f2fs/recovery.c
··· 796 796 INIT_LIST_HEAD(&dir_list); 797 797 798 798 /* prevent checkpoint */ 799 - down_write(&sbi->cp_global_sem); 799 + f2fs_down_write(&sbi->cp_global_sem); 800 800 801 801 /* step #1: find fsynced inode numbers */ 802 802 err = find_fsync_dnodes(sbi, &inode_list, check_only); ··· 845 845 if (!err) 846 846 clear_sbi_flag(sbi, SBI_POR_DOING); 847 847 848 - up_write(&sbi->cp_global_sem); 848 + f2fs_up_write(&sbi->cp_global_sem); 849 849 850 850 /* let's drop all the directory inodes for clean checkpoint */ 851 851 destroy_fsync_dnodes(&dir_list, err);
+22 -22
fs/f2fs/segment.c
··· 471 471 472 472 f2fs_balance_fs(sbi, true); 473 473 474 - down_write(&fi->i_gc_rwsem[WRITE]); 474 + f2fs_down_write(&fi->i_gc_rwsem[WRITE]); 475 475 476 476 f2fs_lock_op(sbi); 477 477 set_inode_flag(inode, FI_ATOMIC_COMMIT); ··· 483 483 clear_inode_flag(inode, FI_ATOMIC_COMMIT); 484 484 485 485 f2fs_unlock_op(sbi); 486 - up_write(&fi->i_gc_rwsem[WRITE]); 486 + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); 487 487 488 488 return err; 489 489 } ··· 521 521 io_schedule(); 522 522 finish_wait(&sbi->gc_thread->fggc_wq, &wait); 523 523 } else { 524 - down_write(&sbi->gc_lock); 524 + f2fs_down_write(&sbi->gc_lock); 525 525 f2fs_gc(sbi, false, false, false, NULL_SEGNO); 526 526 } 527 527 } ··· 529 529 530 530 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi) 531 531 { 532 - int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; 532 + int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; 533 533 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS); 534 534 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA); 535 535 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES); ··· 570 570 571 571 /* there is background inflight IO or foreground operation recently */ 572 572 if (is_inflight_io(sbi, REQ_TIME) || 573 - (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem))) 573 + (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem))) 574 574 return; 575 575 576 576 /* exceed periodical checkpoint timeout threshold */ ··· 2821 2821 if (!sbi->am.atgc_enabled) 2822 2822 return; 2823 2823 2824 - down_read(&SM_I(sbi)->curseg_lock); 2824 + f2fs_down_read(&SM_I(sbi)->curseg_lock); 2825 2825 2826 2826 mutex_lock(&curseg->curseg_mutex); 2827 2827 down_write(&SIT_I(sbi)->sentry_lock); ··· 2831 2831 up_write(&SIT_I(sbi)->sentry_lock); 2832 2832 mutex_unlock(&curseg->curseg_mutex); 2833 2833 2834 - up_read(&SM_I(sbi)->curseg_lock); 2834 + f2fs_up_read(&SM_I(sbi)->curseg_lock); 2835 2835 2836 2836 } 2837 2837 void f2fs_init_inmem_curseg(struct 
f2fs_sb_info *sbi) ··· 2982 2982 struct curseg_info *curseg = CURSEG_I(sbi, type); 2983 2983 unsigned int segno; 2984 2984 2985 - down_read(&SM_I(sbi)->curseg_lock); 2985 + f2fs_down_read(&SM_I(sbi)->curseg_lock); 2986 2986 mutex_lock(&curseg->curseg_mutex); 2987 2987 down_write(&SIT_I(sbi)->sentry_lock); 2988 2988 ··· 3006 3006 type, segno, curseg->segno); 3007 3007 3008 3008 mutex_unlock(&curseg->curseg_mutex); 3009 - up_read(&SM_I(sbi)->curseg_lock); 3009 + f2fs_up_read(&SM_I(sbi)->curseg_lock); 3010 3010 } 3011 3011 3012 3012 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type, ··· 3038 3038 3039 3039 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) 3040 3040 { 3041 - down_read(&SM_I(sbi)->curseg_lock); 3041 + f2fs_down_read(&SM_I(sbi)->curseg_lock); 3042 3042 down_write(&SIT_I(sbi)->sentry_lock); 3043 3043 __allocate_new_section(sbi, type, force); 3044 3044 up_write(&SIT_I(sbi)->sentry_lock); 3045 - up_read(&SM_I(sbi)->curseg_lock); 3045 + f2fs_up_read(&SM_I(sbi)->curseg_lock); 3046 3046 } 3047 3047 3048 3048 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) 3049 3049 { 3050 3050 int i; 3051 3051 3052 - down_read(&SM_I(sbi)->curseg_lock); 3052 + f2fs_down_read(&SM_I(sbi)->curseg_lock); 3053 3053 down_write(&SIT_I(sbi)->sentry_lock); 3054 3054 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 3055 3055 __allocate_new_segment(sbi, i, false, false); 3056 3056 up_write(&SIT_I(sbi)->sentry_lock); 3057 - up_read(&SM_I(sbi)->curseg_lock); 3057 + f2fs_up_read(&SM_I(sbi)->curseg_lock); 3058 3058 } 3059 3059 3060 3060 static const struct segment_allocation default_salloc_ops = { ··· 3192 3192 if (sbi->discard_blks == 0) 3193 3193 goto out; 3194 3194 3195 - down_write(&sbi->gc_lock); 3195 + f2fs_down_write(&sbi->gc_lock); 3196 3196 err = f2fs_write_checkpoint(sbi, &cpc); 3197 - up_write(&sbi->gc_lock); 3197 + f2fs_up_write(&sbi->gc_lock); 3198 3198 if (err) 3199 3199 goto out; 3200 3200 ··· 3431 3431 bool 
from_gc = (type == CURSEG_ALL_DATA_ATGC); 3432 3432 struct seg_entry *se = NULL; 3433 3433 3434 - down_read(&SM_I(sbi)->curseg_lock); 3434 + f2fs_down_read(&SM_I(sbi)->curseg_lock); 3435 3435 3436 3436 mutex_lock(&curseg->curseg_mutex); 3437 3437 down_write(&sit_i->sentry_lock); ··· 3514 3514 3515 3515 mutex_unlock(&curseg->curseg_mutex); 3516 3516 3517 - up_read(&SM_I(sbi)->curseg_lock); 3517 + f2fs_up_read(&SM_I(sbi)->curseg_lock); 3518 3518 } 3519 3519 3520 3520 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, ··· 3550 3550 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA); 3551 3551 3552 3552 if (keep_order) 3553 - down_read(&fio->sbi->io_order_lock); 3553 + f2fs_down_read(&fio->sbi->io_order_lock); 3554 3554 reallocate: 3555 3555 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, 3556 3556 &fio->new_blkaddr, sum, type, fio); ··· 3570 3570 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); 3571 3571 3572 3572 if (keep_order) 3573 - up_read(&fio->sbi->io_order_lock); 3573 + f2fs_up_read(&fio->sbi->io_order_lock); 3574 3574 } 3575 3575 3576 3576 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, ··· 3705 3705 se = get_seg_entry(sbi, segno); 3706 3706 type = se->type; 3707 3707 3708 - down_write(&SM_I(sbi)->curseg_lock); 3708 + f2fs_down_write(&SM_I(sbi)->curseg_lock); 3709 3709 3710 3710 if (!recover_curseg) { 3711 3711 /* for recovery flow */ ··· 3774 3774 3775 3775 up_write(&sit_i->sentry_lock); 3776 3776 mutex_unlock(&curseg->curseg_mutex); 3777 - up_write(&SM_I(sbi)->curseg_lock); 3777 + f2fs_up_write(&SM_I(sbi)->curseg_lock); 3778 3778 } 3779 3779 3780 3780 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, ··· 5258 5258 5259 5259 INIT_LIST_HEAD(&sm_info->sit_entry_set); 5260 5260 5261 - init_rwsem(&sm_info->curseg_lock); 5261 + init_f2fs_rwsem(&sm_info->curseg_lock); 5262 5262 5263 5263 if (!f2fs_readonly(sbi->sb)) { 5264 5264 err = 
f2fs_create_flush_cmd_control(sbi);
+28 -28
fs/f2fs/super.c
··· 1355 1355 /* Initialize f2fs-specific inode info */ 1356 1356 atomic_set(&fi->dirty_pages, 0); 1357 1357 atomic_set(&fi->i_compr_blocks, 0); 1358 - init_rwsem(&fi->i_sem); 1358 + init_f2fs_rwsem(&fi->i_sem); 1359 1359 spin_lock_init(&fi->i_size_lock); 1360 1360 INIT_LIST_HEAD(&fi->dirty_list); 1361 1361 INIT_LIST_HEAD(&fi->gdirty_list); 1362 1362 INIT_LIST_HEAD(&fi->inmem_ilist); 1363 1363 INIT_LIST_HEAD(&fi->inmem_pages); 1364 1364 mutex_init(&fi->inmem_lock); 1365 - init_rwsem(&fi->i_gc_rwsem[READ]); 1366 - init_rwsem(&fi->i_gc_rwsem[WRITE]); 1367 - init_rwsem(&fi->i_xattr_sem); 1365 + init_f2fs_rwsem(&fi->i_gc_rwsem[READ]); 1366 + init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]); 1367 + init_f2fs_rwsem(&fi->i_xattr_sem); 1368 1368 1369 1369 /* Will be used by directory only */ 1370 1370 fi->i_dir_level = F2FS_SB(sb)->dir_level; ··· 2088 2088 f2fs_update_time(sbi, DISABLE_TIME); 2089 2089 2090 2090 while (!f2fs_time_over(sbi, DISABLE_TIME)) { 2091 - down_write(&sbi->gc_lock); 2091 + f2fs_down_write(&sbi->gc_lock); 2092 2092 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); 2093 2093 if (err == -ENODATA) { 2094 2094 err = 0; ··· 2110 2110 goto restore_flag; 2111 2111 } 2112 2112 2113 - down_write(&sbi->gc_lock); 2113 + f2fs_down_write(&sbi->gc_lock); 2114 2114 cpc.reason = CP_PAUSE; 2115 2115 set_sbi_flag(sbi, SBI_CP_DISABLED); 2116 2116 err = f2fs_write_checkpoint(sbi, &cpc); ··· 2122 2122 spin_unlock(&sbi->stat_lock); 2123 2123 2124 2124 out_unlock: 2125 - up_write(&sbi->gc_lock); 2125 + f2fs_up_write(&sbi->gc_lock); 2126 2126 restore_flag: 2127 2127 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ 2128 2128 return err; ··· 2142 2142 if (unlikely(retry < 0)) 2143 2143 f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); 2144 2144 2145 - down_write(&sbi->gc_lock); 2145 + f2fs_down_write(&sbi->gc_lock); 2146 2146 f2fs_dirty_to_prefree(sbi); 2147 2147 2148 2148 clear_sbi_flag(sbi, SBI_CP_DISABLED); 2149 2149 set_sbi_flag(sbi, SBI_IS_DIRTY); 2150 - 
up_write(&sbi->gc_lock); 2150 + f2fs_up_write(&sbi->gc_lock); 2151 2151 2152 2152 f2fs_sync_fs(sbi->sb, 1); 2153 2153 } ··· 2707 2707 /* 2708 2708 * do_quotactl 2709 2709 * f2fs_quota_sync 2710 - * down_read(quota_sem) 2710 + * f2fs_down_read(quota_sem) 2711 2711 * dquot_writeback_dquots() 2712 2712 * f2fs_dquot_commit 2713 2713 * block_operation 2714 - * down_read(quota_sem) 2714 + * f2fs_down_read(quota_sem) 2715 2715 */ 2716 2716 f2fs_lock_op(sbi); 2717 - down_read(&sbi->quota_sem); 2717 + f2fs_down_read(&sbi->quota_sem); 2718 2718 2719 2719 ret = f2fs_quota_sync_file(sbi, cnt); 2720 2720 2721 - up_read(&sbi->quota_sem); 2721 + f2fs_up_read(&sbi->quota_sem); 2722 2722 f2fs_unlock_op(sbi); 2723 2723 2724 2724 inode_unlock(dqopt->files[cnt]); ··· 2843 2843 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); 2844 2844 int ret; 2845 2845 2846 - down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); 2846 + f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); 2847 2847 ret = dquot_commit(dquot); 2848 2848 if (ret < 0) 2849 2849 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 2850 - up_read(&sbi->quota_sem); 2850 + f2fs_up_read(&sbi->quota_sem); 2851 2851 return ret; 2852 2852 } 2853 2853 ··· 2856 2856 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); 2857 2857 int ret; 2858 2858 2859 - down_read(&sbi->quota_sem); 2859 + f2fs_down_read(&sbi->quota_sem); 2860 2860 ret = dquot_acquire(dquot); 2861 2861 if (ret < 0) 2862 2862 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 2863 - up_read(&sbi->quota_sem); 2863 + f2fs_up_read(&sbi->quota_sem); 2864 2864 return ret; 2865 2865 } 2866 2866 ··· 3601 3601 3602 3602 INIT_LIST_HEAD(&sbi->s_list); 3603 3603 mutex_init(&sbi->umount_mutex); 3604 - init_rwsem(&sbi->io_order_lock); 3604 + init_f2fs_rwsem(&sbi->io_order_lock); 3605 3605 spin_lock_init(&sbi->cp_lock); 3606 3606 3607 3607 sbi->dirty_device = 0; 3608 3608 spin_lock_init(&sbi->dev_lock); 3609 3609 3610 - init_rwsem(&sbi->sb_lock); 3611 - init_rwsem(&sbi->pin_sem); 3610 + 
init_f2fs_rwsem(&sbi->sb_lock); 3611 + init_f2fs_rwsem(&sbi->pin_sem); 3612 3612 } 3613 3613 3614 3614 static int init_percpu_info(struct f2fs_sb_info *sbi) ··· 4067 4067 4068 4068 /* init f2fs-specific super block info */ 4069 4069 sbi->valid_super_block = valid_super_block; 4070 - init_rwsem(&sbi->gc_lock); 4070 + init_f2fs_rwsem(&sbi->gc_lock); 4071 4071 mutex_init(&sbi->writepages); 4072 - init_rwsem(&sbi->cp_global_sem); 4073 - init_rwsem(&sbi->node_write); 4074 - init_rwsem(&sbi->node_change); 4072 + init_f2fs_rwsem(&sbi->cp_global_sem); 4073 + init_f2fs_rwsem(&sbi->node_write); 4074 + init_f2fs_rwsem(&sbi->node_change); 4075 4075 4076 4076 /* disallow all the data/node/meta page writes */ 4077 4077 set_sbi_flag(sbi, SBI_POR_DOING); ··· 4092 4092 } 4093 4093 4094 4094 for (j = HOT; j < n; j++) { 4095 - init_rwsem(&sbi->write_io[i][j].io_rwsem); 4095 + init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem); 4096 4096 sbi->write_io[i][j].sbi = sbi; 4097 4097 sbi->write_io[i][j].bio = NULL; 4098 4098 spin_lock_init(&sbi->write_io[i][j].io_lock); 4099 4099 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list); 4100 4100 INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list); 4101 - init_rwsem(&sbi->write_io[i][j].bio_list_lock); 4101 + init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock); 4102 4102 } 4103 4103 } 4104 4104 4105 - init_rwsem(&sbi->cp_rwsem); 4106 - init_rwsem(&sbi->quota_sem); 4105 + init_f2fs_rwsem(&sbi->cp_rwsem); 4106 + init_f2fs_rwsem(&sbi->quota_sem); 4107 4107 init_waitqueue_head(&sbi->cp_wait); 4108 4108 init_sb_info(sbi); 4109 4109
+2 -2
fs/f2fs/sysfs.c
··· 363 363 if (!strlen(name) || strlen(name) >= F2FS_EXTENSION_LEN) 364 364 return -EINVAL; 365 365 366 - down_write(&sbi->sb_lock); 366 + f2fs_down_write(&sbi->sb_lock); 367 367 368 368 ret = f2fs_update_extension_list(sbi, name, hot, set); 369 369 if (ret) ··· 373 373 if (ret) 374 374 f2fs_update_extension_list(sbi, name, hot, !set); 375 375 out: 376 - up_write(&sbi->sb_lock); 376 + f2fs_up_write(&sbi->sb_lock); 377 377 return ret ? ret : count; 378 378 } 379 379
+2 -2
fs/f2fs/verity.c
··· 208 208 * from re-instantiating cached pages we are truncating (since unlike 209 209 * normal file accesses, garbage collection isn't limited by i_size). 210 210 */ 211 - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 211 + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 212 212 truncate_inode_pages(inode->i_mapping, inode->i_size); 213 213 err2 = f2fs_truncate(inode); 214 214 if (err2) { ··· 216 216 err2); 217 217 set_sbi_flag(sbi, SBI_NEED_FSCK); 218 218 } 219 - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 219 + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 220 220 clear_inode_flag(inode, FI_VERITY_IN_PROGRESS); 221 221 return err ?: err2; 222 222 }
+6 -6
fs/f2fs/xattr.c
··· 525 525 if (len > F2FS_NAME_LEN) 526 526 return -ERANGE; 527 527 528 - down_read(&F2FS_I(inode)->i_xattr_sem); 528 + f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); 529 529 error = lookup_all_xattrs(inode, ipage, index, len, name, 530 530 &entry, &base_addr, &base_size, &is_inline); 531 - up_read(&F2FS_I(inode)->i_xattr_sem); 531 + f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); 532 532 if (error) 533 533 return error; 534 534 ··· 562 562 int error; 563 563 size_t rest = buffer_size; 564 564 565 - down_read(&F2FS_I(inode)->i_xattr_sem); 565 + f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); 566 566 error = read_all_xattrs(inode, NULL, &base_addr); 567 - up_read(&F2FS_I(inode)->i_xattr_sem); 567 + f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); 568 568 if (error) 569 569 return error; 570 570 ··· 786 786 f2fs_balance_fs(sbi, true); 787 787 788 788 f2fs_lock_op(sbi); 789 - down_write(&F2FS_I(inode)->i_xattr_sem); 789 + f2fs_down_write(&F2FS_I(inode)->i_xattr_sem); 790 790 err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags); 791 - up_write(&F2FS_I(inode)->i_xattr_sem); 791 + f2fs_up_write(&F2FS_I(inode)->i_xattr_sem); 792 792 f2fs_unlock_op(sbi); 793 793 794 794 f2fs_update_time(sbi, REQ_TIME);