Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

f2fs: produce more nids and reduce readahead nats

The readahead nat pages are more likely to be reclaimed quickly, so it'd be
better to gather more free nids in advance.

And, let's keep as many free nids cached as possible.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

+18 -8
+2
fs/f2fs/checkpoint.c
··· 941 941 static void unblock_operations(struct f2fs_sb_info *sbi) 942 942 { 943 943 up_write(&sbi->node_write); 944 + 945 + build_free_nids(sbi); 944 946 f2fs_unlock_all(sbi); 945 947 } 946 948
+1
fs/f2fs/f2fs.h
··· 1965 1965 int fsync_node_pages(struct f2fs_sb_info *, struct inode *, 1966 1966 struct writeback_control *, bool); 1967 1967 int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *); 1968 + void build_free_nids(struct f2fs_sb_info *); 1968 1969 bool alloc_nid(struct f2fs_sb_info *, nid_t *); 1969 1970 void alloc_nid_done(struct f2fs_sb_info *, nid_t); 1970 1971 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+6 -3
fs/f2fs/node.c
··· 1765 1765 } 1766 1766 } 1767 1767 1768 - static void build_free_nids(struct f2fs_sb_info *sbi) 1768 + void build_free_nids(struct f2fs_sb_info *sbi) 1769 1769 { 1770 1770 struct f2fs_nm_info *nm_i = NM_I(sbi); 1771 1771 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); ··· 1774 1774 nid_t nid = nm_i->next_scan_nid; 1775 1775 1776 1776 /* Enough entries */ 1777 - if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK) 1777 + if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK) 1778 1778 return; 1779 1779 1780 1780 /* readahead nat pages to be scanned */ ··· 1912 1912 struct free_nid *i, *next; 1913 1913 int nr = nr_shrink; 1914 1914 1915 + if (nm_i->fcnt <= MAX_FREE_NIDS) 1916 + return 0; 1917 + 1915 1918 if (!mutex_trylock(&nm_i->build_lock)) 1916 1919 return 0; 1917 1920 1918 1921 spin_lock(&nm_i->free_nid_list_lock); 1919 1922 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { 1920 - if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK) 1923 + if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS) 1921 1924 break; 1922 1925 if (i->state == NID_ALLOC) 1923 1926 continue;
+3 -2
fs/f2fs/node.h
··· 15 15 #define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK) 16 16 17 17 /* # of pages to perform synchronous readahead before building free nids */ 18 - #define FREE_NID_PAGES 4 18 + #define FREE_NID_PAGES 8 19 + #define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES) 19 20 20 - #define DEF_RA_NID_PAGES 4 /* # of nid pages to be readaheaded */ 21 + #define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */ 21 22 22 23 /* maximum readahead size for node during getting data blocks */ 23 24 #define MAX_RA_NODE 128
+3 -1
fs/f2fs/segment.c
··· 371 371 try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); 372 372 373 373 if (!available_free_memory(sbi, FREE_NIDS)) 374 - try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES); 374 + try_to_free_nids(sbi, MAX_FREE_NIDS); 375 + else 376 + build_free_nids(sbi); 375 377 376 378 /* checkpoint is the only way to shrink partial cached entries */ 377 379 if (!available_free_memory(sbi, NAT_ENTRIES) ||
+3 -2
fs/f2fs/shrinker.c
··· 13 13 #include <linux/f2fs_fs.h> 14 14 15 15 #include "f2fs.h" 16 + #include "node.h" 16 17 17 18 static LIST_HEAD(f2fs_list); 18 19 static DEFINE_SPINLOCK(f2fs_list_lock); ··· 26 25 27 26 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) 28 27 { 29 - if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK) 30 - return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK; 28 + if (NM_I(sbi)->fcnt > MAX_FREE_NIDS) 29 + return NM_I(sbi)->fcnt - MAX_FREE_NIDS; 31 30 return 0; 32 31 } 33 32