fs/f2fs/node.h at v4.14
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
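
/*
 * Worked example for the two macros above, assuming 4 KiB pages:
 * NAT_ENTRY_PER_BLOCK is PAGE_SIZE / sizeof(struct f2fs_nat_entry)
 * = 4096 / 9 = 455, so for nid 1000:
 *
 *         START_NID(1000)       = (1000 / 455) * 455 = 910
 *         NAT_BLOCK_OFFSET(910) =   910 / 455        = 2
 *
 * i.e. nid 1000 is described by the third NAT block, whose entries
 * start at nid 910.
 */
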
/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES 8
#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

#define DEF_RA_NID_PAGES 0      /* # of nid pages to readahead */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128

/* control the memory footprint threshold (10 MB per 1 GB of RAM) */
#define DEF_RAM_THRESHOLD 1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD 10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD 100000

/* vector size for gang look-up from the nat cache, which is a radix tree */
#define NATVEC_SIZE 64
#define SETVEC_SIZE 32

/* return value for read_node_page */
#define LOCKED_PAGE 1

/* For flag in struct node_info */
enum {
        IS_CHECKPOINTED,        /* is it checkpointed before? */
        HAS_FSYNCED_INODE,      /* is the inode fsynced before? */
        HAS_LAST_FSYNC,         /* has the latest node fsync mark? */
        IS_DIRTY,               /* this nat entry is dirty? */
};

/*
 * For node information
 */
struct node_info {
        nid_t nid;              /* node id */
        nid_t ino;              /* inode number of the node's owner */
        block_t blk_addr;       /* block address of the node */
        unsigned char version;  /* version of the node */
        unsigned char flag;     /* for node information bits */
};

struct nat_entry {
        struct list_head list;  /* for clean or dirty nat list */
        struct node_info ni;    /* in-memory node information */
};

#define nat_get_nid(nat)                ((nat)->ni.nid)
#define nat_set_nid(nat, n)             ((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)            ((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)         ((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)                ((nat)->ni.ino)
#define nat_set_ino(nat, i)             ((nat)->ni.ino = (i))
#define nat_get_version(nat)            ((nat)->ni.version)
#define nat_set_version(nat, v)         ((nat)->ni.version = (v))

#define inc_node_version(version)       (++(version))

static inline void copy_node_info(struct node_info *dst,
                                                struct node_info *src)
{
        dst->nid = src->nid;
        dst->ino = src->ino;
        dst->blk_addr = src->blk_addr;
        dst->version = src->version;
        /* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
                                unsigned int type, bool set)
{
        unsigned char mask = 0x01 << type;
        if (set)
                ne->ni.flag |= mask;
        else
                ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
        unsigned char mask = 0x01 << type;
        return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
        /* these states can be set only after checkpoint was done */
        set_nat_flag(ne, IS_CHECKPOINTED, true);
        set_nat_flag(ne, HAS_FSYNCED_INODE, false);
        set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
                                                struct f2fs_nat_entry *raw_ne)
{
        ni->ino = le32_to_cpu(raw_ne->ino);
        ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
        ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
                                                struct node_info *ni)
{
        raw_ne->ino = cpu_to_le32(ni->ino);
        raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
        raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
        return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
                                        NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
        return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}

enum mem_type {
        FREE_NIDS,      /* indicates the free nid list */
        NAT_ENTRIES,    /* indicates the cached nat entry */
        DIRTY_DENTS,    /* indicates dirty dentry pages */
        INO_ENTRIES,    /* indicates inode entries */
        EXTENT_CACHE,   /* indicates extent cache */
        BASE_CHECK,     /* check kernel status */
};

struct nat_entry_set {
        struct list_head set_list;      /* link with other nat sets */
        struct list_head entry_list;    /* link with dirty nat entries */
        nid_t set;                      /* set number */
        unsigned int entry_cnt;         /* the # of nat entries in set */
};

/*
 * For free nid management
 */
enum nid_state {
        NID_NEW,        /* newly added to free nid list */
        NID_ALLOC       /* it is allocated */
};

struct free_nid {
        struct list_head list;  /* for free node id list */
        nid_t nid;              /* node id */
        int state;              /* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *fnid;

        spin_lock(&nm_i->nid_list_lock);
        if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
                spin_unlock(&nm_i->nid_list_lock);
                return;
        }
        fnid = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
                                                struct free_nid, list);
        *nid = fnid->nid;
        spin_unlock(&nm_i->nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
        if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
                                                nm_i->bitmap_size))
                f2fs_bug_on(sbi, 1);
#endif
        memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        pgoff_t block_off;
        pgoff_t block_addr;

        /*
         * block_off = segment_off * 512 + off_in_segment
         * OLD = (segment_off * 512) * 2 + off_in_segment
         * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
         */
        block_off = NAT_BLOCK_OFFSET(start);

        block_addr = (pgoff_t)(nm_i->nat_blkaddr +
                (block_off << 1) -
                (block_off & (sbi->blocks_per_seg - 1)));

        if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
                block_addr += sbi->blocks_per_seg;

        return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
                                                pgoff_t block_addr)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);

        block_addr -= nm_i->nat_blkaddr;
        block_addr ^= 1 << sbi->log_blocks_per_seg;
        return block_addr + nm_i->nat_blkaddr;
}
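
/*
 * Worked example for current_nat_addr()/next_nat_addr(), assuming
 * blocks_per_seg == 512 (log_blocks_per_seg == 9).  The NAT area
 * stores each NAT block twice, one segment apart, and nat_bitmap
 * records which copy is currently valid.  For block_off == 700
 * (segment 1, in-segment offset 188):
 *
 *         block_addr = nat_blkaddr + (700 << 1) - (700 & 511)
 *                    = nat_blkaddr + 1400 - 188
 *                    = nat_blkaddr + 1212        (copy #0)
 *
 * If the bitmap bit for block 700 is set, the valid copy is one
 * segment later, at nat_blkaddr + 1724.  next_nat_addr() hops
 * between the two copies by toggling the segment-sized bit:
 * 1212 ^ 512 == 1724 and 1724 ^ 512 == 1212.
 */
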
static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
        unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

        f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
        f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}

static inline nid_t ino_of_node(struct page *node_page)
{
        struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
        struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
        struct f2fs_node *rn = F2FS_NODE(node_page);
        unsigned flag = le32_to_cpu(rn->footer.flag);
        return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
        struct f2fs_node *rn = F2FS_NODE(node_page);
        return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
        struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
                                nid_t ino, unsigned int ofs, bool reset)
{
        struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int old_flag = 0;

        if (reset)
                memset(rn, 0, sizeof(*rn));
        else
                old_flag = le32_to_cpu(rn->footer.flag);

        rn->footer.nid = cpu_to_le32(nid);
        rn->footer.ino = cpu_to_le32(ino);

        /* should keep the old flag bits, such as COLD_BIT_SHIFT */
        rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
                                        (old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
        struct f2fs_node *src_rn = F2FS_NODE(src);
        struct f2fs_node *dst_rn = F2FS_NODE(dst);
        memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
        struct f2fs_node *rn = F2FS_NODE(page);
        __u64 cp_ver = cur_cp_version(ckpt);

        if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
                cp_ver |= (cur_cp_crc(ckpt) << 32);

        rn->footer.cp_ver = cpu_to_le64(cp_ver);
        rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline bool is_recoverable_dnode(struct page *page)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
        __u64 cp_ver = cur_cp_version(ckpt);

        if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
                cp_ver |= (cur_cp_crc(ckpt) << 32);

        return cp_ver == cpver_of_node(page);
}
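
/*
 * Illustration of the recovery tag built by the two functions above:
 * the low 32 bits hold the checkpoint version and, once
 * CP_CRC_RECOVERY_FLAG is set, the high 32 bits hold the checkpoint
 * CRC.  With version 7 and an arbitrary, illustrative CRC of
 * 0xdeadbeef:
 *
 *         cp_ver  = 7;
 *         cp_ver |= (__u64)0xdeadbeef << 32;   => 0xdeadbeef00000007
 *
 * is_recoverable_dnode() recomputes the same tag from the live
 * checkpoint; a mismatch means the node was written under a
 * different checkpoint and must not be replayed during roll-forward
 * recovery.
 */
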
/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 * Inode block (0)
 *     |- direct node (1)
 *     |- direct node (2)
 *     |- indirect node (3)
 *     |            `- direct node (4 => 4 + N - 1)
 *     |- indirect node (4 + N)
 *     |            `- direct node (5 + N => 5 + 2N - 1)
 *     `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
static inline bool IS_DNODE(struct page *node_page)
{
        unsigned int ofs = ofs_of_node(node_page);

        if (f2fs_has_xattr_block(ofs))
                return true;

        if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
                        ofs == 5 + 2 * NIDS_PER_BLOCK)
                return false;
        if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
                ofs -= 6 + 2 * NIDS_PER_BLOCK;
                if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
                        return false;
        }
        return true;
}

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
        struct f2fs_node *rn = F2FS_NODE(p);

        f2fs_wait_on_page_writeback(p, NODE, true);

        if (i)
                rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
        else
                rn->in.nid[off] = cpu_to_le32(nid);
        return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
        struct f2fs_node *rn = F2FS_NODE(p);

        if (i)
                return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
        return le32_to_cpu(rn->in.nid[off]);
}

/*
 * Coldness identification:
 * - Mark cold files in f2fs_inode_info
 * - Mark cold node blocks in their node footer
 * - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
        return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
        SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
        ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
        struct f2fs_node *rn = F2FS_NODE(page);
        return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)      is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)    is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)     is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
        return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
        SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
        ClearPageChecked(page);
}

static inline void set_cold_node(struct inode *inode, struct page *page)
{
        struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);

        if (S_ISDIR(inode->i_mode))
                flag &= ~(0x1 << COLD_BIT_SHIFT);
        else
                flag |= (0x1 << COLD_BIT_SHIFT);
        rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
        struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        if (mark)
                flag |= (0x1 << type);
        else
                flag &= ~(0x1 << type);
        rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)     set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)      set_mark(page, mark, FSYNC_BIT_SHIFT)
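
/*
 * Worked example for IS_DNODE() above, assuming 4 KiB blocks so that
 * NIDS_PER_BLOCK (N) == 1018.  The non-direct node offsets are 3,
 * 4 + N == 1022, 5 + 2N == 2041, and then every (N + 1) == 1019
 * offsets starting at 6 + 2N == 2042:
 *
 *         IS_DNODE(2042) -> false  (first indirect node under the
 *                                   double indirect node)
 *         IS_DNODE(2043) -> true   (first direct node below it)
 *         IS_DNODE(3061) -> false  (2042 + 1019, the next indirect)
 */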