/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* size of free nid batch when shrinking */
#define SHRINK_NID_BATCH_SIZE	8

#define DEF_RA_NID_PAGES	0	/* # of nid pages to be readaheaded */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD	100000

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
	IS_PREALLOC,		/* nat entry is preallocated */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;
	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;
	return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}

static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
{
	return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
}
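
/*
 * Illustrative numbers only (assuming the common 2MB segment, i.e.
 * blocks_per_seg == 512, and a hypothetical max_nid of 1,000,000):
 * with the default dirty_nats_ratio of DEF_DIRTY_NAT_RATIO_THRESHOLD (10),
 * excess_dirty_nats() fires once 100,000 NAT entries are dirty;
 * excess_cached_nats() fires at DEF_NAT_CACHE_THRESHOLD (100,000) cached
 * entries; and excess_dirty_nodes() fires once 512 * 8 = 4096 node pages
 * are dirty.
 */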

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	INMEM_PAGES,	/* indicates inmemory pages */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number*/
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: FREE_NID or PREALLOC_NID */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
						nm_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;

	/*
	 * block_off = segment_off * 512 + off_in_segment
	 * OLD = (segment_off * 512) * 2 + off_in_segment
	 * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
	 */
	block_off = NAT_BLOCK_OFFSET(start);

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(block_off << 1) -
		(block_off & (sbi->blocks_per_seg - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	block_addr ^= 1 << sbi->log_blocks_per_seg;
	return block_addr + nm_i->nat_blkaddr;
}
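
/*
 * Worked example of the NAT address math above (illustrative; assumes
 * NAT_ENTRY_PER_BLOCK == 455 and the default blocks_per_seg == 512):
 * nid 318600 gives START_NID == 318500 and NAT_BLOCK_OFFSET == 700, so
 * segment_off == 1 and off_in_segment == 188, and current_nat_addr()
 * returns nat_blkaddr + (700 << 1) - (700 & 511) == nat_blkaddr + 1212,
 * the first of the two NAT copies.  If the nat_bitmap bit is set, 512 is
 * added to select the second copy, and next_nat_addr() toggles between
 * the two copies by flipping bit log_blocks_per_seg (1212 ^ 512 == 1724).
 */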

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should remain old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline bool is_recoverable_dnode(struct page *page)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	__u64 cp_ver = cur_cp_version(ckpt);

	/* Don't care crc part, if fsck.f2fs sets it. */
	if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
		return (cp_ver << 32) == (cpver_of_node(page) << 32);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	return cp_ver == cpver_of_node(page);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 * Inode block (0)
 *     |- direct node (1)
 *     |- direct node (2)
 *     |- indirect node (3)
 *     |            `- direct node (4 => 4 + N - 1)
 *     |- indirect node (4 + N)
 *     |            `- direct node (5 + N => 5 + 2N - 1)
 *     `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
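
/*
 * Concrete example (illustrative; assumes NIDS_PER_BLOCK == 1018): the two
 * direct nodes sit at offsets 1 and 2, the indirect node at 3 covers direct
 * nodes 4..1021, the indirect node at 1022 covers 1023..2040, and the double
 * indirect node lives at 2041 with its own indirect nodes at 2042, 3061, ...
 * Accordingly, IS_DNODE() below returns false exactly for offsets 3, 1022,
 * 2041 and every (6 + 2N) + x(N + 1).
 */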
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return true;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
	return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
	ClearPageChecked(page);
}

static inline void set_cold_node(struct page *page, bool is_dir)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (is_dir)
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
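
/*
 * Informal summary: the node footer's flag word keeps the node offset in its
 * high bits (>= OFFSET_BIT_SHIFT, see ofs_of_node() and fill_node_footer())
 * and per-node marks such as COLD_BIT_SHIFT, FSYNC_BIT_SHIFT and
 * DENT_BIT_SHIFT in its low bits, so e.g. set_dentry_mark(page, 1) only
 * flips the DENT bit and leaves the encoded offset untouched.
 */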