/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
/* start node id of a node block dedicated to the given node id */
#define	START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define	NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
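
/*
 * Worked example (a sketch, assuming 4 KiB blocks, for which
 * NAT_ENTRY_PER_BLOCK comes out to 455): nid 1000 belongs to the NAT
 * block starting at START_NID(1000) = 910, and that block is the third
 * one in the NAT area, i.e. NAT_BLOCK_OFFSET(910) = 2.
 */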

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* size of free nid batch when shrinking */
#define SHRINK_NID_BATCH_SIZE	8

#define DEF_RA_NID_PAGES	0	/* # of nid pages to be readaheaded */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD		100000

/* control total # of node writes used for roll-forward recovery */
#define DEF_RF_NODE_BLOCKS		0

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NAT_VEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* check pinned file's alignment status of physical blocks */
#define FILE_NOT_ALIGNED	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
	IS_PREALLOC,		/* nat entry is preallocated */
};

/* For node type in __get_node_folio() */
enum node_type {
	NODE_TYPE_REGULAR,
	NODE_TYPE_INODE,
	NODE_TYPE_XATTR,
	NODE_TYPE_NON_INODE,
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	if (set)
		ne->ni.flag |= BIT(type);
	else
		ne->ni.flag &= ~BIT(type);
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	return ne->ni.flag & BIT(type);
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}
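
/*
 * Illustrative note (the max_nid value below is an assumed example, not
 * taken from this header): excess_dirty_nats() compares against
 * max_nid * dirty_nats_ratio / 100, so with a hypothetical max_nid of
 * 1,000,000 and the default ratio of 10 it reports true once 100,000
 * NAT entries are dirty, while excess_cached_nats() always compares the
 * total cached count against DEF_NAT_CACHE_THRESHOLD (100000).
 */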

enum mem_type {
	FREE_NIDS,		/* indicates the free nid list */
	NAT_ENTRIES,		/* indicates the cached nat entry */
	DIRTY_DENTS,		/* indicates dirty dentry pages */
	INO_ENTRIES,		/* indicates inode entries */
	READ_EXTENT_CACHE,	/* indicates read extent cache */
	AGE_EXTENT_CACHE,	/* indicates age extent cache */
	DISCARD_CACHE,		/* indicates memory of cached discard cmds */
	COMPRESS_PAGE,		/* indicates memory of cached compressed pages */
	BASE_CHECK,		/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: FREE_NID or PREALLOC_NID */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
						nm_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;

	/*
	 * block_off = segment_off * 512 + off_in_segment
	 * OLD = (segment_off * 512) * 2 + off_in_segment
	 * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
	 */
	block_off = NAT_BLOCK_OFFSET(start);

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(block_off << 1) -
		(block_off & (BLKS_PER_SEG(sbi) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += BLKS_PER_SEG(sbi);

	return block_addr;
}
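
/*
 * Worked example for current_nat_addr() (a sketch, assuming 512 blocks
 * per segment): block_off 600 is offset 88 within segment 1, so
 * block_addr = nat_blkaddr + (600 << 1) - 88 = nat_blkaddr + 1112,
 * i.e. the first copy inside the second pair of NAT segments.  If the
 * nat_bitmap bit for block 600 is set, the address moves one segment
 * forward to the second copy, nat_blkaddr + 1624.
 */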

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	block_addr ^= BIT(sbi->log_blocks_per_seg);
	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}

static inline nid_t ino_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(const struct folio *node_folio)
{
	struct f2fs_node *rn = F2FS_NODE(node_folio);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(const struct folio *folio, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should remain old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(const struct folio *dst,
						const struct folio *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct folio *folio, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
	struct f2fs_node *rn = F2FS_NODE(folio);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline bool is_recoverable_dnode(const struct folio *folio)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
	__u64 cp_ver = cur_cp_version(ckpt);

	/* Don't care crc part, if fsck.f2fs sets it. */
	if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
		return (cp_ver << 32) == (cpver_of_node(folio) << 32);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	return cp_ver == cpver_of_node(folio);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 * Inode block (0)
 *     |- direct node (1)
 *     |- direct node (2)
 *     |- indirect node (3)
 *     |            `- direct node (4 => 4 + N - 1)
 *     |- indirect node (4 + N)
 *     |            `- direct node (5 + N => 5 + 2N - 1)
 *     `- double indirect node (5 + 2N)
 *                  `- indirect node (6 + 2N)
 *                        `- direct node
 *                  ......
 *                  `- indirect node ((6 + 2N) + x(N + 1))
 *                        `- direct node
 *                  ......
 *                  `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                        `- direct node
 */
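
/*
 * Worked example (a sketch, assuming 4 KiB blocks, for which
 * NIDS_PER_BLOCK is 1018): the two single indirect nodes sit at offsets
 * 3 and 1022 (4 + N), the double indirect node at 2041 (5 + 2N), and
 * from 2042 (6 + 2N) onwards every (N + 1)-th offset is again an
 * indirect node.  Every other offset belongs to a direct node block,
 * which is exactly the test IS_DNODE() below performs.
 */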
static inline bool IS_DNODE(const struct folio *node_folio)
{
	unsigned int ofs = ofs_of_node(node_folio);

	if (f2fs_has_xattr_block(ofs))
		return true;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}

static inline int set_nid(struct folio *folio, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(folio);

	f2fs_folio_wait_writeback(folio, NODE, true, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return folio_mark_dirty(folio);
}

static inline nid_t get_nid(const struct folio *folio, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(folio);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */

static inline int is_node(const struct folio *folio, int type)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	return le32_to_cpu(rn->footer.flag) & BIT(type);
}

#define is_cold_node(folio)	is_node(folio, COLD_BIT_SHIFT)
#define is_fsync_dnode(folio)	is_node(folio, FSYNC_BIT_SHIFT)
#define is_dent_dnode(folio)	is_node(folio, DENT_BIT_SHIFT)

static inline void set_cold_node(const struct folio *folio, bool is_dir)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (is_dir)
		flag &= ~BIT(COLD_BIT_SHIFT);
	else
		flag |= BIT(COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct folio *folio, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(folio);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= BIT(type);
	else
		flag &= ~BIT(type);
	rn->footer.flag = cpu_to_le32(flag);

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_F_SB(folio), folio);
#endif
}
#define set_dentry_mark(folio, mark)	set_mark(folio, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(folio, mark)	set_mark(folio, mark, FSYNC_BIT_SHIFT)
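
/*
 * Illustrative sketch only, not part of f2fs: one way the footer helpers
 * above can be combined when a freshly allocated node block folio is
 * stamped before an fsync write.  The function name is hypothetical; the
 * real call sequences live in fs/f2fs/node.c.
 */
static inline void example_stamp_fsync_node(struct folio *folio,
				nid_t nid, nid_t ino, unsigned int ofs,
				bool is_dir)
{
	fill_node_footer(folio, nid, ino, ofs, true);	/* reset footer, set ids */
	set_cold_node(folio, is_dir);		/* directory nodes stay hot */
	set_fsync_mark(folio, 1);		/* candidate for roll-forward recovery */
	set_dentry_mark(folio, 0);		/* no dentry recovery needed */
}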