/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a folio (via a folio_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};
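/*
 * A folio's buffers form a circular singly-linked list through
 * b_this_page.  As a minimal sketch (assuming the folio already has
 * buffers attached; buffer_dirty() and folio_buffers() are defined
 * later in this header), the list is walked the way fs/buffer.c does:
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	struct buffer_head *bh = head;
 *
 *	do {
 *		if (buffer_dirty(bh))
 *			handle_dirty_bh(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *
 * handle_dirty_bh() is a hypothetical, caller-supplied function.
 */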
/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
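/*
 * For reference, BUFFER_FNS(Dirty, dirty) above expands to the
 * following three helpers:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */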
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * Make it consistent with folio_mark_uptodate();
	 * pairs with the acquire in buffer_uptodate().
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_test_uptodate();
	 * pairs with smp_mb__before_atomic() in set_buffer_uptodate().
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define folio_buffers(folio)		folio_get_private(folio)
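/*
 * Unlike page_buffers(), folio_buffers() returns NULL when the folio
 * has no buffers attached, so callers typically allocate on demand.
 * A sketch (blocksize is assumed to come from the caller;
 * create_empty_buffers() is declared below):
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *
 *	if (!head)
 *		head = create_empty_buffers(folio, blocksize, 0);
 */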
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
				unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
			   void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			     get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      struct folio **foliop, get_block_t *get_block);
int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned len, unsigned copied,
		    struct folio *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned len, unsigned copied,
		      struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, struct folio **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

/**
 * brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * Decrement a buffer_head's reference count.  If @bh is NULL, this
 * function is a no-op.
 *
 * If all buffers on a folio have zero reference count, are clean
 * and unlocked, and if the folio is unlocked and not under writeback
 * then try_to_free_buffers() may strip the buffers from the folio in
 * preparation for freeing it (sometimes, rarely, buffers are removed
 * from a folio but it ends up not being freed, and buffers may later
 * be reattached).
 *
 * Context: Any context.
 */
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}
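/*
 * The usual read/examine/release pairing, as a sketch (sb, block and
 * buf are assumed to come from the caller; sb_bread() is defined
 * below):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 *	return 0;
 */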
/**
 * bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * Call this function instead of brelse() if the data written to a buffer
 * no longer needs to be written back.  It will clear the buffer's dirty
 * flag so writeback of this buffer will be skipped.
 *
 * Context: Any context.
 */
static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}
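/*
 * A minimal get_block_t implementation for a filesystem whose file
 * blocks map 1:1 onto disk blocks, as a sketch using map_bh() above
 * ("example_get_block" is a hypothetical name):
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh_result, int create)
 *	{
 *		map_bh(bh_result, inode->i_sb, iblock);
 *		return 0;
 *	}
 */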
/* Returns 1 if the buffer was already uptodate, 0 on read success, and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 *
 * Read a specified block, and return the buffer head that refers
 * to it.  The memory is allocated from the movable area so that it can
 * be migrated.  The returned buffer head has its refcount increased.
 * The caller should call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
static inline struct buffer_head *__bread(struct block_device *bdev,
		sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * get_nth_bh - Get a reference on the n'th buffer after this one.
 * @bh: The buffer to start counting from.
 * @count: How many buffers to skip.
 *
 * This is primarily useful for finding the nth buffer in a folio; in
 * that case you pass the head buffer and the byte offset in the folio
 * divided by the block size.  It can be used for other purposes, but
 * it will wrap at the end of the folio rather than returning NULL or
 * proceeding to the next folio for you.
 *
 * Return: The requested buffer with an elevated refcount.
 */
static inline __must_check
struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
{
	while (count--)
		bh = bh->b_this_page;
	get_bh(bh);
	return bh;
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
int inode_has_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *inode);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit	0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */