/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map; /* mapping this buffer is
					      associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise I/O completion of other
					 * buffers in the page */
};
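/*
 * Illustrative sketch (not part of this header's API): buffers attached
 * to a page hang off page->private as a circular list linked through
 * b_this_page.  A hypothetical helper counting dirty buffers could walk
 * that list as below, using the page_buffers() and buffer_dirty()
 * accessors defined further down.  It assumes the caller knows the page
 * has buffers attached (page_has_buffers()) and holds a page reference.
 */
#if 0
static unsigned int example_count_dirty_buffers(struct page *page)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	unsigned int nr = 0;

	do {
		if (buffer_dirty(bh))
			nr++;
		bh = bh->b_this_page;	/* circular: eventually back to head */
	} while (bh != head);

	return nr;
}
#endif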
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name) \
static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
	if (!test_bit(BH_##bit, &(bh)->b_state)) \
		set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
	clear_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline int buffer_##name(const struct buffer_head *bh) \
{ \
	return test_bit(BH_##bit, &(bh)->b_state); \
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name) \
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{ \
	return test_and_set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{ \
	return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_mark_uptodate().
	 * Pairs with the acquire in buffer_uptodate().
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_test_uptodate().
	 * Pairs with smp_mb__before_atomic() in set_buffer_uptodate().
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

#define bh_offset(bh)	((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
	({ \
		BUG_ON(!PagePrivate(page)); \
		((struct buffer_head *)page_private(page)); \
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)	folio_get_private(folio)

void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		 struct page *page, unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry);
void create_empty_buffers(struct page *, unsigned long,
			  unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
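/*
 * Illustrative sketch: how a filesystem claims a private state bit using
 * BH_PrivateStart and the BUFFER_FNS()/TAS_BUFFER_FNS() macros above.
 * "BH_Example_Pinned" and its accessors are hypothetical; jbd2 uses the
 * same pattern, starting its private bits at BH_PrivateStart.
 */
#if 0
enum example_bh_state_bits {
	BH_Example_Pinned = BH_PrivateStart, /* hypothetical fs-private flag */
};
BUFFER_FNS(Example_Pinned, example_pinned)
TAS_BUFFER_FNS(Example_Pinned, example_pinned)
/*
 * set_buffer_example_pinned(), clear_buffer_example_pinned(),
 * buffer_example_pinned() and the test_{set,clear} variants now exist.
 */
#endif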
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
		      gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(blk_opf_t, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
int submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;
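/*
 * Illustrative sketch: the private_list machinery above is what lets a
 * filesystem sync the metadata buffers it has associated with an inode
 * via mark_buffer_dirty_inode().  A hypothetical fsync helper might do:
 */
#if 0
static int example_sync_inode_metadata(struct inode *inode)
{
	if (!inode_has_buffers(inode))
		return 0;
	/* Writes and waits on the buffers on inode->i_mapping's list. */
	return sync_mapping_buffers(inode->i_mapping);
}
#endif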
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block, struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
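/*
 * Illustrative sketch: a buffer_head-backed filesystem can implement its
 * ->page_mkwrite() handler on top of the two helpers above.
 * "example_get_block" stands in for the filesystem's real get_block_t.
 */
#if 0
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	/* Prepare the faulted page for writing; returns 0 or -errno. */
	int err = block_page_mkwrite(vmf->vma, vmf, example_get_block);

	/* Map the errno onto the VM_FAULT_* code the fault path expects. */
	return block_page_mkwrite_return(err);
}
#endif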
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
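/*
 * Illustrative sketch: the classic read-then-release pattern built on the
 * sb_* helpers above.  sb_bread() returns an uptodate, referenced bh (or
 * NULL on I/O error); brelse() drops the reference.
 */
#if 0
static int example_read_one_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	/* bh->b_data now points at sb->s_blocksize bytes of block data. */
	brelse(bh);
	return 0;
}
#endif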
/**
 * __bread() - read a block and return the buffer_head that contains it
 * @bdev: the block_device to read from
 * @block: number of the block
 * @size: size (in bytes) to read
 *
 * Reads a specified block and returns the buffer_head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */