/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer error printks to be quiet */
        BH_Meta,        /* Buffer contains metadata */
        BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
        BH_Defer_Completion, /* Defer AIO completion to workqueue */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};
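
/*
 * Illustrative note (not part of the original header): the values above
 * are bit numbers within buffer_head.b_state, so they work with the
 * generic bitops, though the buffer_foo() helpers generated further
 * down are the idiomatic accessors:
 *
 *      if (test_bit(BH_Mapped, &bh->b_state) &&
 *          !test_bit(BH_Uptodate, &bh->b_state))
 *              pr_debug("block %llu needs reading\n",
 *                       (unsigned long long)bh->b_blocknr);
 */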

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers. Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        union {
                struct page *b_page;    /* the page this bh is mapped to */
                struct folio *b_folio;  /* the folio this bh is mapped to */
        };

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map; /* mapping this buffer is
                                              associated with */
        atomic_t b_count;               /* users using this buffer_head */
        spinlock_t b_uptodate_lock;     /* Used by the first bh in a page, to
                                         * serialise IO completion of other
                                         * buffers in the page */
};
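
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * b_this_page links the buffers of one page/folio into a circular list,
 * so walks use the do/while shape seen throughout fs/buffer.c:
 *
 *      struct buffer_head *head = folio_buffers(folio);
 *      struct buffer_head *bh = head;
 *      unsigned int nr_dirty = 0;
 *
 *      do {
 *              if (buffer_dirty(bh))
 *                      nr_dirty++;
 *              bh = bh->b_this_page;
 *      } while (bh != head);
 */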

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which would cause
 * a costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name) \
static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
        if (!test_bit(BH_##bit, &(bh)->b_state)) \
                set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
        clear_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline int buffer_##name(const struct buffer_head *bh) \
{ \
        return test_bit(BH_##bit, &(bh)->b_state); \
}
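
/*
 * Expansion example (added for clarity, not in the original header):
 * BUFFER_FNS(Dirty, dirty) preprocesses to
 *
 *      static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *      {
 *              if (!test_bit(BH_Dirty, &(bh)->b_state))
 *                      set_bit(BH_Dirty, &(bh)->b_state);
 *      }
 *
 * plus the matching clear_buffer_dirty() and buffer_dirty().
 */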

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name) \
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{ \
        return test_and_set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{ \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
}

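/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the test-and-* variants atomically return the previous bit value, so
 * a caller can claim a state transition exactly once:
 *
 *      if (!test_set_buffer_dirty(bh)) {
 *              // Bit was clear and we set it: this caller owns the
 *              // dirtying and e.g. gets to mark the folio dirty too.
 *      }
 */
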
/*
 * Emit the buffer bitops functions. Note that there are also functions
 * of the form "mark_buffer_foo()". These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
        /*
         * If somebody else already set this uptodate, they will
         * have done the memory barrier, and a reader will thus
         * see *some* valid buffer state.
         *
         * Any other serialization (with IO errors or whatever that
         * might clear the bit) has to come from other state (eg BH_Lock).
         */
        if (test_bit(BH_Uptodate, &bh->b_state))
                return;

        /*
         * make it consistent with folio_mark_uptodate
         * pairs with smp_load_acquire in buffer_uptodate
         */
        smp_mb__before_atomic();
        set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
        clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
        /*
         * make it consistent with folio_test_uptodate
         * pairs with smp_mb__before_atomic in set_buffer_uptodate
         */
        return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
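
/*
 * Illustrative sketch of the ordering contract above (an assumption,
 * not part of the original header): the producer fills b_data before
 * setting the bit, so any reader that sees buffer_uptodate() != 0 also
 * sees the completed data:
 *
 *      // producer (I/O completion):
 *      memcpy(bh->b_data, src, bh->b_size);
 *      set_buffer_uptodate(bh);        // barrier, then set_bit
 *
 *      // consumer:
 *      if (buffer_uptodate(bh))        // acquire-ordered test
 *              consume(bh->b_data);    // hypothetical consumer
 */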

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
        ({ \
                BUG_ON(!PagePrivate(page)); \
                ((struct buffer_head *)page_private(page)); \
        })
#define page_has_buffers(page)  PagePrivate(page)
#define folio_buffers(folio)    folio_get_private(folio)
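
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * page_buffers() BUGs out on a page with no buffers, so callers test
 * first:
 *
 *      struct buffer_head *head = NULL;
 *
 *      if (page_has_buffers(page))
 *              head = page_buffers(page);      // safe: private is set
 */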

void buffer_check_dirty_writeback(struct folio *folio,
                                  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
                 struct page *page, unsigned long offset);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
                  unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
                                        bool retry);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                                       bool retry);
void create_empty_buffers(struct page *, unsigned long,
                          unsigned long b_state);
void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
                                unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
                                  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
                          bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
                        sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
        clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                                     unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
                                 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                          sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
                     blk_opf_t op_flags, bool force_lock);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
                          struct writeback_control *wbc);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
                             get_block_t *get_block,
                             struct writeback_control *wbc,
                             bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                      struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                        get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
                    loff_t, unsigned, unsigned,
                    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                      loff_t, unsigned, unsigned,
                      struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                     unsigned, struct page **, void **,
                     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT || err == -EAGAIN)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}
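
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * a filesystem's ->page_mkwrite() handler typically funnels the errno
 * from block_page_mkwrite() through the helper above; the myfs_* names
 * are hypothetical:
 *
 *      static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *      {
 *              int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *
 *              return block_page_mkwrite_return(err);
 *      }
 */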
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
                struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
                struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
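
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * sb_bread() returns the bh with an elevated b_count, or NULL on I/O
 * error, so the canonical read-use-release pattern is:
 *
 *      struct buffer_head *bh = sb_bread(sb, block_nr);
 *
 *      if (!bh)
 *              return -EIO;
 *      memcpy(buf, bh->b_data, sb->s_blocksize);
 *      brelse(bh);
 */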

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}
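
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * map_bh() is what a get_block_t callback calls once it has resolved a
 * file block to an on-disk block; the myfs_* names are hypothetical:
 *
 *      static int myfs_get_block(struct inode *inode, sector_t iblock,
 *                                struct buffer_head *bh, int create)
 *      {
 *              sector_t phys = myfs_lookup_block(inode, iblock);
 *
 *              if (!phys)
 *                      return 0;       // hole: leave bh unmapped
 *              map_bh(bh, inode->i_sb, phys);
 *              return 0;
 *      }
 */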

static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
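
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * BH_Lock serialises I/O against state changes; a synchronous read in
 * the style of fs/buffer.c looks like:
 *
 *      lock_buffer(bh);                // may sleep
 *      if (buffer_uptodate(bh)) {
 *              unlock_buffer(bh);
 *      } else {
 *              get_bh(bh);             // hold a ref across the I/O
 *              bh->b_end_io = end_buffer_read_sync;
 *              submit_bh(REQ_OP_READ, bh);
 *              wait_on_buffer(bh);     // end_io unlocks and drops ref
 *      }
 */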

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
                                                   sector_t block,
                                                   unsigned size)
{
        return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
                                           sector_t block,
                                           unsigned size)
{
        return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
        if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
                if (!buffer_uptodate(bh))
                        __bh_read(bh, op_flags, false);
                else
                        unlock_buffer(bh);
        }
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
        if (!bh_uptodate_or_lock(bh))
                __bh_read(bh, op_flags, false);
}

/*
 * Returns 1 if the buffer is already uptodate, 0 after a successful
 * read, and -EIO on error.
 */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
        if (bh_uptodate_or_lock(bh))
                return 1;
        return __bh_read(bh, op_flags, true);
}
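
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * since both 0 and 1 mean the data is valid, callers usually test only
 * for failure:
 *
 *      if (bh_read(bh, 0) < 0)
 *              return -EIO;
 *      // bh->b_data is now valid either way
 */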

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
        __bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
                                      blk_opf_t op_flags)
{
        __bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - read a specified block and return the bh
 * @bdev: the block_device to read from
 * @block: number of the block
 * @size: size (in bytes) to read
 *
 * Reads a specified block and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated. Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
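
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * __bread() is the bare-bdev counterpart of sb_bread(); the same
 * release discipline applies:
 *
 *      struct buffer_head *bh = __bread(bdev, 0, 4096);
 *
 *      if (bh) {
 *              parse_block(bh->b_data);        // hypothetical consumer
 *              brelse(bh);
 *      }
 */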

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */