/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers. Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

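/*
 * Example: walking a page's buffers.  A minimal sketch, assuming "head"
 * was obtained from page_buffers() below; every b_this_page chain is
 * circular, so the walk terminates when it comes back around to head:
 *
 *	struct buffer_head *bh = head;
 *	unsigned int nr_dirty = 0;
 *
 *	do {
 *		if (buffer_dirty(bh))
 *			nr_dirty++;
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */
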
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which causes a
 * costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

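/*
 * As an example of the expansion, BUFFER_FNS(Dirty, dirty) generates
 * set_buffer_dirty(), clear_buffer_dirty() and buffer_dirty(), each
 * operating on the BH_Dirty bit in bh->b_state:
 *
 *	set_buffer_dirty(bh);		sets BH_Dirty unless already set
 *	clear_buffer_dirty(bh);		clears BH_Dirty
 *	if (buffer_dirty(bh))		tests BH_Dirty
 *		...;
 */
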
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

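/*
 * The test-and-set forms return the bit's previous value, so a caller
 * can set a flag and learn whether it was already set in one atomic
 * step.  A minimal sketch of the usual idiom, where the accounting
 * helper is hypothetical:
 *
 *	if (!test_set_buffer_dirty(bh))
 *		account_newly_dirtied(bh);	(hypothetical helper)
 */
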
/*
 * Emit the buffer bitops functions. Note that there are also functions
 * of the form "mark_buffer_foo()". These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with the acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

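/*
 * Example: a custom b_end_io completion pairing the helpers above,
 * modelled on what end_buffer_read_sync() does.  A sketch only; the
 * handler name is hypothetical:
 *
 *	static void my_end_read(struct buffer_head *bh, int uptodate)
 *	{
 *		if (uptodate)
 *			set_buffer_uptodate(bh);
 *		else
 *			clear_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *	}
 */
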
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)	folio_get_private(folio)

void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

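/*
 * Example: a get_block implementation that has just allocated a fresh
 * on-disk block typically marks the buffer new and drops any stale
 * aliases the block device's page cache may still hold for that block.
 * A sketch, assuming bh was already mapped via map_bh():
 *
 *	set_buffer_new(bh);
 *	clean_bdev_bh_alias(bh);
 */
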
void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
		      gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
int submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

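/*
 * Example: the usual pairing of the two helpers above, as a sketch.
 * bh_uptodate_or_lock() returns true when the buffer is already
 * uptodate; otherwise it returns with the buffer locked, ready for
 * bh_submit_read() to issue and wait for the read:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh) < 0)
 *			return -EIO;	(hypothetical error path)
 *	}
 */
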
extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
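
/*
 * Example: the conventional use in a filesystem's ->page_mkwrite(),
 * converting the errno from block_page_mkwrite() into a VM fault code.
 * A sketch; my_page_mkwrite and my_get_block are hypothetical names:
 *
 *	static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf, my_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */
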
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

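/*
 * Example: holding a reference across asynchronous submission.  This is
 * essentially what __sync_dirty_buffer() does internally, shown here as
 * a sketch (end_buffer_write_sync() drops the reference on completion):
 *
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(REQ_OP_WRITE, bh);
 *	wait_on_buffer(bh);
 */
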
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

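/*
 * Example: reading one filesystem block.  sb_bread() returns NULL when
 * the block could not be read; on success the caller owns a reference
 * that must be dropped with brelse().  A sketch, assuming a valid sb
 * and a hypothetical "data" destination buffer:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(data, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 */
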
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

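/*
 * Example: a trivial get_block_t (declared in <linux/fs.h>) built on
 * map_bh().  A sketch for a hypothetical filesystem whose file blocks
 * map 1:1 onto disk blocks:
 *
 *	static int my_get_block(struct inode *inode, sector_t iblock,
 *				struct buffer_head *bh_result, int create)
 *	{
 *		map_bh(bh_result, inode->i_sb, iblock);
 *		return 0;
 *	}
 */
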
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

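/*
 * Example: the lock/modify/dirty sequence for updating a buffer's
 * contents.  A sketch, assuming bh is mapped and the hypothetical
 * "data"/"len" pair fits within bh->b_size:
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data, data, len);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */
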
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * __bread() - read a specified block and return the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */