// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	/*
	 * The folio lock protects the buffers.  Callers that cannot block
	 * will fall back to serializing against try_to_free_buffers() via
	 * the i_private_lock.
	 */
	if (atomic)
		spin_lock(&bd_mapping->i_private_lock);
	else
		folio_lock(folio);

	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	/*
	 * Upon a noref migration, the folio lock serializes here;
	 * otherwise bail.
	 */
	if (test_bit_acquire(BH_Migrate, &head->b_state)) {
		WARN_ON(!atomic);
		goto out_unlock;
	}

	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	if (atomic)
		spin_unlock(&bd_mapping->i_private_lock);
	else
		folio_unlock(folio);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
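
/*
 * Illustrative usage sketch of the helpers described above (hypothetical
 * "example_fs" names, not part of this file): a simple buffer-backed
 * filesystem queues dependent metadata buffers with
 * mark_buffer_dirty_inode() and then relies on sync_mapping_buffers(),
 * called via generic_buffers_fsync(), at fsync time:
 *
 *	static void example_fs_dirty_indirect(struct inode *inode,
 *					      struct buffer_head *bh)
 *	{
 *		// ... modify the indirect block's contents ...
 *		mark_buffer_dirty_inode(bh, inode);	// queue on i_private_list
 *	}
 *
 *	static int example_fs_fsync(struct file *file, loff_t start,
 *				    loff_t end, int datasync)
 *	{
 *		// writes out and waits upon the buffers queued above
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 */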

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC I/O.  It waits synchronously for
 * all already-submitted I/O to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
					&mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh;

	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;
	LIST_HEAD(tmp);

	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/*
		 * Avoid a race with mark_buffer_dirty_inode(), which does a
		 * lockless check: we rely on seeing the dirty bit.
		 */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/*
		 * Avoid a race with mark_buffer_dirty_inode(), which does a
		 * lockless check: we rely on seeing the dirty bit.
		 */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers for the data area of a folio, given the
 * size of each buffer.  Use the bh->b_this_page linked list to follow the
 * buffers created.  Return NULL if unable to create more buffers.
 *
 * The gfp flags differentiate async IO (paging, swapping), which may not
 * fail, from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);

		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct address_space *mapping = bdev->bd_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned.  This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */
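
/*
 * Illustrative sketch of the rules above (hypothetical helper, not part of
 * this file): read a metadata block with sb_bread(), modify it, then mark
 * only that buffer dirty.  The backing folio is dirtied as a side effect,
 * but the folio's other buffers stay clean:
 *
 *	static int example_zero_block(struct super_block *sb, sector_t blocknr)
 *	{
 *		struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *		if (!bh)
 *			return -EIO;
 *		lock_buffer(bh);
 *		memset(bh->b_data, 0, bh->b_size);	// modify the contents
 *		unlock_buffer(bh);
 *		mark_buffer_dirty(bh);			// dirties bh and its folio
 *		brelse(bh);
 *		return 0;
 *	}
 */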

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->i_private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */
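
/*
 * Usage sketch for the LRU above (hypothetical caller, not part of this
 * file): a repeated sb_find_get_block() of a recently used block is
 * expected to be satisfied from this per-cpu array without taking the
 * slower pagecache lookup path:
 *
 *	struct buffer_head *bh = sb_find_get_block(sb, blocknr);
 *
 *	if (bh) {
 *		// likely served from bh_lrus rather than __find_get_block_slow()
 *		brelse(bh);
 *	}
 */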

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached folio (i.e., it blocks try_to_free_buffers), which could
	 * cause page migration to fail.
	 * Skip putting the upcoming bh into the bh_lru until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.  Atomic-context callers may also get NULL if the buffer is being
 * migrated; in that case the folio is not marked accessed either.
 */
static struct buffer_head *
find_get_block_common(struct block_device *bdev, sector_t block,
			unsigned size, bool atomic)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block, atomic);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	return find_get_block_common(bdev, block, size, true);
}
EXPORT_SYMBOL(__find_get_block);

/* same as __find_get_block() but allows sleeping contexts */
struct buffer_head *
__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
			   unsigned size)
{
	return find_get_block_common(bdev, block, size, false);
}
EXPORT_SYMBOL(__find_get_block_nonatomic);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	if (gfpflags_allow_blocking(gfp))
		bh = __find_get_block_nonatomic(bdev, block, size);
	else
		bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function.  You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area.  Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased.  The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	folio_clear_mappedtodisk(folio);
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);

/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from the return of this function until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we will free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway.  We used to use unmap_buffer() for such invalidation, but that was
 * wrong.  We definitely don't want to mark the alias unmapped, for example -
 * it would confuse anyone who might pick it up with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct folio_batch fbatch;
	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use the folio lock instead of
			 * bd_mapping->i_private_lock to pin buffers here since
			 * we can afford to sleep and it scales better than a
			 * global spinlock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	struct buffer_head *bh;

	BUG_ON(!folio_test_locked(folio));

	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio,
				1 << READ_ONCE(inode->i_blkbits), b_state);
	return bh;
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */
1826/*
1827 * While block_write_full_folio is writing back the dirty buffers under
1828 * the page lock, whoever dirtied the buffers may decide to clean them
1829 * again at any time. We handle that by only looking at the buffer
1830 * state inside lock_buffer().
1831 *
1832 * If block_write_full_folio() is called for regular writeback
1833 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1834 * locked buffer. This only can happen if someone has written the buffer
1835 * directly, with submit_bh(). At the address_space level PageWriteback
1836 * prevents this contention from occurring.
1837 *
1838 * If block_write_full_folio() is called with wbc->sync_mode ==
1839 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1840 * causes the writes to be flagged as synchronous writes.
1841 */
1842int __block_write_full_folio(struct inode *inode, struct folio *folio,
1843 get_block_t *get_block, struct writeback_control *wbc)
1844{
1845 int err;
1846 sector_t block;
1847 sector_t last_block;
1848 struct buffer_head *bh, *head;
1849 size_t blocksize;
1850 int nr_underway = 0;
1851 blk_opf_t write_flags = wbc_to_write_flags(wbc);
1852
1853 head = folio_create_buffers(folio, inode,
1854 (1 << BH_Dirty) | (1 << BH_Uptodate));
1855
1856 /*
1857 * Be very careful. We have no exclusion from block_dirty_folio
1858 * here, and the (potentially unmapped) buffers may become dirty at
1859 * any time. If a buffer becomes dirty here after we've inspected it
1860 * then we just miss that fact, and the folio stays dirty.
1861 *
1862 * Buffers outside i_size may be dirtied by block_dirty_folio;
1863 * handle that here by just cleaning them.
1864 */
1865
1866 bh = head;
1867 blocksize = bh->b_size;
1868
1869 block = div_u64(folio_pos(folio), blocksize);
1870 last_block = div_u64(i_size_read(inode) - 1, blocksize);
1871
1872 /*
1873 * Get all the dirty buffers mapped to disk addresses and
1874 * handle any aliases from the underlying blockdev's mapping.
1875 */
1876 do {
1877 if (block > last_block) {
1878			/*
1879			 * Mapped buffers outside i_size will occur, because
1880			 * this folio can be outside i_size when there is a
1881			 * truncate in progress.  The buffer was already
1882			 * zeroed by block_write_full_folio().
1883			 */
1886 clear_buffer_dirty(bh);
1887 set_buffer_uptodate(bh);
1888 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1889 buffer_dirty(bh)) {
1890 WARN_ON(bh->b_size != blocksize);
1891 err = get_block(inode, block, bh, 1);
1892 if (err)
1893 goto recover;
1894 clear_buffer_delay(bh);
1895 if (buffer_new(bh)) {
1896 /* blockdev mappings never come here */
1897 clear_buffer_new(bh);
1898 clean_bdev_bh_alias(bh);
1899 }
1900 }
1901 bh = bh->b_this_page;
1902 block++;
1903 } while (bh != head);
1904
1905 do {
1906 if (!buffer_mapped(bh))
1907 continue;
1908 /*
1909 * If it's a fully non-blocking write attempt and we cannot
1910 * lock the buffer then redirty the folio. Note that this can
1911 * potentially cause a busy-wait loop from writeback threads
1912 * and kswapd activity, but those code paths have their own
1913 * higher-level throttling.
1914 */
1915 if (wbc->sync_mode != WB_SYNC_NONE) {
1916 lock_buffer(bh);
1917 } else if (!trylock_buffer(bh)) {
1918 folio_redirty_for_writepage(wbc, folio);
1919 continue;
1920 }
1921 if (test_clear_buffer_dirty(bh)) {
1922 mark_buffer_async_write_endio(bh,
1923 end_buffer_async_write);
1924 } else {
1925 unlock_buffer(bh);
1926 }
1927 } while ((bh = bh->b_this_page) != head);
1928
1929 /*
1930 * The folio and its buffers are protected by the writeback flag,
1931 * so we can drop the bh refcounts early.
1932 */
1933 BUG_ON(folio_test_writeback(folio));
1934 folio_start_writeback(folio);
1935
1936 do {
1937 struct buffer_head *next = bh->b_this_page;
1938 if (buffer_async_write(bh)) {
1939 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1940 inode->i_write_hint, wbc);
1941 nr_underway++;
1942 }
1943 bh = next;
1944 } while (bh != head);
1945 folio_unlock(folio);
1946
1947 err = 0;
1948done:
1949 if (nr_underway == 0) {
1950 /*
1951 * The folio was marked dirty, but the buffers were
1952 * clean. Someone wrote them back by hand with
1953 * write_dirty_buffer/submit_bh. A rare case.
1954 */
1955 folio_end_writeback(folio);
1956
1957 /*
1958 * The folio and buffer_heads can be released at any time from
1959 * here on.
1960 */
1961 }
1962 return err;
1963
1964recover:
1965 /*
1966 * ENOSPC, or some other error. We may already have added some
1967 * blocks to the file, so we need to write these out to avoid
1968 * exposing stale data.
1969 * The folio is currently locked and not marked for writeback
1970 */
1971 bh = head;
1972 /* Recovery: lock and submit the mapped buffers */
1973 do {
1974 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1975 !buffer_delay(bh)) {
1976 lock_buffer(bh);
1977 mark_buffer_async_write_endio(bh,
1978 end_buffer_async_write);
1979 } else {
1980 /*
1981 * The buffer may have been set dirty during
1982 * attachment to a dirty folio.
1983 */
1984 clear_buffer_dirty(bh);
1985 }
1986 } while ((bh = bh->b_this_page) != head);
1987 BUG_ON(folio_test_writeback(folio));
1988 mapping_set_error(folio->mapping, err);
1989 folio_start_writeback(folio);
1990 do {
1991 struct buffer_head *next = bh->b_this_page;
1992 if (buffer_async_write(bh)) {
1993 clear_buffer_dirty(bh);
1994 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1995 inode->i_write_hint, wbc);
1996 nr_underway++;
1997 }
1998 bh = next;
1999 } while (bh != head);
2000 folio_unlock(folio);
2001 goto done;
2002}
2003EXPORT_SYMBOL(__block_write_full_folio);
2004
2005/*
2006 * If a folio has any new buffers, zero them out here, and mark them uptodate
2007 * and dirty so they'll be written out (in order to prevent uninitialised
2008 * block data from leaking), and clear the new bit.
2009 */
2010void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
2011{
2012 size_t block_start, block_end;
2013 struct buffer_head *head, *bh;
2014
2015 BUG_ON(!folio_test_locked(folio));
2016 head = folio_buffers(folio);
2017 if (!head)
2018 return;
2019
2020 bh = head;
2021 block_start = 0;
2022 do {
2023 block_end = block_start + bh->b_size;
2024
2025 if (buffer_new(bh)) {
2026 if (block_end > from && block_start < to) {
2027 if (!folio_test_uptodate(folio)) {
2028 size_t start, xend;
2029
2030 start = max(from, block_start);
2031 xend = min(to, block_end);
2032
2033 folio_zero_segment(folio, start, xend);
2034 set_buffer_uptodate(bh);
2035 }
2036
2037 clear_buffer_new(bh);
2038 mark_buffer_dirty(bh);
2039 }
2040 }
2041
2042 block_start = block_end;
2043 bh = bh->b_this_page;
2044 } while (bh != head);
2045}
2046EXPORT_SYMBOL(folio_zero_new_buffers);
2047
2048static int
2049iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2050 const struct iomap *iomap)
2051{
2052 loff_t offset = (loff_t)block << inode->i_blkbits;
2053
2054 bh->b_bdev = iomap->bdev;
2055
2056 /*
2057 * Block points to offset in file we need to map, iomap contains
2058 * the offset at which the map starts. If the map ends before the
2059 * current block, then do not map the buffer and let the caller
2060 * handle it.
2061 */
2062 if (offset >= iomap->offset + iomap->length)
2063 return -EIO;
2064
2065 switch (iomap->type) {
2066 case IOMAP_HOLE:
2067 /*
2068		 * If the buffer is not up to date or is beyond the current EOF,
2069 * we need to mark it as new to ensure sub-block zeroing is
2070 * executed if necessary.
2071 */
2072 if (!buffer_uptodate(bh) ||
2073 (offset >= i_size_read(inode)))
2074 set_buffer_new(bh);
2075 return 0;
2076 case IOMAP_DELALLOC:
2077 if (!buffer_uptodate(bh) ||
2078 (offset >= i_size_read(inode)))
2079 set_buffer_new(bh);
2080 set_buffer_uptodate(bh);
2081 set_buffer_mapped(bh);
2082 set_buffer_delay(bh);
2083 return 0;
2084 case IOMAP_UNWRITTEN:
2085 /*
2086 * For unwritten regions, we always need to ensure that regions
2087 * in the block we are not writing to are zeroed. Mark the
2088 * buffer as new to ensure this.
2089 */
2090 set_buffer_new(bh);
2091 set_buffer_unwritten(bh);
2092 fallthrough;
2093 case IOMAP_MAPPED:
2094 if ((iomap->flags & IOMAP_F_NEW) ||
2095 offset >= i_size_read(inode)) {
2096 /*
2097			 * This can happen if truncating the block device
2098			 * races with the check in the caller, as i_size
2099			 * updates aren't synchronized by i_rwsem for
2100			 * block devices.
2101 */
2102 if (S_ISBLK(inode->i_mode))
2103 return -EIO;
2104 set_buffer_new(bh);
2105 }
2106 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2107 inode->i_blkbits;
2108 set_buffer_mapped(bh);
2109 return 0;
2110 default:
2111 WARN_ON_ONCE(1);
2112 return -EIO;
2113 }
2114}
2115
2116int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2117 get_block_t *get_block, const struct iomap *iomap)
2118{
2119 size_t from = offset_in_folio(folio, pos);
2120 size_t to = from + len;
2121 struct inode *inode = folio->mapping->host;
2122 size_t block_start, block_end;
2123 sector_t block;
2124 int err = 0;
2125 size_t blocksize;
2126	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2127
2128 BUG_ON(!folio_test_locked(folio));
2129 BUG_ON(to > folio_size(folio));
2130 BUG_ON(from > to);
2131
2132 head = folio_create_buffers(folio, inode, 0);
2133 blocksize = head->b_size;
2134 block = div_u64(folio_pos(folio), blocksize);
2135
2136 for (bh = head, block_start = 0; bh != head || !block_start;
2137	     block++, block_start = block_end, bh = bh->b_this_page) {
2138 block_end = block_start + blocksize;
2139 if (block_end <= from || block_start >= to) {
2140 if (folio_test_uptodate(folio)) {
2141 if (!buffer_uptodate(bh))
2142 set_buffer_uptodate(bh);
2143 }
2144 continue;
2145 }
2146 if (buffer_new(bh))
2147 clear_buffer_new(bh);
2148 if (!buffer_mapped(bh)) {
2149 WARN_ON(bh->b_size != blocksize);
2150 if (get_block)
2151 err = get_block(inode, block, bh, 1);
2152 else
2153 err = iomap_to_bh(inode, block, bh, iomap);
2154 if (err)
2155 break;
2156
2157 if (buffer_new(bh)) {
2158 clean_bdev_bh_alias(bh);
2159 if (folio_test_uptodate(folio)) {
2160 clear_buffer_new(bh);
2161 set_buffer_uptodate(bh);
2162 mark_buffer_dirty(bh);
2163 continue;
2164 }
2165 if (block_end > to || block_start < from)
2166 folio_zero_segments(folio,
2167 to, block_end,
2168 block_start, from);
2169 continue;
2170 }
2171 }
2172 if (folio_test_uptodate(folio)) {
2173 if (!buffer_uptodate(bh))
2174 set_buffer_uptodate(bh);
2175 continue;
2176 }
2177 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2178 !buffer_unwritten(bh) &&
2179 (block_start < from || block_end > to)) {
2180 bh_read_nowait(bh, 0);
2181			*wait_bh++ = bh;
2182 }
2183 }
2184	/*
2185	 * If we issued read requests, let them complete.
2186	 */
2187	while (wait_bh > wait) {
2188 wait_on_buffer(*--wait_bh);
2189 if (!buffer_uptodate(*wait_bh))
2190 err = -EIO;
2191 }
2192 if (unlikely(err))
2193 folio_zero_new_buffers(folio, from, to);
2194 return err;
2195}
2196
2197int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2198 get_block_t *get_block)
2199{
2200 return __block_write_begin_int(folio, pos, len, get_block, NULL);
2201}
2202EXPORT_SYMBOL(__block_write_begin);
2203
2204void block_commit_write(struct folio *folio, size_t from, size_t to)
2205{
2206 size_t block_start, block_end;
2207 bool partial = false;
2208 unsigned blocksize;
2209 struct buffer_head *bh, *head;
2210
2211 bh = head = folio_buffers(folio);
2212 if (!bh)
2213 return;
2214 blocksize = bh->b_size;
2215
2216 block_start = 0;
2217 do {
2218 block_end = block_start + blocksize;
2219 if (block_end <= from || block_start >= to) {
2220 if (!buffer_uptodate(bh))
2221 partial = true;
2222 } else {
2223 set_buffer_uptodate(bh);
2224 mark_buffer_dirty(bh);
2225 }
2226 if (buffer_new(bh))
2227 clear_buffer_new(bh);
2228
2229 block_start = block_end;
2230 bh = bh->b_this_page;
2231 } while (bh != head);
2232
2233 /*
2234 * If this is a partial write which happened to make all buffers
2235 * uptodate then we can optimize away a bogus read_folio() for
2236 * the next read(). Here we 'discover' whether the folio went
2237 * uptodate as a result of this (potentially partial) write.
2238 */
2239 if (!partial)
2240 folio_mark_uptodate(folio);
2241}
2242EXPORT_SYMBOL(block_commit_write);
2243
2244/*
2245 * block_write_begin takes care of the basic task of block allocation and
2246 * bringing partial write blocks uptodate first.
2247 *
2248 * The filesystem needs to handle block truncation upon failure.
2249 */
2250int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2251 struct folio **foliop, get_block_t *get_block)
2252{
2253 pgoff_t index = pos >> PAGE_SHIFT;
2254 struct folio *folio;
2255 int status;
2256
2257 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2258 mapping_gfp_mask(mapping));
2259 if (IS_ERR(folio))
2260 return PTR_ERR(folio);
2261
2262 status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2263 if (unlikely(status)) {
2264 folio_unlock(folio);
2265 folio_put(folio);
2266 folio = NULL;
2267 }
2268
2269 *foliop = folio;
2270 return status;
2271}
2272EXPORT_SYMBOL(block_write_begin);
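
/*
 * A minimal ->write_begin built on the helper above (editor's sketch;
 * "myfs_get_block" is a hypothetical get_block_t):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, foliop,
 *					 myfs_get_block);
 *	}
 *
 * On failure the filesystem must still truncate any blocks allocated
 * beyond i_size, as noted above.
 */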
2273
2274int block_write_end(struct file *file, struct address_space *mapping,
2275 loff_t pos, unsigned len, unsigned copied,
2276 struct folio *folio, void *fsdata)
2277{
2278 size_t start = pos - folio_pos(folio);
2279
2280 if (unlikely(copied < len)) {
2281 /*
2282 * The buffers that were written will now be uptodate, so
2283 * we don't have to worry about a read_folio reading them
2284 * and overwriting a partial write. However if we have
2285 * encountered a short write and only partially written
2286 * into a buffer, it will not be marked uptodate, so a
2287 * read_folio might come in and destroy our partial write.
2288 *
2289 * Do the simplest thing, and just treat any short write to a
2290 * non uptodate folio as a zero-length write, and force the
2291 * caller to redo the whole thing.
2292 */
2293 if (!folio_test_uptodate(folio))
2294 copied = 0;
2295
2296 folio_zero_new_buffers(folio, start+copied, start+len);
2297 }
2298 flush_dcache_folio(folio);
2299
2300 /* This could be a short (even 0-length) commit */
2301 block_commit_write(folio, start, start + copied);
2302
2303 return copied;
2304}
2305EXPORT_SYMBOL(block_write_end);
2306
2307int generic_write_end(struct file *file, struct address_space *mapping,
2308 loff_t pos, unsigned len, unsigned copied,
2309 struct folio *folio, void *fsdata)
2310{
2311 struct inode *inode = mapping->host;
2312 loff_t old_size = inode->i_size;
2313 bool i_size_changed = false;
2314
2315 copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
2316
2317 /*
2318 * No need to use i_size_read() here, the i_size cannot change under us
2319 * because we hold i_rwsem.
2320 *
2321 * But it's important to update i_size while still holding folio lock:
2322 * page writeout could otherwise come in and zero beyond i_size.
2323 */
2324 if (pos + copied > inode->i_size) {
2325 i_size_write(inode, pos + copied);
2326 i_size_changed = true;
2327 }
2328
2329 folio_unlock(folio);
2330 folio_put(folio);
2331
2332 if (old_size < pos)
2333 pagecache_isize_extended(inode, old_size, pos);
2334 /*
2335 * Don't mark the inode dirty under page lock. First, it unnecessarily
2336 * makes the holding time of page lock longer. Second, it forces lock
2337 * ordering of page lock and transaction start for journaling
2338 * filesystems.
2339 */
2340 if (i_size_changed)
2341 mark_inode_dirty(inode);
2342 return copied;
2343}
2344EXPORT_SYMBOL(generic_write_end);
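
/*
 * Editor's sketch of how the helpers above are typically wired into an
 * address_space_operations; the "myfs_" handlers are hypothetical
 * wrappers like the ones sketched elsewhere in this file:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.bmap			= myfs_bmap,
 *	};
 */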
2345
2346/*
2347 * block_is_partially_uptodate checks whether buffers within a folio are
2348 * uptodate or not.
2349 *
2350 * Returns true if all buffers which correspond to the specified part
2351 * of the folio are uptodate.
2352 */
2353bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2354{
2355 unsigned block_start, block_end, blocksize;
2356 unsigned to;
2357 struct buffer_head *bh, *head;
2358 bool ret = true;
2359
2360 head = folio_buffers(folio);
2361 if (!head)
2362 return false;
2363 blocksize = head->b_size;
2364 to = min_t(unsigned, folio_size(folio) - from, count);
2365 to = from + to;
2366 if (from < blocksize && to > folio_size(folio) - blocksize)
2367 return false;
2368
2369 bh = head;
2370 block_start = 0;
2371 do {
2372 block_end = block_start + blocksize;
2373 if (block_end > from && block_start < to) {
2374 if (!buffer_uptodate(bh)) {
2375 ret = false;
2376 break;
2377 }
2378 if (block_end >= to)
2379 break;
2380 }
2381 block_start = block_end;
2382 bh = bh->b_this_page;
2383 } while (bh != head);
2384
2385 return ret;
2386}
2387EXPORT_SYMBOL(block_is_partially_uptodate);
2388
2389/*
2390 * Generic "read_folio" function for block devices that have the normal
2391 * get_block functionality. This is most of the block device filesystems.
2392 * Reads the folio asynchronously --- the unlock_buffer() and
2393 * set/clear_buffer_uptodate() functions propagate buffer state into the
2394 * folio once IO has completed.
2395 */
2396int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2397{
2398 struct inode *inode = folio->mapping->host;
2399 sector_t iblock, lblock;
2400 struct buffer_head *bh, *head, *prev = NULL;
2401 size_t blocksize;
2402 int fully_mapped = 1;
2403 bool page_error = false;
2404 loff_t limit = i_size_read(inode);
2405
2406	/* Needed for ext4: fsverity metadata may be stored past i_size. */
2407 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2408 limit = inode->i_sb->s_maxbytes;
2409
2410 head = folio_create_buffers(folio, inode, 0);
2411 blocksize = head->b_size;
2412
2413 iblock = div_u64(folio_pos(folio), blocksize);
2414 lblock = div_u64(limit + blocksize - 1, blocksize);
2415 bh = head;
2416
2417 do {
2418 if (buffer_uptodate(bh))
2419 continue;
2420
2421 if (!buffer_mapped(bh)) {
2422 int err = 0;
2423
2424 fully_mapped = 0;
2425 if (iblock < lblock) {
2426 WARN_ON(bh->b_size != blocksize);
2427 err = get_block(inode, iblock, bh, 0);
2428 if (err)
2429 page_error = true;
2430 }
2431 if (!buffer_mapped(bh)) {
2432 folio_zero_range(folio, bh_offset(bh),
2433 blocksize);
2434 if (!err)
2435 set_buffer_uptodate(bh);
2436 continue;
2437 }
2438 /*
2439 * get_block() might have updated the buffer
2440 * synchronously
2441 */
2442 if (buffer_uptodate(bh))
2443 continue;
2444 }
2445
2446 lock_buffer(bh);
2447 if (buffer_uptodate(bh)) {
2448 unlock_buffer(bh);
2449 continue;
2450 }
2451
2452 mark_buffer_async_read(bh);
2453 if (prev)
2454 submit_bh(REQ_OP_READ, prev);
2455 prev = bh;
2456 } while (iblock++, (bh = bh->b_this_page) != head);
2457
2458 if (fully_mapped)
2459 folio_set_mappedtodisk(folio);
2460
2461 /*
2462 * All buffers are uptodate or get_block() returned an error
2463 * when trying to map them - we must finish the read because
2464 * end_buffer_async_read() will never be called on any buffer
2465 * in this folio.
2466 */
2467 if (prev)
2468 submit_bh(REQ_OP_READ, prev);
2469 else
2470 folio_end_read(folio, !page_error);
2471
2472 return 0;
2473}
2474EXPORT_SYMBOL(block_read_full_folio);
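
/*
 * Typical caller (editor's sketch): a filesystem's ->read_folio is a
 * thin wrapper, with "myfs_get_block" hypothetical:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */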
2475
2476/* utility function for filesystems that need to do work on expanding
2477 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2478 * deal with the hole.
2479 */
2480int generic_cont_expand_simple(struct inode *inode, loff_t size)
2481{
2482 struct address_space *mapping = inode->i_mapping;
2483 const struct address_space_operations *aops = mapping->a_ops;
2484 struct folio *folio;
2485 void *fsdata = NULL;
2486 int err;
2487
2488 err = inode_newsize_ok(inode, size);
2489 if (err)
2490 goto out;
2491
2492 err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2493 if (err)
2494 goto out;
2495
2496 err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2497 BUG_ON(err > 0);
2498
2499out:
2500 return err;
2501}
2502EXPORT_SYMBOL(generic_cont_expand_simple);
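
/*
 * Example (editor's sketch): from a ->setattr implementation, growing
 * the file through the page cache before applying the new size:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */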
2503
2504static int cont_expand_zero(struct file *file, struct address_space *mapping,
2505 loff_t pos, loff_t *bytes)
2506{
2507 struct inode *inode = mapping->host;
2508 const struct address_space_operations *aops = mapping->a_ops;
2509 unsigned int blocksize = i_blocksize(inode);
2510 struct folio *folio;
2511 void *fsdata = NULL;
2512 pgoff_t index, curidx;
2513 loff_t curpos;
2514 unsigned zerofrom, offset, len;
2515 int err = 0;
2516
2517 index = pos >> PAGE_SHIFT;
2518 offset = pos & ~PAGE_MASK;
2519
2520	while (index > (curidx = (curpos = *bytes) >> PAGE_SHIFT)) {
2521 zerofrom = curpos & ~PAGE_MASK;
2522 if (zerofrom & (blocksize-1)) {
2523 *bytes |= (blocksize-1);
2524 (*bytes)++;
2525 }
2526 len = PAGE_SIZE - zerofrom;
2527
2528 err = aops->write_begin(file, mapping, curpos, len,
2529 &folio, &fsdata);
2530 if (err)
2531 goto out;
2532 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2533 err = aops->write_end(file, mapping, curpos, len, len,
2534 folio, fsdata);
2535 if (err < 0)
2536 goto out;
2537 BUG_ON(err != len);
2538 err = 0;
2539
2540 balance_dirty_pages_ratelimited(mapping);
2541
2542 if (fatal_signal_pending(current)) {
2543 err = -EINTR;
2544 goto out;
2545 }
2546 }
2547
2548 /* page covers the boundary, find the boundary offset */
2549 if (index == curidx) {
2550 zerofrom = curpos & ~PAGE_MASK;
2551		/* if we will expand the file, the last block will be filled */
2552 if (offset <= zerofrom) {
2553 goto out;
2554 }
2555 if (zerofrom & (blocksize-1)) {
2556 *bytes |= (blocksize-1);
2557 (*bytes)++;
2558 }
2559 len = offset - zerofrom;
2560
2561 err = aops->write_begin(file, mapping, curpos, len,
2562 &folio, &fsdata);
2563 if (err)
2564 goto out;
2565 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2566 err = aops->write_end(file, mapping, curpos, len, len,
2567 folio, fsdata);
2568 if (err < 0)
2569 goto out;
2570 BUG_ON(err != len);
2571 err = 0;
2572 }
2573out:
2574 return err;
2575}
2576
2577/*
2578 * For moronic filesystems that do not allow holes in a file.
2579 * We may have to extend the file.
2580 */
2581int cont_write_begin(struct file *file, struct address_space *mapping,
2582 loff_t pos, unsigned len,
2583 struct folio **foliop, void **fsdata,
2584 get_block_t *get_block, loff_t *bytes)
2585{
2586 struct inode *inode = mapping->host;
2587 unsigned int blocksize = i_blocksize(inode);
2588 unsigned int zerofrom;
2589 int err;
2590
2591 err = cont_expand_zero(file, mapping, pos, bytes);
2592 if (err)
2593 return err;
2594
2595 zerofrom = *bytes & ~PAGE_MASK;
2596 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2597 *bytes |= (blocksize-1);
2598 (*bytes)++;
2599 }
2600
2601 return block_write_begin(mapping, pos, len, foliop, get_block);
2602}
2603EXPORT_SYMBOL(cont_write_begin);
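
/*
 * Editor's sketch of a ->write_begin for such a filesystem.  The @bytes
 * argument points at the filesystem's record of how far the file has
 * been zeroed out; "myfs_i()" and its "zeroed_size" field are
 * hypothetical:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, foliop,
 *					fsdata, myfs_get_block,
 *					&myfs_i(mapping->host)->zeroed_size);
 *	}
 */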
2604
2605/*
2606 * block_page_mkwrite() is not allowed to change the file size as it gets
2607 * called from a page fault handler when a page is first dirtied. Hence we must
2608 * be careful to check for EOF conditions here. We set the page up correctly
2609 * for a written page which means we get ENOSPC checking when writing into
2610 * holes and correct delalloc and unwritten extent mapping on filesystems that
2611 * support these features.
2612 *
2613 * We are not allowed to take i_rwsem here, so we have to play games to
2614 * protect against truncate races as the page could now be beyond EOF. Because
2615 * truncate writes the inode size before removing pages, once we have the
2616 * page lock we can determine safely if the page is beyond EOF. If it is not
2617 * beyond EOF, then the page is guaranteed safe against truncation until we
2618 * unlock the page.
2619 *
2620 * Direct callers of this function should protect against filesystem freezing
2621 * using sb_start_pagefault() - sb_end_pagefault() functions.
2622 */
2623int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2624 get_block_t get_block)
2625{
2626 struct folio *folio = page_folio(vmf->page);
2627 struct inode *inode = file_inode(vma->vm_file);
2628 unsigned long end;
2629 loff_t size;
2630 int ret;
2631
2632 folio_lock(folio);
2633 size = i_size_read(inode);
2634 if ((folio->mapping != inode->i_mapping) ||
2635 (folio_pos(folio) >= size)) {
2636 /* We overload EFAULT to mean page got truncated */
2637 ret = -EFAULT;
2638 goto out_unlock;
2639 }
2640
2641 end = folio_size(folio);
2642 /* folio is wholly or partially inside EOF */
2643 if (folio_pos(folio) + end > size)
2644 end = size - folio_pos(folio);
2645
2646 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2647 if (unlikely(ret))
2648 goto out_unlock;
2649
2650 block_commit_write(folio, 0, end);
2651
2652 folio_mark_dirty(folio);
2653 folio_wait_stable(folio);
2654 return 0;
2655out_unlock:
2656 folio_unlock(folio);
2657 return ret;
2658}
2659EXPORT_SYMBOL(block_page_mkwrite);
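
/*
 * Editor's sketch of a ->page_mkwrite handler honouring the freeze
 * protection requirement above; "myfs_get_block" is hypothetical, and
 * block_page_mkwrite_return() translates the errno for the fault path:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct super_block *sb =
 *			file_inode(vmf->vma->vm_file)->i_sb;
 *		int err;
 *
 *		sb_start_pagefault(sb);
 *		file_update_time(vmf->vma->vm_file);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */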
2660
2661int block_truncate_page(struct address_space *mapping,
2662 loff_t from, get_block_t *get_block)
2663{
2664 pgoff_t index = from >> PAGE_SHIFT;
2665 unsigned blocksize;
2666 sector_t iblock;
2667 size_t offset, length, pos;
2668 struct inode *inode = mapping->host;
2669 struct folio *folio;
2670 struct buffer_head *bh;
2671 int err = 0;
2672
2673 blocksize = i_blocksize(inode);
2674 length = from & (blocksize - 1);
2675
2676 /* Block boundary? Nothing to do */
2677 if (!length)
2678 return 0;
2679
2680 length = blocksize - length;
2681 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2682
2683 folio = filemap_grab_folio(mapping, index);
2684 if (IS_ERR(folio))
2685 return PTR_ERR(folio);
2686
2687 bh = folio_buffers(folio);
2688 if (!bh)
2689 bh = create_empty_buffers(folio, blocksize, 0);
2690
2691 /* Find the buffer that contains "offset" */
2692 offset = offset_in_folio(folio, from);
2693 pos = blocksize;
2694 while (offset >= pos) {
2695 bh = bh->b_this_page;
2696 iblock++;
2697 pos += blocksize;
2698 }
2699
2700 if (!buffer_mapped(bh)) {
2701 WARN_ON(bh->b_size != blocksize);
2702 err = get_block(inode, iblock, bh, 0);
2703 if (err)
2704 goto unlock;
2705 /* unmapped? It's a hole - nothing to do */
2706 if (!buffer_mapped(bh))
2707 goto unlock;
2708 }
2709
2710 /* Ok, it's mapped. Make sure it's up-to-date */
2711 if (folio_test_uptodate(folio))
2712 set_buffer_uptodate(bh);
2713
2714 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2715 err = bh_read(bh, 0);
2716 /* Uhhuh. Read error. Complain and punt. */
2717 if (err < 0)
2718 goto unlock;
2719 }
2720
2721 folio_zero_range(folio, offset, length);
2722 mark_buffer_dirty(bh);
2723
2724unlock:
2725 folio_unlock(folio);
2726 folio_put(folio);
2727
2728 return err;
2729}
2730EXPORT_SYMBOL(block_truncate_page);
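
/*
 * Typical call site (editor's sketch): zeroing the partial tail block
 * when shrinking a file, before the on-disk blocks are freed:
 *
 *	err = block_truncate_page(inode->i_mapping, newsize,
 *				  myfs_get_block);
 */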
2731
2732/*
2733 * The generic ->writepage function for buffer-backed address_spaces
2734 */
2735int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2736 void *get_block)
2737{
2738 struct inode * const inode = folio->mapping->host;
2739 loff_t i_size = i_size_read(inode);
2740
2741 /* Is the folio fully inside i_size? */
2742 if (folio_pos(folio) + folio_size(folio) <= i_size)
2743 return __block_write_full_folio(inode, folio, get_block, wbc);
2744
2745 /* Is the folio fully outside i_size? (truncate in progress) */
2746 if (folio_pos(folio) >= i_size) {
2747 folio_unlock(folio);
2748 return 0; /* don't care */
2749 }
2750
2751 /*
2752 * The folio straddles i_size. It must be zeroed out on each and every
2753 * writepage invocation because it may be mmapped. "A file is mapped
2754 * in multiples of the page size. For a file that is not a multiple of
2755 * the page size, the remaining memory is zeroed when mapped, and
2756 * writes to that region are not written out to the file."
2757 */
2758 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2759 folio_size(folio));
2760 return __block_write_full_folio(inode, folio, get_block, wbc);
2761}
2762
2763sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2764 get_block_t *get_block)
2765{
2766 struct inode *inode = mapping->host;
2767 struct buffer_head tmp = {
2768 .b_size = i_blocksize(inode),
2769 };
2770
2771 get_block(inode, block, &tmp, 0);
2772 return tmp.b_blocknr;
2773}
2774EXPORT_SYMBOL(generic_block_bmap);
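
/*
 * Typical caller (editor's sketch): the ->bmap method is a one-line
 * wrapper, with "myfs_get_block" hypothetical:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */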
2775
2776static void end_bio_bh_io_sync(struct bio *bio)
2777{
2778 struct buffer_head *bh = bio->bi_private;
2779
2780 if (unlikely(bio_flagged(bio, BIO_QUIET)))
2781 set_bit(BH_Quiet, &bh->b_state);
2782
2783 bh->b_end_io(bh, !bio->bi_status);
2784 bio_put(bio);
2785}
2786
2787static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2788 enum rw_hint write_hint,
2789 struct writeback_control *wbc)
2790{
2791 const enum req_op op = opf & REQ_OP_MASK;
2792 struct bio *bio;
2793
2794 BUG_ON(!buffer_locked(bh));
2795 BUG_ON(!buffer_mapped(bh));
2796 BUG_ON(!bh->b_end_io);
2797 BUG_ON(buffer_delay(bh));
2798 BUG_ON(buffer_unwritten(bh));
2799
2800 /*
2801 * Only clear out a write error when rewriting
2802 */
2803 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2804 clear_buffer_write_io_error(bh);
2805
2806 if (buffer_meta(bh))
2807 opf |= REQ_META;
2808 if (buffer_prio(bh))
2809 opf |= REQ_PRIO;
2810
2811 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2812
2813 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2814
2815 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2816 bio->bi_write_hint = write_hint;
2817
2818 bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2819
2820 bio->bi_end_io = end_bio_bh_io_sync;
2821 bio->bi_private = bh;
2822
2823 /* Take care of bh's that straddle the end of the device */
2824 guard_bio_eod(bio);
2825
2826 if (wbc) {
2827 wbc_init_bio(wbc, bio);
2828 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2829 }
2830
2831 submit_bio(bio);
2832}
2833
2834void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2835{
2836 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2837}
2838EXPORT_SYMBOL(submit_bh);
2839
2840void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2841{
2842 lock_buffer(bh);
2843 if (!test_clear_buffer_dirty(bh)) {
2844 unlock_buffer(bh);
2845 return;
2846 }
2847 bh->b_end_io = end_buffer_write_sync;
2848 get_bh(bh);
2849 submit_bh(REQ_OP_WRITE | op_flags, bh);
2850}
2851EXPORT_SYMBOL(write_dirty_buffer);
2852
2853/*
2854 * For a data-integrity writeout, we need to wait upon any in-progress I/O,
2855 * then start new I/O and wait upon that. The caller must have a ref on
2856 * the buffer_head.
2857 */
2858int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2859{
2860 WARN_ON(atomic_read(&bh->b_count) < 1);
2861 lock_buffer(bh);
2862 if (test_clear_buffer_dirty(bh)) {
2863 /*
2864 * The bh should be mapped, but it might not be if the
2865 * device was hot-removed. Not much we can do but fail the I/O.
2866 */
2867 if (!buffer_mapped(bh)) {
2868 unlock_buffer(bh);
2869 return -EIO;
2870 }
2871
2872 get_bh(bh);
2873 bh->b_end_io = end_buffer_write_sync;
2874 submit_bh(REQ_OP_WRITE | op_flags, bh);
2875 wait_on_buffer(bh);
2876 if (!buffer_uptodate(bh))
2877 return -EIO;
2878 } else {
2879 unlock_buffer(bh);
2880 }
2881 return 0;
2882}
2883EXPORT_SYMBOL(__sync_dirty_buffer);
2884
2885int sync_dirty_buffer(struct buffer_head *bh)
2886{
2887 return __sync_dirty_buffer(bh, REQ_SYNC);
2888}
2889EXPORT_SYMBOL(sync_dirty_buffer);
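
/*
 * Typical metadata update (editor's sketch): read a block, modify it,
 * then either leave it to writeback or force it out for integrity.
 * "blocknr", "off", "src" and "n" are placeholders:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + off, src, n);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	// omit to let writeback handle it
 *	brelse(bh);
 */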
2890
2891static inline int buffer_busy(struct buffer_head *bh)
2892{
2893 return atomic_read(&bh->b_count) |
2894 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2895}
2896
2897static bool
2898drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2899{
2900 struct buffer_head *head = folio_buffers(folio);
2901 struct buffer_head *bh;
2902
2903 bh = head;
2904 do {
2905 if (buffer_busy(bh))
2906 goto failed;
2907 bh = bh->b_this_page;
2908 } while (bh != head);
2909
2910 do {
2911 struct buffer_head *next = bh->b_this_page;
2912
2913 if (bh->b_assoc_map)
2914 __remove_assoc_queue(bh);
2915 bh = next;
2916 } while (bh != head);
2917 *buffers_to_free = head;
2918 folio_detach_private(folio);
2919 return true;
2920failed:
2921 return false;
2922}
2923
2924/**
2925 * try_to_free_buffers - Release buffers attached to this folio.
2926 * @folio: The folio.
2927 *
2928 * If any buffers are in use (dirty, under writeback, elevated refcount),
2929 * no buffers will be freed.
2930 *
2931 * If the folio is dirty but all the buffers are clean then we need to
2932 * be sure to mark the folio clean as well. This is because the folio
2933 * may be against a block device, and a later reattachment of buffers
2934 * to a dirty folio will set *all* buffers dirty, which would corrupt
2935 * filesystem data on the same device.
2936 *
2937 * The same applies to regular filesystem folios: if all the buffers are
2938 * clean then we set the folio clean and proceed. To do that, we require
2939 * total exclusion from block_dirty_folio(). That is obtained with
2940 * i_private_lock.
2941 *
2942 * Exclusion against try_to_free_buffers may be obtained by either
2943 * locking the folio or by holding its mapping's i_private_lock.
2944 *
2945 * Context: Process context. @folio must be locked. Will not sleep.
2946 * Return: true if all buffers attached to this folio were freed.
2947 */
2948bool try_to_free_buffers(struct folio *folio)
2949{
2950 struct address_space * const mapping = folio->mapping;
2951 struct buffer_head *buffers_to_free = NULL;
2952	bool ret = false;
2953
2954 BUG_ON(!folio_test_locked(folio));
2955 if (folio_test_writeback(folio))
2956 return false;
2957
2958 if (mapping == NULL) { /* can this still happen? */
2959 ret = drop_buffers(folio, &buffers_to_free);
2960 goto out;
2961 }
2962
2963 spin_lock(&mapping->i_private_lock);
2964 ret = drop_buffers(folio, &buffers_to_free);
2965
2966 /*
2967 * If the filesystem writes its buffers by hand (eg ext3)
2968 * then we can have clean buffers against a dirty folio. We
2969 * clean the folio here; otherwise the VM will never notice
2970 * that the filesystem did any IO at all.
2971 *
2972 * Also, during truncate, discard_buffer will have marked all
2973 * the folio's buffers clean. We discover that here and clean
2974 * the folio also.
2975 *
2976 * i_private_lock must be held over this entire operation in order
2977 * to synchronise against block_dirty_folio and prevent the
2978 * dirty bit from being lost.
2979 */
2980 if (ret)
2981 folio_cancel_dirty(folio);
2982 spin_unlock(&mapping->i_private_lock);
2983out:
2984 if (buffers_to_free) {
2985 struct buffer_head *bh = buffers_to_free;
2986
2987 do {
2988 struct buffer_head *next = bh->b_this_page;
2989 free_buffer_head(bh);
2990 bh = next;
2991 } while (bh != buffers_to_free);
2992 }
2993 return ret;
2994}
2995EXPORT_SYMBOL(try_to_free_buffers);
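
/*
 * Typical caller (editor's sketch): a buffer-backed filesystem's
 * ->release_folio, once any filesystem-private reasons to keep the
 * folio have been ruled out:
 *
 *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(folio);
 *	}
 */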
2996
2997/*
2998 * Buffer-head allocation
2999 */
3000static struct kmem_cache *bh_cachep __ro_after_init;
3001
3002/*
3003 * Once the number of bh's in the machine exceeds this level, we start
3004 * stripping them in writeback.
3005 */
3006static unsigned long max_buffer_heads __ro_after_init;
3007
3008int buffer_heads_over_limit;
3009
3010struct bh_accounting {
3011 int nr; /* Number of live bh's */
3012 int ratelimit; /* Limit cacheline bouncing */
3013};
3014
3015static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3016
3017static void recalc_bh_state(void)
3018{
3019 int i;
3020 int tot = 0;
3021
3022 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3023 return;
3024 __this_cpu_write(bh_accounting.ratelimit, 0);
3025 for_each_online_cpu(i)
3026 tot += per_cpu(bh_accounting, i).nr;
3027 buffer_heads_over_limit = (tot > max_buffer_heads);
3028}
3029
3030struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3031{
3032 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3033 if (ret) {
3034 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3035 spin_lock_init(&ret->b_uptodate_lock);
3036 preempt_disable();
3037 __this_cpu_inc(bh_accounting.nr);
3038 recalc_bh_state();
3039 preempt_enable();
3040 }
3041 return ret;
3042}
3043EXPORT_SYMBOL(alloc_buffer_head);
3044
3045void free_buffer_head(struct buffer_head *bh)
3046{
3047 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3048 kmem_cache_free(bh_cachep, bh);
3049 preempt_disable();
3050 __this_cpu_dec(bh_accounting.nr);
3051 recalc_bh_state();
3052 preempt_enable();
3053}
3054EXPORT_SYMBOL(free_buffer_head);
3055
3056static int buffer_exit_cpu_dead(unsigned int cpu)
3057{
3058 int i;
3059 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3060
3061 for (i = 0; i < BH_LRU_SIZE; i++) {
3062 brelse(b->bhs[i]);
3063 b->bhs[i] = NULL;
3064 }
3065 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3066 per_cpu(bh_accounting, cpu).nr = 0;
3067 return 0;
3068}
3069
3070/**
3071 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3072 * @bh: struct buffer_head
3073 *
3074 * Return true if the buffer is up-to-date, or false (with the buffer
3075 * locked) if not.
3076 */
3077int bh_uptodate_or_lock(struct buffer_head *bh)
3078{
3079 if (!buffer_uptodate(bh)) {
3080 lock_buffer(bh);
3081 if (!buffer_uptodate(bh))
3082 return 0;
3083 unlock_buffer(bh);
3084 }
3085 return 1;
3086}
3087EXPORT_SYMBOL(bh_uptodate_or_lock);
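
/*
 * The usual pairing with __bh_read() below (editor's sketch): on the
 * false path the buffer is already locked, so the read can be
 * submitted directly:
 *
 *	if (bh_uptodate_or_lock(bh))
 *		return 0;
 *	return __bh_read(bh, 0, true);	// waits; bh unlocked by end_io
 */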
3088
3089/**
3090 * __bh_read - Submit read for a locked buffer
3091 * @bh: struct buffer_head
3092 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3093 * @wait: wait until the read finishes
3094 *
3095 * Returns zero on success (or when not waiting), and -EIO on error.
3096 */
3097int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3098{
3099 int ret = 0;
3100
3101 BUG_ON(!buffer_locked(bh));
3102
3103 get_bh(bh);
3104 bh->b_end_io = end_buffer_read_sync;
3105 submit_bh(REQ_OP_READ | op_flags, bh);
3106 if (wait) {
3107 wait_on_buffer(bh);
3108 if (!buffer_uptodate(bh))
3109 ret = -EIO;
3110 }
3111 return ret;
3112}
3113EXPORT_SYMBOL(__bh_read);
3114
3115/**
3116 * __bh_read_batch - Submit read for a batch of unlocked buffers
3117 * @nr: entry number of the buffer batch
3118 * @bhs: a batch of struct buffer_head
3119 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3120 * @force_lock: if set, wait to acquire each buffer lock; otherwise skip
3121 *              any buffer that cannot be locked immediately
3122 *
3123 * The reads complete asynchronously via end_buffer_read_sync(); no
3124 * status is returned.
3124 */
3125void __bh_read_batch(int nr, struct buffer_head *bhs[],
3126 blk_opf_t op_flags, bool force_lock)
3127{
3128 int i;
3129
3130 for (i = 0; i < nr; i++) {
3131 struct buffer_head *bh = bhs[i];
3132
3133 if (buffer_uptodate(bh))
3134 continue;
3135
3136 if (force_lock)
3137 lock_buffer(bh);
3138 else
3139 if (!trylock_buffer(bh))
3140 continue;
3141
3142 if (buffer_uptodate(bh)) {
3143 unlock_buffer(bh);
3144 continue;
3145 }
3146
3147 bh->b_end_io = end_buffer_read_sync;
3148 get_bh(bh);
3149 submit_bh(REQ_OP_READ | op_flags, bh);
3150 }
3151}
3152EXPORT_SYMBOL(__bh_read_batch);
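
/*
 * Example (editor's sketch): opportunistic readahead of a batch of
 * metadata buffers, skipping any that are contended:
 *
 *	struct buffer_head *bhs[8];
 *
 *	// ... fill bhs[] via sb_getblk() ...
 *	__bh_read_batch(8, bhs, REQ_RAHEAD, false);
 */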
3153
3154void __init buffer_init(void)
3155{
3156 unsigned long nrpages;
3157 int ret;
3158
3159 bh_cachep = KMEM_CACHE(buffer_head,
3160 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3161 /*
3162 * Limit the bh occupancy to 10% of ZONE_NORMAL
3163 */
3164 nrpages = (nr_free_buffer_pages() * 10) / 100;
3165 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3166 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3167 NULL, buffer_exit_cpu_dead);
3168 WARN_ON(ret < 0);
3169}