1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/filemap.c
4 *
5 * Copyright (C) 1994-1999 Linus Torvalds
6 */
7
8/*
9 * This file handles the generic file mmap semantics used by
10 * most "normal" filesystems (but you don't /have/ to use this:
11 * the NFS filesystem used to do this differently, for example)
12 */
13#include <linux/export.h>
14#include <linux/compiler.h>
15#include <linux/dax.h>
16#include <linux/fs.h>
17#include <linux/sched/signal.h>
18#include <linux/uaccess.h>
19#include <linux/capability.h>
20#include <linux/kernel_stat.h>
21#include <linux/gfp.h>
22#include <linux/mm.h>
23#include <linux/swap.h>
24#include <linux/swapops.h>
25#include <linux/mman.h>
26#include <linux/pagemap.h>
27#include <linux/file.h>
28#include <linux/uio.h>
29#include <linux/error-injection.h>
30#include <linux/hash.h>
31#include <linux/writeback.h>
32#include <linux/backing-dev.h>
33#include <linux/pagevec.h>
34#include <linux/security.h>
35#include <linux/cpuset.h>
36#include <linux/hugetlb.h>
37#include <linux/memcontrol.h>
38#include <linux/shmem_fs.h>
39#include <linux/rmap.h>
40#include <linux/delayacct.h>
41#include <linux/psi.h>
42#include <linux/ramfs.h>
43#include <linux/page_idle.h>
44#include <linux/migrate.h>
45#include <asm/pgalloc.h>
46#include <asm/tlbflush.h>
47#include "internal.h"
48
49#define CREATE_TRACE_POINTS
50#include <trace/events/filemap.h>
51
52/*
53 * FIXME: remove all knowledge of the buffer layer from the core VM
54 */
55#include <linux/buffer_head.h> /* for try_to_free_buffers */
56
57#include <asm/mman.h>
58
59/*
60 * Shared mappings implemented 30.11.1994. It's not fully working yet,
61 * though.
62 *
63 * Shared mappings now work. 15.8.1995 Bruno.
64 *
65 * finished 'unifying' the page and buffer cache and SMP-threaded the
66 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
67 *
68 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
69 */
70
71/*
72 * Lock ordering:
73 *
74 * ->i_mmap_rwsem (truncate_pagecache)
75 * ->private_lock (__free_pte->block_dirty_folio)
76 * ->swap_lock (exclusive_swap_page, others)
77 * ->i_pages lock
78 *
79 * ->i_rwsem
80 * ->invalidate_lock (acquired by fs in truncate path)
81 * ->i_mmap_rwsem (truncate->unmap_mapping_range)
82 *
83 * ->mmap_lock
84 * ->i_mmap_rwsem
85 * ->page_table_lock or pte_lock (various, mainly in memory.c)
86 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
87 *
88 * ->mmap_lock
89 * ->invalidate_lock (filemap_fault)
90 * ->lock_page (filemap_fault, access_process_vm)
91 *
92 * ->i_rwsem (generic_perform_write)
93 * ->mmap_lock (fault_in_readable->do_page_fault)
94 *
95 * bdi->wb.list_lock
96 * sb_lock (fs/fs-writeback.c)
97 * ->i_pages lock (__sync_single_inode)
98 *
99 * ->i_mmap_rwsem
100 * ->anon_vma.lock (vma_adjust)
101 *
102 * ->anon_vma.lock
103 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
104 *
105 * ->page_table_lock or pte_lock
106 * ->swap_lock (try_to_unmap_one)
107 * ->private_lock (try_to_unmap_one)
108 * ->i_pages lock (try_to_unmap_one)
109 * ->lruvec->lru_lock (follow_page->mark_page_accessed)
110 * ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
111 * ->private_lock (page_remove_rmap->set_page_dirty)
112 * ->i_pages lock (page_remove_rmap->set_page_dirty)
113 * bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
114 * ->inode->i_lock (page_remove_rmap->set_page_dirty)
115 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
116 * bdi.wb->list_lock (zap_pte_range->set_page_dirty)
117 * ->inode->i_lock (zap_pte_range->set_page_dirty)
118 * ->private_lock (zap_pte_range->block_dirty_folio)
119 *
120 * ->i_mmap_rwsem
121 * ->tasklist_lock (memory_failure, collect_procs_ao)
122 */
123
124static void page_cache_delete(struct address_space *mapping,
125 struct folio *folio, void *shadow)
126{
127 XA_STATE(xas, &mapping->i_pages, folio->index);
128 long nr = 1;
129
130 mapping_set_update(&xas, mapping);
131
132 /* hugetlb pages are represented by a single entry in the xarray */
133 if (!folio_test_hugetlb(folio)) {
134 xas_set_order(&xas, folio->index, folio_order(folio));
135 nr = folio_nr_pages(folio);
136 }
137
138 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
139
140 xas_store(&xas, shadow);
141 xas_init_marks(&xas);
142
143 folio->mapping = NULL;
144 /* Leave page->index set: truncation lookup relies upon it */
145 mapping->nrpages -= nr;
146}
147
148static void filemap_unaccount_folio(struct address_space *mapping,
149 struct folio *folio)
150{
151 long nr;
152
153 VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
154 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
155 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
156 current->comm, folio_pfn(folio));
157 dump_page(&folio->page, "still mapped when deleted");
158 dump_stack();
159 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
160
161 if (mapping_exiting(mapping) && !folio_test_large(folio)) {
162 int mapcount = page_mapcount(&folio->page);
163
164 if (folio_ref_count(folio) >= mapcount + 2) {
165 /*
166 * All vmas have already been torn down, so it's
167 * a good bet that actually the page is unmapped
168 * and we'd rather not leak it: if we're wrong,
169 * another bad page check should catch it later.
170 */
171 page_mapcount_reset(&folio->page);
172 folio_ref_sub(folio, mapcount);
173 }
174 }
175 }
176
177 /* hugetlb folios do not participate in page cache accounting. */
178 if (folio_test_hugetlb(folio))
179 return;
180
181 nr = folio_nr_pages(folio);
182
183 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
184 if (folio_test_swapbacked(folio)) {
185 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
186 if (folio_test_pmd_mappable(folio))
187 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
188 } else if (folio_test_pmd_mappable(folio)) {
189 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
190 filemap_nr_thps_dec(mapping);
191 }
192
193 /*
194 * At this point folio must be either written or cleaned by
195 * truncate. Dirty folio here signals a bug and loss of
196 * unwritten data - on ordinary filesystems.
197 *
198 * But it's harmless on in-memory filesystems like tmpfs; and can
199 * occur when a driver which did get_user_pages() sets page dirty
200 * before putting it, while the inode is being finally evicted.
201 *
202 * Below fixes dirty accounting after removing the folio entirely
203 * but leaves the dirty flag set: it has no effect for truncated
204 * folio and anyway will be cleared before returning folio to
205 * buddy allocator.
206 */
207 if (WARN_ON_ONCE(folio_test_dirty(folio) &&
208 mapping_can_writeback(mapping)))
209 folio_account_cleaned(folio, inode_to_wb(mapping->host));
210}
211
212/*
213 * Delete a page from the page cache and free it. Caller has to make
214 * sure the page is locked and that nobody else uses it - or that usage
215 * is safe. The caller must hold the i_pages lock.
216 */
217void __filemap_remove_folio(struct folio *folio, void *shadow)
218{
219 struct address_space *mapping = folio->mapping;
220
221 trace_mm_filemap_delete_from_page_cache(folio);
222 filemap_unaccount_folio(mapping, folio);
223 page_cache_delete(mapping, folio, shadow);
224}
225
226void filemap_free_folio(struct address_space *mapping, struct folio *folio)
227{
228 void (*free_folio)(struct folio *);
229 int refs = 1;
230
231 free_folio = mapping->a_ops->free_folio;
232 if (free_folio)
233 free_folio(folio);
234
235 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
236 refs = folio_nr_pages(folio);
237 folio_put_refs(folio, refs);
238}
239
240/**
241 * filemap_remove_folio - Remove folio from page cache.
242 * @folio: The folio.
243 *
244 * This must be called only on folios that are locked and have been
245 * verified to be in the page cache. It will never put the folio into
246 * the free list because the caller has a reference on the page.
247 */
248void filemap_remove_folio(struct folio *folio)
249{
250 struct address_space *mapping = folio->mapping;
251
252 BUG_ON(!folio_test_locked(folio));
253 spin_lock(&mapping->host->i_lock);
254 xa_lock_irq(&mapping->i_pages);
255 __filemap_remove_folio(folio, NULL);
256 xa_unlock_irq(&mapping->i_pages);
257 if (mapping_shrinkable(mapping))
258 inode_add_lru(mapping->host);
259 spin_unlock(&mapping->host->i_lock);
260
261 filemap_free_folio(mapping, folio);
262}
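/*
 * Illustrative sketch, not part of filemap.c: how a truncate-style caller
 * typically drives filemap_remove_folio().  The helper name is hypothetical;
 * the caller must hold its own reference and take the folio lock, exactly as
 * the kernel-doc above requires.
 */
static void __maybe_unused example_evict_folio(struct folio *folio)
{
        folio_lock(folio);
        if (folio->mapping)             /* may have been removed while we slept */
                filemap_remove_folio(folio);
        folio_unlock(folio);
        folio_put(folio);               /* drop the caller's reference */
}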
263
264/*
265 * page_cache_delete_batch - delete several folios from page cache
266 * @mapping: the mapping to which folios belong
267 * @fbatch: batch of folios to delete
268 *
269 * The function walks over mapping->i_pages and removes folios passed in
270 * @fbatch from the mapping. The function expects @fbatch to be sorted
271 * by page index and is optimised for it to be dense.
272 * It tolerates holes in @fbatch (mapping entries at those indices are not
273 * modified).
274 *
275 * The function expects the i_pages lock to be held.
276 */
277static void page_cache_delete_batch(struct address_space *mapping,
278 struct folio_batch *fbatch)
279{
280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
281 long total_pages = 0;
282 int i = 0;
283 struct folio *folio;
284
285 mapping_set_update(&xas, mapping);
286 xas_for_each(&xas, folio, ULONG_MAX) {
287 if (i >= folio_batch_count(fbatch))
288 break;
289
290 /* A swap/dax/shadow entry got inserted? Skip it. */
291 if (xa_is_value(folio))
292 continue;
293 /*
294 * A page got inserted in our range? Skip it. We have our
295 * pages locked so they are protected from being removed.
296 * If we see a page whose index is higher than ours, it
297 * means our page has been removed, which shouldn't be
298 * possible because we're holding the page lock.
299 */
300 if (folio != fbatch->folios[i]) {
301 VM_BUG_ON_FOLIO(folio->index >
302 fbatch->folios[i]->index, folio);
303 continue;
304 }
305
306 WARN_ON_ONCE(!folio_test_locked(folio));
307
308 folio->mapping = NULL;
309 /* Leave folio->index set: truncation lookup relies on it */
310
311 i++;
312 xas_store(&xas, NULL);
313 total_pages += folio_nr_pages(folio);
314 }
315 mapping->nrpages -= total_pages;
316}
317
318void delete_from_page_cache_batch(struct address_space *mapping,
319 struct folio_batch *fbatch)
320{
321 int i;
322
323 if (!folio_batch_count(fbatch))
324 return;
325
326 spin_lock(&mapping->host->i_lock);
327 xa_lock_irq(&mapping->i_pages);
328 for (i = 0; i < folio_batch_count(fbatch); i++) {
329 struct folio *folio = fbatch->folios[i];
330
331 trace_mm_filemap_delete_from_page_cache(folio);
332 filemap_unaccount_folio(mapping, folio);
333 }
334 page_cache_delete_batch(mapping, fbatch);
335 xa_unlock_irq(&mapping->i_pages);
336 if (mapping_shrinkable(mapping))
337 inode_add_lru(mapping->host);
338 spin_unlock(&mapping->host->i_lock);
339
340 for (i = 0; i < folio_batch_count(fbatch); i++)
341 filemap_free_folio(mapping, fbatch->folios[i]);
342}
343
344int filemap_check_errors(struct address_space *mapping)
345{
346 int ret = 0;
347 /* Check for outstanding write errors */
348 if (test_bit(AS_ENOSPC, &mapping->flags) &&
349 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
350 ret = -ENOSPC;
351 if (test_bit(AS_EIO, &mapping->flags) &&
352 test_and_clear_bit(AS_EIO, &mapping->flags))
353 ret = -EIO;
354 return ret;
355}
356EXPORT_SYMBOL(filemap_check_errors);
357
358static int filemap_check_and_keep_errors(struct address_space *mapping)
359{
360 /* Check for outstanding write errors */
361 if (test_bit(AS_EIO, &mapping->flags))
362 return -EIO;
363 if (test_bit(AS_ENOSPC, &mapping->flags))
364 return -ENOSPC;
365 return 0;
366}
367
368/**
369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
370 * @mapping: address space structure to write
371 * @wbc: the writeback_control controlling the writeout
372 *
373 * Call writepages on the mapping using the provided wbc to control the
374 * writeout.
375 *
376 * Return: %0 on success, negative error code otherwise.
377 */
378int filemap_fdatawrite_wbc(struct address_space *mapping,
379 struct writeback_control *wbc)
380{
381 int ret;
382
383 if (!mapping_can_writeback(mapping) ||
384 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
385 return 0;
386
387 wbc_attach_fdatawrite_inode(wbc, mapping->host);
388 ret = do_writepages(mapping, wbc);
389 wbc_detach_inode(wbc);
390 return ret;
391}
392EXPORT_SYMBOL(filemap_fdatawrite_wbc);
393
394/**
395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
396 * @mapping: address space structure to write
397 * @start: offset in bytes where the range starts
398 * @end: offset in bytes where the range ends (inclusive)
399 * @sync_mode: enable synchronous operation
400 *
401 * Start writeback against all of a mapping's dirty pages that lie
402 * within the byte offsets <start, end> inclusive.
403 *
404 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
405 * opposed to a regular memory cleansing writeback. The difference between
406 * these two operations is that if a dirty page/buffer is encountered, it must
407 * be waited upon, and not just skipped over.
408 *
409 * Return: %0 on success, negative error code otherwise.
410 */
411int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
412 loff_t end, int sync_mode)
413{
414 struct writeback_control wbc = {
415 .sync_mode = sync_mode,
416 .nr_to_write = LONG_MAX,
417 .range_start = start,
418 .range_end = end,
419 };
420
421 return filemap_fdatawrite_wbc(mapping, &wbc);
422}
423
424static inline int __filemap_fdatawrite(struct address_space *mapping,
425 int sync_mode)
426{
427 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
428}
429
430int filemap_fdatawrite(struct address_space *mapping)
431{
432 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
433}
434EXPORT_SYMBOL(filemap_fdatawrite);
435
436int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
437 loff_t end)
438{
439 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
440}
441EXPORT_SYMBOL(filemap_fdatawrite_range);
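/*
 * Illustrative sketch, not part of filemap.c: the classic two-step
 * data-integrity pattern built from filemap_fdatawrite_range() and
 * filemap_fdatawait_range() (defined further down).  The combined helper
 * filemap_write_and_wait_range() below does both; callers that need to do
 * work between starting and waiting on writeback use the separate calls.
 * The helper name here is hypothetical.
 */
static int __maybe_unused example_sync_range(struct address_space *mapping,
                                             loff_t start, loff_t end)
{
        int err;

        /* Start WB_SYNC_ALL writeback for the byte range */
        err = filemap_fdatawrite_range(mapping, start, end);
        if (err)
                return err;

        /* Wait for completion and collect any error flagged on the mapping */
        return filemap_fdatawait_range(mapping, start, end);
}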
442
443/**
444 * filemap_flush - mostly a non-blocking flush
445 * @mapping: target address_space
446 *
447 * This is a mostly non-blocking flush. Not suitable for data-integrity
448 * purposes - I/O may not be started against all dirty pages.
449 *
450 * Return: %0 on success, negative error code otherwise.
451 */
452int filemap_flush(struct address_space *mapping)
453{
454 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
455}
456EXPORT_SYMBOL(filemap_flush);
457
458/**
459 * filemap_range_has_page - check if a page exists in range.
460 * @mapping: address space within which to check
461 * @start_byte: offset in bytes where the range starts
462 * @end_byte: offset in bytes where the range ends (inclusive)
463 *
464 * Find at least one page in the range supplied, usually used to check if
465 * direct writing in this range will trigger a writeback.
466 *
467 * Return: %true if at least one page exists in the specified range,
468 * %false otherwise.
469 */
470bool filemap_range_has_page(struct address_space *mapping,
471 loff_t start_byte, loff_t end_byte)
472{
473 struct page *page;
474 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
475 pgoff_t max = end_byte >> PAGE_SHIFT;
476
477 if (end_byte < start_byte)
478 return false;
479
480 rcu_read_lock();
481 for (;;) {
482 page = xas_find(&xas, max);
483 if (xas_retry(&xas, page))
484 continue;
485 /* Shadow entries don't count */
486 if (xa_is_value(page))
487 continue;
488 /*
489 * We don't need to try to pin this page; we're about to
490 * release the RCU lock anyway. It is enough to know that
491 * there was a page here recently.
492 */
493 break;
494 }
495 rcu_read_unlock();
496
497 return page != NULL;
498}
499EXPORT_SYMBOL(filemap_range_has_page);
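/*
 * Illustrative sketch, not part of filemap.c: a direct-IO write path can use
 * filemap_range_has_page() to decide whether it must flush and invalidate
 * the page cache before issuing the write.  The helper name is hypothetical.
 */
static bool __maybe_unused example_dio_needs_flush(struct file *file,
                                                   loff_t pos, size_t count)
{
        /* Any cached page in [pos, pos + count - 1] forces a flush first */
        return filemap_range_has_page(file->f_mapping, pos, pos + count - 1);
}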
500
501static void __filemap_fdatawait_range(struct address_space *mapping,
502 loff_t start_byte, loff_t end_byte)
503{
504 pgoff_t index = start_byte >> PAGE_SHIFT;
505 pgoff_t end = end_byte >> PAGE_SHIFT;
506 struct pagevec pvec;
507 int nr_pages;
508
509 pagevec_init(&pvec);
510 while (index <= end) {
511 unsigned i;
512
513 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
514 end, PAGECACHE_TAG_WRITEBACK);
515 if (!nr_pages)
516 break;
517
518 for (i = 0; i < nr_pages; i++) {
519 struct page *page = pvec.pages[i];
520
521 wait_on_page_writeback(page);
522 ClearPageError(page);
523 }
524 pagevec_release(&pvec);
525 cond_resched();
526 }
527}
528
529/**
530 * filemap_fdatawait_range - wait for writeback to complete
531 * @mapping: address space structure to wait for
532 * @start_byte: offset in bytes where the range starts
533 * @end_byte: offset in bytes where the range ends (inclusive)
534 *
535 * Walk the list of under-writeback pages of the given address space
536 * in the given range and wait for all of them. Check error status of
537 * the address space and return it.
538 *
539 * Since the error status of the address space is cleared by this function,
540 * callers are responsible for checking the return value and handling and/or
541 * reporting the error.
542 *
543 * Return: error status of the address space.
544 */
545int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
546 loff_t end_byte)
547{
548 __filemap_fdatawait_range(mapping, start_byte, end_byte);
549 return filemap_check_errors(mapping);
550}
551EXPORT_SYMBOL(filemap_fdatawait_range);
552
553/**
554 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
555 * @mapping: address space structure to wait for
556 * @start_byte: offset in bytes where the range starts
557 * @end_byte: offset in bytes where the range ends (inclusive)
558 *
559 * Walk the list of under-writeback pages of the given address space in the
560 * given range and wait for all of them. Unlike filemap_fdatawait_range(),
561 * this function does not clear error status of the address space.
562 *
563 * Use this function if callers don't handle errors themselves. Expected
564 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
565 * fsfreeze(8)
566 */
567int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
568 loff_t start_byte, loff_t end_byte)
569{
570 __filemap_fdatawait_range(mapping, start_byte, end_byte);
571 return filemap_check_and_keep_errors(mapping);
572}
573EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
574
575/**
576 * file_fdatawait_range - wait for writeback to complete
577 * @file: file pointing to address space structure to wait for
578 * @start_byte: offset in bytes where the range starts
579 * @end_byte: offset in bytes where the range ends (inclusive)
580 *
581 * Walk the list of under-writeback pages of the address space that file
582 * refers to, in the given range and wait for all of them. Check error
583 * status of the address space vs. the file->f_wb_err cursor and return it.
584 *
585 * Since the error status of the file is advanced by this function,
586 * callers are responsible for checking the return value and handling and/or
587 * reporting the error.
588 *
589 * Return: error status of the address space vs. the file->f_wb_err cursor.
590 */
591int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
592{
593 struct address_space *mapping = file->f_mapping;
594
595 __filemap_fdatawait_range(mapping, start_byte, end_byte);
596 return file_check_and_advance_wb_err(file);
597}
598EXPORT_SYMBOL(file_fdatawait_range);
599
600/**
601 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
602 * @mapping: address space structure to wait for
603 *
604 * Walk the list of under-writeback pages of the given address space
605 * and wait for all of them. Unlike filemap_fdatawait(), this function
606 * does not clear error status of the address space.
607 *
608 * Use this function if callers don't handle errors themselves. Expected
609 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
610 * fsfreeze(8)
611 *
612 * Return: error status of the address space.
613 */
614int filemap_fdatawait_keep_errors(struct address_space *mapping)
615{
616 __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
617 return filemap_check_and_keep_errors(mapping);
618}
619EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
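/*
 * Illustrative sketch, not part of filemap.c: a sync(2)-style flusher waits
 * for writeback but must not consume the error state, so that a later
 * fsync() from the file's owner still observes it.  Hypothetical helper.
 */
static void __maybe_unused example_wait_one_inode(struct inode *inode)
{
        /* Leaves AS_EIO/AS_ENOSPC set on the mapping for later reporting */
        filemap_fdatawait_keep_errors(inode->i_mapping);
}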
620
621/* Returns true if writeback might be needed or already in progress. */
622static bool mapping_needs_writeback(struct address_space *mapping)
623{
624 return mapping->nrpages;
625}
626
627bool filemap_range_has_writeback(struct address_space *mapping,
628 loff_t start_byte, loff_t end_byte)
629{
630 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
631 pgoff_t max = end_byte >> PAGE_SHIFT;
632 struct folio *folio;
633
634 if (end_byte < start_byte)
635 return false;
636
637 rcu_read_lock();
638 xas_for_each(&xas, folio, max) {
639 if (xas_retry(&xas, folio))
640 continue;
641 if (xa_is_value(folio))
642 continue;
643 if (folio_test_dirty(folio) || folio_test_locked(folio) ||
644 folio_test_writeback(folio))
645 break;
646 }
647 rcu_read_unlock();
648 return folio != NULL;
649}
650EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
651
652/**
653 * filemap_write_and_wait_range - write out & wait on a file range
654 * @mapping: the address_space for the pages
655 * @lstart: offset in bytes where the range starts
656 * @lend: offset in bytes where the range ends (inclusive)
657 *
658 * Write out and wait upon file offsets lstart->lend, inclusive.
659 *
660 * Note that @lend is inclusive (describes the last byte to be written) so
661 * that this function can be used to write to the very end-of-file (end = -1).
662 *
663 * Return: error status of the address space.
664 */
665int filemap_write_and_wait_range(struct address_space *mapping,
666 loff_t lstart, loff_t lend)
667{
668 int err = 0, err2;
669
670 if (lend < lstart)
671 return 0;
672
673 if (mapping_needs_writeback(mapping)) {
674 err = __filemap_fdatawrite_range(mapping, lstart, lend,
675 WB_SYNC_ALL);
676 /*
677 * Even if the above returned error, the pages may be
678 * written partially (e.g. -ENOSPC), so we wait for it.
679 * But the -EIO is special case, it may indicate the worst
680 * thing (e.g. bug) happened, so we avoid waiting for it.
681 */
682 if (err != -EIO)
683 __filemap_fdatawait_range(mapping, lstart, lend);
684 }
685 err2 = filemap_check_errors(mapping);
686 if (!err)
687 err = err2;
688 return err;
689}
690EXPORT_SYMBOL(filemap_write_and_wait_range);
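/*
 * Illustrative sketch, not part of filemap.c: the data portion of a simple
 * ->fsync() implementation.  A real implementation would go on to flush the
 * inode's metadata or commit its journal; the helper name is hypothetical.
 */
static int __maybe_unused example_fsync_data(struct file *file, loff_t start,
                                             loff_t end, int datasync)
{
        int err;

        err = filemap_write_and_wait_range(file->f_mapping, start, end);
        if (err)
                return err;

        /* ... flush metadata / commit the journal for file_inode(file) ... */
        return 0;
}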
691
692void __filemap_set_wb_err(struct address_space *mapping, int err)
693{
694 errseq_t eseq = errseq_set(&mapping->wb_err, err);
695
696 trace_filemap_set_wb_err(mapping, eseq);
697}
698EXPORT_SYMBOL(__filemap_set_wb_err);
699
700/**
701 * file_check_and_advance_wb_err - report wb error (if any) that was previously
702 * recorded and advance wb_err to the current one
703 * @file: struct file on which the error is being reported
704 *
705 * When userland calls fsync (or something like nfsd does the equivalent), we
706 * want to report any writeback errors that occurred since the last fsync (or
707 * since the file was opened if there haven't been any).
708 *
709 * Grab the wb_err from the mapping. If it matches what we have in the file,
710 * then just quickly return 0. The file is all caught up.
711 *
712 * If it doesn't match, then take the mapping value, set the "seen" flag in
713 * it and try to swap it into place. If it works, or another task beat us
714 * to it with the new value, then update the f_wb_err and return the error
715 * portion. The error at this point must be reported via proper channels
716 * (a'la fsync, or NFS COMMIT operation, etc.).
717 *
718 * While we handle mapping->wb_err with atomic operations, the f_wb_err
719 * value is protected by the f_lock since we must ensure that it reflects
720 * the latest value swapped in for this file descriptor.
721 *
722 * Return: %0 on success, negative error code otherwise.
723 */
724int file_check_and_advance_wb_err(struct file *file)
725{
726 int err = 0;
727 errseq_t old = READ_ONCE(file->f_wb_err);
728 struct address_space *mapping = file->f_mapping;
729
730 /* Locklessly handle the common case where nothing has changed */
731 if (errseq_check(&mapping->wb_err, old)) {
732 /* Something changed, must use slow path */
733 spin_lock(&file->f_lock);
734 old = file->f_wb_err;
735 err = errseq_check_and_advance(&mapping->wb_err,
736 &file->f_wb_err);
737 trace_file_check_and_advance_wb_err(file, old);
738 spin_unlock(&file->f_lock);
739 }
740
741 /*
742 * We're mostly using this function as a drop-in replacement for
743 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
744 * that the legacy code would have had on these flags.
745 */
746 clear_bit(AS_EIO, &mapping->flags);
747 clear_bit(AS_ENOSPC, &mapping->flags);
748 return err;
749}
750EXPORT_SYMBOL(file_check_and_advance_wb_err);
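/*
 * Illustrative sketch, not part of filemap.c: a ->flush()-style method that
 * only wants to report writeback errors seen since this struct file last
 * checked, without starting new writeback.  Hypothetical helper.
 */
static int __maybe_unused example_flush(struct file *file, fl_owner_t id)
{
        /* Advances file->f_wb_err so the same error is reported only once */
        return file_check_and_advance_wb_err(file);
}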
751
752/**
753 * file_write_and_wait_range - write out & wait on a file range
754 * @file: file pointing to address_space with pages
755 * @lstart: offset in bytes where the range starts
756 * @lend: offset in bytes where the range ends (inclusive)
757 *
758 * Write out and wait upon file offsets lstart->lend, inclusive.
759 *
760 * Note that @lend is inclusive (describes the last byte to be written) so
761 * that this function can be used to write to the very end-of-file (end = -1).
762 *
763 * After writing out and waiting on the data, we check and advance the
764 * f_wb_err cursor to the latest value, and return any errors detected there.
765 *
766 * Return: %0 on success, negative error code otherwise.
767 */
768int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
769{
770 int err = 0, err2;
771 struct address_space *mapping = file->f_mapping;
772
773 if (lend < lstart)
774 return 0;
775
776 if (mapping_needs_writeback(mapping)) {
777 err = __filemap_fdatawrite_range(mapping, lstart, lend,
778 WB_SYNC_ALL);
779 /* See comment of filemap_write_and_wait() */
780 if (err != -EIO)
781 __filemap_fdatawait_range(mapping, lstart, lend);
782 }
783 err2 = file_check_and_advance_wb_err(file);
784 if (!err)
785 err = err2;
786 return err;
787}
788EXPORT_SYMBOL(file_write_and_wait_range);
789
790/**
791 * replace_page_cache_folio - replace a pagecache folio with a new one
792 * @old: folio to be replaced
793 * @new: folio to replace with
794 *
795 * This function replaces a folio in the pagecache with a new one. On
796 * success it acquires the pagecache reference for the new folio and
797 * drops it for the old folio. Both the old and new folios must be
798 * locked. This function does not add the new folio to the LRU, the
799 * caller must do that.
800 *
801 * The remove + add is atomic. This function cannot fail.
802 */
803void replace_page_cache_folio(struct folio *old, struct folio *new)
804{
805 struct address_space *mapping = old->mapping;
806 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
807 pgoff_t offset = old->index;
808 XA_STATE(xas, &mapping->i_pages, offset);
809
810 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
811 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
812 VM_BUG_ON_FOLIO(new->mapping, new);
813
814 folio_get(new);
815 new->mapping = mapping;
816 new->index = offset;
817
818 mem_cgroup_migrate(old, new);
819
820 xas_lock_irq(&xas);
821 xas_store(&xas, new);
822
823 old->mapping = NULL;
824 /* hugetlb pages do not participate in page cache accounting. */
825 if (!folio_test_hugetlb(old))
826 __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
827 if (!folio_test_hugetlb(new))
828 __lruvec_stat_add_folio(new, NR_FILE_PAGES);
829 if (folio_test_swapbacked(old))
830 __lruvec_stat_sub_folio(old, NR_SHMEM);
831 if (folio_test_swapbacked(new))
832 __lruvec_stat_add_folio(new, NR_SHMEM);
833 xas_unlock_irq(&xas);
834 if (free_folio)
835 free_folio(old);
836 folio_put(old);
837}
838EXPORT_SYMBOL_GPL(replace_page_cache_folio);
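/*
 * Illustrative sketch, not part of filemap.c: swapping a freshly prepared
 * folio in place of a cached one (as a FUSE-style splice-move might do).
 * Both folios are locked by the caller; the new folio still has to be put
 * on the LRU by hand, as the kernel-doc above notes.  Hypothetical helper.
 */
static void __maybe_unused example_replace(struct folio *old, struct folio *new)
{
        replace_page_cache_folio(old, new);
        folio_add_lru(new);
}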
839
840noinline int __filemap_add_folio(struct address_space *mapping,
841 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
842{
843 XA_STATE(xas, &mapping->i_pages, index);
844 int huge = folio_test_hugetlb(folio);
845 bool charged = false;
846 long nr = 1;
847
848 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
849 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
850 mapping_set_update(&xas, mapping);
851
852 if (!huge) {
853 int error = mem_cgroup_charge(folio, NULL, gfp);
854 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
855 if (error)
856 return error;
857 charged = true;
858 xas_set_order(&xas, index, folio_order(folio));
859 nr = folio_nr_pages(folio);
860 }
861
862 gfp &= GFP_RECLAIM_MASK;
863 folio_ref_add(folio, nr);
864 folio->mapping = mapping;
865 folio->index = xas.xa_index;
866
867 do {
868 unsigned int order = xa_get_order(xas.xa, xas.xa_index);
869 void *entry, *old = NULL;
870
871 if (order > folio_order(folio))
872 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
873 order, gfp);
874 xas_lock_irq(&xas);
875 xas_for_each_conflict(&xas, entry) {
876 old = entry;
877 if (!xa_is_value(entry)) {
878 xas_set_err(&xas, -EEXIST);
879 goto unlock;
880 }
881 }
882
883 if (old) {
884 if (shadowp)
885 *shadowp = old;
886 /* entry may have been split before we acquired lock */
887 order = xa_get_order(xas.xa, xas.xa_index);
888 if (order > folio_order(folio)) {
889 /* How to handle large swap entries? */
890 BUG_ON(shmem_mapping(mapping));
891 xas_split(&xas, old, order);
892 xas_reset(&xas);
893 }
894 }
895
896 xas_store(&xas, folio);
897 if (xas_error(&xas))
898 goto unlock;
899
900 mapping->nrpages += nr;
901
902 /* hugetlb pages do not participate in page cache accounting */
903 if (!huge) {
904 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
905 if (folio_test_pmd_mappable(folio))
906 __lruvec_stat_mod_folio(folio,
907 NR_FILE_THPS, nr);
908 }
909unlock:
910 xas_unlock_irq(&xas);
911 } while (xas_nomem(&xas, gfp));
912
913 if (xas_error(&xas))
914 goto error;
915
916 trace_mm_filemap_add_to_page_cache(folio);
917 return 0;
918error:
919 if (charged)
920 mem_cgroup_uncharge(folio);
921 folio->mapping = NULL;
922 /* Leave page->index set: truncation relies upon it */
923 folio_put_refs(folio, nr);
924 return xas_error(&xas);
925}
926ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
927
928int filemap_add_folio(struct address_space *mapping, struct folio *folio,
929 pgoff_t index, gfp_t gfp)
930{
931 void *shadow = NULL;
932 int ret;
933
934 __folio_set_locked(folio);
935 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
936 if (unlikely(ret))
937 __folio_clear_locked(folio);
938 else {
939 /*
940 * The folio might have been evicted from cache only
941 * recently, in which case it should be activated like
942 * any other repeatedly accessed folio.
943 * The exception is folios getting rewritten; evicting other
944 * data from the working set, only to cache data that will
945 * get overwritten with something else, is a waste of memory.
946 */
947 WARN_ON_ONCE(folio_test_active(folio));
948 if (!(gfp & __GFP_WRITE) && shadow)
949 workingset_refault(folio, shadow);
950 folio_add_lru(folio);
951 }
952 return ret;
953}
954EXPORT_SYMBOL_GPL(filemap_add_folio);
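/*
 * Illustrative sketch, not part of filemap.c: the usual allocate-and-insert
 * pattern for populating the cache, as the read and readahead paths do.  On
 * success the folio is returned locked and already in the page cache; a real
 * caller would also retry on -EEXIST.  Hypothetical helper.
 */
static struct folio * __maybe_unused
example_create_folio(struct address_space *mapping, pgoff_t index, gfp_t gfp)
{
        struct folio *folio;
        int err;

        folio = filemap_alloc_folio(gfp, 0);
        if (!folio)
                return ERR_PTR(-ENOMEM);

        err = filemap_add_folio(mapping, folio, index, gfp);
        if (err) {
                folio_put(folio);
                return ERR_PTR(err);
        }
        /* Locked and in the cache: caller fills it, marks uptodate, unlocks */
        return folio;
}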
955
956#ifdef CONFIG_NUMA
957struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
958{
959 int n;
960 struct folio *folio;
961
962 if (cpuset_do_page_mem_spread()) {
963 unsigned int cpuset_mems_cookie;
964 do {
965 cpuset_mems_cookie = read_mems_allowed_begin();
966 n = cpuset_mem_spread_node();
967 folio = __folio_alloc_node(gfp, order, n);
968 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
969
970 return folio;
971 }
972 return folio_alloc(gfp, order);
973}
974EXPORT_SYMBOL(filemap_alloc_folio);
975#endif
976
977/*
978 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
979 *
980 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
981 *
982 * @mapping1: the first mapping to lock
983 * @mapping2: the second mapping to lock
984 */
985void filemap_invalidate_lock_two(struct address_space *mapping1,
986 struct address_space *mapping2)
987{
988 if (mapping1 > mapping2)
989 swap(mapping1, mapping2);
990 if (mapping1)
991 down_write(&mapping1->invalidate_lock);
992 if (mapping2 && mapping1 != mapping2)
993 down_write_nested(&mapping2->invalidate_lock, 1);
994}
995EXPORT_SYMBOL(filemap_invalidate_lock_two);
996
997/*
998 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
999 *
1000 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1001 *
1002 * @mapping1: the first mapping to unlock
1003 * @mapping2: the second mapping to unlock
1004 */
1005void filemap_invalidate_unlock_two(struct address_space *mapping1,
1006 struct address_space *mapping2)
1007{
1008 if (mapping1)
1009 up_write(&mapping1->invalidate_lock);
1010 if (mapping2 && mapping1 != mapping2)
1011 up_write(&mapping2->invalidate_lock);
1012}
1013EXPORT_SYMBOL(filemap_invalidate_unlock_two);
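/*
 * Illustrative sketch, not part of filemap.c: an operation that instantiates
 * or truncates pages in two files at once (remap_file_range()-style) takes
 * both invalidate_locks through this helper pair, which sorts the two
 * mappings so the lock ordering stays consistent.  Hypothetical helper.
 */
static void __maybe_unused example_exchange_ranges(struct inode *src,
                                                   struct inode *dst)
{
        filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);
        /* ... shuffle page cache / extents between the two inodes ... */
        filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
}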
1014
1015/*
1016 * In order to wait for pages to become available there must be
1017 * waitqueues associated with pages. By using a hash table of
1018 * waitqueues where the bucket discipline is to maintain all
1019 * waiters on the same queue and wake all when any of the pages
1020 * become available, and for the woken contexts to check to be
1021 * sure the appropriate page became available, this saves space
1022 * at a cost of "thundering herd" phenomena during rare hash
1023 * collisions.
1024 */
1025#define PAGE_WAIT_TABLE_BITS 8
1026#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1027static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1028
1029static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1030{
1031 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1032}
1033
1034void __init pagecache_init(void)
1035{
1036 int i;
1037
1038 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1039 init_waitqueue_head(&folio_wait_table[i]);
1040
1041 page_writeback_init();
1042}
1043
1044/*
1045 * The page wait code treats the "wait->flags" somewhat unusually, because
1046 * we have multiple different kinds of waits, not just the usual "exclusive"
1047 * one.
1048 *
1049 * We have:
1050 *
1051 * (a) no special bits set:
1052 *
1053 * We're just waiting for the bit to be released, and when a waker
1054 * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1055 * and remove it from the wait queue.
1056 *
1057 * Simple and straightforward.
1058 *
1059 * (b) WQ_FLAG_EXCLUSIVE:
1060 *
1061 * The waiter is waiting to get the lock, and only one waiter should
1062 * be woken up to avoid any thundering herd behavior. We'll set the
1063 * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1064 *
1065 * This is the traditional exclusive wait.
1066 *
1067 * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1068 *
1069 * The waiter is waiting to get the bit, and additionally wants the
1070 * lock to be transferred to it for fair lock behavior. If the lock
1071 * cannot be taken, we stop walking the wait queue without waking
1072 * the waiter.
1073 *
1074 * This is the "fair lock handoff" case, and in addition to setting
1075 * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1076 * that it now has the lock.
1077 */
1078static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1079{
1080 unsigned int flags;
1081 struct wait_page_key *key = arg;
1082 struct wait_page_queue *wait_page
1083 = container_of(wait, struct wait_page_queue, wait);
1084
1085 if (!wake_page_match(wait_page, key))
1086 return 0;
1087
1088 /*
1089 * If it's a lock handoff wait, we get the bit for it, and
1090 * stop walking (and do not wake it up) if we can't.
1091 */
1092 flags = wait->flags;
1093 if (flags & WQ_FLAG_EXCLUSIVE) {
1094 if (test_bit(key->bit_nr, &key->folio->flags))
1095 return -1;
1096 if (flags & WQ_FLAG_CUSTOM) {
1097 if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1098 return -1;
1099 flags |= WQ_FLAG_DONE;
1100 }
1101 }
1102
1103 /*
1104 * We are holding the wait-queue lock, but the waiter that
1105 * is waiting for this will be checking the flags without
1106 * any locking.
1107 *
1108 * So update the flags atomically, and wake up the waiter
1109 * afterwards to avoid any races. This store-release pairs
1110 * with the load-acquire in folio_wait_bit_common().
1111 */
1112 smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1113 wake_up_state(wait->private, mode);
1114
1115 /*
1116 * Ok, we have successfully done what we're waiting for,
1117 * and we can unconditionally remove the wait entry.
1118 *
1119 * Note that this pairs with the "finish_wait()" in the
1120 * waiter, and has to be the absolute last thing we do.
1121 * After this list_del_init(&wait->entry) the wait entry
1122 * might be de-allocated and the process might even have
1123 * exited.
1124 */
1125 list_del_init_careful(&wait->entry);
1126 return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1127}
1128
1129static void folio_wake_bit(struct folio *folio, int bit_nr)
1130{
1131 wait_queue_head_t *q = folio_waitqueue(folio);
1132 struct wait_page_key key;
1133 unsigned long flags;
1134 wait_queue_entry_t bookmark;
1135
1136 key.folio = folio;
1137 key.bit_nr = bit_nr;
1138 key.page_match = 0;
1139
1140 bookmark.flags = 0;
1141 bookmark.private = NULL;
1142 bookmark.func = NULL;
1143 INIT_LIST_HEAD(&bookmark.entry);
1144
1145 spin_lock_irqsave(&q->lock, flags);
1146 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1147
1148 while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1149 /*
1150 * Take a breather from holding the lock,
1151 * allowing waiters that have already been woken
1152 * to take the lock and remove themselves
1153 * from the wait queue
1154 */
1155 spin_unlock_irqrestore(&q->lock, flags);
1156 cpu_relax();
1157 spin_lock_irqsave(&q->lock, flags);
1158 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1159 }
1160
1161 /*
1162 * It's possible to miss clearing waiters here, when we woke our page
1163 * waiters, but the hashed waitqueue has waiters for other pages on it.
1164 * That's okay, it's a rare case. The next waker will clear it.
1165 *
1166 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1167 * other), the flag may be cleared in the course of freeing the page;
1168 * but that is not required for correctness.
1169 */
1170 if (!waitqueue_active(q) || !key.page_match)
1171 folio_clear_waiters(folio);
1172
1173 spin_unlock_irqrestore(&q->lock, flags);
1174}
1175
1176static void folio_wake(struct folio *folio, int bit)
1177{
1178 if (!folio_test_waiters(folio))
1179 return;
1180 folio_wake_bit(folio, bit);
1181}
1182
1183/*
1184 * A choice of three behaviors for folio_wait_bit_common():
1185 */
1186enum behavior {
1187 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1188 * __folio_lock() waiting on then setting PG_locked.
1189 */
1190 SHARED, /* Hold ref to page and check the bit when woken, like
1191 * folio_wait_writeback() waiting on PG_writeback.
1192 */
1193 DROP, /* Drop ref to page before wait, no check when woken,
1194 * like folio_put_wait_locked() on PG_locked.
1195 */
1196};
1197
1198/*
1199 * Attempt to check (or get) the folio flag, and mark us done
1200 * if successful.
1201 */
1202static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1203 struct wait_queue_entry *wait)
1204{
1205 if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1206 if (test_and_set_bit(bit_nr, &folio->flags))
1207 return false;
1208 } else if (test_bit(bit_nr, &folio->flags))
1209 return false;
1210
1211 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1212 return true;
1213}
1214
1215/* How many times do we accept lock stealing from under a waiter? */
1216int sysctl_page_lock_unfairness = 5;
1217
1218static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1219 int state, enum behavior behavior)
1220{
1221 wait_queue_head_t *q = folio_waitqueue(folio);
1222 int unfairness = sysctl_page_lock_unfairness;
1223 struct wait_page_queue wait_page;
1224 wait_queue_entry_t *wait = &wait_page.wait;
1225 bool thrashing = false;
1226 unsigned long pflags;
1227 bool in_thrashing;
1228
1229 if (bit_nr == PG_locked &&
1230 !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1231 delayacct_thrashing_start(&in_thrashing);
1232 psi_memstall_enter(&pflags);
1233 thrashing = true;
1234 }
1235
1236 init_wait(wait);
1237 wait->func = wake_page_function;
1238 wait_page.folio = folio;
1239 wait_page.bit_nr = bit_nr;
1240
1241repeat:
1242 wait->flags = 0;
1243 if (behavior == EXCLUSIVE) {
1244 wait->flags = WQ_FLAG_EXCLUSIVE;
1245 if (--unfairness < 0)
1246 wait->flags |= WQ_FLAG_CUSTOM;
1247 }
1248
1249 /*
1250 * Do one last check whether we can get the
1251 * page bit synchronously.
1252 *
1253 * Do the folio_set_waiters() marking before that
1254 * to let any waker we _just_ missed know they
1255 * need to wake us up (otherwise they'll never
1256 * even go to the slow case that looks at the
1257 * page queue), and add ourselves to the wait
1258 * queue if we need to sleep.
1259 *
1260 * This part needs to be done under the queue
1261 * lock to avoid races.
1262 */
1263 spin_lock_irq(&q->lock);
1264 folio_set_waiters(folio);
1265 if (!folio_trylock_flag(folio, bit_nr, wait))
1266 __add_wait_queue_entry_tail(q, wait);
1267 spin_unlock_irq(&q->lock);
1268
1269 /*
1270 * From now on, all the logic will be based on
1271 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1272 * see whether the page bit testing has already
1273 * been done by the wake function.
1274 *
1275 * We can drop our reference to the folio.
1276 */
1277 if (behavior == DROP)
1278 folio_put(folio);
1279
1280 /*
1281 * Note that until the "finish_wait()", or until
1282 * we see the WQ_FLAG_WOKEN flag, we need to
1283 * be very careful with the 'wait->flags', because
1284 * we may race with a waker that sets them.
1285 */
1286 for (;;) {
1287 unsigned int flags;
1288
1289 set_current_state(state);
1290
1291 /* Loop until we've been woken or interrupted */
1292 flags = smp_load_acquire(&wait->flags);
1293 if (!(flags & WQ_FLAG_WOKEN)) {
1294 if (signal_pending_state(state, current))
1295 break;
1296
1297 io_schedule();
1298 continue;
1299 }
1300
1301 /* If we were non-exclusive, we're done */
1302 if (behavior != EXCLUSIVE)
1303 break;
1304
1305 /* If the waker got the lock for us, we're done */
1306 if (flags & WQ_FLAG_DONE)
1307 break;
1308
1309 /*
1310 * Otherwise, if we're getting the lock, we need to
1311 * try to get it ourselves.
1312 *
1313 * And if that fails, we'll have to retry this all.
1314 */
1315 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1316 goto repeat;
1317
1318 wait->flags |= WQ_FLAG_DONE;
1319 break;
1320 }
1321
1322 /*
1323 * If a signal happened, this 'finish_wait()' may remove the last
1324 * waiter from the wait-queues, but the folio waiters bit will remain
1325 * set. That's ok. The next wakeup will take care of it, and trying
1326 * to do it here would be difficult and prone to races.
1327 */
1328 finish_wait(q, wait);
1329
1330 if (thrashing) {
1331 delayacct_thrashing_end(&in_thrashing);
1332 psi_memstall_leave(&pflags);
1333 }
1334
1335 /*
1336 * NOTE! The wait->flags weren't stable until we've done the
1337 * 'finish_wait()', and we could have exited the loop above due
1338 * to a signal, and had a wakeup event happen after the signal
1339 * test but before the 'finish_wait()'.
1340 *
1341 * So only after the finish_wait() can we reliably determine
1342 * if we got woken up or not, so we can now figure out the final
1343 * return value based on that state without races.
1344 *
1345 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1346 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1347 */
1348 if (behavior == EXCLUSIVE)
1349 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1350
1351 return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1352}
1353
1354#ifdef CONFIG_MIGRATION
1355/**
1356 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1357 * @entry: migration swap entry.
1358 * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
1359 * for pte entries, pass NULL for pmd entries.
1360 * @ptl: already locked ptl. This function will drop the lock.
1361 *
1362 * Wait for a migration entry referencing the given page to be removed. This is
1363 * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1364 * this can be called without taking a reference on the page. Instead this
1365 * should be called while holding the ptl for the migration entry referencing
1366 * the page.
1367 *
1368 * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
1369 *
1370 * This follows the same logic as folio_wait_bit_common() so see the comments
1371 * there.
1372 */
1373void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
1374 spinlock_t *ptl)
1375{
1376 struct wait_page_queue wait_page;
1377 wait_queue_entry_t *wait = &wait_page.wait;
1378 bool thrashing = false;
1379 unsigned long pflags;
1380 bool in_thrashing;
1381 wait_queue_head_t *q;
1382 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1383
1384 q = folio_waitqueue(folio);
1385 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1386 delayacct_thrashing_start(&in_thrashing);
1387 psi_memstall_enter(&pflags);
1388 thrashing = true;
1389 }
1390
1391 init_wait(wait);
1392 wait->func = wake_page_function;
1393 wait_page.folio = folio;
1394 wait_page.bit_nr = PG_locked;
1395 wait->flags = 0;
1396
1397 spin_lock_irq(&q->lock);
1398 folio_set_waiters(folio);
1399 if (!folio_trylock_flag(folio, PG_locked, wait))
1400 __add_wait_queue_entry_tail(q, wait);
1401 spin_unlock_irq(&q->lock);
1402
1403 /*
1404 * If a migration entry exists for the page the migration path must hold
1405 * a valid reference to the page, and it must take the ptl to remove the
1406 * migration entry. So the page is valid until the ptl is dropped.
1407 */
1408 if (ptep)
1409 pte_unmap_unlock(ptep, ptl);
1410 else
1411 spin_unlock(ptl);
1412
1413 for (;;) {
1414 unsigned int flags;
1415
1416 set_current_state(TASK_UNINTERRUPTIBLE);
1417
1418 /* Loop until we've been woken or interrupted */
1419 flags = smp_load_acquire(&wait->flags);
1420 if (!(flags & WQ_FLAG_WOKEN)) {
1421 if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1422 break;
1423
1424 io_schedule();
1425 continue;
1426 }
1427 break;
1428 }
1429
1430 finish_wait(q, wait);
1431
1432 if (thrashing) {
1433 delayacct_thrashing_end(&in_thrashing);
1434 psi_memstall_leave(&pflags);
1435 }
1436}
1437#endif
1438
1439void folio_wait_bit(struct folio *folio, int bit_nr)
1440{
1441 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1442}
1443EXPORT_SYMBOL(folio_wait_bit);
1444
1445int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1446{
1447 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1448}
1449EXPORT_SYMBOL(folio_wait_bit_killable);
1450
1451/**
1452 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1453 * @folio: The folio to wait for.
1454 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1455 *
1456 * The caller should hold a reference on @folio. They expect the page to
1457 * become unlocked relatively soon, but do not wish to hold up migration
1458 * (for example) by holding the reference while waiting for the folio to
1459 * come unlocked. After this function returns, the caller should not
1460 * dereference @folio.
1461 *
1462 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1463 */
1464static int folio_put_wait_locked(struct folio *folio, int state)
1465{
1466 return folio_wait_bit_common(folio, PG_locked, state, DROP);
1467}
1468
1469/**
1470 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1471 * @folio: Folio defining the wait queue of interest
1472 * @waiter: Waiter to add to the queue
1473 *
1474 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1475 */
1476void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1477{
1478 wait_queue_head_t *q = folio_waitqueue(folio);
1479 unsigned long flags;
1480
1481 spin_lock_irqsave(&q->lock, flags);
1482 __add_wait_queue_entry_tail(q, waiter);
1483 folio_set_waiters(folio);
1484 spin_unlock_irqrestore(&q->lock, flags);
1485}
1486EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1487
1488#ifndef clear_bit_unlock_is_negative_byte
1489
1490/*
1491 * PG_waiters is the high bit in the same byte as PG_locked.
1492 *
1493 * On x86 (and on many other architectures), we can clear PG_locked and
1494 * test the sign bit at the same time. But if the architecture does
1495 * not support that special operation, we just do this all by hand
1496 * instead.
1497 *
1498 * The read of PG_waiters has to be after (or concurrently with) PG_locked
1499 * being cleared, but a memory barrier should be unnecessary since it is
1500 * in the same byte as PG_locked.
1501 */
1502static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1503{
1504 clear_bit_unlock(nr, mem);
1505 /* smp_mb__after_atomic(); */
1506 return test_bit(PG_waiters, mem);
1507}
1508
1509#endif
1510
1511/**
1512 * folio_unlock - Unlock a locked folio.
1513 * @folio: The folio.
1514 *
1515 * Unlocks the folio and wakes up any thread sleeping on the page lock.
1516 *
1517 * Context: May be called from interrupt or process context. May not be
1518 * called from NMI context.
1519 */
1520void folio_unlock(struct folio *folio)
1521{
1522 /* Bit 7 allows x86 to check the byte's sign bit */
1523 BUILD_BUG_ON(PG_waiters != 7);
1524 BUILD_BUG_ON(PG_locked > 7);
1525 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1526 if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
1527 folio_wake_bit(folio, PG_locked);
1528}
1529EXPORT_SYMBOL(folio_unlock);
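/*
 * Illustrative sketch, not part of filemap.c: the canonical lock/check/use/
 * unlock sequence around a cached folio.  Hypothetical helper.
 */
static void __maybe_unused example_dirty_folio(struct folio *folio)
{
        folio_lock(folio);
        if (folio->mapping)             /* still in the page cache? */
                folio_mark_dirty(folio);
        folio_unlock(folio);            /* wakes anyone in folio_wait_bit() */
}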
1530
1531/**
1532 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1533 * @folio: The folio.
1534 *
1535 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1536 * it. The folio reference held for PG_private_2 being set is released.
1537 *
1538 * This is, for example, used when a netfs folio is being written to a local
1539 * disk cache, thereby allowing writes to the cache for the same folio to be
1540 * serialised.
1541 */
1542void folio_end_private_2(struct folio *folio)
1543{
1544 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1545 clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1546 folio_wake_bit(folio, PG_private_2);
1547 folio_put(folio);
1548}
1549EXPORT_SYMBOL(folio_end_private_2);
1550
1551/**
1552 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1553 * @folio: The folio to wait on.
1554 *
1555 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1556 */
1557void folio_wait_private_2(struct folio *folio)
1558{
1559 while (folio_test_private_2(folio))
1560 folio_wait_bit(folio, PG_private_2);
1561}
1562EXPORT_SYMBOL(folio_wait_private_2);
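/*
 * Illustrative sketch, not part of filemap.c: a netfs that is about to
 * modify a folio which may still be in flight to the local disk cache waits
 * for PG_private_2 (aka PG_fscache) to clear first.  Hypothetical helper.
 */
static void __maybe_unused example_modify_cached_folio(struct folio *folio)
{
        folio_wait_private_2(folio);
        /* ... now safe to redirty or rewrite the folio ... */
}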
1563
1564/**
1565 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1566 * @folio: The folio to wait on.
1567 *
1568 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1569 * fatal signal is received by the calling task.
1570 *
1571 * Return:
1572 * - 0 if successful.
1573 * - -EINTR if a fatal signal was encountered.
1574 */
1575int folio_wait_private_2_killable(struct folio *folio)
1576{
1577 int ret = 0;
1578
1579 while (folio_test_private_2(folio)) {
1580 ret = folio_wait_bit_killable(folio, PG_private_2);
1581 if (ret < 0)
1582 break;
1583 }
1584
1585 return ret;
1586}
1587EXPORT_SYMBOL(folio_wait_private_2_killable);
1588
1589/**
1590 * folio_end_writeback - End writeback against a folio.
1591 * @folio: The folio.
1592 */
1593void folio_end_writeback(struct folio *folio)
1594{
1595 /*
1596 * folio_test_clear_reclaim() could be used here but it is an
1597 * atomic operation and overkill in this particular case. Failing
1598 * to shuffle a folio marked for immediate reclaim is too mild
1599 * a gain to justify taking an atomic operation penalty at the
1600 * end of every folio writeback.
1601 */
1602 if (folio_test_reclaim(folio)) {
1603 folio_clear_reclaim(folio);
1604 folio_rotate_reclaimable(folio);
1605 }
1606
1607 /*
1608 * Writeback does not hold a folio reference of its own, relying
1609 * on truncation to wait for the clearing of PG_writeback.
1610 * But here we must make sure that the folio is not freed and
1611 * reused before the folio_wake().
1612 */
1613 folio_get(folio);
1614 if (!__folio_end_writeback(folio))
1615 BUG();
1616
1617 smp_mb__after_atomic();
1618 folio_wake(folio, PG_writeback);
1619 acct_reclaim_writeback(folio);
1620 folio_put(folio);
1621}
1622EXPORT_SYMBOL(folio_end_writeback);
1623
1624/*
1625 * After completing I/O on a page, call this routine to update the page
1626 * flags appropriately
1627 */
1628void page_endio(struct page *page, bool is_write, int err)
1629{
1630 struct folio *folio = page_folio(page);
1631
1632 if (!is_write) {
1633 if (!err) {
1634 folio_mark_uptodate(folio);
1635 } else {
1636 folio_clear_uptodate(folio);
1637 folio_set_error(folio);
1638 }
1639 folio_unlock(folio);
1640 } else {
1641 if (err) {
1642 struct address_space *mapping;
1643
1644 folio_set_error(folio);
1645 mapping = folio_mapping(folio);
1646 if (mapping)
1647 mapping_set_error(mapping, err);
1648 }
1649 folio_end_writeback(folio);
1650 }
1651}
1652EXPORT_SYMBOL_GPL(page_endio);
1653
1654/**
1655 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1656 * @folio: The folio to lock
1657 */
1658void __folio_lock(struct folio *folio)
1659{
1660 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1661 EXCLUSIVE);
1662}
1663EXPORT_SYMBOL(__folio_lock);
1664
1665int __folio_lock_killable(struct folio *folio)
1666{
1667 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1668 EXCLUSIVE);
1669}
1670EXPORT_SYMBOL_GPL(__folio_lock_killable);
1671
1672static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1673{
1674 struct wait_queue_head *q = folio_waitqueue(folio);
1675 int ret = 0;
1676
1677 wait->folio = folio;
1678 wait->bit_nr = PG_locked;
1679
1680 spin_lock_irq(&q->lock);
1681 __add_wait_queue_entry_tail(q, &wait->wait);
1682 folio_set_waiters(folio);
1683 ret = !folio_trylock(folio);
1684 /*
1685 * If we were successful now, we know we're still on the
1686 * waitqueue as we're still under the lock. This means it's
1687 * safe to remove and return success, we know the callback
1688 * isn't going to trigger.
1689 */
1690 if (!ret)
1691 __remove_wait_queue(q, &wait->wait);
1692 else
1693 ret = -EIOCBQUEUED;
1694 spin_unlock_irq(&q->lock);
1695 return ret;
1696}
1697
1698/*
1699 * Return values:
1700 * true - folio is locked; mmap_lock is still held.
1701 * false - folio is not locked.
1702 * mmap_lock has been released (mmap_read_unlock(), unless flags had both
1703 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1704 * which case mmap_lock is still held.
1705 *
1706 * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
1707 * with the folio locked and the mmap_lock unperturbed.
1708 */
1709bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
1710 unsigned int flags)
1711{
1712 if (fault_flag_allow_retry_first(flags)) {
1713 /*
1714 * CAUTION! In this case, mmap_lock is not released,
1715 * even though we return false.
1716 */
1717 if (flags & FAULT_FLAG_RETRY_NOWAIT)
1718 return false;
1719
1720 mmap_read_unlock(mm);
1721 if (flags & FAULT_FLAG_KILLABLE)
1722 folio_wait_locked_killable(folio);
1723 else
1724 folio_wait_locked(folio);
1725 return false;
1726 }
1727 if (flags & FAULT_FLAG_KILLABLE) {
1728 bool ret;
1729
1730 ret = __folio_lock_killable(folio);
1731 if (ret) {
1732 mmap_read_unlock(mm);
1733 return false;
1734 }
1735 } else {
1736 __folio_lock(folio);
1737 }
1738
1739 return true;
1740}
1741
1742/**
1743 * page_cache_next_miss() - Find the next gap in the page cache.
1744 * @mapping: Mapping.
1745 * @index: Index.
1746 * @max_scan: Maximum range to search.
1747 *
1748 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1749 * gap with the lowest index.
1750 *
1751 * This function may be called under the rcu_read_lock. However, this will
1752 * not atomically search a snapshot of the cache at a single point in time.
1753 * For example, if a gap is created at index 5, then subsequently a gap is
1754 * created at index 10, page_cache_next_miss covering both indices may
1755 * return 10 if called under the rcu_read_lock.
1756 *
1757 * Return: The index of the gap if found, otherwise an index outside the
1758 * range specified (in which case 'return - index >= max_scan' will be true).
1759 * In the rare case of index wrap-around, 0 will be returned.
1760 */
1761pgoff_t page_cache_next_miss(struct address_space *mapping,
1762 pgoff_t index, unsigned long max_scan)
1763{
1764 XA_STATE(xas, &mapping->i_pages, index);
1765
1766 while (max_scan--) {
1767 void *entry = xas_next(&xas);
1768 if (!entry || xa_is_value(entry))
1769 break;
1770 if (xas.xa_index == 0)
1771 break;
1772 }
1773
1774 return xas.xa_index;
1775}
1776EXPORT_SYMBOL(page_cache_next_miss);
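
/*
 * Usage sketch (hypothetical helper, not used anywhere in this file): test
 * whether the @count indices starting at @index are all present in the page
 * cache, ignoring the rare index wrap-around case documented above.
 */
static inline bool example_range_fully_cached(struct address_space *mapping,
		pgoff_t index, unsigned long count)
{
	return page_cache_next_miss(mapping, index, count) >= index + count;
}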
1777
1778/**
1779 * page_cache_prev_miss() - Find the previous gap in the page cache.
1780 * @mapping: Mapping.
1781 * @index: Index.
1782 * @max_scan: Maximum range to search.
1783 *
1784 * Search the range [max(index - max_scan + 1, 0), index] for the
1785 * gap with the highest index.
1786 *
1787 * This function may be called under the rcu_read_lock. However, this will
1788 * not atomically search a snapshot of the cache at a single point in time.
1789 * For example, if a gap is created at index 10, then subsequently a gap is
1790 * created at index 5, page_cache_prev_miss() covering both indices may
1791 * return 5 if called under the rcu_read_lock.
1792 *
1793 * Return: The index of the gap if found, otherwise an index outside the
1794 * range specified (in which case 'index - return >= max_scan' will be true).
1795 * In the rare case of wrap-around, ULONG_MAX will be returned.
1796 */
1797pgoff_t page_cache_prev_miss(struct address_space *mapping,
1798 pgoff_t index, unsigned long max_scan)
1799{
1800 XA_STATE(xas, &mapping->i_pages, index);
1801
1802 while (max_scan--) {
1803 void *entry = xas_prev(&xas);
1804 if (!entry || xa_is_value(entry))
1805 break;
1806 if (xas.xa_index == ULONG_MAX)
1807 break;
1808 }
1809
1810 return xas.xa_index;
1811}
1812EXPORT_SYMBOL(page_cache_prev_miss);
1813
1814/*
1815 * Lockless page cache protocol:
1816 * On the lookup side:
1817 * 1. Load the folio from i_pages
1818 * 2. Increment the refcount if it's not zero
1819 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1820 *
1821 * On the removal side:
1822 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1823 * B. Remove the page from i_pages
1824 * C. Return the page to the page allocator
1825 *
1826 * This means that any page may have its reference count temporarily
1827 * increased by a speculative page cache (or fast GUP) lookup as it can
1828 * be allocated by another user before the RCU grace period expires.
1829 * Because the refcount temporarily acquired here may end up being the
1830 * last refcount on the page, any page allocation must be freeable by
1831 * folio_put().
1832 */
1833
1834/*
1835 * mapping_get_entry - Get a page cache entry.
1836 * @mapping: the address_space to search
1837 * @index: The page cache index.
1838 *
1839 * Looks up the page cache entry at @mapping & @index. If it is a folio,
1840 * it is returned with an increased refcount. If it is a shadow entry
1841 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1842 * it is returned without further action.
1843 *
1844 * Return: The folio, swap or shadow entry, or %NULL if nothing is found.
1845 */
1846static void *mapping_get_entry(struct address_space *mapping, pgoff_t index)
1847{
1848 XA_STATE(xas, &mapping->i_pages, index);
1849 struct folio *folio;
1850
1851 rcu_read_lock();
1852repeat:
1853 xas_reset(&xas);
1854 folio = xas_load(&xas);
1855 if (xas_retry(&xas, folio))
1856 goto repeat;
1857 /*
1858 * A shadow entry of a recently evicted page, or a swap entry from
1859 * shmem/tmpfs. Return it without attempting to raise page count.
1860 */
1861 if (!folio || xa_is_value(folio))
1862 goto out;
1863
1864 if (!folio_try_get_rcu(folio))
1865 goto repeat;
1866
1867 if (unlikely(folio != xas_reload(&xas))) {
1868 folio_put(folio);
1869 goto repeat;
1870 }
1871out:
1872 rcu_read_unlock();
1873
1874 return folio;
1875}
1876
1877/**
1878 * __filemap_get_folio - Find and get a reference to a folio.
1879 * @mapping: The address_space to search.
1880 * @index: The page index.
1881 * @fgp_flags: %FGP flags modify how the folio is returned.
1882 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1883 *
1884 * Looks up the page cache entry at @mapping & @index.
1885 *
1886 * @fgp_flags can be zero or more of these flags:
1887 *
1888 * * %FGP_ACCESSED - The folio will be marked accessed.
1889 * * %FGP_LOCK - The folio is returned locked.
1890 * * %FGP_ENTRY - If there is a shadow / swap / DAX entry, return it
1891 * instead of allocating a new folio to replace it.
1892 * * %FGP_CREAT - If no page is present then a new page is allocated using
1893 * @gfp and added to the page cache and the VM's LRU list.
1894 * The page is returned locked and with an increased refcount.
1895 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1896 * page is already in cache. If the page was allocated, unlock it before
1897 * returning so the caller can do the same dance.
1898 * * %FGP_WRITE - The page will be written to by the caller.
1899 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
1900 * * %FGP_NOWAIT - Don't get blocked by page lock.
1901 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
1902 *
1903 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1904 * if the %GFP flags specified for %FGP_CREAT are atomic.
1905 *
1906 * If there is a page cache page, it is returned with an increased refcount.
1907 *
1908 * Return: The found folio or %NULL otherwise.
1909 */
1910struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1911 int fgp_flags, gfp_t gfp)
1912{
1913 struct folio *folio;
1914
1915repeat:
1916 folio = mapping_get_entry(mapping, index);
1917 if (xa_is_value(folio)) {
1918 if (fgp_flags & FGP_ENTRY)
1919 return folio;
1920 folio = NULL;
1921 }
1922 if (!folio)
1923 goto no_page;
1924
1925 if (fgp_flags & FGP_LOCK) {
1926 if (fgp_flags & FGP_NOWAIT) {
1927 if (!folio_trylock(folio)) {
1928 folio_put(folio);
1929 return NULL;
1930 }
1931 } else {
1932 folio_lock(folio);
1933 }
1934
1935 /* Has the page been truncated? */
1936 if (unlikely(folio->mapping != mapping)) {
1937 folio_unlock(folio);
1938 folio_put(folio);
1939 goto repeat;
1940 }
1941 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1942 }
1943
1944 if (fgp_flags & FGP_ACCESSED)
1945 folio_mark_accessed(folio);
1946 else if (fgp_flags & FGP_WRITE) {
1947 /* Clear idle flag for buffer write */
1948 if (folio_test_idle(folio))
1949 folio_clear_idle(folio);
1950 }
1951
1952 if (fgp_flags & FGP_STABLE)
1953 folio_wait_stable(folio);
1954no_page:
1955 if (!folio && (fgp_flags & FGP_CREAT)) {
1956 int err;
1957 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1958 gfp |= __GFP_WRITE;
1959 if (fgp_flags & FGP_NOFS)
1960 gfp &= ~__GFP_FS;
1961 if (fgp_flags & FGP_NOWAIT) {
1962 gfp &= ~GFP_KERNEL;
1963 gfp |= GFP_NOWAIT | __GFP_NOWARN;
1964 }
1965
1966 folio = filemap_alloc_folio(gfp, 0);
1967 if (!folio)
1968 return NULL;
1969
1970 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1971 fgp_flags |= FGP_LOCK;
1972
1973 /* Init accessed so as to avoid an atomic mark_page_accessed() later */
1974 if (fgp_flags & FGP_ACCESSED)
1975 __folio_set_referenced(folio);
1976
1977 err = filemap_add_folio(mapping, folio, index, gfp);
1978 if (unlikely(err)) {
1979 folio_put(folio);
1980 folio = NULL;
1981 if (err == -EEXIST)
1982 goto repeat;
1983 }
1984
1985 /*
1986 * filemap_add_folio locks the page, and for mmap
1987 * we expect an unlocked page.
1988 */
1989 if (folio && (fgp_flags & FGP_FOR_MMAP))
1990 folio_unlock(folio);
1991 }
1992
1993 return folio;
1994}
1995EXPORT_SYMBOL(__filemap_get_folio);
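
/*
 * Usage sketch (hypothetical, for illustration only): a write path that
 * wants the folio covering @index locked, created if absent and marked
 * accessed could combine the FGP flags like this.
 */
static inline struct folio *example_get_locked_folio(struct address_space *mapping,
		pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));	/* locked folio, or NULL */
}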
1996
1997static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1998 xa_mark_t mark)
1999{
2000 struct folio *folio;
2001
2002retry:
2003 if (mark == XA_PRESENT)
2004 folio = xas_find(xas, max);
2005 else
2006 folio = xas_find_marked(xas, max, mark);
2007
2008 if (xas_retry(xas, folio))
2009 goto retry;
2010 /*
2011 * A shadow entry of a recently evicted page, a swap
2012 * entry from shmem/tmpfs or a DAX entry. Return it
2013 * without attempting to raise page count.
2014 */
2015 if (!folio || xa_is_value(folio))
2016 return folio;
2017
2018 if (!folio_try_get_rcu(folio))
2019 goto reset;
2020
2021 if (unlikely(folio != xas_reload(xas))) {
2022 folio_put(folio);
2023 goto reset;
2024 }
2025
2026 return folio;
2027reset:
2028 xas_reset(xas);
2029 goto retry;
2030}
2031
2032/**
2033 * find_get_entries - gang pagecache lookup
2034 * @mapping: The address_space to search
2035 * @start: The starting page cache index
2036 * @end: The final page index (inclusive).
2037 * @fbatch: Where the resulting entries are placed.
2038 * @indices: The cache indices corresponding to the entries in @fbatch
2039 *
2040 * find_get_entries() will search for and return a batch of entries in
2041 * the mapping. The entries are placed in @fbatch. find_get_entries()
2042 * takes a reference on any actual folios it returns.
2043 *
2044 * The entries have ascending indexes. The indices may not be consecutive
2045 * due to not-present entries or large folios.
2046 *
2047 * Any shadow entries of evicted folios, or swap entries from
2048 * shmem/tmpfs, are included in the returned batch.
2049 *
2050 * Return: The number of entries which were found.
2051 */
2052unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2053 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2054{
2055 XA_STATE(xas, &mapping->i_pages, *start);
2056 struct folio *folio;
2057
2058 rcu_read_lock();
2059 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2060 indices[fbatch->nr] = xas.xa_index;
2061 if (!folio_batch_add(fbatch, folio))
2062 break;
2063 }
2064 rcu_read_unlock();
2065
2066 if (folio_batch_count(fbatch)) {
2067 unsigned long nr = 1;
2068 int idx = folio_batch_count(fbatch) - 1;
2069
2070 folio = fbatch->folios[idx];
2071 if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2072 nr = folio_nr_pages(folio);
2073 *start = indices[idx] + nr;
2074 }
2075 return folio_batch_count(fbatch);
2076}
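
/*
 * Consumer sketch (hypothetical, modelled on the truncate/shmem callers):
 * walk a range in batches, skipping shadow/swap value entries and dropping
 * the references taken on real folios.
 */
static void __maybe_unused example_scan_entries(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i;

	folio_batch_init(&fbatch);
	while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			if (xa_is_value(folio))
				continue;	/* shadow or swap entry */
			/* ... inspect the folio at indices[i] here ... */
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}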
2077
2078/**
2079 * find_lock_entries - Find a batch of pagecache entries.
2080 * @mapping: The address_space to search.
2081 * @start: The starting page cache index.
2082 * @end: The final page index (inclusive).
2083 * @fbatch: Where the resulting entries are placed.
2084 * @indices: The cache indices of the entries in @fbatch.
2085 *
2086 * find_lock_entries() will return a batch of entries from @mapping.
2087 * Swap, shadow and DAX entries are included. Folios are returned
2088 * locked and with an incremented refcount. Folios which are locked
2089 * by somebody else or under writeback are skipped. Folios which are
2090 * partially outside the range are not returned.
2091 *
2092 * The entries have ascending indexes. The indices may not be consecutive
2093 * due to not-present entries, large folios, folios which could not be
2094 * locked or folios under writeback.
2095 *
2096 * Return: The number of entries which were found.
2097 */
2098unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2099 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2100{
2101 XA_STATE(xas, &mapping->i_pages, *start);
2102 struct folio *folio;
2103
2104 rcu_read_lock();
2105 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2106 if (!xa_is_value(folio)) {
2107 if (folio->index < *start)
2108 goto put;
2109 if (folio->index + folio_nr_pages(folio) - 1 > end)
2110 goto put;
2111 if (!folio_trylock(folio))
2112 goto put;
2113 if (folio->mapping != mapping ||
2114 folio_test_writeback(folio))
2115 goto unlock;
2116 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2117 folio);
2118 }
2119 indices[fbatch->nr] = xas.xa_index;
2120 if (!folio_batch_add(fbatch, folio))
2121 break;
2122 continue;
2123unlock:
2124 folio_unlock(folio);
2125put:
2126 folio_put(folio);
2127 }
2128 rcu_read_unlock();
2129
2130 if (folio_batch_count(fbatch)) {
2131 unsigned long nr = 1;
2132 int idx = folio_batch_count(fbatch) - 1;
2133
2134 folio = fbatch->folios[idx];
2135 if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2136 nr = folio_nr_pages(folio);
2137 *start = indices[idx] + nr;
2138 }
2139 return folio_batch_count(fbatch);
2140}
2141
2142/**
2143 * filemap_get_folios - Get a batch of folios
2144 * @mapping: The address_space to search
2145 * @start: The starting page index
2146 * @end: The final page index (inclusive)
2147 * @fbatch: The batch to fill.
2148 *
2149 * Search for and return a batch of folios in the mapping starting at
2150 * index @start and up to index @end (inclusive). The folios are returned
2151 * in @fbatch with an elevated reference count.
2152 *
2153 * The first folio may start before @start; if it does, it will contain
2154 * @start. The final folio may extend beyond @end; if it does, it will
2155 * contain @end. The folios have ascending indices. There may be gaps
2156 * between the folios if there are indices which have no folio in the
2157 * page cache. If folios are added to or removed from the page cache
2158 * while this is running, they may or may not be found by this call.
2159 *
2160 * Return: The number of folios which were found.
2161 * We also update @start to index the next folio for the traversal.
2162 */
2163unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2164 pgoff_t end, struct folio_batch *fbatch)
2165{
2166 XA_STATE(xas, &mapping->i_pages, *start);
2167 struct folio *folio;
2168
2169 rcu_read_lock();
2170 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2171 /* Skip over shadow, swap and DAX entries */
2172 if (xa_is_value(folio))
2173 continue;
2174 if (!folio_batch_add(fbatch, folio)) {
2175 unsigned long nr = folio_nr_pages(folio);
2176
2177 if (folio_test_hugetlb(folio))
2178 nr = 1;
2179 *start = folio->index + nr;
2180 goto out;
2181 }
2182 }
2183
2184 /*
2185 * We come here when there is no page beyond @end. We take care to not
2186 * overflow the index @start as it confuses some of the callers. This
2187 * breaks the iteration when there is a page at index -1 but that is
2188 * already broken anyway.
2189 */
2190 if (end == (pgoff_t)-1)
2191 *start = (pgoff_t)-1;
2192 else
2193 *start = end + 1;
2194out:
2195 rcu_read_unlock();
2196
2197 return folio_batch_count(fbatch);
2198}
2199EXPORT_SYMBOL(filemap_get_folios);
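
/*
 * Usage sketch (hypothetical caller): visit every folio in a range, one
 * batch at a time, releasing the references the lookup took.
 */
static void __maybe_unused example_for_each_folio(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			/* ... operate on fbatch.folios[i] here ... */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}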
2200
2201static inline
2202bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
2203{
2204 if (!folio_test_large(folio) || folio_test_hugetlb(folio))
2205 return false;
2206 if (index >= max)
2207 return false;
2208 return index < folio->index + folio_nr_pages(folio) - 1;
2209}
2210
2211/**
2212 * filemap_get_folios_contig - Get a batch of contiguous folios
2213 * @mapping: The address_space to search
2214 * @start: The starting page index
2215 * @end: The final page index (inclusive)
2216 * @fbatch: The batch to fill
2217 *
2218 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2219 * except the returned folios are guaranteed to be contiguous. This may
2220 * not return all contiguous folios if the batch gets filled up.
2221 *
2222 * Return: The number of folios found.
2223 * Also update @start to be positioned for traversal of the next folio.
2224 */
2225
2226unsigned filemap_get_folios_contig(struct address_space *mapping,
2227 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2228{
2229 XA_STATE(xas, &mapping->i_pages, *start);
2230 unsigned long nr;
2231 struct folio *folio;
2232
2233 rcu_read_lock();
2234
2235 for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2236 folio = xas_next(&xas)) {
2237 if (xas_retry(&xas, folio))
2238 continue;
2239 /*
2240 * If the entry has been swapped out, we can stop looking.
2241 * No current caller is looking for DAX entries.
2242 */
2243 if (xa_is_value(folio))
2244 goto update_start;
2245
2246 if (!folio_try_get_rcu(folio))
2247 goto retry;
2248
2249 if (unlikely(folio != xas_reload(&xas)))
2250 goto put_folio;
2251
2252 if (!folio_batch_add(fbatch, folio)) {
2253 nr = folio_nr_pages(folio);
2254
2255 if (folio_test_hugetlb(folio))
2256 nr = 1;
2257 *start = folio->index + nr;
2258 goto out;
2259 }
2260 continue;
2261put_folio:
2262 folio_put(folio);
2263
2264retry:
2265 xas_reset(&xas);
2266 }
2267
2268update_start:
2269 nr = folio_batch_count(fbatch);
2270
2271 if (nr) {
2272 folio = fbatch->folios[nr - 1];
2273 if (folio_test_hugetlb(folio))
2274 *start = folio->index + 1;
2275 else
2276 *start = folio->index + folio_nr_pages(folio);
2277 }
2278out:
2279 rcu_read_unlock();
2280 return folio_batch_count(fbatch);
2281}
2282EXPORT_SYMBOL(filemap_get_folios_contig);
2283
2284/**
2285 * find_get_pages_range_tag - Find and return head pages matching @tag.
2286 * @mapping: the address_space to search
2287 * @index: the starting page index
2288 * @end: The final page index (inclusive)
2289 * @tag: the tag index
2290 * @nr_pages: the maximum number of pages
2291 * @pages: where the resulting pages are placed
2292 *
2293 * Like find_get_pages_range(), except we only return head pages which are
2294 * tagged with @tag. @index is updated to the index immediately after the
2295 * last page we return, ready for the next iteration.
2296 *
2297 * Return: the number of pages which were found.
2298 */
2299unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
2300 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
2301 struct page **pages)
2302{
2303 XA_STATE(xas, &mapping->i_pages, *index);
2304 struct folio *folio;
2305 unsigned ret = 0;
2306
2307 if (unlikely(!nr_pages))
2308 return 0;
2309
2310 rcu_read_lock();
2311 while ((folio = find_get_entry(&xas, end, tag))) {
2312 /*
2313 * Shadow entries should never be tagged, but this iteration
2314 * is lockless so there is a window for page reclaim to evict
2315 * a page we saw tagged. Skip over it.
2316 */
2317 if (xa_is_value(folio))
2318 continue;
2319
2320 pages[ret] = &folio->page;
2321 if (++ret == nr_pages) {
2322 *index = folio->index + folio_nr_pages(folio);
2323 goto out;
2324 }
2325 }
2326
2327 /*
2328 * We come here when we got to @end. We take care to not overflow the
2329 * index @index as it confuses some of the callers. This breaks the
2330 * iteration when there is a page at index -1 but that is already
2331 * broken anyway.
2332 */
2333 if (end == (pgoff_t)-1)
2334 *index = (pgoff_t)-1;
2335 else
2336 *index = end + 1;
2337out:
2338 rcu_read_unlock();
2339
2340 return ret;
2341}
2342EXPORT_SYMBOL(find_get_pages_range_tag);
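
/*
 * Sketch (hypothetical): count the pages tagged dirty in a range, the way a
 * write_cache_pages()-style loop would walk them for writeback.
 */
static unsigned int __maybe_unused example_count_dirty_pages(
		struct address_space *mapping, pgoff_t index, pgoff_t end)
{
	struct page *pages[16];
	unsigned int i, nr, total = 0;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 16, pages)) != 0) {
		total += nr;
		for (i = 0; i < nr; i++)
			put_page(pages[i]);	/* drop the lookup references */
	}
	return total;
}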
2343
2344/*
2345 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2346 * a _large_ part of the i/o request. Imagine the worst scenario:
2347 *
2348 * ---R__________________________________________B__________
2349 * ^ reading here ^ bad block (assume 4k)
2350 *
2351 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2352 * => failing the whole request => read(R) => read(R+1) =>
2353 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2354 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2355 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2356 *
2357 * It is going insane. Fix it by quickly scaling down the readahead size.
2358 */
2359static void shrink_readahead_size_eio(struct file_ra_state *ra)
2360{
2361 ra->ra_pages /= 4;
2362}
2363
2364/*
2365 * filemap_get_read_batch - Get a batch of folios for read
2366 *
2367 * Get a batch of folios which represent a contiguous range of bytes in
2368 * the file. No exceptional entries will be returned. If @index is in
2369 * the middle of a folio, the entire folio will be returned. The last
2370 * folio in the batch may have the readahead flag set or the uptodate flag
2371 * clear so that the caller can take the appropriate action.
2372 */
2373static void filemap_get_read_batch(struct address_space *mapping,
2374 pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2375{
2376 XA_STATE(xas, &mapping->i_pages, index);
2377 struct folio *folio;
2378
2379 rcu_read_lock();
2380 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2381 if (xas_retry(&xas, folio))
2382 continue;
2383 if (xas.xa_index > max || xa_is_value(folio))
2384 break;
2385 if (xa_is_sibling(folio))
2386 break;
2387 if (!folio_try_get_rcu(folio))
2388 goto retry;
2389
2390 if (unlikely(folio != xas_reload(&xas)))
2391 goto put_folio;
2392
2393 if (!folio_batch_add(fbatch, folio))
2394 break;
2395 if (!folio_test_uptodate(folio))
2396 break;
2397 if (folio_test_readahead(folio))
2398 break;
2399 xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
2400 continue;
2401put_folio:
2402 folio_put(folio);
2403retry:
2404 xas_reset(&xas);
2405 }
2406 rcu_read_unlock();
2407}
2408
2409static int filemap_read_folio(struct file *file, filler_t filler,
2410 struct folio *folio)
2411{
2412 bool workingset = folio_test_workingset(folio);
2413 unsigned long pflags;
2414 int error;
2415
2416 /*
2417 * A previous I/O error may have been due to temporary failures,
2418 * e.g. multipath errors. PG_error will be set again if read_folio
2419 * fails.
2420 */
2421 folio_clear_error(folio);
2422
2423 /* Start the actual read. The read will unlock the page. */
2424 if (unlikely(workingset))
2425 psi_memstall_enter(&pflags);
2426 error = filler(file, folio);
2427 if (unlikely(workingset))
2428 psi_memstall_leave(&pflags);
2429 if (error)
2430 return error;
2431
2432 error = folio_wait_locked_killable(folio);
2433 if (error)
2434 return error;
2435 if (folio_test_uptodate(folio))
2436 return 0;
2437 if (file)
2438 shrink_readahead_size_eio(&file->f_ra);
2439 return -EIO;
2440}
2441
2442static bool filemap_range_uptodate(struct address_space *mapping,
2443 loff_t pos, struct iov_iter *iter, struct folio *folio)
2444{
2445 int count;
2446
2447 if (folio_test_uptodate(folio))
2448 return true;
2449 /* pipes can't handle partially uptodate pages */
2450 if (iov_iter_is_pipe(iter))
2451 return false;
2452 if (!mapping->a_ops->is_partially_uptodate)
2453 return false;
2454 if (mapping->host->i_blkbits >= folio_shift(folio))
2455 return false;
2456
2457 count = iter->count;
2458 if (folio_pos(folio) > pos) {
2459 count -= folio_pos(folio) - pos;
2460 pos = 0;
2461 } else {
2462 pos -= folio_pos(folio);
2463 }
2464
2465 return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2466}
2467
2468static int filemap_update_page(struct kiocb *iocb,
2469 struct address_space *mapping, struct iov_iter *iter,
2470 struct folio *folio)
2471{
2472 int error;
2473
2474 if (iocb->ki_flags & IOCB_NOWAIT) {
2475 if (!filemap_invalidate_trylock_shared(mapping))
2476 return -EAGAIN;
2477 } else {
2478 filemap_invalidate_lock_shared(mapping);
2479 }
2480
2481 if (!folio_trylock(folio)) {
2482 error = -EAGAIN;
2483 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2484 goto unlock_mapping;
2485 if (!(iocb->ki_flags & IOCB_WAITQ)) {
2486 filemap_invalidate_unlock_shared(mapping);
2487 /*
2488 * This is where we usually end up waiting for a
2489 * previously submitted readahead to finish.
2490 */
2491 folio_put_wait_locked(folio, TASK_KILLABLE);
2492 return AOP_TRUNCATED_PAGE;
2493 }
2494 error = __folio_lock_async(folio, iocb->ki_waitq);
2495 if (error)
2496 goto unlock_mapping;
2497 }
2498
2499 error = AOP_TRUNCATED_PAGE;
2500 if (!folio->mapping)
2501 goto unlock;
2502
2503 error = 0;
2504 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, folio))
2505 goto unlock;
2506
2507 error = -EAGAIN;
2508 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2509 goto unlock;
2510
2511 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2512 folio);
2513 goto unlock_mapping;
2514unlock:
2515 folio_unlock(folio);
2516unlock_mapping:
2517 filemap_invalidate_unlock_shared(mapping);
2518 if (error == AOP_TRUNCATED_PAGE)
2519 folio_put(folio);
2520 return error;
2521}
2522
2523static int filemap_create_folio(struct file *file,
2524 struct address_space *mapping, pgoff_t index,
2525 struct folio_batch *fbatch)
2526{
2527 struct folio *folio;
2528 int error;
2529
2530 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2531 if (!folio)
2532 return -ENOMEM;
2533
2534 /*
2535 * Protect against truncate / hole punch. Grabbing invalidate_lock
2536 * here assures we cannot instantiate and bring uptodate new
2537 * pagecache folios after evicting page cache during truncate
2538 * and before actually freeing blocks. Note that we could
2539 * release invalidate_lock after inserting the folio into
2540 * the page cache as the locked folio would then be enough to
2541 * synchronize with hole punching. But there are code paths
2542 * such as filemap_update_page() filling in partially uptodate
2543 * pages or ->readahead() that need to hold invalidate_lock
2544 * while mapping blocks for IO so let's hold the lock here as
2545 * well to keep locking rules simple.
2546 */
2547 filemap_invalidate_lock_shared(mapping);
2548 error = filemap_add_folio(mapping, folio, index,
2549 mapping_gfp_constraint(mapping, GFP_KERNEL));
2550 if (error == -EEXIST)
2551 error = AOP_TRUNCATED_PAGE;
2552 if (error)
2553 goto error;
2554
2555 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2556 if (error)
2557 goto error;
2558
2559 filemap_invalidate_unlock_shared(mapping);
2560 folio_batch_add(fbatch, folio);
2561 return 0;
2562error:
2563 filemap_invalidate_unlock_shared(mapping);
2564 folio_put(folio);
2565 return error;
2566}
2567
2568static int filemap_readahead(struct kiocb *iocb, struct file *file,
2569 struct address_space *mapping, struct folio *folio,
2570 pgoff_t last_index)
2571{
2572 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2573
2574 if (iocb->ki_flags & IOCB_NOIO)
2575 return -EAGAIN;
2576 page_cache_async_ra(&ractl, folio, last_index - folio->index);
2577 return 0;
2578}
2579
2580static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
2581 struct folio_batch *fbatch)
2582{
2583 struct file *filp = iocb->ki_filp;
2584 struct address_space *mapping = filp->f_mapping;
2585 struct file_ra_state *ra = &filp->f_ra;
2586 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2587 pgoff_t last_index;
2588 struct folio *folio;
2589 int err = 0;
2590
2591 last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
2592retry:
2593 if (fatal_signal_pending(current))
2594 return -EINTR;
2595
2596 filemap_get_read_batch(mapping, index, last_index, fbatch);
2597 if (!folio_batch_count(fbatch)) {
2598 if (iocb->ki_flags & IOCB_NOIO)
2599 return -EAGAIN;
2600 page_cache_sync_readahead(mapping, ra, filp, index,
2601 last_index - index);
2602 filemap_get_read_batch(mapping, index, last_index, fbatch);
2603 }
2604 if (!folio_batch_count(fbatch)) {
2605 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2606 return -EAGAIN;
2607 err = filemap_create_folio(filp, mapping,
2608 iocb->ki_pos >> PAGE_SHIFT, fbatch);
2609 if (err == AOP_TRUNCATED_PAGE)
2610 goto retry;
2611 return err;
2612 }
2613
2614 folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2615 if (folio_test_readahead(folio)) {
2616 err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2617 if (err)
2618 goto err;
2619 }
2620 if (!folio_test_uptodate(folio)) {
2621 if ((iocb->ki_flags & IOCB_WAITQ) &&
2622 folio_batch_count(fbatch) > 1)
2623 iocb->ki_flags |= IOCB_NOWAIT;
2624 err = filemap_update_page(iocb, mapping, iter, folio);
2625 if (err)
2626 goto err;
2627 }
2628
2629 return 0;
2630err:
2631 if (err < 0)
2632 folio_put(folio);
2633 if (likely(--fbatch->nr))
2634 return 0;
2635 if (err == AOP_TRUNCATED_PAGE)
2636 goto retry;
2637 return err;
2638}
2639
2640static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2641{
2642 unsigned int shift = folio_shift(folio);
2643
2644 return (pos1 >> shift == pos2 >> shift);
2645}
2646
2647/**
2648 * filemap_read - Read data from the page cache.
2649 * @iocb: The iocb to read.
2650 * @iter: Destination for the data.
2651 * @already_read: Number of bytes already read by the caller.
2652 *
2653 * Copies data from the page cache. If the data is not currently present,
2654 * uses the readahead and read_folio address_space operations to fetch it.
2655 *
2656 * Return: Total number of bytes copied, including those already read by
2657 * the caller. If an error happens before any bytes are copied, returns
2658 * a negative error number.
2659 */
2660ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2661 ssize_t already_read)
2662{
2663 struct file *filp = iocb->ki_filp;
2664 struct file_ra_state *ra = &filp->f_ra;
2665 struct address_space *mapping = filp->f_mapping;
2666 struct inode *inode = mapping->host;
2667 struct folio_batch fbatch;
2668 int i, error = 0;
2669 bool writably_mapped;
2670 loff_t isize, end_offset;
2671
2672 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2673 return 0;
2674 if (unlikely(!iov_iter_count(iter)))
2675 return 0;
2676
2677 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2678 folio_batch_init(&fbatch);
2679
2680 do {
2681 cond_resched();
2682
2683 /*
2684 * If we've already successfully copied some data, then we
2685 * can no longer safely return -EIOCBQUEUED. Hence mark
2686 * an async read NOWAIT at that point.
2687 */
2688 if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2689 iocb->ki_flags |= IOCB_NOWAIT;
2690
2691 if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2692 break;
2693
2694 error = filemap_get_pages(iocb, iter, &fbatch);
2695 if (error < 0)
2696 break;
2697
2698 /*
2699 * i_size must be checked after we know the pages are Uptodate.
2700 *
2701 * Checking i_size after the uptodate check allows us to calculate
2702 * the correct value for "nr", which means the zero-filled
2703 * part of the page is not copied back to userspace (unless
2704 * another truncate extends the file - this is desired though).
2705 */
2706 isize = i_size_read(inode);
2707 if (unlikely(iocb->ki_pos >= isize))
2708 goto put_folios;
2709 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2710
2711 /*
2712 * Once we start copying data, we don't want to be touching any
2713 * cachelines that might be contended:
2714 */
2715 writably_mapped = mapping_writably_mapped(mapping);
2716
2717 /*
2718 * When a read accesses the same folio several times, only
2719 * mark it as accessed the first time.
2720 */
2721 if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
2722 fbatch.folios[0]))
2723 folio_mark_accessed(fbatch.folios[0]);
2724
2725 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2726 struct folio *folio = fbatch.folios[i];
2727 size_t fsize = folio_size(folio);
2728 size_t offset = iocb->ki_pos & (fsize - 1);
2729 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2730 fsize - offset);
2731 size_t copied;
2732
2733 if (end_offset < folio_pos(folio))
2734 break;
2735 if (i > 0)
2736 folio_mark_accessed(folio);
2737 /*
2738 * If users can be writing to this folio using arbitrary
2739 * virtual addresses, take care of potential aliasing
2740 * before reading the folio on the kernel side.
2741 */
2742 if (writably_mapped)
2743 flush_dcache_folio(folio);
2744
2745 copied = copy_folio_to_iter(folio, offset, bytes, iter);
2746
2747 already_read += copied;
2748 iocb->ki_pos += copied;
2749 ra->prev_pos = iocb->ki_pos;
2750
2751 if (copied < bytes) {
2752 error = -EFAULT;
2753 break;
2754 }
2755 }
2756put_folios:
2757 for (i = 0; i < folio_batch_count(&fbatch); i++)
2758 folio_put(fbatch.folios[i]);
2759 folio_batch_init(&fbatch);
2760 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2761
2762 file_accessed(filp);
2763
2764 return already_read ? already_read : error;
2765}
2766EXPORT_SYMBOL_GPL(filemap_read);
2767
2768/**
2769 * generic_file_read_iter - generic filesystem read routine
2770 * @iocb: kernel I/O control block
2771 * @iter: destination for the data read
2772 *
2773 * This is the "read_iter()" routine for all filesystems
2774 * that can use the page cache directly.
2775 *
2776 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2777 * be returned when no data can be read without waiting for I/O requests
2778 * to complete; it doesn't prevent readahead.
2779 *
2780 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2781 * requests shall be made for the read or for readahead. When no data
2782 * can be read, -EAGAIN shall be returned. When readahead would be
2783 * triggered, a partial, possibly empty read shall be returned.
2784 *
2785 * Return:
2786 * * number of bytes copied, even for partial reads
2787 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2788 */
2789ssize_t
2790generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2791{
2792 size_t count = iov_iter_count(iter);
2793 ssize_t retval = 0;
2794
2795 if (!count)
2796 return 0; /* skip atime */
2797
2798 if (iocb->ki_flags & IOCB_DIRECT) {
2799 struct file *file = iocb->ki_filp;
2800 struct address_space *mapping = file->f_mapping;
2801 struct inode *inode = mapping->host;
2802
2803 if (iocb->ki_flags & IOCB_NOWAIT) {
2804 if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
2805 iocb->ki_pos + count - 1))
2806 return -EAGAIN;
2807 } else {
2808 retval = filemap_write_and_wait_range(mapping,
2809 iocb->ki_pos,
2810 iocb->ki_pos + count - 1);
2811 if (retval < 0)
2812 return retval;
2813 }
2814
2815 file_accessed(file);
2816
2817 retval = mapping->a_ops->direct_IO(iocb, iter);
2818 if (retval >= 0) {
2819 iocb->ki_pos += retval;
2820 count -= retval;
2821 }
2822 if (retval != -EIOCBQUEUED)
2823 iov_iter_revert(iter, count - iov_iter_count(iter));
2824
2825 /*
2826 * Btrfs can have a short DIO read if we encounter
2827 * compressed extents, so if there was an error, or if
2828 * we've already read everything we wanted to, or if
2829 * there was a short read because we hit EOF, go ahead
2830 * and return. Otherwise fallthrough to buffered io for
2831 * the rest of the read. Buffered reads will not work for
2832 * DAX files, so don't bother trying.
2833 */
2834 if (retval < 0 || !count || IS_DAX(inode))
2835 return retval;
2836 if (iocb->ki_pos >= i_size_read(inode))
2837 return retval;
2838 }
2839
2840 return filemap_read(iocb, iter, retval);
2841}
2842EXPORT_SYMBOL(generic_file_read_iter);
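
/*
 * Wiring sketch (hypothetical filesystem, illustration only): filesystems
 * that use the page cache typically point ->read_iter() straight at
 * generic_file_read_iter(). The example_fops name is made up.
 */
static const struct file_operations example_fops __maybe_unused = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_mmap,
};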
2843
2844static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2845 struct address_space *mapping, struct folio *folio,
2846 loff_t start, loff_t end, bool seek_data)
2847{
2848 const struct address_space_operations *ops = mapping->a_ops;
2849 size_t offset, bsz = i_blocksize(mapping->host);
2850
2851 if (xa_is_value(folio) || folio_test_uptodate(folio))
2852 return seek_data ? start : end;
2853 if (!ops->is_partially_uptodate)
2854 return seek_data ? end : start;
2855
2856 xas_pause(xas);
2857 rcu_read_unlock();
2858 folio_lock(folio);
2859 if (unlikely(folio->mapping != mapping))
2860 goto unlock;
2861
2862 offset = offset_in_folio(folio, start) & ~(bsz - 1);
2863
2864 do {
2865 if (ops->is_partially_uptodate(folio, offset, bsz) ==
2866 seek_data)
2867 break;
2868 start = (start + bsz) & ~(bsz - 1);
2869 offset += bsz;
2870 } while (offset < folio_size(folio));
2871unlock:
2872 folio_unlock(folio);
2873 rcu_read_lock();
2874 return start;
2875}
2876
2877static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
2878{
2879 if (xa_is_value(folio))
2880 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
2881 return folio_size(folio);
2882}
2883
2884/**
2885 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
2886 * @mapping: Address space to search.
2887 * @start: First byte to consider.
2888 * @end: Limit of search (exclusive).
2889 * @whence: Either SEEK_HOLE or SEEK_DATA.
2890 *
2891 * If the page cache knows which blocks contain holes and which blocks
2892 * contain data, your filesystem can use this function to implement
2893 * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are
2894 * entirely memory-based such as tmpfs, and filesystems which support
2895 * unwritten extents.
2896 *
2897 * Return: The requested offset on success, or -ENXIO if @whence specifies
2898 * SEEK_DATA and there is no data after @start. There is an implicit hole
2899 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
2900 * and @end contain data.
2901 */
2902loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
2903 loff_t end, int whence)
2904{
2905 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
2906 pgoff_t max = (end - 1) >> PAGE_SHIFT;
2907 bool seek_data = (whence == SEEK_DATA);
2908 struct folio *folio;
2909
2910 if (end <= start)
2911 return -ENXIO;
2912
2913 rcu_read_lock();
2914 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
2915 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
2916 size_t seek_size;
2917
2918 if (start < pos) {
2919 if (!seek_data)
2920 goto unlock;
2921 start = pos;
2922 }
2923
2924 seek_size = seek_folio_size(&xas, folio);
2925 pos = round_up((u64)pos + 1, seek_size);
2926 start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
2927 seek_data);
2928 if (start < pos)
2929 goto unlock;
2930 if (start >= end)
2931 break;
2932 if (seek_size > PAGE_SIZE)
2933 xas_set(&xas, pos >> PAGE_SHIFT);
2934 if (!xa_is_value(folio))
2935 folio_put(folio);
2936 }
2937 if (seek_data)
2938 start = -ENXIO;
2939unlock:
2940 rcu_read_unlock();
2941 if (folio && !xa_is_value(folio))
2942 folio_put(folio);
2943 if (start > end)
2944 return end;
2945 return start;
2946}
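
/*
 * Usage sketch (hypothetical ->llseek() for a memory-backed filesystem):
 * let the page cache answer SEEK_DATA / SEEK_HOLE, falling back to
 * generic_file_llseek() for the other whence values.
 */
static loff_t __maybe_unused example_llseek(struct file *file, loff_t offset,
		int whence)
{
	struct inode *inode = file_inode(file);

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek(file, offset, whence);

	inode_lock_shared(inode);
	offset = mapping_seek_hole_data(file->f_mapping, offset,
					i_size_read(inode), whence);
	inode_unlock_shared(inode);
	return offset;
}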
2947
2948#ifdef CONFIG_MMU
2949#define MMAP_LOTSAMISS (100)
2950/*
2951 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
2952 * @vmf - the vm_fault for this fault.
2953 * @folio - the folio to lock.
2954 * @fpin - the pointer to the file we may pin (or is already pinned).
2955 *
2956 * This works similarly to folio_lock_or_retry() in that it can drop the
2957 * mmap_lock. It differs in that it actually returns the folio locked
2958 * if it returns 1 and 0 if it couldn't lock the folio. If we did have
2959 * to drop the mmap_lock then fpin will point to the pinned file and
2960 * needs to be fput()'ed at a later point.
2961 */
2962static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
2963 struct file **fpin)
2964{
2965 if (folio_trylock(folio))
2966 return 1;
2967
2968 /*
2969 * NOTE! This will make us return with VM_FAULT_RETRY, but with
2970 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
2971 * is supposed to work. We have way too many special cases..
2972 */
2973 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2974 return 0;
2975
2976 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2977 if (vmf->flags & FAULT_FLAG_KILLABLE) {
2978 if (__folio_lock_killable(folio)) {
2979 /*
2980 * We didn't have the right flags to drop the mmap_lock,
2981 * but all fault_handlers only check for fatal signals
2982 * if we return VM_FAULT_RETRY, so we need to drop the
2983 * mmap_lock here and return 0 if we don't have a fpin.
2984 */
2985 if (*fpin == NULL)
2986 mmap_read_unlock(vmf->vma->vm_mm);
2987 return 0;
2988 }
2989 } else
2990 __folio_lock(folio);
2991
2992 return 1;
2993}
2994
2995/*
2996 * Synchronous readahead happens when we don't even find a page in the page
2997 * cache at all. We don't want to perform IO under the mmap_lock, so if we
2998 * have to drop the mmap_lock we return the file that was pinned in order to
2999 * do that IO. If we didn't pin a file then we return NULL. The file that is
3000 * returned needs to be fput()'ed when we're done with it.
3001 */
3002static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3003{
3004 struct file *file = vmf->vma->vm_file;
3005 struct file_ra_state *ra = &file->f_ra;
3006 struct address_space *mapping = file->f_mapping;
3007 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3008 struct file *fpin = NULL;
3009 unsigned long vm_flags = vmf->vma->vm_flags;
3010 unsigned int mmap_miss;
3011
3012#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3013 /* Use the readahead code, even if readahead is disabled */
3014 if (vm_flags & VM_HUGEPAGE) {
3015 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3016 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3017 ra->size = HPAGE_PMD_NR;
3018 /*
3019 * Fetch two PMD folios, so we get the chance to actually
3020 * readahead, unless we've been told not to.
3021 */
3022 if (!(vm_flags & VM_RAND_READ))
3023 ra->size *= 2;
3024 ra->async_size = HPAGE_PMD_NR;
3025 page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3026 return fpin;
3027 }
3028#endif
3029
3030 /* If we don't want any read-ahead, don't bother */
3031 if (vm_flags & VM_RAND_READ)
3032 return fpin;
3033 if (!ra->ra_pages)
3034 return fpin;
3035
3036 if (vm_flags & VM_SEQ_READ) {
3037 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3038 page_cache_sync_ra(&ractl, ra->ra_pages);
3039 return fpin;
3040 }
3041
3042 /* Avoid banging the cache line if not needed */
3043 mmap_miss = READ_ONCE(ra->mmap_miss);
3044 if (mmap_miss < MMAP_LOTSAMISS * 10)
3045 WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3046
3047 /*
3048 * Do we miss much more than hit in this file? If so,
3049 * stop bothering with read-ahead. It will only hurt.
3050 */
3051 if (mmap_miss > MMAP_LOTSAMISS)
3052 return fpin;
3053
3054 /*
3055 * mmap read-around
3056 */
3057 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3058 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3059 ra->size = ra->ra_pages;
3060 ra->async_size = ra->ra_pages / 4;
3061 ractl._index = ra->start;
3062 page_cache_ra_order(&ractl, ra, 0);
3063 return fpin;
3064}
3065
3066/*
3067 * Asynchronous readahead happens when we find the page with PG_readahead set,
3068 * so we want to possibly extend the readahead further. We return the file that
3069 * was pinned if we have to drop the mmap_lock in order to do IO.
3070 */
3071static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3072 struct folio *folio)
3073{
3074 struct file *file = vmf->vma->vm_file;
3075 struct file_ra_state *ra = &file->f_ra;
3076 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3077 struct file *fpin = NULL;
3078 unsigned int mmap_miss;
3079
3080 /* If we don't want any read-ahead, don't bother */
3081 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3082 return fpin;
3083
3084 mmap_miss = READ_ONCE(ra->mmap_miss);
3085 if (mmap_miss)
3086 WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3087
3088 if (folio_test_readahead(folio)) {
3089 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3090 page_cache_async_ra(&ractl, folio, ra->ra_pages);
3091 }
3092 return fpin;
3093}
3094
3095/**
3096 * filemap_fault - read in file data for page fault handling
3097 * @vmf: struct vm_fault containing details of the fault
3098 *
3099 * filemap_fault() is invoked via the vma operations vector for a
3100 * mapped memory region to read in file data during a page fault.
3101 *
3102 * The goto's are kind of ugly, but this streamlines the normal case of having
3103 * it in the page cache, and handles the special cases reasonably without
3104 * having a lot of duplicated code.
3105 *
3106 * vma->vm_mm->mmap_lock must be held on entry.
3107 *
3108 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3109 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3110 *
3111 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3112 * has not been released.
3113 *
3114 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3115 *
3116 * Return: bitwise-OR of %VM_FAULT_ codes.
3117 */
3118vm_fault_t filemap_fault(struct vm_fault *vmf)
3119{
3120 int error;
3121 struct file *file = vmf->vma->vm_file;
3122 struct file *fpin = NULL;
3123 struct address_space *mapping = file->f_mapping;
3124 struct inode *inode = mapping->host;
3125 pgoff_t max_idx, index = vmf->pgoff;
3126 struct folio *folio;
3127 vm_fault_t ret = 0;
3128 bool mapping_locked = false;
3129
3130 max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3131 if (unlikely(index >= max_idx))
3132 return VM_FAULT_SIGBUS;
3133
3134 /*
3135 * Do we have something in the page cache already?
3136 */
3137 folio = filemap_get_folio(mapping, index);
3138 if (likely(folio)) {
3139 /*
3140 * We found the page, so try async readahead before waiting for
3141 * the lock.
3142 */
3143 if (!(vmf->flags & FAULT_FLAG_TRIED))
3144 fpin = do_async_mmap_readahead(vmf, folio);
3145 if (unlikely(!folio_test_uptodate(folio))) {
3146 filemap_invalidate_lock_shared(mapping);
3147 mapping_locked = true;
3148 }
3149 } else {
3150 /* No page in the page cache at all */
3151 count_vm_event(PGMAJFAULT);
3152 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3153 ret = VM_FAULT_MAJOR;
3154 fpin = do_sync_mmap_readahead(vmf);
3155retry_find:
3156 /*
3157 * See comment in filemap_create_folio() why we need
3158 * invalidate_lock
3159 */
3160 if (!mapping_locked) {
3161 filemap_invalidate_lock_shared(mapping);
3162 mapping_locked = true;
3163 }
3164 folio = __filemap_get_folio(mapping, index,
3165 FGP_CREAT|FGP_FOR_MMAP,
3166 vmf->gfp_mask);
3167 if (!folio) {
3168 if (fpin)
3169 goto out_retry;
3170 filemap_invalidate_unlock_shared(mapping);
3171 return VM_FAULT_OOM;
3172 }
3173 }
3174
3175 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3176 goto out_retry;
3177
3178 /* Did it get truncated? */
3179 if (unlikely(folio->mapping != mapping)) {
3180 folio_unlock(folio);
3181 folio_put(folio);
3182 goto retry_find;
3183 }
3184 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3185
3186 /*
3187 * We have a locked page in the page cache, now we need to check
3188 * that it's up-to-date. If not, it is going to be due to an error.
3189 */
3190 if (unlikely(!folio_test_uptodate(folio))) {
3191 /*
3192 * The page was in cache and uptodate and now it is not.
3193 * Strange but possible since we didn't hold the page lock all
3194 * the time. Let's drop everything, get the invalidate lock and
3195 * try again.
3196 */
3197 if (!mapping_locked) {
3198 folio_unlock(folio);
3199 folio_put(folio);
3200 goto retry_find;
3201 }
3202 goto page_not_uptodate;
3203 }
3204
3205 /*
3206 * We've made it this far and we had to drop our mmap_lock, now is the
3207 * time to return to the upper layer and have it re-find the vma and
3208 * redo the fault.
3209 */
3210 if (fpin) {
3211 folio_unlock(folio);
3212 goto out_retry;
3213 }
3214 if (mapping_locked)
3215 filemap_invalidate_unlock_shared(mapping);
3216
3217 /*
3218 * Found the page and have a reference on it.
3219 * We must recheck i_size under page lock.
3220 */
3221 max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3222 if (unlikely(index >= max_idx)) {
3223 folio_unlock(folio);
3224 folio_put(folio);
3225 return VM_FAULT_SIGBUS;
3226 }
3227
3228 vmf->page = folio_file_page(folio, index);
3229 return ret | VM_FAULT_LOCKED;
3230
3231page_not_uptodate:
3232 /*
3233 * Umm, take care of errors if the page isn't up-to-date.
3234 * Try to re-read it _once_. We do this synchronously,
3235 * because there really aren't any performance issues here
3236 * and we need to check for errors.
3237 */
3238 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3239 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3240 if (fpin)
3241 goto out_retry;
3242 folio_put(folio);
3243
3244 if (!error || error == AOP_TRUNCATED_PAGE)
3245 goto retry_find;
3246 filemap_invalidate_unlock_shared(mapping);
3247
3248 return VM_FAULT_SIGBUS;
3249
3250out_retry:
3251 /*
3252 * We dropped the mmap_lock, so we need to return to the fault handler to
3253 * re-find the vma and come back and find our hopefully still populated
3254 * page.
3255 */
3256 if (folio)
3257 folio_put(folio);
3258 if (mapping_locked)
3259 filemap_invalidate_unlock_shared(mapping);
3260 if (fpin)
3261 fput(fpin);
3262 return ret | VM_FAULT_RETRY;
3263}
3264EXPORT_SYMBOL(filemap_fault);
3265
3266static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
3267{
3268 struct mm_struct *mm = vmf->vma->vm_mm;
3269
3270 /* Huge page is mapped? No need to proceed. */
3271 if (pmd_trans_huge(*vmf->pmd)) {
3272 unlock_page(page);
3273 put_page(page);
3274 return true;
3275 }
3276
3277 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
3278 vm_fault_t ret = do_set_pmd(vmf, page);
3279 if (!ret) {
3280 /* The page is mapped successfully, reference consumed. */
3281 unlock_page(page);
3282 return true;
3283 }
3284 }
3285
3286 if (pmd_none(*vmf->pmd))
3287 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3288
3289 /* See comment in handle_pte_fault() */
3290 if (pmd_devmap_trans_unstable(vmf->pmd)) {
3291 unlock_page(page);
3292 put_page(page);
3293 return true;
3294 }
3295
3296 return false;
3297}
3298
3299static struct folio *next_uptodate_page(struct folio *folio,
3300 struct address_space *mapping,
3301 struct xa_state *xas, pgoff_t end_pgoff)
3302{
3303 unsigned long max_idx;
3304
3305 do {
3306 if (!folio)
3307 return NULL;
3308 if (xas_retry(xas, folio))
3309 continue;
3310 if (xa_is_value(folio))
3311 continue;
3312 if (folio_test_locked(folio))
3313 continue;
3314 if (!folio_try_get_rcu(folio))
3315 continue;
3316 /* Has the page moved or been split? */
3317 if (unlikely(folio != xas_reload(xas)))
3318 goto skip;
3319 if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3320 goto skip;
3321 if (!folio_trylock(folio))
3322 goto skip;
3323 if (folio->mapping != mapping)
3324 goto unlock;
3325 if (!folio_test_uptodate(folio))
3326 goto unlock;
3327 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3328 if (xas->xa_index >= max_idx)
3329 goto unlock;
3330 return folio;
3331unlock:
3332 folio_unlock(folio);
3333skip:
3334 folio_put(folio);
3335 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3336
3337 return NULL;
3338}
3339
3340static inline struct folio *first_map_page(struct address_space *mapping,
3341 struct xa_state *xas,
3342 pgoff_t end_pgoff)
3343{
3344 return next_uptodate_page(xas_find(xas, end_pgoff),
3345 mapping, xas, end_pgoff);
3346}
3347
3348static inline struct folio *next_map_page(struct address_space *mapping,
3349 struct xa_state *xas,
3350 pgoff_t end_pgoff)
3351{
3352 return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3353 mapping, xas, end_pgoff);
3354}
3355
3356vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3357 pgoff_t start_pgoff, pgoff_t end_pgoff)
3358{
3359 struct vm_area_struct *vma = vmf->vma;
3360 struct file *file = vma->vm_file;
3361 struct address_space *mapping = file->f_mapping;
3362 pgoff_t last_pgoff = start_pgoff;
3363 unsigned long addr;
3364 XA_STATE(xas, &mapping->i_pages, start_pgoff);
3365 struct folio *folio;
3366 struct page *page;
3367 unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3368 vm_fault_t ret = 0;
3369
3370 rcu_read_lock();
3371 folio = first_map_page(mapping, &xas, end_pgoff);
3372 if (!folio)
3373 goto out;
3374
3375 if (filemap_map_pmd(vmf, &folio->page)) {
3376 ret = VM_FAULT_NOPAGE;
3377 goto out;
3378 }
3379
3380 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3381 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3382 do {
3383again:
3384 page = folio_file_page(folio, xas.xa_index);
3385 if (PageHWPoison(page))
3386 goto unlock;
3387
3388 if (mmap_miss > 0)
3389 mmap_miss--;
3390
3391 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3392 vmf->pte += xas.xa_index - last_pgoff;
3393 last_pgoff = xas.xa_index;
3394
3395 /*
3396 * NOTE: If there are PTE markers, we'll leave them to be
3397 * handled in the specific fault path, and it'll prohibit the
3398 * fault-around logic.
3399 */
3400 if (!pte_none(*vmf->pte))
3401 goto unlock;
3402
3403 /* We're about to handle the fault */
3404 if (vmf->address == addr)
3405 ret = VM_FAULT_NOPAGE;
3406
3407 do_set_pte(vmf, page, addr);
3408 /* no need to invalidate: a not-present page won't be cached */
3409 update_mmu_cache(vma, addr, vmf->pte);
3410 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3411 xas.xa_index++;
3412 folio_ref_inc(folio);
3413 goto again;
3414 }
3415 folio_unlock(folio);
3416 continue;
3417unlock:
3418 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3419 xas.xa_index++;
3420 goto again;
3421 }
3422 folio_unlock(folio);
3423 folio_put(folio);
3424 } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3425 pte_unmap_unlock(vmf->pte, vmf->ptl);
3426out:
3427 rcu_read_unlock();
3428 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3429 return ret;
3430}
3431EXPORT_SYMBOL(filemap_map_pages);
3432
3433vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3434{
3435 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3436 struct folio *folio = page_folio(vmf->page);
3437 vm_fault_t ret = VM_FAULT_LOCKED;
3438
3439 sb_start_pagefault(mapping->host->i_sb);
3440 file_update_time(vmf->vma->vm_file);
3441 folio_lock(folio);
3442 if (folio->mapping != mapping) {
3443 folio_unlock(folio);
3444 ret = VM_FAULT_NOPAGE;
3445 goto out;
3446 }
3447 /*
3448 * We mark the folio dirty already here so that when freeze is in
3449 * progress, we are guaranteed that writeback during freezing will
3450 * see the dirty folio and writeprotect it again.
3451 */
3452 folio_mark_dirty(folio);
3453 folio_wait_stable(folio);
3454out:
3455 sb_end_pagefault(mapping->host->i_sb);
3456 return ret;
3457}
3458
3459const struct vm_operations_struct generic_file_vm_ops = {
3460 .fault = filemap_fault,
3461 .map_pages = filemap_map_pages,
3462 .page_mkwrite = filemap_page_mkwrite,
3463};
3464
3465/* This is used for a general mmap of a disk file */
3466
3467int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3468{
3469 struct address_space *mapping = file->f_mapping;
3470
3471 if (!mapping->a_ops->read_folio)
3472 return -ENOEXEC;
3473 file_accessed(file);
3474 vma->vm_ops = &generic_file_vm_ops;
3475 return 0;
3476}
3477
3478/*
3479 * This is for filesystems which do not implement ->writepage.
3480 */
3481int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3482{
3483 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3484 return -EINVAL;
3485 return generic_file_mmap(file, vma);
3486}
3487#else
3488vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3489{
3490 return VM_FAULT_SIGBUS;
3491}
3492int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3493{
3494 return -ENOSYS;
3495}
3496int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3497{
3498 return -ENOSYS;
3499}
3500#endif /* CONFIG_MMU */
3501
3502EXPORT_SYMBOL(filemap_page_mkwrite);
3503EXPORT_SYMBOL(generic_file_mmap);
3504EXPORT_SYMBOL(generic_file_readonly_mmap);
3505
3506static struct folio *do_read_cache_folio(struct address_space *mapping,
3507 pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3508{
3509 struct folio *folio;
3510 int err;
3511
3512 if (!filler)
3513 filler = mapping->a_ops->read_folio;
3514repeat:
3515 folio = filemap_get_folio(mapping, index);
3516 if (!folio) {
3517 folio = filemap_alloc_folio(gfp, 0);
3518 if (!folio)
3519 return ERR_PTR(-ENOMEM);
3520 err = filemap_add_folio(mapping, folio, index, gfp);
3521 if (unlikely(err)) {
3522 folio_put(folio);
3523 if (err == -EEXIST)
3524 goto repeat;
3525 /* Presumably ENOMEM for xarray node */
3526 return ERR_PTR(err);
3527 }
3528
3529 goto filler;
3530 }
3531 if (folio_test_uptodate(folio))
3532 goto out;
3533
3534 if (!folio_trylock(folio)) {
3535 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3536 goto repeat;
3537 }
3538
3539 /* Folio was truncated from mapping */
3540 if (!folio->mapping) {
3541 folio_unlock(folio);
3542 folio_put(folio);
3543 goto repeat;
3544 }
3545
3546 /* Someone else locked and filled the page in a very small window */
3547 if (folio_test_uptodate(folio)) {
3548 folio_unlock(folio);
3549 goto out;
3550 }
3551
3552filler:
3553 err = filemap_read_folio(file, filler, folio);
3554 if (err) {
3555 folio_put(folio);
3556 if (err == AOP_TRUNCATED_PAGE)
3557 goto repeat;
3558 return ERR_PTR(err);
3559 }
3560
3561out:
3562 folio_mark_accessed(folio);
3563 return folio;
3564}
3565
3566/**
3567 * read_cache_folio - Read into page cache, fill it if needed.
3568 * @mapping: The address_space to read from.
3569 * @index: The index to read.
3570 * @filler: Function to perform the read, or NULL to use aops->read_folio().
3571 * @file: Passed to filler function, may be NULL if not required.
3572 *
3573 * Read one page into the page cache. If it succeeds, the folio returned
3574 * will contain @index, but it may not be the first page of the folio.
3575 *
3576 * If the filler function returns an error, it will be returned to the
3577 * caller.
3578 *
3579 * Context: May sleep. Expects mapping->invalidate_lock to be held.
3580 * Return: An uptodate folio on success, ERR_PTR() on failure.
3581 */
3582struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3583 filler_t filler, struct file *file)
3584{
3585 return do_read_cache_folio(mapping, index, filler, file,
3586 mapping_gfp_mask(mapping));
3587}
3588EXPORT_SYMBOL(read_cache_folio);
3589
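/*
 * Illustrative sketch (not part of the original file): reading one byte of a
 * file through read_cache_folio() with the default ->read_folio filler.  The
 * example_peek_byte() name is hypothetical, and the caller is assumed to
 * satisfy the locking requirement documented above (mapping->invalidate_lock
 * held).
 */
#if 0
static int example_peek_byte(struct address_space *mapping, loff_t pos, u8 *val)
{
	struct folio *folio;
	u8 *kaddr;

	folio = read_cache_folio(mapping, pos >> PAGE_SHIFT, NULL, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* Map just long enough to copy the byte out, then drop the ref. */
	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
	*val = *kaddr;
	kunmap_local(kaddr);

	folio_put(folio);
	return 0;
}
#endif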
3590static struct page *do_read_cache_page(struct address_space *mapping,
3591 pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3592{
3593 struct folio *folio;
3594
3595 folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3596 if (IS_ERR(folio))
3597 return &folio->page;
3598 return folio_file_page(folio, index);
3599}
3600
3601struct page *read_cache_page(struct address_space *mapping,
3602 pgoff_t index, filler_t *filler, struct file *file)
3603{
3604 return do_read_cache_page(mapping, index, filler, file,
3605 mapping_gfp_mask(mapping));
3606}
3607EXPORT_SYMBOL(read_cache_page);
3608
3609/**
3610 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3611 * @mapping: the page's address_space
3612 * @index: the page index
3613 * @gfp: the page allocator flags to use if allocating
3614 *
3615 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3616 * any new page allocations done using the specified allocation flags.
3617 *
3618 * If the page does not get brought uptodate, ERR_PTR(-EIO) is returned.
3619 *
3620 * The function expects mapping->invalidate_lock to be already held.
3621 *
3622 * Return: up to date page on success, ERR_PTR() on failure.
3623 */
3624struct page *read_cache_page_gfp(struct address_space *mapping,
3625 pgoff_t index,
3626 gfp_t gfp)
3627{
3628 return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3629}
3630EXPORT_SYMBOL(read_cache_page_gfp);
3631
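/*
 * Illustrative sketch (not part of the original file): the GFP-aware variant
 * is useful when the read is issued from a context that must not recurse
 * into filesystem reclaim, so any new page-cache allocation is constrained
 * (e.g. no __GFP_FS).  example_read_meta_page() is a hypothetical helper.
 */
#if 0
static struct page *example_read_meta_page(struct address_space *mapping,
					   pgoff_t index)
{
	return read_cache_page_gfp(mapping, index,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
}
#endif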
3632/*
3633 * Warn about a page cache invalidation failure during a direct I/O write.
3634 */
3635void dio_warn_stale_pagecache(struct file *filp)
3636{
3637 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3638 char pathname[128];
3639 char *path;
3640
3641 errseq_set(&filp->f_mapping->wb_err, -EIO);
3642 if (__ratelimit(&_rs)) {
3643 path = file_path(filp, pathname, sizeof(pathname));
3644 if (IS_ERR(path))
3645 path = "(unknown)";
3646 pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
3647 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3648 current->comm);
3649 }
3650}
3651
3652ssize_t
3653generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3654{
3655 struct file *file = iocb->ki_filp;
3656 struct address_space *mapping = file->f_mapping;
3657 struct inode *inode = mapping->host;
3658 loff_t pos = iocb->ki_pos;
3659 ssize_t written;
3660 size_t write_len;
3661 pgoff_t end;
3662
3663 write_len = iov_iter_count(from);
3664 end = (pos + write_len - 1) >> PAGE_SHIFT;
3665
3666 if (iocb->ki_flags & IOCB_NOWAIT) {
3667 /* If there are pages to write back, return -EAGAIN */
3668 if (filemap_range_has_page(file->f_mapping, pos,
3669 pos + write_len - 1))
3670 return -EAGAIN;
3671 } else {
3672 written = filemap_write_and_wait_range(mapping, pos,
3673 pos + write_len - 1);
3674 if (written)
3675 goto out;
3676 }
3677
3678 /*
3679 * After a write we want buffered reads to be sure to go to disk to get
3680 * the new data. We invalidate clean cached pages from the region we're
3681 * about to write. We do this *before* the write so that we can return
3682 * without clobbering -EIOCBQUEUED from ->direct_IO().
3683 */
3684 written = invalidate_inode_pages2_range(mapping,
3685 pos >> PAGE_SHIFT, end);
3686 /*
3687 * If a page cannot be invalidated, return 0 to fall back
3688 * to a buffered write.
3689 */
3690 if (written) {
3691 if (written == -EBUSY)
3692 return 0;
3693 goto out;
3694 }
3695
3696 written = mapping->a_ops->direct_IO(iocb, from);
3697
3698 /*
3699 * Finally, try again to invalidate clean pages which might have been
3700 * cached by non-direct readahead, or faulted in by get_user_pages()
3701 * if the source of the write was an mmap'ed region of the file
3702 * we're writing. Either one is a pretty crazy thing to do,
3703 * so we don't support it 100%. If this invalidation
3704 * fails, tough, the write still worked...
3705 *
3706 * Most of the time we do not need this since dio_complete() will do
3707 * the invalidation for us. However there are some file systems that
3708 * do not end up with dio_complete() being called, so let's not break
3709 * them by removing it completely.
3710 *
3711 * A notable example is blkdev_direct_IO().
3712 *
3713 * Skip invalidation for async writes or if mapping has no pages.
3714 */
3715 if (written > 0 && mapping->nrpages &&
3716 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3717 dio_warn_stale_pagecache(file);
3718
3719 if (written > 0) {
3720 pos += written;
3721 write_len -= written;
3722 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3723 i_size_write(inode, pos);
3724 mark_inode_dirty(inode);
3725 }
3726 iocb->ki_pos = pos;
3727 }
3728 if (written != -EIOCBQUEUED)
3729 iov_iter_revert(from, write_len - iov_iter_count(from));
3730out:
3731 return written;
3732}
3733EXPORT_SYMBOL(generic_file_direct_write);
3734
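/*
 * Illustrative sketch (not part of the original file): generic_file_direct_write()
 * funnels the actual I/O through mapping->a_ops->direct_IO().  A classic
 * block-based filesystem provides that hook via blockdev_direct_IO() and its
 * get_block callback; examplefs_get_block() is hypothetical and not defined
 * here.
 */
#if 0
static ssize_t examplefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return blockdev_direct_IO(iocb, inode, iter, examplefs_get_block);
}
#endif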
3735ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3736{
3737 struct file *file = iocb->ki_filp;
3738 loff_t pos = iocb->ki_pos;
3739 struct address_space *mapping = file->f_mapping;
3740 const struct address_space_operations *a_ops = mapping->a_ops;
3741 long status = 0;
3742 ssize_t written = 0;
3743
3744 do {
3745 struct page *page;
3746 unsigned long offset; /* Offset into pagecache page */
3747 unsigned long bytes; /* Bytes to write to page */
3748 size_t copied; /* Bytes copied from user */
3749 void *fsdata = NULL;
3750
3751 offset = (pos & (PAGE_SIZE - 1));
3752 bytes = min_t(unsigned long, PAGE_SIZE - offset,
3753 iov_iter_count(i));
3754
3755again:
3756 /*
3757 * Bring in the user page that we will copy from _first_.
3758 * Otherwise there's a nasty deadlock on copying from the
3759 * same page as we're writing to, without it being marked
3760 * up-to-date.
3761 */
3762 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3763 status = -EFAULT;
3764 break;
3765 }
3766
3767 if (fatal_signal_pending(current)) {
3768 status = -EINTR;
3769 break;
3770 }
3771
3772 status = a_ops->write_begin(file, mapping, pos, bytes,
3773 &page, &fsdata);
3774 if (unlikely(status < 0))
3775 break;
3776
3777 if (mapping_writably_mapped(mapping))
3778 flush_dcache_page(page);
3779
3780 copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3781 flush_dcache_page(page);
3782
3783 status = a_ops->write_end(file, mapping, pos, bytes, copied,
3784 page, fsdata);
3785 if (unlikely(status != copied)) {
3786 iov_iter_revert(i, copied - max(status, 0L));
3787 if (unlikely(status < 0))
3788 break;
3789 }
3790 cond_resched();
3791
3792 if (unlikely(status == 0)) {
3793 /*
3794 * A short copy made ->write_end() reject the
3795 * thing entirely. Might be memory poisoning
3796 * halfway through, might be a race with munmap,
3797 * might be severe memory pressure.
3798 */
3799 if (copied)
3800 bytes = copied;
3801 goto again;
3802 }
3803 pos += status;
3804 written += status;
3805
3806 balance_dirty_pages_ratelimited(mapping);
3807 } while (iov_iter_count(i));
3808
3809 return written ? written : status;
3810}
3811EXPORT_SYMBOL(generic_perform_write);
3812
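/*
 * Illustrative sketch (not part of the original file): a filesystem
 * participates in the copy loop above by supplying ->write_begin and
 * ->write_end in its address_space_operations.  The libfs "simple_" helpers
 * are one common choice for in-memory filesystems; "examplefs" is again a
 * hypothetical name.
 */
#if 0
static const struct address_space_operations examplefs_aops = {
	.read_folio	= simple_read_folio,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
	.dirty_folio	= noop_dirty_folio,
};
#endif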
3813/**
3814 * __generic_file_write_iter - write data to a file
3815 * @iocb: IO state structure (file, offset, etc.)
3816 * @from: iov_iter with data to write
3817 *
3818 * This function does all the work needed for actually writing data to a
3819 * file. It does all basic checks, removes SUID from the file, updates
3820 * modification times and calls the appropriate subroutines depending on
3821 * whether we do direct I/O or a standard buffered write.
3822 *
3823 * It expects i_rwsem to be grabbed unless we work on a block device or similar
3824 * object which does not need locking at all.
3825 *
3826 * This function does *not* take care of syncing data in case of O_SYNC write.
3827 * A caller has to handle it. This is mainly due to the fact that we want to
3828 * avoid syncing under i_rwsem.
3829 *
3830 * Return:
3831 * * number of bytes written, even for truncated writes
3832 * * negative error code if no data has been written at all
3833 */
3834ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3835{
3836 struct file *file = iocb->ki_filp;
3837 struct address_space *mapping = file->f_mapping;
3838 struct inode *inode = mapping->host;
3839 ssize_t written = 0;
3840 ssize_t err;
3841 ssize_t status;
3842
3843 /* We can write back this queue in page reclaim */
3844 current->backing_dev_info = inode_to_bdi(inode);
3845 err = file_remove_privs(file);
3846 if (err)
3847 goto out;
3848
3849 err = file_update_time(file);
3850 if (err)
3851 goto out;
3852
3853 if (iocb->ki_flags & IOCB_DIRECT) {
3854 loff_t pos, endbyte;
3855
3856 written = generic_file_direct_write(iocb, from);
3857 /*
3858 * If the write stopped short of completing, fall back to
3859 * buffered writes. Some filesystems do this for writes to
3860 * holes, for example. For DAX files, a buffered write will
3861 * not succeed (even if it did, DAX does not handle dirty
3862 * page-cache pages correctly).
3863 */
3864 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
3865 goto out;
3866
3867 pos = iocb->ki_pos;
3868 status = generic_perform_write(iocb, from);
3869 /*
3870 * If generic_perform_write() returned a synchronous error
3871 * then we want to return the number of bytes which were
3872 * direct-written, or the error code if that was zero. Note
3873 * that this differs from normal direct-io semantics, which
3874 * will return -EFOO even if some bytes were written.
3875 */
3876 if (unlikely(status < 0)) {
3877 err = status;
3878 goto out;
3879 }
3880 /*
3881 * We need to ensure that the page cache pages are written to
3882 * disk and invalidated to preserve the expected O_DIRECT
3883 * semantics.
3884 */
3885 endbyte = pos + status - 1;
3886 err = filemap_write_and_wait_range(mapping, pos, endbyte);
3887 if (err == 0) {
3888 iocb->ki_pos = endbyte + 1;
3889 written += status;
3890 invalidate_mapping_pages(mapping,
3891 pos >> PAGE_SHIFT,
3892 endbyte >> PAGE_SHIFT);
3893 } else {
3894 /*
3895 * We don't know how much we wrote, so just return
3896 * the number of bytes which were direct-written
3897 */
3898 }
3899 } else {
3900 written = generic_perform_write(iocb, from);
3901 if (likely(written > 0))
3902 iocb->ki_pos += written;
3903 }
3904out:
3905 current->backing_dev_info = NULL;
3906 return written ? written : err;
3907}
3908EXPORT_SYMBOL(__generic_file_write_iter);
3909
3910/**
3911 * generic_file_write_iter - write data to a file
3912 * @iocb: IO state structure
3913 * @from: iov_iter with data to write
3914 *
3915 * This is a wrapper around __generic_file_write_iter() to be used by most
3916 * filesystems. It takes care of syncing the file when opened with O_SYNC
3917 * and acquires i_rwsem as needed.
3918 * Return:
3919 * * negative error code if no data has been written at all or
3920 * vfs_fsync_range() failed for a synchronous write
3921 * * number of bytes written, even for truncated writes
3922 */
3923ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3924{
3925 struct file *file = iocb->ki_filp;
3926 struct inode *inode = file->f_mapping->host;
3927 ssize_t ret;
3928
3929 inode_lock(inode);
3930 ret = generic_write_checks(iocb, from);
3931 if (ret > 0)
3932 ret = __generic_file_write_iter(iocb, from);
3933 inode_unlock(inode);
3934
3935 if (ret > 0)
3936 ret = generic_write_sync(iocb, ret);
3937 return ret;
3938}
3939EXPORT_SYMBOL(generic_file_write_iter);
3940
3941/**
3942 * filemap_release_folio() - Release fs-specific metadata on a folio.
3943 * @folio: The folio which the kernel is trying to free.
3944 * @gfp: Memory allocation flags (and I/O mode).
3945 *
3946 * The address_space is trying to release any data attached to a folio
3947 * (presumably at folio->private).
3948 *
3949 * This will also be called if the private_2 flag is set on the folio,
3950 * indicating that the folio has other metadata associated with it.
3951 *
3952 * The @gfp argument specifies whether I/O may be performed to release
3953 * this page (__GFP_IO), and whether the call may block
3954 * (__GFP_RECLAIM & __GFP_FS).
3955 *
3956 * Return: %true if the release was successful, otherwise %false.
3957 */
3958bool filemap_release_folio(struct folio *folio, gfp_t gfp)
3959{
3960 struct address_space * const mapping = folio->mapping;
3961
3962 BUG_ON(!folio_test_locked(folio));
3963 if (folio_test_writeback(folio))
3964 return false;
3965
3966 if (mapping && mapping->a_ops->release_folio)
3967 return mapping->a_ops->release_folio(folio, gfp);
3968 return try_to_free_buffers(folio);
3969}
3970EXPORT_SYMBOL(filemap_release_folio);
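/*
 * Illustrative sketch (not part of the original file): filesystems that keep
 * metadata in folio->private advertise a ->release_folio handler, which the
 * helper above calls; when none is set, try_to_free_buffers() is the
 * fallback for buffer_head based filesystems.  example_release_folio() is a
 * hypothetical handler shown only to illustrate the contract; it ignores
 * @gfp because it never blocks or does I/O.
 */
#if 0
static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
	/* Detach and free whatever this filesystem hung off folio->private. */
	kfree(folio_detach_private(folio));
	return true;
}
#endif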