/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

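/*
 * Example (an illustrative sketch, not part of this header): the
 * sample-then-check pattern these helpers support, in a simplified
 * ->fsync() method. "myfs_fsync" is hypothetical; real callers
 * normally go through file_write_and_wait_range() and the per-file
 * f_wb_err cursor.
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		errseq_t since = filemap_sample_wb_err(mapping);
 *
 *		filemap_write_and_wait_range(mapping, start, end);
 *		return filemap_check_wb_err(mapping, since);
 *	}
 */
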
/*
 * Flush file data before changing attributes. Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/*
 * Large folio support currently depends on THP. These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored. This is different
 * from the mapping returned by folio_mapping(). The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Anonymous folios return NULL, even if they're in
 * the swap cache. Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code. If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return NULL;

	return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the folio's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data. The folio must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the folio.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

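/*
 * Example (an illustrative sketch): a filesystem that keeps per-folio
 * state attaches it once the folio is set up and frees it when the
 * folio is released. "struct myfs_state" is hypothetical.
 *
 *	struct myfs_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *	folio_attach_private(folio, state);	// takes a folio reference
 *	...
 *	kfree(folio_detach_private(folio));	// drops that reference
 */
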
/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size. I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER	8
#endif

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio(). For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list. The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache. If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED		((__force fgf_t)0x00000001)
#define FGP_LOCK		((__force fgf_t)0x00000002)
#define FGP_CREAT		((__force fgf_t)0x00000004)
#define FGP_WRITE		((__force fgf_t)0x00000008)
#define FGP_NOFS		((__force fgf_t)0x00000010)
#define FGP_NOWAIT		((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
#define FGP_STABLE		((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN	(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created. If there is already a folio at
 * the index, it will be returned, no matter what its size. If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
	unsigned int shift = ilog2(size);

	if (shift <= PAGE_SHIFT)
		return 0;
	return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}

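/*
 * Example (a hedged sketch of a ->write_begin() style lookup): suggest
 * a folio large enough to cover a write of @len bytes at @pos. "pos"
 * and "len" are hypothetical parameters.
 *
 *	struct folio *folio;
 *
 *	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
 *			FGP_WRITEBEGIN | fgf_set_order(len),
 *			mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */
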
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio, or ERR_PTR(-ENOMEM) if the folio was
 * not found and could not be created.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}

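/*
 * Example (sketch): these lookups return an ERR_PTR-encoded value, not
 * NULL, so callers check with IS_ERR():
 *
 *	struct folio *folio = filemap_get_folio(mapping, index);
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);	// -ENOENT: nothing cached
 *	...
 *	folio_put(folio);		// drop the lookup's reference
 */
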
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, fgf_t fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to. If you know
 * the folio is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return &folio->page;
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return folio->index == index;
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	if (unlikely(folio_test_hugetlb(folio)))
		return hugetlb_basepage_index(&folio->page);
	return folio->index;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;

	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order). Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should. It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap. It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary). Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep. If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space. If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

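/*
 * Example (sketch): taking two folio locks within one file obeys the
 * ordering rule documented above: lock the lower index first.
 *
 *	if (folio_a->index > folio_b->index)
 *		swap(folio_a, folio_b);
 *	folio_lock(folio_a);
 *	folio_lock(folio_b);
 */
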
/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep. Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
					     struct vm_fault *vmf)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_or_retry(folio, vmf);
	return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	__folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __set_page_dirty_nobuffers(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

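/*
 * Example (a hedged sketch): these helpers return the number of bytes
 * that could NOT be faulted in, so 0 means the whole range is now
 * accessible. A caller that must not take page faults later (eg while
 * holding locks) typically pre-faults the buffer and bails out on
 * failure:
 *
 *	if (fault_in_writeable(uaddr, size))
 *		return -EFAULT;
 */
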
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback. Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}

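/*
 * Example (sketch): how an IOCB_NOWAIT direct-I/O read avoids blocking
 * on writeback; generic_file_read_iter() does something similar.
 * "pos" and "count" are hypothetical locals.
 *
 *	if (iocb->ki_flags & IOCB_NOWAIT) {
 *		if (filemap_range_needs_writeback(mapping, pos,
 *						  pos + count - 1))
 *			return -EAGAIN;
 *	}
 */
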
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state. May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
	bool _workingset;
	unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read. The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

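/*
 * Example (sketch): the classic cache-miss pattern in a buffered read
 * path: kick off synchronous readahead, then repeat the lookup.
 * "last_index" (one past the last index the caller wants) is
 * hypothetical.
 *
 *	folio = filemap_get_folio(mapping, index);
 *	if (IS_ERR(folio)) {
 *		page_cache_sync_readahead(mapping, ra, file, index,
 *					  last_index - index);
 *		folio = filemap_get_folio(mapping, index);
 *	}
 */
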
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, folio, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked. The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

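/*
 * Example (sketch): the shape of a minimal ->readahead() implementation
 * consuming the request folio by folio. "myfs_start_read" is a
 * hypothetical helper which submits the I/O and unlocks the folio on
 * completion.
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)))
 *			myfs_start_read(ractl->file, folio);
 *	}
 */
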
static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount of each page once it has been submitted for
 * I/O and unlock each page once all I/O to it has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}

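/*
 * Example (sketch): a ->page_mkwrite() handler revalidating against
 * truncation after locking the folio.
 *
 *	folio_lock(folio);
 *	ret = folio_mkwrite_check_truncate(folio, inode);
 *	if (ret < 0) {
 *		folio_unlock(folio);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	// the first "ret" bytes of the folio are still within EOF
 */
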
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */