
mm: return an ERR_PTR from __filemap_get_folio

Instead of returning NULL for all errors, distinguish between:

- no entry found and not asked to allocate (-ENOENT)
- failed to allocate memory (-ENOMEM)
- would block (-EAGAIN)

so that callers don't have to guess the error based on the passed-in
flags.

Also propagate the error through the direct callers: filemap_get_folio,
filemap_lock_folio, filemap_grab_folio and filemap_get_incore_folio.
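
For illustration, a caller under the new convention looks roughly like
this. This is a minimal sketch, not code from the patch itself; mapping,
index, ret and the error label are assumed surrounding context:

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio)) {
		/* -ENOMEM, -EAGAIN (FGP_NOWAIT) or -ENOENT (without FGP_CREAT) */
		ret = PTR_ERR(folio);
		goto error;
	}
	/* ... operate on the locked folio ... */
	folio_unlock(folio);
	folio_put(folio);

Previously such a caller had to hard-code ret = -ENOMEM, because a NULL
return carried no information about which failure actually occurred.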

[hch@lst.de: fix null-pointer deref]
Link: https://lkml.kernel.org/r/20230310070023.GA13563@lst.de
Link: https://lkml.kernel.org/r/20230310043137.GA1624890@u2004
Link: https://lkml.kernel.org/r/20230307143410.28031-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> [nilfs2]
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Christoph Hellwig, committed by Andrew Morton
66dabbb6 48c9d113

+67 -65
+5 -5
fs/afs/dir.c
···
 	struct folio *folio;
 
 	folio = filemap_get_folio(mapping, i);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
 			afs_stat_v(dvnode, n_inval);
-
-		ret = -ENOMEM;
 		folio = __filemap_get_folio(mapping,
 					    i, FGP_LOCK | FGP_CREAT,
 					    mapping->gfp_mask);
-		if (!folio)
+		if (IS_ERR(folio)) {
+			ret = PTR_ERR(folio);
 			goto error;
+		}
 		folio_attach_private(folio, (void *)1);
 		folio_unlock(folio);
 	}
···
 	 */
 	folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
 				    FGP_ACCESSED, 0);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
 		break;
 	}
+1 -1
fs/afs/dir_edit.c
···
 	folio = __filemap_get_folio(mapping, index,
 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 				    mapping->gfp_mask);
-	if (!folio)
+	if (IS_ERR(folio))
 		clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
 	else if (folio && !folio_test_private(folio))
 		folio_attach_private(folio, (void *)1);
+2 -2
fs/afs/write.c
···
 	_debug("kill %lx (to %lx)", index, last);
 
 	folio = filemap_get_folio(mapping, index);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		next = index + 1;
 		continue;
 	}
···
 	_debug("redirty %llx @%llx", len, start);
 
 	folio = filemap_get_folio(mapping, index);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		next = index + 1;
 		continue;
 	}
+1 -1
fs/ext4/inode.c
···
 	while (1) {
 		struct folio *folio = filemap_lock_folio(inode->i_mapping,
 					inode->i_size >> PAGE_SHIFT);
-		if (!folio)
+		if (IS_ERR(folio))
 			return;
 		ret = __ext4_journalled_invalidate_folio(folio, offset,
 				folio_size(folio) - offset);
+4 -4
fs/ext4/move_extent.c
···
 	flags = memalloc_nofs_save();
 	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
 			mapping_gfp_mask(mapping[0]));
-	if (!folio[0]) {
+	if (IS_ERR(folio[0])) {
 		memalloc_nofs_restore(flags);
-		return -ENOMEM;
+		return PTR_ERR(folio[0]);
 	}
 
 	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
 			mapping_gfp_mask(mapping[1]));
 	memalloc_nofs_restore(flags);
-	if (!folio[1]) {
+	if (IS_ERR(folio[1])) {
 		folio_unlock(folio[0]);
 		folio_put(folio[0]);
-		return -ENOMEM;
+		return PTR_ERR(folio[1]);
 	}
 	/*
 	 * __filemap_get_folio() may not wait on folio's writeback if
+1 -1
fs/hugetlbfs/inode.c
···
 	struct folio *folio;
 
 	folio = filemap_lock_folio(mapping, idx);
-	if (!folio)
+	if (IS_ERR(folio))
 		return;
 
 	start = start & ~huge_page_mask(h);
+2 -9
fs/iomap/buffered-io.c
···
 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
 {
 	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
-	struct folio *folio;
 
 	if (iter->flags & IOMAP_NOWAIT)
 		fgp |= FGP_NOWAIT;
 
-	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
 			fgp, mapping_gfp_mask(iter->inode->i_mapping));
-	if (folio)
-		return folio;
-
-	if (iter->flags & IOMAP_NOWAIT)
-		return ERR_PTR(-EAGAIN);
-	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(iomap_get_folio);
···
 		/* grab locked page */
 		folio = filemap_lock_folio(inode->i_mapping,
 				start_byte >> PAGE_SHIFT);
-		if (!folio) {
+		if (IS_ERR(folio)) {
 			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
 					PAGE_SIZE;
 			continue;
+2 -2
fs/netfs/buffered_read.c
···
 retry:
 	folio = __filemap_get_folio(mapping, index, fgp_flags,
 				    mapping_gfp_mask(mapping));
-	if (!folio)
-		return -ENOMEM;
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
 	if (ctx->ops->check_write_begin) {
 		/* Allow the netfs (eg. ceph) to flush conflicts. */
+2 -2
fs/nfs/file.c
···
 
 start:
 	folio = nfs_folio_grab_cache_write_begin(mapping, pos >> PAGE_SHIFT);
-	if (!folio)
-		return -ENOMEM;
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 	*pagep = &folio->page;
 
 	ret = nfs_flush_incompatible(file, folio);
+3 -3
fs/nilfs2/page.c
···
 			NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");
 
 		dfolio = filemap_grab_folio(dmap, folio->index);
-		if (unlikely(!dfolio)) {
+		if (unlikely(IS_ERR(dfolio))) {
 			/* No empty page is added to the page cache */
-			err = -ENOMEM;
 			folio_unlock(folio);
+			err = PTR_ERR(dfolio);
 			break;
 		}
 		if (unlikely(!folio_buffers(folio)))
···
 
 		folio_lock(folio);
 		dfolio = filemap_lock_folio(dmap, index);
-		if (dfolio) {
+		if (!IS_ERR(dfolio)) {
 			/* overwrite existing folio in the destination cache */
 			WARN_ON(folio_test_dirty(dfolio));
 			nilfs_copy_page(&dfolio->page, &folio->page, 0);
+6 -5
include/linux/pagemap.h
···
  * Looks up the page cache entry at @mapping & @index. If a folio is
  * present, it is returned with an increased refcount.
  *
- * Otherwise, %NULL is returned.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
  */
 static inline struct folio *filemap_get_folio(struct address_space *mapping,
 		pgoff_t index)
···
  * present, it is returned locked with an increased refcount.
  *
  * Context: May sleep.
- * Return: A folio or %NULL if there is no folio in the cache for this
- * index. Will not return a shadow, swap or DAX entry.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
  */
 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
 		pgoff_t index)
···
  * a new folio is created. The folio is locked, marked as accessed, and
  * returned.
  *
- * Return: A found or created folio. NULL if no folio is found and failed to
- * create a folio.
+ * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
+ * and failed to create a folio.
  */
 static inline struct folio *filemap_grab_folio(struct address_space *mapping,
 		pgoff_t index)
+8 -6
mm/filemap.c
···
  *
  * If there is a page cache page, it is returned with an increased refcount.
  *
- * Return: The found folio or %NULL otherwise.
+ * Return: The found folio or an ERR_PTR() otherwise.
  */
 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		int fgp_flags, gfp_t gfp)
···
 		if (fgp_flags & FGP_NOWAIT) {
 			if (!folio_trylock(folio)) {
 				folio_put(folio);
-				return NULL;
+				return ERR_PTR(-EAGAIN);
 			}
 		} else {
 			folio_lock(folio);
···
 
 		folio = filemap_alloc_folio(gfp, 0);
 		if (!folio)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 
 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;
···
 			folio_unlock(folio);
 	}
 
+	if (!folio)
+		return ERR_PTR(-ENOENT);
 	return folio;
 }
 EXPORT_SYMBOL(__filemap_get_folio);
···
 	 * Do we have something in the page cache already?
 	 */
 	folio = filemap_get_folio(mapping, index);
-	if (likely(folio)) {
+	if (likely(!IS_ERR(folio))) {
 		/*
 		 * We found the page, so try async readahead before waiting for
 		 * the lock.
···
 	folio = __filemap_get_folio(mapping, index,
 					FGP_CREAT|FGP_FOR_MMAP,
 					vmf->gfp_mask);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		if (fpin)
 			goto out_retry;
 		filemap_invalidate_unlock_shared(mapping);
···
 		filler = mapping->a_ops->read_folio;
 repeat:
 	folio = filemap_get_folio(mapping, index);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		folio = filemap_alloc_folio(gfp, 0);
 		if (!folio)
 			return ERR_PTR(-ENOMEM);
+1 -1
mm/folio-compat.c
···
 	struct folio *folio;
 
 	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
-	if (!folio)
+	if (IS_ERR(folio))
 		return NULL;
 	return folio_file_page(folio, index);
 }
+1 -1
mm/huge_memory.c
···
 		struct folio *folio = filemap_get_folio(mapping, index);
 
 		nr_pages = 1;
-		if (!folio)
+		if (IS_ERR(folio))
 			continue;
 
 		if (!folio_test_large(folio))
+4 -2
mm/hugetlb.c
···
 	 */
 	new_folio = false;
 	folio = filemap_lock_folio(mapping, idx);
-	if (!folio) {
+	if (IS_ERR(folio)) {
 		size = i_size_read(mapping->host) >> huge_page_shift(h);
 		if (idx >= size)
 			goto out;
···
 		vma_end_reservation(h, vma, haddr);
 
 		pagecache_folio = filemap_lock_folio(mapping, idx);
+		if (IS_ERR(pagecache_folio))
+			pagecache_folio = NULL;
 	}
 
 	ptl = huge_pte_lock(h, mm, ptep);
···
 	if (is_continue) {
 		ret = -EFAULT;
 		folio = filemap_lock_folio(mapping, idx);
-		if (!folio)
+		if (IS_ERR(folio))
 			goto out;
 		folio_in_pagecache = true;
 	} else if (!*pagep) {
+1 -1
mm/memcontrol.c
···
 	/* shmem/tmpfs may report page out on swap: account for that too. */
 	index = linear_page_index(vma, addr);
 	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
-	if (!folio)
+	if (IS_ERR(folio))
 		return NULL;
 	return folio_file_page(folio, index);
 }
+1 -1
mm/mincore.c
···
 	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
 	 */
 	folio = filemap_get_incore_folio(mapping, index);
-	if (folio) {
+	if (!IS_ERR(folio)) {
 		present = folio_test_uptodate(folio);
 		folio_put(folio);
 	}
+2 -2
mm/shmem.c
···
 
 	index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
 	folio = filemap_get_folio(inode->i_mapping, index);
-	if (!folio)
+	if (IS_ERR(folio))
 		goto drop;
 
 	/* No huge page at the end of the file: nothing to split */
···
 
 	if (!dentry) {
 		folio = filemap_get_folio(inode->i_mapping, 0);
-		if (!folio)
+		if (IS_ERR(folio))
 			return ERR_PTR(-ECHILD);
 		if (PageHWPoison(folio_page(folio, 0)) ||
 		    !folio_test_uptodate(folio)) {
+10 -7
mm/swap_state.c
···
 	struct folio *folio;
 
 	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
-	if (folio) {
+	if (!IS_ERR(folio)) {
 		bool vma_ra = swap_use_vma_readahead();
 		bool readahead;
 
···
 			if (!vma || !vma_ra)
 				atomic_inc(&swapin_readahead_hits);
 		}
+	} else {
+		folio = NULL;
 	}
 
 	return folio;
···
 	struct swap_info_struct *si;
 	struct folio *folio = filemap_get_entry(mapping, index);
 
+	if (!folio)
+		return ERR_PTR(-ENOENT);
 	if (!xa_is_value(folio))
-		goto out;
+		return folio;
 	if (!shmem_mapping(mapping))
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
 	swp = radix_to_swp_entry(folio);
 	/* There might be swapin error entries in shmem mapping. */
 	if (non_swap_entry(swp))
-		return NULL;
+		return ERR_PTR(-ENOENT);
 	/* Prevent swapoff from happening to us */
 	si = get_swap_device(swp);
 	if (!si)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 	index = swp_offset(swp);
 	folio = filemap_get_folio(swap_address_space(swp), index);
 	put_swap_device(si);
-out:
 	return folio;
 }
···
 		folio = filemap_get_folio(swap_address_space(entry),
 						swp_offset(entry));
 		put_swap_device(si);
-		if (folio)
+		if (!IS_ERR(folio))
 			return folio_file_page(folio, swp_offset(entry));
 
 		/*
+2 -2
mm/swapfile.c
···
 	int ret = 0;
 
 	folio = filemap_get_folio(swap_address_space(entry), offset);
-	if (!folio)
+	if (IS_ERR(folio))
 		return 0;
 	/*
 	 * When this function is called from scan_swap_map_slots() and it's
···
 
 		entry = swp_entry(type, i);
 		folio = filemap_get_folio(swap_address_space(entry), i);
-		if (!folio)
+		if (IS_ERR(folio))
 			continue;
 
 		/*
+8 -7
mm/truncate.c
···
 
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
-	if (folio) {
+	if (!IS_ERR(folio)) {
 		same_folio = lend < folio_pos(folio) + folio_size(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
 			start = folio->index + folio_nr_pages(folio);
···
 		folio = NULL;
 	}
 
-	if (!same_folio)
+	if (!same_folio) {
 		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
 						FGP_LOCK, 0);
-	if (folio) {
-		if (!truncate_inode_partial_folio(folio, lstart, lend))
-			end = folio->index;
-		folio_unlock(folio);
-		folio_put(folio);
+		if (!IS_ERR(folio)) {
+			if (!truncate_inode_partial_folio(folio, lstart, lend))
+				end = folio->index;
+			folio_unlock(folio);
+			folio_put(folio);
+		}
 	}
 
 	index = start;