Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/filemap: Add readahead_folio()

The pointers stored in the page cache are folios, by definition.
This change comes with a behaviour change -- callers of readahead_folio()
are no longer required to put the page reference themselves. This matches
how readpage works, rather than matching how readpages used to work.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>

+39 -15
include/linux/pagemap.h
/*
 * __readahead_folio - Advance past the previous batch and return the next
 * folio in the readahead window.
 *
 * Consumes the previous batch (_batch_count pages) from the request,
 * then looks up the folio now at ->_index in the page cache and records
 * its size as the new batch.  Returns %NULL when the window is exhausted.
 *
 * The returned folio is locked (enforced by the VM_BUG_ON_FOLIO below) and
 * still carries the reference taken when it was added to the page cache;
 * the two public wrappers below differ only in who drops that reference.
 */
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	/*
	 * NOTE(review): when folio is NULL this relies on 'page' being the
	 * first member of struct folio, so &folio->page is also NULL rather
	 * than a dereference — presumably guaranteed elsewhere; confirm
	 * against the struct folio definition.
	 */
	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	/*
	 * Unlike readahead_page(), drop the page cache reference here so
	 * callers are not required to put it themselves (the behaviour
	 * change called out in the commit message).
	 */
	if (folio)
		folio_put(folio);
	return folio;
}