Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: return void from folio_start_writeback() and related functions

Nobody now checks the return value from any of these functions, so
add an assertion at the beginning of the function and return void.

Link: https://lkml.kernel.org/r/20231108204605.745109-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Steve French <sfrench@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by Andrew Morton.
b5612c36 a9540e35

+29 -33
+2 -2
include/linux/page-flags.h
@@ -772,8 +772,8 @@
 
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
 
 #define folio_start_writeback(folio) \
 	__folio_start_writeback(folio, false)
+2 -2
mm/folio-compat.c
@@ -46,9 +46,9 @@
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-bool set_page_writeback(struct page *page)
+void set_page_writeback(struct page *page)
 {
-	return folio_start_writeback(page_folio(page));
+	folio_start_writeback(page_folio(page));
 }
 EXPORT_SYMBOL(set_page_writeback);
+25 -29
mm/page-writeback.c
@@ -2982 +2982 @@ ("···" marks context collapsed in the original page view)
 	return ret;
 }
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
 {
 	long nr = folio_nr_pages(folio);
 	struct address_space *mapping = folio_mapping(folio);
-	bool ret;
 	int access_ret;
+
+	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
 
 	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
···
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
+		bool on_wblist;
 
 		xas_lock_irqsave(&xas, flags);
 		xas_load(&xas);
-		ret = folio_test_set_writeback(folio);
-		if (!ret) {
-			bool on_wblist;
+		folio_test_set_writeback(folio);
 
-			on_wblist = mapping_tagged(mapping,
-						PAGECACHE_TAG_WRITEBACK);
+		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
 
-			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
-			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-				struct bdi_writeback *wb = inode_to_wb(inode);
+		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+			struct bdi_writeback *wb = inode_to_wb(inode);
 
-				wb_stat_mod(wb, WB_WRITEBACK, nr);
-				if (!on_wblist)
-					wb_inode_writeback_start(wb);
-			}
-
-			/*
-			 * We can come through here when swapping
-			 * anonymous folios, so we don't necessarily
-			 * have an inode to track for sync.
-			 */
-			if (mapping->host && !on_wblist)
-				sb_mark_inode_writeback(mapping->host);
+			wb_stat_mod(wb, WB_WRITEBACK, nr);
+			if (!on_wblist)
+				wb_inode_writeback_start(wb);
 		}
+
+		/*
+		 * We can come through here when swapping anonymous
+		 * folios, so we don't necessarily have an inode to
+		 * track for sync.
+		 */
+		if (mapping->host && !on_wblist)
+			sb_mark_inode_writeback(mapping->host);
 		if (!folio_test_dirty(folio))
 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		if (!keep_write)
 			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
 		xas_unlock_irqrestore(&xas, flags);
 	} else {
-		ret = folio_test_set_writeback(folio);
+		folio_test_set_writeback(folio);
 	}
-	if (!ret) {
-		lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
-	}
+
+	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
 	folio_memcg_unlock(folio);
+
 	access_ret = arch_make_folio_accessible(folio);
 	/*
 	 * If writeback has been triggered on a page that cannot be made
 	 * accessible, it is too late to recover here.
 	 */
 	VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
-	return ret;
 }
 EXPORT_SYMBOL(__folio_start_writeback);