Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

slab: Add SL_pfmemalloc flag

Give slab its own name for this flag. Move the implementation from
slab.h to slub.c since it's only used inside slub.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20250611155916.2579160-5-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

authored by

Matthew Wilcox (Oracle) and committed by
Vlastimil Babka
3df29914 c5c44900

+21 -24
mm/slab.h
··· 167 167 */ 168 168 #define slab_page(s) folio_page(slab_folio(s), 0) 169 169 170 - /* 171 - * If network-based swap is enabled, sl*b must keep track of whether pages 172 - * were allocated from pfmemalloc reserves. 173 - */ 174 - static inline bool slab_test_pfmemalloc(const struct slab *slab) 175 - { 176 - return folio_test_active(slab_folio(slab)); 177 - } 178 - 179 - static inline void slab_set_pfmemalloc(struct slab *slab) 180 - { 181 - folio_set_active(slab_folio(slab)); 182 - } 183 - 184 - static inline void slab_clear_pfmemalloc(struct slab *slab) 185 - { 186 - folio_clear_active(slab_folio(slab)); 187 - } 188 - 189 - static inline void __slab_clear_pfmemalloc(struct slab *slab) 190 - { 191 - __folio_clear_active(slab_folio(slab)); 192 - } 193 - 194 170 static inline void *slab_address(const struct slab *slab) 195 171 { 196 172 return folio_address(slab_folio(slab));
+21
mm/slub.c
··· 187 187 * enum slab_flags - How the slab flags bits are used. 188 188 * @SL_locked: Is locked with slab_lock() 189 189 * @SL_partial: On the per-node partial list 190 + * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves 190 191 * 191 192 * The slab flags share space with the page flags but some bits have 192 193 * different interpretations. The high bits are used for information ··· 196 195 enum slab_flags { 197 196 SL_locked = PG_locked, 198 197 SL_partial = PG_workingset, /* Historical reasons for this bit */ 198 + SL_pfmemalloc = PG_active, /* Historical reasons for this bit */ 199 199 }; 200 200 201 201 /* ··· 649 647 return 0; 650 648 } 651 649 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 650 + 651 + /* 652 + * If network-based swap is enabled, slub must keep track of whether memory 653 + * was allocated from pfmemalloc reserves. 654 + */ 655 + static inline bool slab_test_pfmemalloc(const struct slab *slab) 656 + { 657 + return test_bit(SL_pfmemalloc, &slab->flags); 658 + } 659 + 660 + static inline void slab_set_pfmemalloc(struct slab *slab) 661 + { 662 + set_bit(SL_pfmemalloc, &slab->flags); 663 + } 664 + 665 + static inline void __slab_clear_pfmemalloc(struct slab *slab) 666 + { 667 + __clear_bit(SL_pfmemalloc, &slab->flags); 668 + } 652 669 653 670 /* 654 671 * Per slab locking using the pagelock