/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                          SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                          SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}
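
/*
 * Example (illustrative, not from this header): reclaim paths commonly
 * branch on this helper to attribute scan work to kswapd vs. direct
 * reclaim, roughly:
 *
 *	if (current_is_kswapd())
 *		count_vm_event(PGSCAN_KSWAPD);	// background reclaim
 *	else
 *		count_vm_event(PGSCAN_DIRECT);	// allocating task reclaims
 */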

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type limits the swap offset to 27 bits on 32-bit-pgoff_t
 * architectures, assuming the architecture also packs the type/offset
 * into the pte as 5/27.
 */
#define MAX_SWAPFILES_SHIFT 5
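
/*
 * Worked example (assuming 4 KiB pages): a 5/27 type/offset split allows
 * 2^27 page-sized slots per swap type, i.e. 2^27 * 4 KiB = 512 GiB of
 * swap addressable per type when pgoff_t is 32 bits.
 */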

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

#define SWP_SWAPIN_ERROR_NUM 1
#define SWP_SWAPIN_ERROR (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                          SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
                          SWP_PTE_MARKER_NUM)
/*
 * PTE markers are used to persist information onto PTEs that are mapped with
 * file-backed memory. As its name "PTE" hints, it should only be applied to
 * the leaves of pgtables.
 */
#ifdef CONFIG_PTE_MARKER
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                        SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
#else
#define SWP_PTE_MARKER_NUM 0
#endif

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (part of) an anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
        SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
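
/*
 * Worked example: with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION,
 * CONFIG_MEMORY_FAILURE and CONFIG_PTE_MARKER all enabled,
 * MAX_SWAPFILES = 32 - 4 - 3 - 1 - 1 - 1 = 22, and types 22..31 are
 * reserved for the special entries defined above.
 */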

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char bootbits[1024]; /* Space for disklabel etc. */
                __u32 version;
                __u32 last_page;
                __u32 nr_badpages;
                unsigned char sws_uuid[16];
                unsigned char sws_volume[16];
                __u32 padding[117];
                __u32 badpages[1];
        } info;
};
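
/*
 * Example (illustrative sketch): a verifier for the on-disk format can
 * check the signature that sits in the last ten bytes of the first page;
 * "page" here is a hypothetical page holding the swap header:
 *
 *	union swap_header *hdr = page_address(page);
 *	bool v2 = !memcmp(hdr->magic.magic, "SWAPSPACE2", 10);
 */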

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
#ifdef CONFIG_LRU_GEN
        /* per-thread mm walk data */
        struct lru_gen_mm_walk *mm_walk;
#endif
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))
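
/*
 * Worked example (assuming 4 KiB pages): magic.magic starts at
 * 4096 - 10 = 4086 and info.badpages at 1024 + 3*4 + 16 + 16 + 117*4 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 entries.
 */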

enum {
        SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
        SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
        SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
        SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
        SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
        SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
        SWP_BLKDEV = (1 << 6), /* it's a block device */
        SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
        SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */
        SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11), /* don't overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
        /* add others here before... */
        SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX 0x3e /* Max count */
#define SWAP_MAP_BAD 0x3f /* Note page is bad */
#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX 0x7f /* Max count */

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space, SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock; /*
                          * Protect swap_cluster_info fields
                          * and the swap_info_struct->swap_map
                          * elements corresponding to the swap
                          * cluster
                          */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
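
/*
 * Illustrative note: since data is a 24-bit field, both the next-free-cluster
 * index and the usage count are limited to 2^24 - 1. A reader of the dual-use
 * field might look like:
 *
 *	if (info->flags & CLUSTER_FLAG_FREE)
 *		next = info->data;	// index of the next free cluster
 *	else
 *		count = info->data;	// pages in use in this cluster
 */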

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};
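
/*
 * Conceptual sketch (illustrative only, ignoring the empty-list and
 * tail-update cases handled in mm/swapfile.c): allocating a free cluster
 * pops the head of the list threaded through the data fields:
 *
 *	idx = si->free_clusters.head.data;	// first free cluster
 *	si->free_clusters.head.data = si->cluster_info[idx].data;
 */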

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        struct percpu_ref users; /* indicate and keep swap device valid. */
        unsigned long flags; /* SWP_USED etc: see above */
        signed short prio; /* swap priority of this type */
        struct plist_node list; /* entry in swap_active_head */
        signed char type; /* strange name for an index */
        unsigned int max; /* extent of the swap_map */
        unsigned char *swap_map; /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit; /* index of first free in swap_map */
        unsigned int highest_bit; /* index of last free in swap_map */
        unsigned int pages; /* total of usable pages of swap */
        unsigned int inuse_pages; /* number of those currently in use */
        unsigned int cluster_next; /* likely index for next allocation */
        unsigned int cluster_nr; /* countdown to next cluster search */
        unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct block_device *bdev; /* swap device or bdev of swap file */
        struct file *swap_file; /* seldom referenced */
        unsigned int old_block_size; /* seldom referenced */
        struct completion comp; /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages; /* frontswap pages in-use counter */
#endif
        spinlock_t lock; /*
                          * protect map scan related fields like
                          * swap_map, lowest_bit, highest_bit,
                          * inuse_pages, cluster_next,
                          * cluster_nr, lowest_alloc,
                          * highest_alloc, free/discard cluster
                          * list. Other fields are only changed
                          * at swapon/swapoff, so are protected
                          * by swap_lock. Changing flags needs
                          * both this lock and swap_lock; when
                          * both are needed, take swap_lock
                          * first.
                          */
        spinlock_t cont_lock; /*
                               * protect swap count continuation page
                               * list.
                               */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[]; /*
                                          * entries in swap_avail_heads, one
                                          * entry per node.
                                          * Must be last, as the length of the
                                          * array is nr_node_ids, which is not
                                          * a fixed value, so it has to be
                                          * allocated dynamically.
                                          * And it has to be an array so that
                                          * plist_for_each_* can work.
                                          */
};
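
/*
 * Example (illustrative sketch): the lock ordering documented above means a
 * flags update takes both locks, swap_lock first; swap_lock is the lock
 * defined in mm/swapfile.c:
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	si->flags |= SWP_WRITEOK;
 *	spin_unlock(&si->lock);
 *	spin_unlock(&swap_lock);
 */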

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING 5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING 3
#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
        swp_entry_t entry = { .val = page_private(&folio->page) };
        return entry;
}

static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
{
        folio->private = (void *)entry.val;
}
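
/*
 * Example (illustrative, not part of this header): the pair above
 * round-trips a swap entry through folio->private; swp_type() and
 * swp_offset() come from linux/swapops.h:
 *
 *	folio_set_swap_entry(folio, entry);
 *	entry = folio_swap_entry(folio);
 *	type = swp_type(entry);		// which swap device
 *	off = swp_offset(entry);	// slot within that device
 */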

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do { \
        if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
                xas_set_update(xas, workingset_update_node); \
                xas_set_lru(xas, &shadow_nodes); \
        } \
} while (0)
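
/*
 * Example (illustrative): callers manipulating a mapping's xarray arm the
 * workingset node tracking before storing, e.g.:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *	mapping_set_update(&xas, mapping);
 *	// ... xas_store(&xas, folio) under the xarray lock ...
 */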

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
void lru_note_cost_folio(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
        return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
        atomic_dec(&lru_disable_count);
}
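
/*
 * Example (illustrative): lru_cache_disable() (declared below) pairs with
 * lru_cache_enable() above to bracket code that must not race with the
 * per-CPU LRU batching caches, as page migration does:
 *
 *	lru_cache_disable();
 *	// ... isolate and migrate the pages ...
 *	lru_cache_enable();
 */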

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
        /* Is any node_reclaim_mode bit set? */
        return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}
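
/*
 * Illustrative note: RECLAIM_ZONE (1), RECLAIM_WRITE (2) and RECLAIM_UNMAP
 * (4) come from uapi/linux/mempolicy.h, so e.g.
 * "echo 1 > /proc/sys/vm/zone_reclaim_mode" enables plain node reclaim and
 * makes node_reclaim_enabled() return true.
 */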

void check_move_unevictable_folios(struct folio_batch *fbatch);
void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
        return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
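
/*
 * Worked example: with total_swap_pages = 1000 and 400 free entries left
 * in nr_swap_pages, 400 * 2 < 1000, so vm_swap_full() reports true and
 * reclaim starts dropping swapcache copies more eagerly.
 */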

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
        percpu_ref_put(&si->users);
}
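
/*
 * Example (illustrative): get_swap_device() takes a reference on the
 * percpu_ref above so the device cannot be swapped off while in use;
 * callers pair it with put_swap_device():
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *	if (si) {
 *		// ... safely dereference si / the swap cache ...
 *		put_swap_device(si);
 *	}
 */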

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
        return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
#define vm_swap_full() 0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
        return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
                                  unsigned long start_page,
                                  unsigned long nr_pages, sector_t start_block)
{
        return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
        if (mem_cgroup_disabled())
                return;
        __cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
        cgroup_throttle_swaprate(&folio->page, gfp);
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                swp_entry_t entry)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */