/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.txt. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by the CPU, so that
 * we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

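/*
 * Worked example: with CONFIG_MEMORY_FAILURE, CONFIG_MIGRATION and
 * CONFIG_DEVICE_PRIVATE all enabled, MAX_SWAPFILES = (1 << 5) - 1 - 2 - 2
 * = 27, and the special (non-swapfile) types occupy the top of the 5-bit
 * type space:
 *
 *	SWP_HWPOISON		= 27
 *	SWP_MIGRATION_READ	= 28
 *	SWP_MIGRATION_WRITE	= 29
 *	SWP_DEVICE_WRITE	= 30
 *	SWP_DEVICE_READ		= 31
 *
 * The helpers that pack and unpack a (type, offset) pair into a swp_entry_t,
 * i.e. swp_entry(), swp_type() and swp_offset(), live in <linux/swapops.h>.
 */
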
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))

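/*
 * Worked example for 4 KiB pages: __swapoffset(magic.magic) is
 * PAGE_SIZE - 10 = 4086 and __swapoffset(info.badpages) is
 * 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so MAX_SWAP_BADPAGES =
 * (4086 - 1536) / 4 = 637 bad-page slots in the header. The limit scales
 * with PAGE_SIZE on architectures using larger pages.
 */
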
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
					/* add others here before... */
	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */

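/*
 * How one swap_map byte is read (a sketch of the conventions above, in the
 * style of the swap_count() helper in mm/swapfile.c):
 *
 *	unsigned char ent = si->swap_map[offset];
 *	bool has_cache = ent & SWAP_HAS_CACHE;	      // slot has a swapcache page
 *	unsigned char count = ent & ~SWAP_HAS_CACHE;  // reference count proper
 *
 * count == SWAP_MAP_BAD marks an unusable slot, count == SWAP_MAP_SHMEM
 * marks a slot owned by shmem/tmpfs, and a count carrying COUNT_CONTINUED
 * has overflowed SWAP_MAP_MAX: the remainder is kept in continuation pages
 * set up by add_swap_count_continuation(), SWAP_CONT_MAX per byte.
 */
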
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements corresponding to this swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

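/*
 * Sketch of how these fields are consumed (mm/swapfile.c provides the real
 * accessors, e.g. cluster_is_free() and cluster_count(); shown here only to
 * illustrate the encoding):
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next_free = ci->data;	// index of the next free cluster
 *	else
 *		in_use = ci->data;	// pages allocated in this cluster
 *
 * CLUSTER_FLAG_NEXT_NULL terminates the free list when there is no next
 * cluster to point at.
 */
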
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_lists[MAX_NUMNODES]; /* entry in swap_avail_heads */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags requires
					 * holding both this lock and swap_lock;
					 * when both are needed, take swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
void workingset_update_node(struct radix_tree_node *node, void *private);

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])

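/*
 * Example: with 4 KiB pages, SWAP_ADDRESS_SPACE_PAGES = 1 << 14 = 16384
 * slots, i.e. one address space per 64 MiB of swap. A 1 GiB swap device is
 * therefore covered by 16 struct address_space instances, and an entry with
 * swp_offset(entry) == 100000 maps to swapper_spaces[type][6], since
 * 100000 >> 14 == 6.
 */
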
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

extern struct page *swap_readahead_detect(struct vm_fault *vmf,
					  struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
					   struct vm_fault *vmf,
					   struct vma_swap_readahead *swap_ra);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

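/*
 * Example: nr_swap_pages counts the remaining free swap slots, so with
 * total_swap_pages == 1000 and 400 slots still free, 400 * 2 < 1000 and
 * vm_swap_full() reports true, i.e. more than half of swap is in use.
 */
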
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline bool swap_use_vma_readahead(void)
{
	return false;
}

static inline struct page *swap_readahead_detect(
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline struct page *do_swap_page_readahead(
	swp_entry_t fentry, gfp_t gfp_mask,
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}

#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */