#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
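/*
 * Worked example (illustrative only, assuming both CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE are enabled): 1 << MAX_SWAPFILES_SHIFT gives 32
 * encodable swap types; two of them are reserved for migration entries and
 * one for hwpoison entries, so MAX_SWAPFILES works out to 32 - 2 - 1 = 29.
 * The special types then sit just above the usable range: SWP_HWPOISON == 29,
 * SWP_MIGRATION_READ == 30 and SWP_MIGRATION_WRITE == 31.
 */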
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
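/*
 * Illustrative expansion (assuming 4 KiB pages and a 4-byte int):
 * __swapoffset(magic.magic) is PAGE_SIZE - 10 = 4086 and
 * __swapoffset(info.badpages) is 1024 + 3 * 4 + 16 + 16 + 117 * 4 = 1536,
 * so the header has room for (4086 - 1536) / 4 = 637 bad-page entries.
 */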
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
					/* add others here before... */
	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
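/*
 * Reading these values together (an informal sketch, not normative): the low
 * bits of a swap_map byte carry the duplication count, SWAP_HAS_CACHE is
 * or'ed in while the slot also has a swapcache page, and COUNT_CONTINUED
 * says the real count continues in a continuation page.  For example, a
 * value of 0x41 would describe a slot with count 1 that is also in the swap
 * cache, while 0xbf (SWAP_MAP_SHMEM) marks a slot owned by shmem/tmpfs.
 */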
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space that is SWAPFILE_CLUSTER pages long and naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores next cluster if the cluster is free or cluster usage
 * counter otherwise. The flags field determines if a cluster is free. This is
 * protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next;		/* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags needs to
					 * hold this lock and swap_lock. If
					 * both locks need to be held, hold
					 * swap_lock first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
};
/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
void workingset_update_node(struct radix_tree_node *node, void *private);

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_mapcount) \
	(page_trans_huge_mapcount(page, total_mapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */