/* include/linux/swap.h, as of v4.13 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on 32-bit-pgoff_t architectures. And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
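
/*
 * Illustrative example (not part of the original header): with the 5/27
 * split described above, the helpers in <linux/swapops.h> pack a type and
 * an offset into one entry and take it apart again, roughly like so:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned int t = swp_type(entry);	(the top 5 bits)
 *	pgoff_t off = swp_offset(entry);	(the remaining offset bits)
 */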

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
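
/*
 * Illustrative sketch (not in the original header): a tool probing for
 * the newer swap-area format only has to compare the last ten bytes of
 * the first page against "SWAPSPACE2". example_swap_header_ok() is a
 * hypothetical helper, not a kernel function.
 */
static inline int example_swap_header_ok(const union swap_header *hdr)
{
	const char sig[10] = "SWAPSPACE2";
	int i;

	for (i = 0; i < 10; i++)
		if (hdr->magic.magic[i] != sig[i])
			return 0;
	return 1;
}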

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
					/* add others here before... */
	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
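
/*
 * Illustrative sketch (not in the original header): extracting the bare
 * duplication count from a first-level swap_map byte. This mirrors the
 * swap_count() helper in mm/swapfile.c; a result with COUNT_CONTINUED
 * set means the remainder of the count lives in a continuation page.
 */
static inline unsigned char example_swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* drop the "has swapcache" flag bit */
}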

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements corresponding to the
				 * swap cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
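
/*
 * Illustrative sketch (not in the original header): how the data/flags
 * encoding above reads back. example_cluster_count() is a hypothetical
 * helper; the real accessors (cluster_count() and friends) live in
 * mm/swapfile.c.
 */
static inline unsigned int example_cluster_count(struct swap_cluster_info *ci)
{
	/* For a free cluster, data is the next free cluster index instead. */
	return (ci->flags & CLUSTER_FLAG_FREE) ? 0 : ci->data;
}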

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags requires
					 * holding both this lock and swap_lock;
					 * when both are needed, take swap_lock
					 * first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
void workingset_update_node(struct radix_tree_node *node, void *private);

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);
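
/*
 * Illustrative sketch (not in the original header): for a plain block
 * device, setup covers the whole area with a single extent mapping swap
 * page p onto disk block p, roughly what setup_swap_extents() does in
 * mm/swapfile.c. example_single_extent() is a hypothetical helper.
 */
static inline int example_single_extent(struct swap_info_struct *sis)
{
	return add_swap_extent(sis, 0, sis->max, 0);
}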

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
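
/*
 * Illustrative example (not in the original header): with 4K pages, one
 * address space spans 2^14 * 4 KB = 64 MB of swap, so the entry at
 * offset 0x5000 of swap type 1 lands in swapper_spaces[1][1], because
 * 0x5000 >> 14 == 1.
 */
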
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
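
/*
 * Illustrative example (not in the original header): nr_swap_pages counts
 * the free swap entries left, so with total_swap_pages == 1000 and 400
 * entries still free, 400 * 2 < 1000 and vm_swap_full() reports true,
 * i.e. more than half of swap is in use.
 */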

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page() and release_pages() undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_mapcount) \
	(page_trans_huge_mapcount(page, total_mapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root cgroup? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */