#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_ACCOUNT		0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_WRITE		0x800000u
#define ___GFP_KSWAPD_RECLAIM	0x1000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x2000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)
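/*
 * Illustrative sketch, not part of the upstream header: __GFP_RECLAIMABLE
 * is normally set indirectly, by creating a slab cache with
 * SLAB_RECLAIM_ACCOUNT; the slab allocator then applies __GFP_RECLAIMABLE
 * to the pages backing that cache. "my_cache" and struct my_object are
 * made-up names; kmem_cache_create() is declared in <linux/slab.h>:
 *
 *	struct kmem_cache *cache;
 *
 *	cache = kmem_cache_create("my_cache", sizeof(struct my_object),
 *				  0, SLAB_RECLAIM_ACCOUNT, NULL);
 */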
/*
 * Watermark modifiers -- control access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. process exiting or swapping. Users should either be
 * the MM or be coordinating closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
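/*
 * Illustrative sketch, not part of the upstream header: an interrupt
 * handler cannot sleep, so it allocates with GFP_ATOMIC (defined below),
 * which combines __GFP_ATOMIC, __GFP_HIGH and __GFP_KSWAPD_RECLAIM, and
 * it must tolerate failure since the reserves can be exhausted.
 * my_irq_handler is a made-up name; irqreturn_t and IRQ_HANDLED come
 * from <linux/interrupt.h>:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct page *page = alloc_page(GFP_ATOMIC);
 *
 *		if (!page)
 *			return IRQ_HANDLED;
 *		...
 *		return IRQ_HANDLED;
 *	}
 */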
/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. New users should be evaluated carefully
 * (and the flag should be used only when there is no reasonable failure
 * policy), but it is definitely preferable to use the flag rather than
 * open-code an endless loop around the allocator.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 * return NULL when direct reclaim and memory compaction have failed to allow
 * the allocation to succeed. The OOM killer is not called with the current
 * implementation.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
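/*
 * Illustrative sketch, not part of the upstream header: a caller with a
 * cheap fallback clears the retry behaviour with __GFP_NORETRY, suppresses
 * the failure report with __GFP_NOWARN, and falls back to a smaller
 * request on failure. alloc_pages() and GFP_KERNEL are declared/defined
 * later in this header:
 *
 *	struct page *page;
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, 2);
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, 0);
 */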
/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used in
 * the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP requests compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 * Please try to avoid using this flag directly and instead use
 * memalloc_noio_{save,restore} to mark the whole scope which cannot
 * perform any IO with a short explanation why. All allocation requests
 * will inherit GFP_NOIO implicitly.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 * Please try to avoid using this flag directly and instead use
 * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
 * recurse into the FS layer with a short explanation why. All allocation
 * requests will inherit GFP_NOFS implicitly.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 * The flag indicates that the caller requires that the lowest zone be
 * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 * address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
 * compound allocations that will generally fail quickly if memory is not
 * available and will not wake kswapd/kcompactd on failure. The _LIGHT
 * version does not attempt reclaim/compaction at all and is by default used
 * in the page fault path, while the non-light version is used by khugepaged.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
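/*
 * Illustrative sketch, not part of the upstream header: the scope API
 * recommended above for GFP_NOIO. Inside the marked section every
 * allocation behaves as if GFP_NOIO had been passed, so plain GFP_KERNEL
 * can be used. memalloc_noio_save/restore are declared in
 * <linux/sched/mm.h>, kmalloc() in <linux/slab.h>; ptr and size are
 * made-up names:
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	memalloc_noio_restore(noio_flags);
 */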
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-sized bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)   \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
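/*
 * Worked example (illustrative, not part of the upstream header): for
 * GFP_HIGHUSER_MOVABLE the low four bits are __GFP_HIGHMEM|__GFP_MOVABLE,
 * i.e. bit == 0xa. GFP_ZONE_TABLE stores ZONE_MOVABLE at entry 0xa (the
 * ___GFP_MOVABLE | ___GFP_HIGHMEM term above), so
 *
 *	gfp_zone(GFP_HIGHUSER_MOVABLE) == ZONE_MOVABLE
 *
 * which agrees with the 0xa row of the table in the comment above.
 */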
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages, the *get*page*() variants return virtual kernel
 * addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise the node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}
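/*
 * Illustrative sketch, not part of the upstream header: allocate a zeroed
 * order-0 page near the current CPU and free it again. __free_pages() is
 * declared further down in this header:
 *
 *	struct page *page;
 *
 *	page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL | __GFP_ZERO, 0);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 0);
 */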
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc(struct page_frag_cache *nc,
			     unsigned int fragsz, gfp_t gfp_mask);
extern void page_frag_free(void *addr);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
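/*
 * Illustrative sketch, not part of the upstream header: the address-based
 * interface pairs __get_free_page()/free_page(), in contrast to the
 * struct page based alloc_page()/__free_page(). Because the return value
 * is a kernel virtual address, this interface cannot be used with
 * __GFP_HIGHMEM:
 *
 *	unsigned long addr;
 *
 *	addr = __get_free_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */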
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */