/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/sched.h>

struct vm_area_struct;
struct mempolicy;

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */
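
/*
 * Worked examples for the table above (editor's illustration, not part of the
 * original header); gfp_zone() below performs exactly this lookup:
 *
 *	gfp_zone(GFP_KERNEL)		-> ZONE_NORMAL		(zone bits 0x0)
 *	gfp_zone(GFP_DMA)		-> OPT_ZONE_DMA		(zone bits 0x1)
 *	gfp_zone(GFP_HIGHUSER)		-> OPT_ZONE_HIGHMEM	(zone bits 0x2)
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)	-> ZONE_MOVABLE		(zone bits 0xa)
 */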

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)   \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * gfp flag masking for nested internal allocations.
 *
 * For code that needs to do allocations inside the public allocation API (e.g.
 * memory allocation tracking code), the allocations need to obey the caller's
 * allocation context constraints to prevent allocation context mismatches
 * (e.g. GFP_KERNEL allocations in GFP_NOFS contexts) from leading to
 * deadlocks.
 *
 * It is also assumed that these nested allocations are for internal kernel
 * object storage purposes only and are not going to be used for DMA, etc.
 * Hence we strip out all the zone information and leave just the context
 * information intact.
 *
 * Further, internal allocations must fail before the higher level allocation
 * can fail, so we must make them fail faster and fail silently. We also don't
 * want them to deplete emergency reserves. Hence callers making nested
 * allocations must be prepared for them to fail.
 */
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}
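
/*
 * Illustrative sketch (added by this edit, not part of the original header):
 * allocation-tracking code running inside the allocator on behalf of a caller
 * that passed @caller_gfp could derive the flags for its own metadata
 * allocation as below. kmalloc() is shown for illustration only (it is
 * declared in <linux/slab.h>, not here), and the caller must tolerate the
 * nested allocation failing:
 *
 *	meta = kmalloc(sizeof(*meta), gfp_nested_mask(caller_gfp));
 *	if (!meta)
 *		goto skip_tracking;
 */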

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
				  nodemask_t *nodemask);
#define __alloc_pages(...)	alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))

struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
				   nodemask_t *nodemask);
#define __folio_alloc(...)	alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
				      nodemask_t *nodemask, int nr_pages,
				      struct page **page_array);
#define __alloc_pages_bulk(...)	alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
				unsigned long nr_pages,
				struct page **page_array);
#define alloc_pages_bulk_mempolicy(...)					\
	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))

/* Bulk allocate order-0 pages */
#define alloc_pages_bulk(_gfp, _nr_pages, _page_array)			\
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)

static inline unsigned long
alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
			     struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}

#define alloc_pages_bulk_node(...)					\
	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))

static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);

	if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
		return;

	if (node_online(this_node))
		return;

	pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
	dump_stack();
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp_mask);

	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}

#define __alloc_pages_node(...)	alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
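
/*
 * Illustrative sketch (added by this edit, not part of the original header):
 * allocating and then freeing a single page from a node that the caller
 * already knows to be valid and online; @nid is a hypothetical caller-supplied
 * node id, and __free_pages() is declared further down in this header:
 *
 *	struct page *page = __alloc_pages_node(nid, GFP_KERNEL, 0);
 *
 *	if (page)
 *		__free_pages(page, 0);
 */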

static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp);

	return __folio_alloc_noprof(gfp, order, nid, NULL);
}

#define __folio_alloc_node(...)	alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node_noprof(nid, gfp_mask, order);
}

#define alloc_pages_node(...)	alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))

#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid)
{
	return folio_alloc_noprof(gfp, order);
}
#define vma_alloc_folio_noprof(gfp, order, vma, addr)			\
	folio_alloc_noprof(gfp, order)
#endif

#define alloc_pages(...)	alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define folio_alloc(...)	alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...)	alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...)	alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);

	return &folio->page;
}
#define alloc_page_vma(...)	alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))

extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...)	alloc_hooks(get_free_pages_noprof(__VA_ARGS__))

extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...)	alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))

void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...)	alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))

void free_pages_exact(void *virt, size_t size);

__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...)					\
	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
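
/*
 * Illustrative sketch (added by this edit, not part of the original header):
 * alloc_pages_exact() returns a kernel virtual address good for at least the
 * requested size and is paired with free_pages_exact(), not free_pages():
 *
 *	void *buf = alloc_pages_exact(8192, GFP_KERNEL | __GFP_ZERO);
 *
 *	if (buf)
 *		free_pages_exact(buf, 8192);
 */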

#define __get_free_page(gfp_mask)					\
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order)				\
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init_cpuhp(void);
int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

static inline bool gfp_has_io_fs(gfp_t gfp)
{
	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}

/*
 * Check if the gfp flags allow compaction - GFP_NOIO is a really
 * tricky context because the migration might require IO.
 */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
				     unsigned migratetype, gfp_t gfp_mask);
#define alloc_contig_range(...)	alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))

extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
					      int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...)	alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))

#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

#ifdef CONFIG_CONTIG_ALLOC
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
							int nid, nodemask_t *node)
{
	struct page *page;

	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
		return NULL;

	page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);

	return page ? page_folio(page) : NULL;
}
#else
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
							int nid, nodemask_t *node)
{
	return NULL;
}
#endif
/* This should be paired with folio_put() rather than free_contig_range(). */
#define folio_alloc_gigantic(...)	alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))

#endif /* __LINUX_GFP_H */