/* include/linux/gfp.h, at v2.6.15 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/config.h>

struct vm_area_struct;

/*
 * GFP bitmasks.
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low three bits) */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#ifdef CONFIG_DMA_IS_DMA32
#define __GFP_DMA32	((__force gfp_t)0x01)	/* ZONE_DMA is ZONE_DMA32 */
#elif BITS_PER_LONG < 64
#define __GFP_DMA32	((__force gfp_t)0x00)	/* ZONE_NORMAL is ZONE_DMA32 */
#else
#define __GFP_DMA32	((__force gfp_t)0x04)	/* Has own ZONE_DMA32 */
#endif

/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* Retry the allocation.  Might fail */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
#define __GFP_NORETRY	((__force gfp_t)0x1000u)	/* Do not retry.  Might fail */
#define __GFP_NO_GROW	((__force gfp_t)0x2000u)	/* Slab internal usage */
#define __GFP_COMP	((__force gfp_t)0x4000u)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u)	/* Don't use emergency reserves */
#define __GFP_HARDWALL	 ((__force gfp_t)0x20000u)	/* Enforce hardwall cpuset memory allocs */

#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* if you forget to add the bitmask here kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
			__GFP_NOMEMALLOC|__GFP_HARDWALL)

#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

static inline int gfp_zone(gfp_t gfp)
{
	int zone = GFP_ZONEMASK & (__force int) gfp;
	BUG_ON(zone >= GFP_ZONETYPES);
	return zone;
}
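/*
 * Example (a minimal sketch, not part of the original header): given the
 * flag values defined above, gfp_zone() just keeps the low zone-modifier
 * bits of the mask, so the resulting zonelist index can be read off the
 * zone flags directly:
 *
 *	gfp_zone(GFP_KERNEL)			== 0	(no zone bits: ZONE_NORMAL list)
 *	gfp_zone(GFP_KERNEL | __GFP_DMA)	== 1	(ZONE_DMA list)
 *	gfp_zone(GFP_HIGHUSER)			== 2	(__GFP_HIGHMEM set)
 *
 * Callers never use this index by hand; alloc_pages_node() below adds it
 * to the node's node_zonelists array to pick the right fallback list.
 */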
/*
 * There is only one page-allocator function, and two main namespaces to
 * it.  The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages, the *get*page*() variants return virtual kernel
 * addresses to the allocated page(s).
 */

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif

extern struct page *
FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
}

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
#ifdef CONFIG_NUMA
void drain_remote_pages(void);
#else
static inline void drain_remote_pages(void) { }
#endif

#endif /* __LINUX_GFP_H */
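/*
 * Usage sketch (illustrative only, not part of the original header): how a
 * caller in process context would typically drive this interface.  With
 * GFP_KERNEL the allocator may sleep (__GFP_WAIT is set), and every call
 * can still fail, so the NULL/0 checks are mandatory:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
 *	if (!buf)
 *		return -ENOMEM;
 *	...use the two zeroed, physically contiguous pages at buf...
 *	free_pages(buf, 1);	(the order must match the allocation)
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	if (!page)
 *		return -ENOMEM;
 *	...page may live in highmem, so kmap() it before touching it...
 *	__free_page(page);
 *
 * In atomic context (interrupt handlers, sections holding a spinlock) use
 * GFP_ATOMIC instead: it carries no __GFP_WAIT bit, so the allocator will
 * not sleep, at the price of a higher chance of failure.
 */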