/*
 * linux/mm/slab.h
 * Written by Mark Hemment, 1996.
 * (markhe@nextd.demon.co.uk)
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#if	defined(__KERNEL__)

typedef struct kmem_cache kmem_cache_t;

#include	<linux/config.h>	/* kmalloc_sizes.h needs CONFIG_ options */
#include	<linux/gfp.h>
#include	<linux/init.h>
#include	<linux/types.h>
#include	<asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include	<asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */

/* flags for kmem_cache_alloc() */
#define	SLAB_NOFS		GFP_NOFS
#define	SLAB_NOIO		GFP_NOIO
#define	SLAB_ATOMIC		GFP_ATOMIC
#define	SLAB_USER		GFP_USER
#define	SLAB_KERNEL		GFP_KERNEL
#define	SLAB_DMA		GFP_DMA

#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK

#define	SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */

/* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built with
 * SLAB_DEBUG_SUPPORT.
 */
#define	SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
#define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
#define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
#define	SLAB_POISON		0x00000800UL	/* Poison objects */
#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on h/w cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
#define SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
						   what is reclaimable later */
#define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */

/* flags passed to a constructor func */
#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then destructor */
#define SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */

#ifndef CONFIG_SLOB

/* prototypes */
extern void __init kmem_cache_init(void);

extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
				       void (*)(void *, kmem_cache_t *, unsigned long),
				       void (*)(void *, kmem_cache_t *, unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern const char *kmem_cache_name(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
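/*
 * Illustrative sketch, not part of the original header: a typical life
 * cycle for a private object cache built on the prototypes above.  The
 * struct name "my_object" and the helpers around it are hypothetical;
 * the #if 0 guard keeps this example out of real builds.
 */
#if 0
struct my_object {
	int	id;
	char	name[16];
};

static kmem_cache_t *my_object_cachep;

static int my_object_cache_init(void)
{
	/* One cache per object type; SLAB_HWCACHE_ALIGN is optional. */
	my_object_cachep = kmem_cache_create("my_object",
					     sizeof(struct my_object), 0,
					     SLAB_HWCACHE_ALIGN, NULL, NULL);
	return my_object_cachep ? 0 : -ENOMEM;
}

static struct my_object *my_object_new(gfp_t flags)
{
	/* GFP_KERNEL may sleep; pass GFP_ATOMIC from atomic context. */
	return kmem_cache_alloc(my_object_cachep, flags);
}

static void my_object_free(struct my_object *obj)
{
	kmem_cache_free(my_object_cachep, obj);
}

static void my_object_cache_exit(void)
{
	kmem_cache_destroy(my_object_cachep);
}
#endif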
/* Size description struct for general caches.
 */
struct cache_sizes {
	size_t		 cs_size;
	kmem_cache_t	*cs_cachep;
	kmem_cache_t	*cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];

extern void *__kmalloc(size_t, gfp_t);
#ifndef CONFIG_DEBUG_SLAB
#define ____kmalloc(size, flags) __kmalloc(size, flags)
#else
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define ____kmalloc(size, flags) \
    __kmalloc_track_caller(size, flags, __builtin_return_address(0))
#endif

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		return kmem_cache_alloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}

extern void *__kzalloc(size_t, gfp_t);

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
		return kmem_cache_zalloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	return kzalloc(n * size, flags);
}

extern void kfree(const void *);
extern unsigned int ksize(const void *);
extern int slab_is_available(void);
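/*
 * Illustrative sketch, not part of the original header: how callers use
 * the kmalloc() family declared above.  Constant sizes are mapped to a
 * malloc_sizes[] general cache at compile time by the inline above;
 * variable sizes fall through to __kmalloc().  Names below are
 * hypothetical and the #if 0 guard keeps the example out of real builds.
 */
#if 0
static int fill_buffers(size_t nr, gfp_t flags)
{
	char *buf;
	unsigned int *counters;

	buf = kmalloc(128, flags);	/* constant size: general cache chosen at compile time */
	if (!buf)
		return -ENOMEM;

	/* zeroed array; kcalloc() refuses multiplications that would overflow */
	counters = kcalloc(nr, sizeof(*counters), flags);
	if (!counters) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... use buf and counters ... */

	kfree(counters);
	kfree(buf);
	return 0;
}
#endif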
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
extern void *kmalloc_node(size_t size, gfp_t flags, int node);
#else
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}
#endif

extern int FASTCALL(kmem_cache_reap(int));
extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));

#else /* CONFIG_SLOB */

/* SLOB allocator routines */

void kmem_cache_init(void);
struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
	unsigned long,
	void (*)(void *, struct kmem_cache *, unsigned long),
	void (*)(void *, struct kmem_cache *, unsigned long));
int kmem_cache_destroy(struct kmem_cache *c);
void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *c, void *b);
const char *kmem_cache_name(struct kmem_cache *);
void *kmalloc(size_t size, gfp_t flags);
void *__kzalloc(size_t size, gfp_t flags);
void kfree(const void *m);
unsigned int ksize(const void *m);
unsigned int kmem_cache_size(struct kmem_cache *c);

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return __kzalloc(n * size, flags);
}

#define kmem_cache_shrink(d) (0)
#define kmem_cache_reap(a)
#define kmem_ptr_validate(a, b) (0)
#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
#define kmalloc_node(s, f, n) kmalloc(s, f)
#define kzalloc(s, f) __kzalloc(s, f)
#define ____kmalloc kmalloc

#endif /* CONFIG_SLOB */

/* System wide caches */
extern kmem_cache_t	*vm_area_cachep;
extern kmem_cache_t	*names_cachep;
extern kmem_cache_t	*files_cachep;
extern kmem_cache_t	*filp_cachep;
extern kmem_cache_t	*fs_cachep;
extern kmem_cache_t	*sighand_cachep;
extern kmem_cache_t	*bio_cachep;

extern atomic_t slab_reclaim_pages;

#endif	/* __KERNEL__ */

#endif	/* _LINUX_SLAB_H */