Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

include/linux/slub_def.h at v2.6.38-rc3 · 301 lines · 8.4 kB
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_FASTPATH,          /* Free to cpu slub */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        FREE_SLAB,              /* Slab freed to the page allocator */
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
        void **freelist;        /* Pointer to first free per cpu object */
        struct page *page;      /* The slab from which we are allocating */
        int node;               /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

struct kmem_cache_node {
        spinlock_t list_lock;   /* Protect partial list and nr_partial */
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned long x;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retriving partial slabs etc */
        unsigned long flags;
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        unsigned long min_partial;
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        int remote_node_defrag_ratio;
#endif
        struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow so we need this value sufficiently high so that
 * performance critical objects are allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
        if (!size)
                return 0;

        if (size <= KMALLOC_MIN_SIZE)
                return KMALLOC_SHIFT_LOW;

        if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
                return 1;
        if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
                return 2;
        if (size <=          8) return 3;
        if (size <=         16) return 4;
        if (size <=         32) return 5;
        if (size <=         64) return 6;
        if (size <=        128) return 7;
        if (size <=        256) return 8;
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
        if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
        if (size <=  64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <=  2 * 1024 * 1024) return 21;
        return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *      int i;
 *      for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *              if (size <= (1 << i))
 *                      return i;
 */
}

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
        int index = kmalloc_index(size);

        if (index == 0)
                return NULL;

        return kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
        kmemleak_alloc(ret, size, 1, flags);
        return ret;
}

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
        return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
        unsigned int order = get_order(size);
        return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                if (size > SLUB_MAX_SIZE)
                        return kmalloc_large(size, flags);

                if (!(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);

                        if (!s)
                                return ZERO_SIZE_PTR;

                        return kmem_cache_alloc_trace(s, flags, size);
                }
        }
        return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                         gfp_t gfpflags,
                                         int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                            gfp_t gfpflags,
                            int node, size_t size)
{
        return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        if (__builtin_constant_p(size) &&
                size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);

                if (!s)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node_trace(s, flags, node, size);
        }
        return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */
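
Note on the constant-size fast path: for a compile-time-constant size, kmalloc() above reduces to a table lookup, with kmalloc_index() mapping the request size to a slot in kmalloc_caches[], dedicated 96- and 192-byte caches wedged between the power-of-two sizes, and anything above SLUB_MAX_SIZE handed to kmalloc_large() instead. The standalone userspace sketch below mirrors that mapping; it is an illustration only, not kernel code, demo_kmalloc_index() is a hypothetical helper name, and it assumes KMALLOC_MIN_SIZE == 8 (no ARCH_DMA_MINALIGN override) and PAGE_SIZE == 4096, so SLUB_MAX_SIZE == 8192.

/*
 * Standalone sketch (not kernel code): mirrors the size-to-index mapping
 * performed by kmalloc_index() in slub_def.h, assuming KMALLOC_MIN_SIZE == 8
 * and a 4 KiB PAGE_SIZE. Sizes above SLUB_MAX_SIZE (8192 here) never reach
 * this table in the real kmalloc(); they go to kmalloc_large() instead.
 */
#include <stdio.h>
#include <stddef.h>

static int demo_kmalloc_index(size_t size)
{
        if (!size)
                return 0;               /* real kmalloc() returns ZERO_SIZE_PTR */
        if (size <= 8)
                return 3;               /* KMALLOC_SHIFT_LOW when MIN_SIZE is 8 */
        if (size > 64 && size <= 96)
                return 1;               /* dedicated 96-byte cache */
        if (size > 128 && size <= 192)
                return 2;               /* dedicated 192-byte cache */
        /* otherwise: smallest power of two >= size, i.e. index = ceil(log2(size)) */
        {
                int i;

                for (i = 4; i <= 21; i++)
                        if (size <= (1UL << i))
                                return i;
        }
        return -1;
}

int main(void)
{
        size_t sizes[] = { 1, 8, 24, 90, 150, 200, 4000, 8192 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("kmalloc(%4zu) -> kmalloc_caches[%d]\n",
                       sizes[i], demo_kmalloc_index(sizes[i]));
        return 0;
}

Compiled with any C compiler, this prints, for example, "kmalloc(  90) -> kmalloc_caches[1]" and "kmalloc(4000) -> kmalloc_caches[12]", matching the dedicated 96-byte cache and the 4096-byte power-of-two cache in the table above.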