Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/slub_def.h at 2d68b7fe55d9e19a8a868224ed0dfd6526568521 (305 lines, 8.4 kB)
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slub */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retriving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow so we need this value sufficiently high so that
 * performance critical objects are allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */
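
Usage note: for a compile-time-constant size no larger than SLUB_MAX_SIZE, the kmalloc() above folds away entirely; kmalloc_index() and kmalloc_slab() are evaluated by the compiler, so the call reduces to kmem_cache_alloc_notrace() on one fixed entry of kmalloc_caches[]. For example, assuming KMALLOC_MIN_SIZE is 8, kmalloc(24, GFP_KERNEL) hits kmalloc_index(24) == 5 and allocates from kmalloc_caches[5], the 32-byte general cache. The sketch below is a hypothetical example module, not part of this header or the kernel tree; the names demo_item, demo_cache, demo_init and demo_exit are made up for illustration, and it assumes a module build against a kernel of roughly this vintage with SLUB selected.

/* Hypothetical example module; illustrates the allocation paths declared in slub_def.h. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

/* 48 bytes on a 64-bit build: a dedicated cache avoids rounding up to kmalloc-64. */
struct demo_item {
	unsigned long id;
	char payload[40];
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	void *buf;
	struct demo_item *item;

	/* Constant size <= SLUB_MAX_SIZE and no __GFP_DMA: served from kmalloc_caches[]. */
	buf = kmalloc(24, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Dedicated cache: every object has the same size and alignment. */
	demo_cache = kmem_cache_create("demo_item", sizeof(struct demo_item),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_cache) {
		kfree(buf);
		return -ENOMEM;
	}

	item = kmem_cache_alloc(demo_cache, GFP_KERNEL);
	if (item)
		kmem_cache_free(demo_cache, item);

	kfree(buf);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With CONFIG_SLUB_STATS enabled, the per-cpu counters from enum stat_item record which of these paths (fastpath, slowpath, partial-list refill, and so on) each allocation and free took.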