Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/slub_def.h at commit 76d1f7bfcd5872056902c5a88b5fcd5d4d00a7a9 (304 lines, 8.3 kB)
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	int size;		/* The size of an object including metadata */
	int objsize;		/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};
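/*
 * Editor's sketch (not part of the original header): a kmem_cache_order_objects
 * packs the page order and the per-slab object count into a single word so
 * the pair can be read or updated atomically, as the comment above says.
 * The real accessors live in mm/slub.c; the helpers below are a hypothetical
 * reconstruction assuming the 16-bit split used there. The "_SKETCH"/"_sketch"
 * names are invented for illustration.
 */
#define OO_SHIFT_SKETCH	16
#define OO_MASK_SKETCH	((1UL << OO_SHIFT_SKETCH) - 1)

static inline struct kmem_cache_order_objects
oo_make_sketch(int order, unsigned long objects)
{
	struct kmem_cache_order_objects x = {
		.x = ((unsigned long)order << OO_SHIFT_SKETCH) | objects,
	};
	return x;	/* e.g. order 1 with 128 objects -> 0x10080 */
}

static inline int oo_order_sketch(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT_SKETCH;	/* high bits: page order */
}

static inline unsigned long oo_objects_sketch(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK_SKETCH;	/* low 16 bits: objects per slab */
}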
/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so this value needs to be high enough that
 * performance-critical objects are allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly, but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
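/*
 * Editor's sketch (not part of the original header): because
 * kmalloc_index() is __always_inline, it constant-folds for
 * compile-time-constant sizes, so the mapping can be checked at build
 * time. Assuming KMALLOC_MIN_SIZE == 8, sizes round up to their
 * power-of-two index, except that the 96- and 192-byte caches occupy
 * slots 1 and 2. BUILD_BUG_ON() comes from <linux/kernel.h>; the
 * function name is invented for illustration.
 */
static inline void kmalloc_index_selftest_sketch(void)
{
	BUILD_BUG_ON(kmalloc_index(8)    != 3);	/* 8-byte cache */
	BUILD_BUG_ON(kmalloc_index(96)   != 1);	/* 96-byte cache, slot 1 */
	BUILD_BUG_ON(kmalloc_index(100)  != 7);	/* rounds up to 128 */
	BUILD_BUG_ON(kmalloc_index(192)  != 2);	/* 192-byte cache, slot 2 */
	BUILD_BUG_ON(kmalloc_index(200)  != 8);	/* rounds up to 256 */
	BUILD_BUG_ON(kmalloc_index(4096) != 12);	/* one 4k page */
}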
/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
	    size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */
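/*
 * Usage sketch (editor's addition, hypothetical caller, not part of the
 * header above): when the requested size is a compile-time constant no
 * larger than SLUB_MAX_SIZE and the DMA flag is clear, the inline
 * kmalloc() above constant-folds into a direct kmem_cache_alloc_notrace()
 * on the matching general cache; other requests fall back to __kmalloc()
 * or kmalloc_large(). The struct and function names here are invented
 * for illustration.
 */
#include <linux/slab.h>

struct foo_sketch {
	int id;
	char name[32];
};

static struct foo_sketch *foo_create_sketch(gfp_t gfp)
{
	/* sizeof(*f) is constant, so this takes the inline fast path */
	struct foo_sketch *f = kmalloc(sizeof(*f), gfp);

	if (!f)
		return NULL;

	f->id = 0;
	f->name[0] = '\0';
	return f;
}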