Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/slub_def.h at 95ee46aa8698f2000647dfb362400fadbb5807cf (310 lines, 8.5 kB)

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#else
	/* Avoid an extra cache line for UP */
	struct kmem_cache_node local_node;
#endif
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow so we need this value sufficiently high so that
 * performance critical objects are allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
/* Reserve extra caches for potential DMA use */
#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#define KMALLOC_CACHES SLUB_PAGE_SHIFT
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */
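
The size-to-cache mapping that kmalloc_index() computes is easier to see with a concrete example. The following is a minimal userspace sketch, not kernel code: it mirrors the mapping rules from the header under the assumption that KMALLOC_MIN_SIZE is 8 (i.e. ARCH_DMA_MINALIGN does not exceed 8) and only covers sizes up to 4 KiB. All demo_* identifiers are hypothetical; only the mapping logic is taken from kmalloc_index() above.

/*
 * demo_kmalloc_index.c - userspace sketch of SLUB's size-to-cache mapping.
 * Assumes KMALLOC_MIN_SIZE == 8, so KMALLOC_SHIFT_LOW == ilog2(8) == 3.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_KMALLOC_MIN_SIZE	8	/* assumed: ARCH_DMA_MINALIGN <= 8 */
#define DEMO_KMALLOC_SHIFT_LOW	3	/* ilog2(DEMO_KMALLOC_MIN_SIZE) */

/* Mirror of kmalloc_index() for sizes up to 4 KiB (the 4k page case). */
static int demo_kmalloc_index(size_t size)
{
	if (!size)
		return 0;	/* kmalloc_slab() returns NULL -> ZERO_SIZE_PTR */
	if (size <= DEMO_KMALLOC_MIN_SIZE)
		return DEMO_KMALLOC_SHIFT_LOW;
	if (size > 64 && size <= 96)
		return 1;	/* dedicated 96-byte cache */
	if (size > 128 && size <= 192)
		return 2;	/* dedicated 192-byte cache */
	for (int shift = 4; shift <= 12; shift++)
		if (size <= ((size_t)1 << shift))
			return shift;	/* power-of-two general cache */
	return -1;		/* beyond this sketch */
}

int main(void)
{
	size_t sizes[] = { 1, 8, 24, 90, 100, 190, 200, 4096 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc(%4zu) -> kmalloc_caches[%d]\n",
		       sizes[i], demo_kmalloc_index(sizes[i]));
	return 0;
}

Indices 1 and 2 are the 96- and 192-byte caches that fill the large gaps between the 64/128 and 128/256 power-of-two caches. Because kmalloc() is __always_inline and tests __builtin_constant_p(size), this whole chain collapses at compile time for constant sizes: the call becomes a direct kmem_cache_alloc_notrace() on one kmalloc_caches[] entry, constant sizes above SLUB_MAX_SIZE (two pages) go straight to kmalloc_large() and the page allocator, and non-constant sizes fall back to __kmalloc().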