Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/slab.c at 33bc227e4e48ddadcf2eacb381c19df338f0a6c8 (3634 lines, 97 kB)
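Before the file itself, a brief orientation: the sketch below illustrates how a client of this allocator typically creates a cache, allocates and frees objects, and tears the cache down, using the kmem_cache_* interface implemented in this file. It is a minimal sketch, not part of mm/slab.c; the record type, cache name, and constructor are hypothetical, and only the function signatures and flags are taken from the source that follows.

/* Illustrative sketch only -- not part of mm/slab.c. Assumes the 2.6-era
 * interface implemented below: kmem_cache_create(), kmem_cache_alloc(),
 * kmem_cache_free(), kmem_cache_destroy(). Names prefixed my_ are made up. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct my_record {			/* hypothetical object type */
	int id;
	void *payload;
};

static kmem_cache_t *my_record_cache;	/* hypothetical cache */

/* Constructor: per the header comment below, it runs only when a new slab
 * is allocated; objects must be freed back in the same initialized state. */
static void my_record_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
{
	struct my_record *r = obj;

	r->id = 0;
	r->payload = NULL;
}

static int __init my_record_init(void)
{
	my_record_cache = kmem_cache_create("my_record",
			sizeof(struct my_record), 0,
			SLAB_HWCACHE_ALIGN, my_record_ctor, NULL);
	if (!my_record_cache)
		return -ENOMEM;
	return 0;
}

static void my_record_use(void)
{
	struct my_record *r = kmem_cache_alloc(my_record_cache, SLAB_KERNEL);

	if (!r)
		return;
	/* ... use r, restoring the ctor-initialized state before freeing ... */
	kmem_cache_free(my_record_cache, r);
}

static void __exit my_record_exit(void)
{
	/* The caller must guarantee no concurrent allocations (see below). */
	kmem_cache_destroy(my_record_cache);
}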
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in kmem_cache_t and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the semaphore 'cache_chain_sem'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 * Modified the slab allocator to be node aware on NUMA systems.
 * Each node has its own list of partial, free and full slabs.
 * All object allocations for a node occur from node specific slab lists.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/nodemask.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif


/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
191 * This allows the bufctl structure to be small (one int), but limits 192 * the number of objects a slab (not a cache) can contain when off-slab 193 * bufctls are used. The limit is the size of the largest general cache 194 * that does not use off-slab slabs. 195 * For 32bit archs with 4 kB pages, is this 56. 196 * This is not serious, as it is only for large objects, when it is unwise 197 * to have too many per slab. 198 * Note: This limit can be raised by introducing a general cache whose size 199 * is less than 512 (PAGE_SIZE<<3), but greater than 256. 200 */ 201 202typedef unsigned int kmem_bufctl_t; 203#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) 204#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) 205#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2) 206 207/* Max number of objs-per-slab for caches which use off-slab slabs. 208 * Needed to avoid a possible looping condition in cache_grow(). 209 */ 210static unsigned long offslab_limit; 211 212/* 213 * struct slab 214 * 215 * Manages the objs in a slab. Placed either at the beginning of mem allocated 216 * for a slab, or allocated from an general cache. 217 * Slabs are chained into three list: fully used, partial, fully free slabs. 218 */ 219struct slab { 220 struct list_head list; 221 unsigned long colouroff; 222 void *s_mem; /* including colour offset */ 223 unsigned int inuse; /* num of objs active in slab */ 224 kmem_bufctl_t free; 225 unsigned short nodeid; 226}; 227 228/* 229 * struct slab_rcu 230 * 231 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to 232 * arrange for kmem_freepages to be called via RCU. This is useful if 233 * we need to approach a kernel structure obliquely, from its address 234 * obtained without the usual locking. We can lock the structure to 235 * stabilize it and check it's still at the given address, only if we 236 * can be sure that the memory has not been meanwhile reused for some 237 * other kind of object (which our subsystem's lock might corrupt). 238 * 239 * rcu_read_lock before reading the address, then rcu_read_unlock after 240 * taking the spinlock within the structure expected at that address. 241 * 242 * We assume struct slab_rcu can overlay struct slab when destroying. 243 */ 244struct slab_rcu { 245 struct rcu_head head; 246 kmem_cache_t *cachep; 247 void *addr; 248}; 249 250/* 251 * struct array_cache 252 * 253 * Purpose: 254 * - LIFO ordering, to hand out cache-warm objects from _alloc 255 * - reduce the number of linked list operations 256 * - reduce spinlock operations 257 * 258 * The limit is stored in the per-cpu structure to reduce the data cache 259 * footprint. 260 * 261 */ 262struct array_cache { 263 unsigned int avail; 264 unsigned int limit; 265 unsigned int batchcount; 266 unsigned int touched; 267 spinlock_t lock; 268 void *entry[0]; /* 269 * Must have this definition in here for the proper 270 * alignment of array_cache. Also simplifies accessing 271 * the entries. 272 * [0] is for gcc 2.95. It should really be []. 273 */ 274}; 275 276/* bootstrap: The caches do not work without cpuarrays anymore, 277 * but the cpuarrays are allocated from the generic caches... 278 */ 279#define BOOT_CPUCACHE_ENTRIES 1 280struct arraycache_init { 281 struct array_cache cache; 282 void * entries[BOOT_CPUCACHE_ENTRIES]; 283}; 284 285/* 286 * The slab lists for all objects. 
287 */ 288struct kmem_list3 { 289 struct list_head slabs_partial; /* partial list first, better asm code */ 290 struct list_head slabs_full; 291 struct list_head slabs_free; 292 unsigned long free_objects; 293 unsigned long next_reap; 294 int free_touched; 295 unsigned int free_limit; 296 spinlock_t list_lock; 297 struct array_cache *shared; /* shared per node */ 298 struct array_cache **alien; /* on other nodes */ 299}; 300 301/* 302 * Need this for bootstrapping a per node allocator. 303 */ 304#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) 305struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 306#define CACHE_CACHE 0 307#define SIZE_AC 1 308#define SIZE_L3 (1 + MAX_NUMNODES) 309 310/* 311 * This function must be completely optimized away if 312 * a constant is passed to it. Mostly the same as 313 * what is in linux/slab.h except it returns an 314 * index. 315 */ 316static __always_inline int index_of(const size_t size) 317{ 318 if (__builtin_constant_p(size)) { 319 int i = 0; 320 321#define CACHE(x) \ 322 if (size <=x) \ 323 return i; \ 324 else \ 325 i++; 326#include "linux/kmalloc_sizes.h" 327#undef CACHE 328 { 329 extern void __bad_size(void); 330 __bad_size(); 331 } 332 } else 333 BUG(); 334 return 0; 335} 336 337#define INDEX_AC index_of(sizeof(struct arraycache_init)) 338#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 339 340static inline void kmem_list3_init(struct kmem_list3 *parent) 341{ 342 INIT_LIST_HEAD(&parent->slabs_full); 343 INIT_LIST_HEAD(&parent->slabs_partial); 344 INIT_LIST_HEAD(&parent->slabs_free); 345 parent->shared = NULL; 346 parent->alien = NULL; 347 spin_lock_init(&parent->list_lock); 348 parent->free_objects = 0; 349 parent->free_touched = 0; 350} 351 352#define MAKE_LIST(cachep, listp, slab, nodeid) \ 353 do { \ 354 INIT_LIST_HEAD(listp); \ 355 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 356 } while (0) 357 358#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 359 do { \ 360 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 361 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 362 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 363 } while (0) 364 365/* 366 * kmem_cache_t 367 * 368 * manages a cache. 369 */ 370 371struct kmem_cache { 372/* 1) per-cpu data, touched during every alloc/free */ 373 struct array_cache *array[NR_CPUS]; 374 unsigned int batchcount; 375 unsigned int limit; 376 unsigned int shared; 377 unsigned int objsize; 378/* 2) touched by every alloc & free from the backend */ 379 struct kmem_list3 *nodelists[MAX_NUMNODES]; 380 unsigned int flags; /* constant flags */ 381 unsigned int num; /* # of objs per slab */ 382 spinlock_t spinlock; 383 384/* 3) cache_grow/shrink */ 385 /* order of pgs per slab (2^n) */ 386 unsigned int gfporder; 387 388 /* force GFP flags, e.g. 
GFP_DMA */ 389 gfp_t gfpflags; 390 391 size_t colour; /* cache colouring range */ 392 unsigned int colour_off; /* colour offset */ 393 unsigned int colour_next; /* cache colouring */ 394 kmem_cache_t *slabp_cache; 395 unsigned int slab_size; 396 unsigned int dflags; /* dynamic flags */ 397 398 /* constructor func */ 399 void (*ctor)(void *, kmem_cache_t *, unsigned long); 400 401 /* de-constructor func */ 402 void (*dtor)(void *, kmem_cache_t *, unsigned long); 403 404/* 4) cache creation/removal */ 405 const char *name; 406 struct list_head next; 407 408/* 5) statistics */ 409#if STATS 410 unsigned long num_active; 411 unsigned long num_allocations; 412 unsigned long high_mark; 413 unsigned long grown; 414 unsigned long reaped; 415 unsigned long errors; 416 unsigned long max_freeable; 417 unsigned long node_allocs; 418 unsigned long node_frees; 419 atomic_t allochit; 420 atomic_t allocmiss; 421 atomic_t freehit; 422 atomic_t freemiss; 423#endif 424#if DEBUG 425 int dbghead; 426 int reallen; 427#endif 428}; 429 430#define CFLGS_OFF_SLAB (0x80000000UL) 431#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 432 433#define BATCHREFILL_LIMIT 16 434/* Optimization question: fewer reaps means less 435 * probability for unnessary cpucache drain/refill cycles. 436 * 437 * OTOH the cpuarrays can contain lots of objects, 438 * which could lock up otherwise freeable slabs. 439 */ 440#define REAPTIMEOUT_CPUC (2*HZ) 441#define REAPTIMEOUT_LIST3 (4*HZ) 442 443#if STATS 444#define STATS_INC_ACTIVE(x) ((x)->num_active++) 445#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 446#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 447#define STATS_INC_GROWN(x) ((x)->grown++) 448#define STATS_INC_REAPED(x) ((x)->reaped++) 449#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \ 450 (x)->high_mark = (x)->num_active; \ 451 } while (0) 452#define STATS_INC_ERR(x) ((x)->errors++) 453#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 454#define STATS_INC_NODEFREES(x) ((x)->node_frees++) 455#define STATS_SET_FREEABLE(x, i) \ 456 do { if ((x)->max_freeable < i) \ 457 (x)->max_freeable = i; \ 458 } while (0) 459 460#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 461#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 462#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 463#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 464#else 465#define STATS_INC_ACTIVE(x) do { } while (0) 466#define STATS_DEC_ACTIVE(x) do { } while (0) 467#define STATS_INC_ALLOCED(x) do { } while (0) 468#define STATS_INC_GROWN(x) do { } while (0) 469#define STATS_INC_REAPED(x) do { } while (0) 470#define STATS_SET_HIGH(x) do { } while (0) 471#define STATS_INC_ERR(x) do { } while (0) 472#define STATS_INC_NODEALLOCS(x) do { } while (0) 473#define STATS_INC_NODEFREES(x) do { } while (0) 474#define STATS_SET_FREEABLE(x, i) \ 475 do { } while (0) 476 477#define STATS_INC_ALLOCHIT(x) do { } while (0) 478#define STATS_INC_ALLOCMISS(x) do { } while (0) 479#define STATS_INC_FREEHIT(x) do { } while (0) 480#define STATS_INC_FREEMISS(x) do { } while (0) 481#endif 482 483#if DEBUG 484/* Magic nums for obj red zoning. 485 * Placed in the first word before and the first word after an obj. 
486 */ 487#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ 488#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ 489 490/* ...and for poisoning */ 491#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ 492#define POISON_FREE 0x6b /* for use-after-free poisoning */ 493#define POISON_END 0xa5 /* end-byte of poisoning */ 494 495/* memory layout of objects: 496 * 0 : objp 497 * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that 498 * the end of an object is aligned with the end of the real 499 * allocation. Catches writes behind the end of the allocation. 500 * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1: 501 * redzone word. 502 * cachep->dbghead: The real object. 503 * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 504 * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long] 505 */ 506static int obj_dbghead(kmem_cache_t *cachep) 507{ 508 return cachep->dbghead; 509} 510 511static int obj_reallen(kmem_cache_t *cachep) 512{ 513 return cachep->reallen; 514} 515 516static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp) 517{ 518 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 519 return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD); 520} 521 522static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp) 523{ 524 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 525 if (cachep->flags & SLAB_STORE_USER) 526 return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD); 527 return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD); 528} 529 530static void **dbg_userword(kmem_cache_t *cachep, void *objp) 531{ 532 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 533 return (void**)(objp+cachep->objsize-BYTES_PER_WORD); 534} 535 536#else 537 538#define obj_dbghead(x) 0 539#define obj_reallen(cachep) (cachep->objsize) 540#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 541#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 542#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 543 544#endif 545 546/* 547 * Maximum size of an obj (in 2^order pages) 548 * and absolute limit for the gfp order. 549 */ 550#if defined(CONFIG_LARGE_ALLOCS) 551#define MAX_OBJ_ORDER 13 /* up to 32Mb */ 552#define MAX_GFP_ORDER 13 /* up to 32Mb */ 553#elif defined(CONFIG_MMU) 554#define MAX_OBJ_ORDER 5 /* 32 pages */ 555#define MAX_GFP_ORDER 5 /* 32 pages */ 556#else 557#define MAX_OBJ_ORDER 8 /* up to 1Mb */ 558#define MAX_GFP_ORDER 8 /* up to 1Mb */ 559#endif 560 561/* 562 * Do not go above this order unless 0 objects fit into the slab. 563 */ 564#define BREAK_GFP_ORDER_HI 1 565#define BREAK_GFP_ORDER_LO 0 566static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 567 568/* Functions for storing/retrieving the cachep and or slab from the 569 * global 'mem_map'. These are used to find the slab an obj belongs to. 570 * With kfree(), these are used to find the cache which an obj belongs to. 
571 */ 572static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 573{ 574 page->lru.next = (struct list_head *)cache; 575} 576 577static inline struct kmem_cache *page_get_cache(struct page *page) 578{ 579 return (struct kmem_cache *)page->lru.next; 580} 581 582static inline void page_set_slab(struct page *page, struct slab *slab) 583{ 584 page->lru.prev = (struct list_head *)slab; 585} 586 587static inline struct slab *page_get_slab(struct page *page) 588{ 589 return (struct slab *)page->lru.prev; 590} 591 592/* These are the default caches for kmalloc. Custom caches can have other sizes. */ 593struct cache_sizes malloc_sizes[] = { 594#define CACHE(x) { .cs_size = (x) }, 595#include <linux/kmalloc_sizes.h> 596 CACHE(ULONG_MAX) 597#undef CACHE 598}; 599EXPORT_SYMBOL(malloc_sizes); 600 601/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 602struct cache_names { 603 char *name; 604 char *name_dma; 605}; 606 607static struct cache_names __initdata cache_names[] = { 608#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 609#include <linux/kmalloc_sizes.h> 610 { NULL, } 611#undef CACHE 612}; 613 614static struct arraycache_init initarray_cache __initdata = 615 { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 616static struct arraycache_init initarray_generic = 617 { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 618 619/* internal cache of cache description objs */ 620static kmem_cache_t cache_cache = { 621 .batchcount = 1, 622 .limit = BOOT_CPUCACHE_ENTRIES, 623 .shared = 1, 624 .objsize = sizeof(kmem_cache_t), 625 .flags = SLAB_NO_REAP, 626 .spinlock = SPIN_LOCK_UNLOCKED, 627 .name = "kmem_cache", 628#if DEBUG 629 .reallen = sizeof(kmem_cache_t), 630#endif 631}; 632 633/* Guard access to the cache-chain. */ 634static struct semaphore cache_chain_sem; 635static struct list_head cache_chain; 636 637/* 638 * vm_enough_memory() looks at this to determine how many 639 * slab-allocated pages are possibly freeable under pressure 640 * 641 * SLAB_RECLAIM_ACCOUNT turns this on per-slab 642 */ 643atomic_t slab_reclaim_pages; 644 645/* 646 * chicken and egg problem: delay the per-cpu array allocation 647 * until the general caches are up. 648 */ 649static enum { 650 NONE, 651 PARTIAL_AC, 652 PARTIAL_L3, 653 FULL 654} g_cpucache_up; 655 656static DEFINE_PER_CPU(struct work_struct, reap_work); 657 658static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node); 659static void enable_cpucache (kmem_cache_t *cachep); 660static void cache_reap (void *unused); 661static int __node_shrink(kmem_cache_t *cachep, int node); 662 663static inline struct array_cache *ac_data(kmem_cache_t *cachep) 664{ 665 return cachep->array[smp_processor_id()]; 666} 667 668static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) 669{ 670 struct cache_sizes *csizep = malloc_sizes; 671 672#if DEBUG 673 /* This happens if someone tries to call 674 * kmem_cache_create(), or __kmalloc(), before 675 * the generic caches are initialized. 676 */ 677 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 678#endif 679 while (size > csizep->cs_size) 680 csizep++; 681 682 /* 683 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 684 * has cs_{dma,}cachep==NULL. Thus no special case 685 * for large kmalloc calls required. 
686 */ 687 if (unlikely(gfpflags & GFP_DMA)) 688 return csizep->cs_dmacachep; 689 return csizep->cs_cachep; 690} 691 692kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 693{ 694 return __find_general_cachep(size, gfpflags); 695} 696EXPORT_SYMBOL(kmem_find_general_cachep); 697 698/* Cal the num objs, wastage, and bytes left over for a given slab size. */ 699static void cache_estimate(unsigned long gfporder, size_t size, size_t align, 700 int flags, size_t *left_over, unsigned int *num) 701{ 702 int i; 703 size_t wastage = PAGE_SIZE<<gfporder; 704 size_t extra = 0; 705 size_t base = 0; 706 707 if (!(flags & CFLGS_OFF_SLAB)) { 708 base = sizeof(struct slab); 709 extra = sizeof(kmem_bufctl_t); 710 } 711 i = 0; 712 while (i*size + ALIGN(base+i*extra, align) <= wastage) 713 i++; 714 if (i > 0) 715 i--; 716 717 if (i > SLAB_LIMIT) 718 i = SLAB_LIMIT; 719 720 *num = i; 721 wastage -= i*size; 722 wastage -= ALIGN(base+i*extra, align); 723 *left_over = wastage; 724} 725 726#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 727 728static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) 729{ 730 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 731 function, cachep->name, msg); 732 dump_stack(); 733} 734 735/* 736 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 737 * via the workqueue/eventd. 738 * Add the CPU number into the expiration time to minimize the possibility of 739 * the CPUs getting into lockstep and contending for the global cache chain 740 * lock. 741 */ 742static void __devinit start_cpu_timer(int cpu) 743{ 744 struct work_struct *reap_work = &per_cpu(reap_work, cpu); 745 746 /* 747 * When this gets called from do_initcalls via cpucache_init(), 748 * init_workqueues() has already run, so keventd will be setup 749 * at that time. 
750 */ 751 if (keventd_up() && reap_work->func == NULL) { 752 INIT_WORK(reap_work, cache_reap, NULL); 753 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 754 } 755} 756 757static struct array_cache *alloc_arraycache(int node, int entries, 758 int batchcount) 759{ 760 int memsize = sizeof(void*)*entries+sizeof(struct array_cache); 761 struct array_cache *nc = NULL; 762 763 nc = kmalloc_node(memsize, GFP_KERNEL, node); 764 if (nc) { 765 nc->avail = 0; 766 nc->limit = entries; 767 nc->batchcount = batchcount; 768 nc->touched = 0; 769 spin_lock_init(&nc->lock); 770 } 771 return nc; 772} 773 774#ifdef CONFIG_NUMA 775static inline struct array_cache **alloc_alien_cache(int node, int limit) 776{ 777 struct array_cache **ac_ptr; 778 int memsize = sizeof(void*)*MAX_NUMNODES; 779 int i; 780 781 if (limit > 1) 782 limit = 12; 783 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); 784 if (ac_ptr) { 785 for_each_node(i) { 786 if (i == node || !node_online(i)) { 787 ac_ptr[i] = NULL; 788 continue; 789 } 790 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); 791 if (!ac_ptr[i]) { 792 for (i--; i <=0; i--) 793 kfree(ac_ptr[i]); 794 kfree(ac_ptr); 795 return NULL; 796 } 797 } 798 } 799 return ac_ptr; 800} 801 802static inline void free_alien_cache(struct array_cache **ac_ptr) 803{ 804 int i; 805 806 if (!ac_ptr) 807 return; 808 809 for_each_node(i) 810 kfree(ac_ptr[i]); 811 812 kfree(ac_ptr); 813} 814 815static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node) 816{ 817 struct kmem_list3 *rl3 = cachep->nodelists[node]; 818 819 if (ac->avail) { 820 spin_lock(&rl3->list_lock); 821 free_block(cachep, ac->entry, ac->avail, node); 822 ac->avail = 0; 823 spin_unlock(&rl3->list_lock); 824 } 825} 826 827static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) 828{ 829 int i=0; 830 struct array_cache *ac; 831 unsigned long flags; 832 833 for_each_online_node(i) { 834 ac = l3->alien[i]; 835 if (ac) { 836 spin_lock_irqsave(&ac->lock, flags); 837 __drain_alien_cache(cachep, ac, i); 838 spin_unlock_irqrestore(&ac->lock, flags); 839 } 840 } 841} 842#else 843#define alloc_alien_cache(node, limit) do { } while (0) 844#define free_alien_cache(ac_ptr) do { } while (0) 845#define drain_alien_cache(cachep, l3) do { } while (0) 846#endif 847 848static int __devinit cpuup_callback(struct notifier_block *nfb, 849 unsigned long action, void *hcpu) 850{ 851 long cpu = (long)hcpu; 852 kmem_cache_t* cachep; 853 struct kmem_list3 *l3 = NULL; 854 int node = cpu_to_node(cpu); 855 int memsize = sizeof(struct kmem_list3); 856 struct array_cache *nc = NULL; 857 858 switch (action) { 859 case CPU_UP_PREPARE: 860 down(&cache_chain_sem); 861 /* we need to do this right in the beginning since 862 * alloc_arraycache's are going to use this list. 863 * kmalloc_node allows us to add the slab to the right 864 * kmem_list3 and not this cpu's kmem_list3 865 */ 866 867 list_for_each_entry(cachep, &cache_chain, next) { 868 /* setup the size64 kmemlist for cpu before we can 869 * begin anything. 
Make sure some other cpu on this 870 * node has not already allocated this 871 */ 872 if (!cachep->nodelists[node]) { 873 if (!(l3 = kmalloc_node(memsize, 874 GFP_KERNEL, node))) 875 goto bad; 876 kmem_list3_init(l3); 877 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 878 ((unsigned long)cachep)%REAPTIMEOUT_LIST3; 879 880 cachep->nodelists[node] = l3; 881 } 882 883 spin_lock_irq(&cachep->nodelists[node]->list_lock); 884 cachep->nodelists[node]->free_limit = 885 (1 + nr_cpus_node(node)) * 886 cachep->batchcount + cachep->num; 887 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 888 } 889 890 /* Now we can go ahead with allocating the shared array's 891 & array cache's */ 892 list_for_each_entry(cachep, &cache_chain, next) { 893 nc = alloc_arraycache(node, cachep->limit, 894 cachep->batchcount); 895 if (!nc) 896 goto bad; 897 cachep->array[cpu] = nc; 898 899 l3 = cachep->nodelists[node]; 900 BUG_ON(!l3); 901 if (!l3->shared) { 902 if (!(nc = alloc_arraycache(node, 903 cachep->shared*cachep->batchcount, 904 0xbaadf00d))) 905 goto bad; 906 907 /* we are serialised from CPU_DEAD or 908 CPU_UP_CANCELLED by the cpucontrol lock */ 909 l3->shared = nc; 910 } 911 } 912 up(&cache_chain_sem); 913 break; 914 case CPU_ONLINE: 915 start_cpu_timer(cpu); 916 break; 917#ifdef CONFIG_HOTPLUG_CPU 918 case CPU_DEAD: 919 /* fall thru */ 920 case CPU_UP_CANCELED: 921 down(&cache_chain_sem); 922 923 list_for_each_entry(cachep, &cache_chain, next) { 924 struct array_cache *nc; 925 cpumask_t mask; 926 927 mask = node_to_cpumask(node); 928 spin_lock_irq(&cachep->spinlock); 929 /* cpu is dead; no one can alloc from it. */ 930 nc = cachep->array[cpu]; 931 cachep->array[cpu] = NULL; 932 l3 = cachep->nodelists[node]; 933 934 if (!l3) 935 goto unlock_cache; 936 937 spin_lock(&l3->list_lock); 938 939 /* Free limit for this kmem_list3 */ 940 l3->free_limit -= cachep->batchcount; 941 if (nc) 942 free_block(cachep, nc->entry, nc->avail, node); 943 944 if (!cpus_empty(mask)) { 945 spin_unlock(&l3->list_lock); 946 goto unlock_cache; 947 } 948 949 if (l3->shared) { 950 free_block(cachep, l3->shared->entry, 951 l3->shared->avail, node); 952 kfree(l3->shared); 953 l3->shared = NULL; 954 } 955 if (l3->alien) { 956 drain_alien_cache(cachep, l3); 957 free_alien_cache(l3->alien); 958 l3->alien = NULL; 959 } 960 961 /* free slabs belonging to this node */ 962 if (__node_shrink(cachep, node)) { 963 cachep->nodelists[node] = NULL; 964 spin_unlock(&l3->list_lock); 965 kfree(l3); 966 } else { 967 spin_unlock(&l3->list_lock); 968 } 969unlock_cache: 970 spin_unlock_irq(&cachep->spinlock); 971 kfree(nc); 972 } 973 up(&cache_chain_sem); 974 break; 975#endif 976 } 977 return NOTIFY_OK; 978bad: 979 up(&cache_chain_sem); 980 return NOTIFY_BAD; 981} 982 983static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; 984 985/* 986 * swap the static kmem_list3 with kmalloced memory 987 */ 988static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, 989 int nodeid) 990{ 991 struct kmem_list3 *ptr; 992 993 BUG_ON(cachep->nodelists[nodeid] != list); 994 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 995 BUG_ON(!ptr); 996 997 local_irq_disable(); 998 memcpy(ptr, list, sizeof(struct kmem_list3)); 999 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1000 cachep->nodelists[nodeid] = ptr; 1001 local_irq_enable(); 1002} 1003 1004/* Initialisation. 1005 * Called after the gfp() functions have been enabled, and before smp_init(). 
1006 */ 1007void __init kmem_cache_init(void) 1008{ 1009 size_t left_over; 1010 struct cache_sizes *sizes; 1011 struct cache_names *names; 1012 int i; 1013 1014 for (i = 0; i < NUM_INIT_LISTS; i++) { 1015 kmem_list3_init(&initkmem_list3[i]); 1016 if (i < MAX_NUMNODES) 1017 cache_cache.nodelists[i] = NULL; 1018 } 1019 1020 /* 1021 * Fragmentation resistance on low memory - only use bigger 1022 * page orders on machines with more than 32MB of memory. 1023 */ 1024 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1025 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1026 1027 /* Bootstrap is tricky, because several objects are allocated 1028 * from caches that do not exist yet: 1029 * 1) initialize the cache_cache cache: it contains the kmem_cache_t 1030 * structures of all caches, except cache_cache itself: cache_cache 1031 * is statically allocated. 1032 * Initially an __init data area is used for the head array and the 1033 * kmem_list3 structures, it's replaced with a kmalloc allocated 1034 * array at the end of the bootstrap. 1035 * 2) Create the first kmalloc cache. 1036 * The kmem_cache_t for the new cache is allocated normally. 1037 * An __init data area is used for the head array. 1038 * 3) Create the remaining kmalloc caches, with minimally sized 1039 * head arrays. 1040 * 4) Replace the __init data head arrays for cache_cache and the first 1041 * kmalloc cache with kmalloc allocated arrays. 1042 * 5) Replace the __init data for kmem_list3 for cache_cache and 1043 * the other cache's with kmalloc allocated memory. 1044 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1045 */ 1046 1047 /* 1) create the cache_cache */ 1048 init_MUTEX(&cache_chain_sem); 1049 INIT_LIST_HEAD(&cache_chain); 1050 list_add(&cache_cache.next, &cache_chain); 1051 cache_cache.colour_off = cache_line_size(); 1052 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1053 cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE]; 1054 1055 cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size()); 1056 1057 cache_estimate(0, cache_cache.objsize, cache_line_size(), 0, 1058 &left_over, &cache_cache.num); 1059 if (!cache_cache.num) 1060 BUG(); 1061 1062 cache_cache.colour = left_over/cache_cache.colour_off; 1063 cache_cache.colour_next = 0; 1064 cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) + 1065 sizeof(struct slab), cache_line_size()); 1066 1067 /* 2+3) create the kmalloc caches */ 1068 sizes = malloc_sizes; 1069 names = cache_names; 1070 1071 /* Initialize the caches that provide memory for the array cache 1072 * and the kmem_list3 structures first. 1073 * Without this, further allocations will bug 1074 */ 1075 1076 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1077 sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, 1078 (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); 1079 1080 if (INDEX_AC != INDEX_L3) 1081 sizes[INDEX_L3].cs_cachep = 1082 kmem_cache_create(names[INDEX_L3].name, 1083 sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, 1084 (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); 1085 1086 while (sizes->cs_size != ULONG_MAX) { 1087 /* 1088 * For performance, all the general caches are L1 aligned. 1089 * This should be particularly beneficial on SMP boxes, as it 1090 * eliminates "false sharing". 1091 * Note for systems short on memory removing the alignment will 1092 * allow tighter packing of the smaller caches. 
1093 */ 1094 if(!sizes->cs_cachep) 1095 sizes->cs_cachep = kmem_cache_create(names->name, 1096 sizes->cs_size, ARCH_KMALLOC_MINALIGN, 1097 (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); 1098 1099 /* Inc off-slab bufctl limit until the ceiling is hit. */ 1100 if (!(OFF_SLAB(sizes->cs_cachep))) { 1101 offslab_limit = sizes->cs_size-sizeof(struct slab); 1102 offslab_limit /= sizeof(kmem_bufctl_t); 1103 } 1104 1105 sizes->cs_dmacachep = kmem_cache_create(names->name_dma, 1106 sizes->cs_size, ARCH_KMALLOC_MINALIGN, 1107 (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC), 1108 NULL, NULL); 1109 1110 sizes++; 1111 names++; 1112 } 1113 /* 4) Replace the bootstrap head arrays */ 1114 { 1115 void * ptr; 1116 1117 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1118 1119 local_irq_disable(); 1120 BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache); 1121 memcpy(ptr, ac_data(&cache_cache), 1122 sizeof(struct arraycache_init)); 1123 cache_cache.array[smp_processor_id()] = ptr; 1124 local_irq_enable(); 1125 1126 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1127 1128 local_irq_disable(); 1129 BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep) 1130 != &initarray_generic.cache); 1131 memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep), 1132 sizeof(struct arraycache_init)); 1133 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1134 ptr; 1135 local_irq_enable(); 1136 } 1137 /* 5) Replace the bootstrap kmem_list3's */ 1138 { 1139 int node; 1140 /* Replace the static kmem_list3 structures for the boot cpu */ 1141 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], 1142 numa_node_id()); 1143 1144 for_each_online_node(node) { 1145 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1146 &initkmem_list3[SIZE_AC+node], node); 1147 1148 if (INDEX_AC != INDEX_L3) { 1149 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1150 &initkmem_list3[SIZE_L3+node], 1151 node); 1152 } 1153 } 1154 } 1155 1156 /* 6) resize the head arrays to their final sizes */ 1157 { 1158 kmem_cache_t *cachep; 1159 down(&cache_chain_sem); 1160 list_for_each_entry(cachep, &cache_chain, next) 1161 enable_cpucache(cachep); 1162 up(&cache_chain_sem); 1163 } 1164 1165 /* Done! */ 1166 g_cpucache_up = FULL; 1167 1168 /* Register a cpu startup notifier callback 1169 * that initializes ac_data for all new cpus 1170 */ 1171 register_cpu_notifier(&cpucache_notifier); 1172 1173 /* The reap timers are started later, with a module init call: 1174 * That part of the kernel is not yet operational. 1175 */ 1176} 1177 1178static int __init cpucache_init(void) 1179{ 1180 int cpu; 1181 1182 /* 1183 * Register the timers that return unneeded 1184 * pages to gfp. 1185 */ 1186 for_each_online_cpu(cpu) 1187 start_cpu_timer(cpu); 1188 1189 return 0; 1190} 1191 1192__initcall(cpucache_init); 1193 1194/* 1195 * Interface to system's page allocator. No need to hold the cache-lock. 1196 * 1197 * If we requested dmaable memory, we will get it. Even if we 1198 * did not request dmaable memory, we might get it, but that 1199 * would be relatively rare and ignorable. 
1200 */ 1201static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) 1202{ 1203 struct page *page; 1204 void *addr; 1205 int i; 1206 1207 flags |= cachep->gfpflags; 1208 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1209 if (!page) 1210 return NULL; 1211 addr = page_address(page); 1212 1213 i = (1 << cachep->gfporder); 1214 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1215 atomic_add(i, &slab_reclaim_pages); 1216 add_page_state(nr_slab, i); 1217 while (i--) { 1218 SetPageSlab(page); 1219 page++; 1220 } 1221 return addr; 1222} 1223 1224/* 1225 * Interface to system's page release. 1226 */ 1227static void kmem_freepages(kmem_cache_t *cachep, void *addr) 1228{ 1229 unsigned long i = (1<<cachep->gfporder); 1230 struct page *page = virt_to_page(addr); 1231 const unsigned long nr_freed = i; 1232 1233 while (i--) { 1234 if (!TestClearPageSlab(page)) 1235 BUG(); 1236 page++; 1237 } 1238 sub_page_state(nr_slab, nr_freed); 1239 if (current->reclaim_state) 1240 current->reclaim_state->reclaimed_slab += nr_freed; 1241 free_pages((unsigned long)addr, cachep->gfporder); 1242 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1243 atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages); 1244} 1245 1246static void kmem_rcu_free(struct rcu_head *head) 1247{ 1248 struct slab_rcu *slab_rcu = (struct slab_rcu *) head; 1249 kmem_cache_t *cachep = slab_rcu->cachep; 1250 1251 kmem_freepages(cachep, slab_rcu->addr); 1252 if (OFF_SLAB(cachep)) 1253 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1254} 1255 1256#if DEBUG 1257 1258#ifdef CONFIG_DEBUG_PAGEALLOC 1259static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, 1260 unsigned long caller) 1261{ 1262 int size = obj_reallen(cachep); 1263 1264 addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)]; 1265 1266 if (size < 5*sizeof(unsigned long)) 1267 return; 1268 1269 *addr++=0x12345678; 1270 *addr++=caller; 1271 *addr++=smp_processor_id(); 1272 size -= 3*sizeof(unsigned long); 1273 { 1274 unsigned long *sptr = &caller; 1275 unsigned long svalue; 1276 1277 while (!kstack_end(sptr)) { 1278 svalue = *sptr++; 1279 if (kernel_text_address(svalue)) { 1280 *addr++=svalue; 1281 size -= sizeof(unsigned long); 1282 if (size <= sizeof(unsigned long)) 1283 break; 1284 } 1285 } 1286 1287 } 1288 *addr++=0x87654321; 1289} 1290#endif 1291 1292static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val) 1293{ 1294 int size = obj_reallen(cachep); 1295 addr = &((char*)addr)[obj_dbghead(cachep)]; 1296 1297 memset(addr, val, size); 1298 *(unsigned char *)(addr+size-1) = POISON_END; 1299} 1300 1301static void dump_line(char *data, int offset, int limit) 1302{ 1303 int i; 1304 printk(KERN_ERR "%03x:", offset); 1305 for (i=0;i<limit;i++) { 1306 printk(" %02x", (unsigned char)data[offset+i]); 1307 } 1308 printk("\n"); 1309} 1310#endif 1311 1312#if DEBUG 1313 1314static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines) 1315{ 1316 int i, size; 1317 char *realobj; 1318 1319 if (cachep->flags & SLAB_RED_ZONE) { 1320 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", 1321 *dbg_redzone1(cachep, objp), 1322 *dbg_redzone2(cachep, objp)); 1323 } 1324 1325 if (cachep->flags & SLAB_STORE_USER) { 1326 printk(KERN_ERR "Last user: [<%p>]", 1327 *dbg_userword(cachep, objp)); 1328 print_symbol("(%s)", 1329 (unsigned long)*dbg_userword(cachep, objp)); 1330 printk("\n"); 1331 } 1332 realobj = (char*)objp+obj_dbghead(cachep); 1333 size = obj_reallen(cachep); 1334 for (i=0; i<size && lines;i+=16, lines--) { 1335 int limit; 1336 limit = 16; 1337 if 
(i+limit > size) 1338 limit = size-i; 1339 dump_line(realobj, i, limit); 1340 } 1341} 1342 1343static void check_poison_obj(kmem_cache_t *cachep, void *objp) 1344{ 1345 char *realobj; 1346 int size, i; 1347 int lines = 0; 1348 1349 realobj = (char*)objp+obj_dbghead(cachep); 1350 size = obj_reallen(cachep); 1351 1352 for (i=0;i<size;i++) { 1353 char exp = POISON_FREE; 1354 if (i == size-1) 1355 exp = POISON_END; 1356 if (realobj[i] != exp) { 1357 int limit; 1358 /* Mismatch ! */ 1359 /* Print header */ 1360 if (lines == 0) { 1361 printk(KERN_ERR "Slab corruption: start=%p, len=%d\n", 1362 realobj, size); 1363 print_objinfo(cachep, objp, 0); 1364 } 1365 /* Hexdump the affected line */ 1366 i = (i/16)*16; 1367 limit = 16; 1368 if (i+limit > size) 1369 limit = size-i; 1370 dump_line(realobj, i, limit); 1371 i += 16; 1372 lines++; 1373 /* Limit to 5 lines */ 1374 if (lines > 5) 1375 break; 1376 } 1377 } 1378 if (lines != 0) { 1379 /* Print some data about the neighboring objects, if they 1380 * exist: 1381 */ 1382 struct slab *slabp = page_get_slab(virt_to_page(objp)); 1383 int objnr; 1384 1385 objnr = (objp-slabp->s_mem)/cachep->objsize; 1386 if (objnr) { 1387 objp = slabp->s_mem+(objnr-1)*cachep->objsize; 1388 realobj = (char*)objp+obj_dbghead(cachep); 1389 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1390 realobj, size); 1391 print_objinfo(cachep, objp, 2); 1392 } 1393 if (objnr+1 < cachep->num) { 1394 objp = slabp->s_mem+(objnr+1)*cachep->objsize; 1395 realobj = (char*)objp+obj_dbghead(cachep); 1396 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1397 realobj, size); 1398 print_objinfo(cachep, objp, 2); 1399 } 1400 } 1401} 1402#endif 1403 1404/* Destroy all the objs in a slab, and release the mem back to the system. 1405 * Before calling the slab must have been unlinked from the cache. 1406 * The cache-lock is not held/needed. 
1407 */ 1408static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp) 1409{ 1410 void *addr = slabp->s_mem - slabp->colouroff; 1411 1412#if DEBUG 1413 int i; 1414 for (i = 0; i < cachep->num; i++) { 1415 void *objp = slabp->s_mem + cachep->objsize * i; 1416 1417 if (cachep->flags & SLAB_POISON) { 1418#ifdef CONFIG_DEBUG_PAGEALLOC 1419 if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep)) 1420 kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1); 1421 else 1422 check_poison_obj(cachep, objp); 1423#else 1424 check_poison_obj(cachep, objp); 1425#endif 1426 } 1427 if (cachep->flags & SLAB_RED_ZONE) { 1428 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1429 slab_error(cachep, "start of a freed object " 1430 "was overwritten"); 1431 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1432 slab_error(cachep, "end of a freed object " 1433 "was overwritten"); 1434 } 1435 if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1436 (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0); 1437 } 1438#else 1439 if (cachep->dtor) { 1440 int i; 1441 for (i = 0; i < cachep->num; i++) { 1442 void* objp = slabp->s_mem+cachep->objsize*i; 1443 (cachep->dtor)(objp, cachep, 0); 1444 } 1445 } 1446#endif 1447 1448 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1449 struct slab_rcu *slab_rcu; 1450 1451 slab_rcu = (struct slab_rcu *) slabp; 1452 slab_rcu->cachep = cachep; 1453 slab_rcu->addr = addr; 1454 call_rcu(&slab_rcu->head, kmem_rcu_free); 1455 } else { 1456 kmem_freepages(cachep, addr); 1457 if (OFF_SLAB(cachep)) 1458 kmem_cache_free(cachep->slabp_cache, slabp); 1459 } 1460} 1461 1462/* For setting up all the kmem_list3s for cache whose objsize is same 1463 as size of kmem_list3. */ 1464static inline void set_up_list3s(kmem_cache_t *cachep, int index) 1465{ 1466 int node; 1467 1468 for_each_online_node(node) { 1469 cachep->nodelists[node] = &initkmem_list3[index+node]; 1470 cachep->nodelists[node]->next_reap = jiffies + 1471 REAPTIMEOUT_LIST3 + 1472 ((unsigned long)cachep)%REAPTIMEOUT_LIST3; 1473 } 1474} 1475 1476/** 1477 * kmem_cache_create - Create a cache. 1478 * @name: A string which is used in /proc/slabinfo to identify this cache. 1479 * @size: The size of objects to be created in this cache. 1480 * @align: The required alignment for the objects. 1481 * @flags: SLAB flags 1482 * @ctor: A constructor for the objects. 1483 * @dtor: A destructor for the objects. 1484 * 1485 * Returns a ptr to the cache on success, NULL on failure. 1486 * Cannot be called within a int, but can be interrupted. 1487 * The @ctor is run when new pages are allocated by the cache 1488 * and the @dtor is run before the pages are handed back. 1489 * 1490 * @name must be valid until the cache is destroyed. This implies that 1491 * the module calling this has to destroy the cache before getting 1492 * unloaded. 1493 * 1494 * The flags are 1495 * 1496 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 1497 * to catch references to uninitialised memory. 1498 * 1499 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 1500 * for buffer overruns. 1501 * 1502 * %SLAB_NO_REAP - Don't automatically reap this cache when we're under 1503 * memory pressure. 1504 * 1505 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 1506 * cacheline. This can be beneficial if you're counting cycles as closely 1507 * as davem. 
1508 */ 1509kmem_cache_t * 1510kmem_cache_create (const char *name, size_t size, size_t align, 1511 unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long), 1512 void (*dtor)(void*, kmem_cache_t *, unsigned long)) 1513{ 1514 size_t left_over, slab_size, ralign; 1515 kmem_cache_t *cachep = NULL; 1516 struct list_head *p; 1517 1518 /* 1519 * Sanity checks... these are all serious usage bugs. 1520 */ 1521 if ((!name) || 1522 in_interrupt() || 1523 (size < BYTES_PER_WORD) || 1524 (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) || 1525 (dtor && !ctor)) { 1526 printk(KERN_ERR "%s: Early error in slab %s\n", 1527 __FUNCTION__, name); 1528 BUG(); 1529 } 1530 1531 down(&cache_chain_sem); 1532 1533 list_for_each(p, &cache_chain) { 1534 kmem_cache_t *pc = list_entry(p, kmem_cache_t, next); 1535 mm_segment_t old_fs = get_fs(); 1536 char tmp; 1537 int res; 1538 1539 /* 1540 * This happens when the module gets unloaded and doesn't 1541 * destroy its slab cache and no-one else reuses the vmalloc 1542 * area of the module. Print a warning. 1543 */ 1544 set_fs(KERNEL_DS); 1545 res = __get_user(tmp, pc->name); 1546 set_fs(old_fs); 1547 if (res) { 1548 printk("SLAB: cache with size %d has lost its name\n", 1549 pc->objsize); 1550 continue; 1551 } 1552 1553 if (!strcmp(pc->name,name)) { 1554 printk("kmem_cache_create: duplicate cache %s\n", name); 1555 dump_stack(); 1556 goto oops; 1557 } 1558 } 1559 1560#if DEBUG 1561 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 1562 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { 1563 /* No constructor, but inital state check requested */ 1564 printk(KERN_ERR "%s: No con, but init state check " 1565 "requested - %s\n", __FUNCTION__, name); 1566 flags &= ~SLAB_DEBUG_INITIAL; 1567 } 1568 1569#if FORCED_DEBUG 1570 /* 1571 * Enable redzoning and last user accounting, except for caches with 1572 * large objects, if the increased size would increase the object size 1573 * above the next power of two: caches with object sizes just above a 1574 * power of two have a significant amount of internal fragmentation. 1575 */ 1576 if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD))) 1577 flags |= SLAB_RED_ZONE|SLAB_STORE_USER; 1578 if (!(flags & SLAB_DESTROY_BY_RCU)) 1579 flags |= SLAB_POISON; 1580#endif 1581 if (flags & SLAB_DESTROY_BY_RCU) 1582 BUG_ON(flags & SLAB_POISON); 1583#endif 1584 if (flags & SLAB_DESTROY_BY_RCU) 1585 BUG_ON(dtor); 1586 1587 /* 1588 * Always checks flags, a caller might be expecting debug 1589 * support which isn't available. 1590 */ 1591 if (flags & ~CREATE_MASK) 1592 BUG(); 1593 1594 /* Check that size is in terms of words. This is needed to avoid 1595 * unaligned accesses for some archs when redzoning is used, and makes 1596 * sure any on-slab bufctl's are also correctly aligned. 1597 */ 1598 if (size & (BYTES_PER_WORD-1)) { 1599 size += (BYTES_PER_WORD-1); 1600 size &= ~(BYTES_PER_WORD-1); 1601 } 1602 1603 /* calculate out the final buffer alignment: */ 1604 /* 1) arch recommendation: can be overridden for debug */ 1605 if (flags & SLAB_HWCACHE_ALIGN) { 1606 /* Default alignment: as specified by the arch code. 1607 * Except if an object is really small, then squeeze multiple 1608 * objects into one cacheline. 
1609 */ 1610 ralign = cache_line_size(); 1611 while (size <= ralign/2) 1612 ralign /= 2; 1613 } else { 1614 ralign = BYTES_PER_WORD; 1615 } 1616 /* 2) arch mandated alignment: disables debug if necessary */ 1617 if (ralign < ARCH_SLAB_MINALIGN) { 1618 ralign = ARCH_SLAB_MINALIGN; 1619 if (ralign > BYTES_PER_WORD) 1620 flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); 1621 } 1622 /* 3) caller mandated alignment: disables debug if necessary */ 1623 if (ralign < align) { 1624 ralign = align; 1625 if (ralign > BYTES_PER_WORD) 1626 flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); 1627 } 1628 /* 4) Store it. Note that the debug code below can reduce 1629 * the alignment to BYTES_PER_WORD. 1630 */ 1631 align = ralign; 1632 1633 /* Get cache's description obj. */ 1634 cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL); 1635 if (!cachep) 1636 goto oops; 1637 memset(cachep, 0, sizeof(kmem_cache_t)); 1638 1639#if DEBUG 1640 cachep->reallen = size; 1641 1642 if (flags & SLAB_RED_ZONE) { 1643 /* redzoning only works with word aligned caches */ 1644 align = BYTES_PER_WORD; 1645 1646 /* add space for red zone words */ 1647 cachep->dbghead += BYTES_PER_WORD; 1648 size += 2*BYTES_PER_WORD; 1649 } 1650 if (flags & SLAB_STORE_USER) { 1651 /* user store requires word alignment and 1652 * one word storage behind the end of the real 1653 * object. 1654 */ 1655 align = BYTES_PER_WORD; 1656 size += BYTES_PER_WORD; 1657 } 1658#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 1659 if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) { 1660 cachep->dbghead += PAGE_SIZE - size; 1661 size = PAGE_SIZE; 1662 } 1663#endif 1664#endif 1665 1666 /* Determine if the slab management is 'on' or 'off' slab. */ 1667 if (size >= (PAGE_SIZE>>3)) 1668 /* 1669 * Size is large, assume best to place the slab management obj 1670 * off-slab (should allow better packing of objs). 1671 */ 1672 flags |= CFLGS_OFF_SLAB; 1673 1674 size = ALIGN(size, align); 1675 1676 if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) { 1677 /* 1678 * A VFS-reclaimable slab tends to have most allocations 1679 * as GFP_NOFS and we really don't want to have to be allocating 1680 * higher-order pages when we are unable to shrink dcache. 1681 */ 1682 cachep->gfporder = 0; 1683 cache_estimate(cachep->gfporder, size, align, flags, 1684 &left_over, &cachep->num); 1685 } else { 1686 /* 1687 * Calculate size (in pages) of slabs, and the num of objs per 1688 * slab. This could be made much more intelligent. For now, 1689 * try to avoid using high page-orders for slabs. When the 1690 * gfp() funcs are more friendly towards high-order requests, 1691 * this should be changed. 1692 */ 1693 do { 1694 unsigned int break_flag = 0; 1695cal_wastage: 1696 cache_estimate(cachep->gfporder, size, align, flags, 1697 &left_over, &cachep->num); 1698 if (break_flag) 1699 break; 1700 if (cachep->gfporder >= MAX_GFP_ORDER) 1701 break; 1702 if (!cachep->num) 1703 goto next; 1704 if (flags & CFLGS_OFF_SLAB && 1705 cachep->num > offslab_limit) { 1706 /* This num of objs will cause problems. */ 1707 cachep->gfporder--; 1708 break_flag++; 1709 goto cal_wastage; 1710 } 1711 1712 /* 1713 * Large num of objs is good, but v. large slabs are 1714 * currently bad for the gfp()s. 1715 */ 1716 if (cachep->gfporder >= slab_break_gfp_order) 1717 break; 1718 1719 if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder)) 1720 break; /* Acceptable internal fragmentation. 
*/ 1721next: 1722 cachep->gfporder++; 1723 } while (1); 1724 } 1725 1726 if (!cachep->num) { 1727 printk("kmem_cache_create: couldn't create cache %s.\n", name); 1728 kmem_cache_free(&cache_cache, cachep); 1729 cachep = NULL; 1730 goto oops; 1731 } 1732 slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t) 1733 + sizeof(struct slab), align); 1734 1735 /* 1736 * If the slab has been placed off-slab, and we have enough space then 1737 * move it on-slab. This is at the expense of any extra colouring. 1738 */ 1739 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 1740 flags &= ~CFLGS_OFF_SLAB; 1741 left_over -= slab_size; 1742 } 1743 1744 if (flags & CFLGS_OFF_SLAB) { 1745 /* really off slab. No need for manual alignment */ 1746 slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab); 1747 } 1748 1749 cachep->colour_off = cache_line_size(); 1750 /* Offset must be a multiple of the alignment. */ 1751 if (cachep->colour_off < align) 1752 cachep->colour_off = align; 1753 cachep->colour = left_over/cachep->colour_off; 1754 cachep->slab_size = slab_size; 1755 cachep->flags = flags; 1756 cachep->gfpflags = 0; 1757 if (flags & SLAB_CACHE_DMA) 1758 cachep->gfpflags |= GFP_DMA; 1759 spin_lock_init(&cachep->spinlock); 1760 cachep->objsize = size; 1761 1762 if (flags & CFLGS_OFF_SLAB) 1763 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 1764 cachep->ctor = ctor; 1765 cachep->dtor = dtor; 1766 cachep->name = name; 1767 1768 /* Don't let CPUs to come and go */ 1769 lock_cpu_hotplug(); 1770 1771 if (g_cpucache_up == FULL) { 1772 enable_cpucache(cachep); 1773 } else { 1774 if (g_cpucache_up == NONE) { 1775 /* Note: the first kmem_cache_create must create 1776 * the cache that's used by kmalloc(24), otherwise 1777 * the creation of further caches will BUG(). 1778 */ 1779 cachep->array[smp_processor_id()] = 1780 &initarray_generic.cache; 1781 1782 /* If the cache that's used by 1783 * kmalloc(sizeof(kmem_list3)) is the first cache, 1784 * then we need to set up all its list3s, otherwise 1785 * the creation of further caches will BUG(). 
1786 */ 1787 set_up_list3s(cachep, SIZE_AC); 1788 if (INDEX_AC == INDEX_L3) 1789 g_cpucache_up = PARTIAL_L3; 1790 else 1791 g_cpucache_up = PARTIAL_AC; 1792 } else { 1793 cachep->array[smp_processor_id()] = 1794 kmalloc(sizeof(struct arraycache_init), 1795 GFP_KERNEL); 1796 1797 if (g_cpucache_up == PARTIAL_AC) { 1798 set_up_list3s(cachep, SIZE_L3); 1799 g_cpucache_up = PARTIAL_L3; 1800 } else { 1801 int node; 1802 for_each_online_node(node) { 1803 1804 cachep->nodelists[node] = 1805 kmalloc_node(sizeof(struct kmem_list3), 1806 GFP_KERNEL, node); 1807 BUG_ON(!cachep->nodelists[node]); 1808 kmem_list3_init(cachep->nodelists[node]); 1809 } 1810 } 1811 } 1812 cachep->nodelists[numa_node_id()]->next_reap = 1813 jiffies + REAPTIMEOUT_LIST3 + 1814 ((unsigned long)cachep)%REAPTIMEOUT_LIST3; 1815 1816 BUG_ON(!ac_data(cachep)); 1817 ac_data(cachep)->avail = 0; 1818 ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 1819 ac_data(cachep)->batchcount = 1; 1820 ac_data(cachep)->touched = 0; 1821 cachep->batchcount = 1; 1822 cachep->limit = BOOT_CPUCACHE_ENTRIES; 1823 } 1824 1825 /* cache setup completed, link it into the list */ 1826 list_add(&cachep->next, &cache_chain); 1827 unlock_cpu_hotplug(); 1828oops: 1829 if (!cachep && (flags & SLAB_PANIC)) 1830 panic("kmem_cache_create(): failed to create slab `%s'\n", 1831 name); 1832 up(&cache_chain_sem); 1833 return cachep; 1834} 1835EXPORT_SYMBOL(kmem_cache_create); 1836 1837#if DEBUG 1838static void check_irq_off(void) 1839{ 1840 BUG_ON(!irqs_disabled()); 1841} 1842 1843static void check_irq_on(void) 1844{ 1845 BUG_ON(irqs_disabled()); 1846} 1847 1848static void check_spinlock_acquired(kmem_cache_t *cachep) 1849{ 1850#ifdef CONFIG_SMP 1851 check_irq_off(); 1852 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 1853#endif 1854} 1855 1856static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node) 1857{ 1858#ifdef CONFIG_SMP 1859 check_irq_off(); 1860 assert_spin_locked(&cachep->nodelists[node]->list_lock); 1861#endif 1862} 1863 1864#else 1865#define check_irq_off() do { } while(0) 1866#define check_irq_on() do { } while(0) 1867#define check_spinlock_acquired(x) do { } while(0) 1868#define check_spinlock_acquired_node(x, y) do { } while(0) 1869#endif 1870 1871/* 1872 * Waits for all CPUs to execute func(). 
1873 */ 1874static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg) 1875{ 1876 check_irq_on(); 1877 preempt_disable(); 1878 1879 local_irq_disable(); 1880 func(arg); 1881 local_irq_enable(); 1882 1883 if (smp_call_function(func, arg, 1, 1)) 1884 BUG(); 1885 1886 preempt_enable(); 1887} 1888 1889static void drain_array_locked(kmem_cache_t* cachep, 1890 struct array_cache *ac, int force, int node); 1891 1892static void do_drain(void *arg) 1893{ 1894 kmem_cache_t *cachep = (kmem_cache_t*)arg; 1895 struct array_cache *ac; 1896 int node = numa_node_id(); 1897 1898 check_irq_off(); 1899 ac = ac_data(cachep); 1900 spin_lock(&cachep->nodelists[node]->list_lock); 1901 free_block(cachep, ac->entry, ac->avail, node); 1902 spin_unlock(&cachep->nodelists[node]->list_lock); 1903 ac->avail = 0; 1904} 1905 1906static void drain_cpu_caches(kmem_cache_t *cachep) 1907{ 1908 struct kmem_list3 *l3; 1909 int node; 1910 1911 smp_call_function_all_cpus(do_drain, cachep); 1912 check_irq_on(); 1913 spin_lock_irq(&cachep->spinlock); 1914 for_each_online_node(node) { 1915 l3 = cachep->nodelists[node]; 1916 if (l3) { 1917 spin_lock(&l3->list_lock); 1918 drain_array_locked(cachep, l3->shared, 1, node); 1919 spin_unlock(&l3->list_lock); 1920 if (l3->alien) 1921 drain_alien_cache(cachep, l3); 1922 } 1923 } 1924 spin_unlock_irq(&cachep->spinlock); 1925} 1926 1927static int __node_shrink(kmem_cache_t *cachep, int node) 1928{ 1929 struct slab *slabp; 1930 struct kmem_list3 *l3 = cachep->nodelists[node]; 1931 int ret; 1932 1933 for (;;) { 1934 struct list_head *p; 1935 1936 p = l3->slabs_free.prev; 1937 if (p == &l3->slabs_free) 1938 break; 1939 1940 slabp = list_entry(l3->slabs_free.prev, struct slab, list); 1941#if DEBUG 1942 if (slabp->inuse) 1943 BUG(); 1944#endif 1945 list_del(&slabp->list); 1946 1947 l3->free_objects -= cachep->num; 1948 spin_unlock_irq(&l3->list_lock); 1949 slab_destroy(cachep, slabp); 1950 spin_lock_irq(&l3->list_lock); 1951 } 1952 ret = !list_empty(&l3->slabs_full) || 1953 !list_empty(&l3->slabs_partial); 1954 return ret; 1955} 1956 1957static int __cache_shrink(kmem_cache_t *cachep) 1958{ 1959 int ret = 0, i = 0; 1960 struct kmem_list3 *l3; 1961 1962 drain_cpu_caches(cachep); 1963 1964 check_irq_on(); 1965 for_each_online_node(i) { 1966 l3 = cachep->nodelists[i]; 1967 if (l3) { 1968 spin_lock_irq(&l3->list_lock); 1969 ret += __node_shrink(cachep, i); 1970 spin_unlock_irq(&l3->list_lock); 1971 } 1972 } 1973 return (ret ? 1 : 0); 1974} 1975 1976/** 1977 * kmem_cache_shrink - Shrink a cache. 1978 * @cachep: The cache to shrink. 1979 * 1980 * Releases as many slabs as possible for a cache. 1981 * To help debugging, a zero exit status indicates all slabs were released. 1982 */ 1983int kmem_cache_shrink(kmem_cache_t *cachep) 1984{ 1985 if (!cachep || in_interrupt()) 1986 BUG(); 1987 1988 return __cache_shrink(cachep); 1989} 1990EXPORT_SYMBOL(kmem_cache_shrink); 1991 1992/** 1993 * kmem_cache_destroy - delete a cache 1994 * @cachep: the cache to destroy 1995 * 1996 * Remove a kmem_cache_t object from the slab cache. 1997 * Returns 0 on success. 1998 * 1999 * It is expected this function will be called by a module when it is 2000 * unloaded. This will remove the cache completely, and avoid a duplicate 2001 * cache being allocated each time a module is loaded and unloaded, if the 2002 * module doesn't have persistent in-kernel storage across loads and unloads. 2003 * 2004 * The cache must be empty before calling this function. 
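 *
 * A minimal illustrative use from a module's exit path (editor's sketch,
 * not part of the original source; foo_cache is a hypothetical cache):
 *
 *	static kmem_cache_t *foo_cache;
 *
 *	static void __exit foo_exit(void)
 *	{
 *		if (kmem_cache_destroy(foo_cache))
 *			printk(KERN_ERR "foo: cache still has objects\n");
 *	}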
2005 * 2006 * The caller must guarantee that noone will allocate memory from the cache 2007 * during the kmem_cache_destroy(). 2008 */ 2009int kmem_cache_destroy(kmem_cache_t * cachep) 2010{ 2011 int i; 2012 struct kmem_list3 *l3; 2013 2014 if (!cachep || in_interrupt()) 2015 BUG(); 2016 2017 /* Don't let CPUs to come and go */ 2018 lock_cpu_hotplug(); 2019 2020 /* Find the cache in the chain of caches. */ 2021 down(&cache_chain_sem); 2022 /* 2023 * the chain is never empty, cache_cache is never destroyed 2024 */ 2025 list_del(&cachep->next); 2026 up(&cache_chain_sem); 2027 2028 if (__cache_shrink(cachep)) { 2029 slab_error(cachep, "Can't free all objects"); 2030 down(&cache_chain_sem); 2031 list_add(&cachep->next,&cache_chain); 2032 up(&cache_chain_sem); 2033 unlock_cpu_hotplug(); 2034 return 1; 2035 } 2036 2037 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2038 synchronize_rcu(); 2039 2040 for_each_online_cpu(i) 2041 kfree(cachep->array[i]); 2042 2043 /* NUMA: free the list3 structures */ 2044 for_each_online_node(i) { 2045 if ((l3 = cachep->nodelists[i])) { 2046 kfree(l3->shared); 2047 free_alien_cache(l3->alien); 2048 kfree(l3); 2049 } 2050 } 2051 kmem_cache_free(&cache_cache, cachep); 2052 2053 unlock_cpu_hotplug(); 2054 2055 return 0; 2056} 2057EXPORT_SYMBOL(kmem_cache_destroy); 2058 2059/* Get the memory for a slab management obj. */ 2060static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, 2061 int colour_off, gfp_t local_flags) 2062{ 2063 struct slab *slabp; 2064 2065 if (OFF_SLAB(cachep)) { 2066 /* Slab management obj is off-slab. */ 2067 slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); 2068 if (!slabp) 2069 return NULL; 2070 } else { 2071 slabp = objp+colour_off; 2072 colour_off += cachep->slab_size; 2073 } 2074 slabp->inuse = 0; 2075 slabp->colouroff = colour_off; 2076 slabp->s_mem = objp+colour_off; 2077 2078 return slabp; 2079} 2080 2081static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2082{ 2083 return (kmem_bufctl_t *)(slabp+1); 2084} 2085 2086static void cache_init_objs(kmem_cache_t *cachep, 2087 struct slab *slabp, unsigned long ctor_flags) 2088{ 2089 int i; 2090 2091 for (i = 0; i < cachep->num; i++) { 2092 void *objp = slabp->s_mem+cachep->objsize*i; 2093#if DEBUG 2094 /* need to poison the objs? */ 2095 if (cachep->flags & SLAB_POISON) 2096 poison_obj(cachep, objp, POISON_FREE); 2097 if (cachep->flags & SLAB_STORE_USER) 2098 *dbg_userword(cachep, objp) = NULL; 2099 2100 if (cachep->flags & SLAB_RED_ZONE) { 2101 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2102 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2103 } 2104 /* 2105 * Constructors are not allowed to allocate memory from 2106 * the same cache which they are a constructor for. 2107 * Otherwise, deadlock. They must also be threaded. 
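 * With SLAB_POISON set the constructor is not run here: the object is
 * poisoned instead, and the constructor is applied at allocation time
 * (see cache_alloc_debugcheck_after()).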
2108 */ 2109 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2110 cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags); 2111 2112 if (cachep->flags & SLAB_RED_ZONE) { 2113 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2114 slab_error(cachep, "constructor overwrote the" 2115 " end of an object"); 2116 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2117 slab_error(cachep, "constructor overwrote the" 2118 " start of an object"); 2119 } 2120 if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2121 kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); 2122#else 2123 if (cachep->ctor) 2124 cachep->ctor(objp, cachep, ctor_flags); 2125#endif 2126 slab_bufctl(slabp)[i] = i+1; 2127 } 2128 slab_bufctl(slabp)[i-1] = BUFCTL_END; 2129 slabp->free = 0; 2130} 2131 2132static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags) 2133{ 2134 if (flags & SLAB_DMA) { 2135 if (!(cachep->gfpflags & GFP_DMA)) 2136 BUG(); 2137 } else { 2138 if (cachep->gfpflags & GFP_DMA) 2139 BUG(); 2140 } 2141} 2142 2143static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) 2144{ 2145 int i; 2146 struct page *page; 2147 2148 /* Nasty!!!!!! I hope this is OK. */ 2149 i = 1 << cachep->gfporder; 2150 page = virt_to_page(objp); 2151 do { 2152 page_set_cache(page, cachep); 2153 page_set_slab(page, slabp); 2154 page++; 2155 } while (--i); 2156} 2157 2158/* 2159 * Grow (by 1) the number of slabs within a cache. This is called by 2160 * kmem_cache_alloc() when there are no active objs left in a cache. 2161 */ 2162static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2163{ 2164 struct slab *slabp; 2165 void *objp; 2166 size_t offset; 2167 gfp_t local_flags; 2168 unsigned long ctor_flags; 2169 struct kmem_list3 *l3; 2170 2171 /* Be lazy and only check for valid flags here, 2172 * keeping it out of the critical path in kmem_cache_alloc(). 2173 */ 2174 if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW)) 2175 BUG(); 2176 if (flags & SLAB_NO_GROW) 2177 return 0; 2178 2179 ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2180 local_flags = (flags & SLAB_LEVEL_MASK); 2181 if (!(local_flags & __GFP_WAIT)) 2182 /* 2183 * Not allowed to sleep. Need to tell a constructor about 2184 * this - it might need to know... 2185 */ 2186 ctor_flags |= SLAB_CTOR_ATOMIC; 2187 2188 /* About to mess with non-constant members - lock. */ 2189 check_irq_off(); 2190 spin_lock(&cachep->spinlock); 2191 2192 /* Get colour for the slab, and cal the next value. */ 2193 offset = cachep->colour_next; 2194 cachep->colour_next++; 2195 if (cachep->colour_next >= cachep->colour) 2196 cachep->colour_next = 0; 2197 offset *= cachep->colour_off; 2198 2199 spin_unlock(&cachep->spinlock); 2200 2201 check_irq_off(); 2202 if (local_flags & __GFP_WAIT) 2203 local_irq_enable(); 2204 2205 /* 2206 * The test for missing atomic flag is performed here, rather than 2207 * the more obvious place, simply to reduce the critical path length 2208 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2209 * will eventually be caught here (where it matters). 2210 */ 2211 kmem_flagcheck(cachep, flags); 2212 2213 /* Get mem for the objs. 2214 * Attempt to allocate a physical page from 'nodeid', 2215 */ 2216 if (!(objp = kmem_getpages(cachep, flags, nodeid))) 2217 goto failed; 2218 2219 /* Get slab management. 
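	 * alloc_slabmgmt() places the struct slab and its bufctl array
	 * either at the start of the slab itself or, for OFF_SLAB caches,
	 * in a separate general cache (cachep->slabp_cache).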
*/ 2220 if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags))) 2221 goto opps1; 2222 2223 slabp->nodeid = nodeid; 2224 set_slab_attr(cachep, slabp, objp); 2225 2226 cache_init_objs(cachep, slabp, ctor_flags); 2227 2228 if (local_flags & __GFP_WAIT) 2229 local_irq_disable(); 2230 check_irq_off(); 2231 l3 = cachep->nodelists[nodeid]; 2232 spin_lock(&l3->list_lock); 2233 2234 /* Make slab active. */ 2235 list_add_tail(&slabp->list, &(l3->slabs_free)); 2236 STATS_INC_GROWN(cachep); 2237 l3->free_objects += cachep->num; 2238 spin_unlock(&l3->list_lock); 2239 return 1; 2240opps1: 2241 kmem_freepages(cachep, objp); 2242failed: 2243 if (local_flags & __GFP_WAIT) 2244 local_irq_disable(); 2245 return 0; 2246} 2247 2248#if DEBUG 2249 2250/* 2251 * Perform extra freeing checks: 2252 * - detect bad pointers. 2253 * - POISON/RED_ZONE checking 2254 * - destructor calls, for caches with POISON+dtor 2255 */ 2256static void kfree_debugcheck(const void *objp) 2257{ 2258 struct page *page; 2259 2260 if (!virt_addr_valid(objp)) { 2261 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2262 (unsigned long)objp); 2263 BUG(); 2264 } 2265 page = virt_to_page(objp); 2266 if (!PageSlab(page)) { 2267 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp); 2268 BUG(); 2269 } 2270} 2271 2272static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, 2273 void *caller) 2274{ 2275 struct page *page; 2276 unsigned int objnr; 2277 struct slab *slabp; 2278 2279 objp -= obj_dbghead(cachep); 2280 kfree_debugcheck(objp); 2281 page = virt_to_page(objp); 2282 2283 if (page_get_cache(page) != cachep) { 2284 printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n", 2285 page_get_cache(page),cachep); 2286 printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); 2287 printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name); 2288 WARN_ON(1); 2289 } 2290 slabp = page_get_slab(page); 2291 2292 if (cachep->flags & SLAB_RED_ZONE) { 2293 if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { 2294 slab_error(cachep, "double free, or memory outside" 2295 " object was overwritten"); 2296 printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", 2297 objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); 2298 } 2299 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2300 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2301 } 2302 if (cachep->flags & SLAB_STORE_USER) 2303 *dbg_userword(cachep, objp) = caller; 2304 2305 objnr = (objp-slabp->s_mem)/cachep->objsize; 2306 2307 BUG_ON(objnr >= cachep->num); 2308 BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize); 2309 2310 if (cachep->flags & SLAB_DEBUG_INITIAL) { 2311 /* Need to call the slab's constructor so the 2312 * caller can perform a verify of its state (debugging). 2313 * Called without the cache-lock held. 
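	 * SLAB_CTOR_VERIFY tells the constructor that it is being invoked
	 * only to check the object's state, not to initialise it again.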
2314 */ 2315 cachep->ctor(objp+obj_dbghead(cachep), 2316 cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY); 2317 } 2318 if (cachep->flags & SLAB_POISON && cachep->dtor) { 2319 /* we want to cache poison the object, 2320 * call the destruction callback 2321 */ 2322 cachep->dtor(objp+obj_dbghead(cachep), cachep, 0); 2323 } 2324 if (cachep->flags & SLAB_POISON) { 2325#ifdef CONFIG_DEBUG_PAGEALLOC 2326 if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) { 2327 store_stackinfo(cachep, objp, (unsigned long)caller); 2328 kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); 2329 } else { 2330 poison_obj(cachep, objp, POISON_FREE); 2331 } 2332#else 2333 poison_obj(cachep, objp, POISON_FREE); 2334#endif 2335 } 2336 return objp; 2337} 2338 2339static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) 2340{ 2341 kmem_bufctl_t i; 2342 int entries = 0; 2343 2344 /* Check slab's freelist to see if this obj is there. */ 2345 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2346 entries++; 2347 if (entries > cachep->num || i >= cachep->num) 2348 goto bad; 2349 } 2350 if (entries != cachep->num - slabp->inuse) { 2351bad: 2352 printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", 2353 cachep->name, cachep->num, slabp, slabp->inuse); 2354 for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) { 2355 if ((i%16)==0) 2356 printk("\n%03x:", i); 2357 printk(" %02x", ((unsigned char*)slabp)[i]); 2358 } 2359 printk("\n"); 2360 BUG(); 2361 } 2362} 2363#else 2364#define kfree_debugcheck(x) do { } while(0) 2365#define cache_free_debugcheck(x,objp,z) (objp) 2366#define check_slabp(x,y) do { } while(0) 2367#endif 2368 2369static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) 2370{ 2371 int batchcount; 2372 struct kmem_list3 *l3; 2373 struct array_cache *ac; 2374 2375 check_irq_off(); 2376 ac = ac_data(cachep); 2377retry: 2378 batchcount = ac->batchcount; 2379 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2380 /* if there was little recent activity on this 2381 * cache, then perform only a partial refill. 2382 * Otherwise we could generate refill bouncing. 2383 */ 2384 batchcount = BATCHREFILL_LIMIT; 2385 } 2386 l3 = cachep->nodelists[numa_node_id()]; 2387 2388 BUG_ON(ac->avail > 0 || !l3); 2389 spin_lock(&l3->list_lock); 2390 2391 if (l3->shared) { 2392 struct array_cache *shared_array = l3->shared; 2393 if (shared_array->avail) { 2394 if (batchcount > shared_array->avail) 2395 batchcount = shared_array->avail; 2396 shared_array->avail -= batchcount; 2397 ac->avail = batchcount; 2398 memcpy(ac->entry, 2399 &(shared_array->entry[shared_array->avail]), 2400 sizeof(void*)*batchcount); 2401 shared_array->touched = 1; 2402 goto alloc_done; 2403 } 2404 } 2405 while (batchcount > 0) { 2406 struct list_head *entry; 2407 struct slab *slabp; 2408 /* Get slab alloc is to come from. 
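	 * Prefer a partial slab; fall back to a free slab, and grow the
	 * cache below if neither is available.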
*/ 2409 entry = l3->slabs_partial.next; 2410 if (entry == &l3->slabs_partial) { 2411 l3->free_touched = 1; 2412 entry = l3->slabs_free.next; 2413 if (entry == &l3->slabs_free) 2414 goto must_grow; 2415 } 2416 2417 slabp = list_entry(entry, struct slab, list); 2418 check_slabp(cachep, slabp); 2419 check_spinlock_acquired(cachep); 2420 while (slabp->inuse < cachep->num && batchcount--) { 2421 kmem_bufctl_t next; 2422 STATS_INC_ALLOCED(cachep); 2423 STATS_INC_ACTIVE(cachep); 2424 STATS_SET_HIGH(cachep); 2425 2426 /* get obj pointer */ 2427 ac->entry[ac->avail++] = slabp->s_mem + 2428 slabp->free*cachep->objsize; 2429 2430 slabp->inuse++; 2431 next = slab_bufctl(slabp)[slabp->free]; 2432#if DEBUG 2433 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2434 WARN_ON(numa_node_id() != slabp->nodeid); 2435#endif 2436 slabp->free = next; 2437 } 2438 check_slabp(cachep, slabp); 2439 2440 /* move slabp to correct slabp list: */ 2441 list_del(&slabp->list); 2442 if (slabp->free == BUFCTL_END) 2443 list_add(&slabp->list, &l3->slabs_full); 2444 else 2445 list_add(&slabp->list, &l3->slabs_partial); 2446 } 2447 2448must_grow: 2449 l3->free_objects -= ac->avail; 2450alloc_done: 2451 spin_unlock(&l3->list_lock); 2452 2453 if (unlikely(!ac->avail)) { 2454 int x; 2455 x = cache_grow(cachep, flags, numa_node_id()); 2456 2457 // cache_grow can reenable interrupts, then ac could change. 2458 ac = ac_data(cachep); 2459 if (!x && ac->avail == 0) // no objects in sight? abort 2460 return NULL; 2461 2462 if (!ac->avail) // objects refilled by interrupt? 2463 goto retry; 2464 } 2465 ac->touched = 1; 2466 return ac->entry[--ac->avail]; 2467} 2468 2469static inline void 2470cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) 2471{ 2472 might_sleep_if(flags & __GFP_WAIT); 2473#if DEBUG 2474 kmem_flagcheck(cachep, flags); 2475#endif 2476} 2477 2478#if DEBUG 2479static void * 2480cache_alloc_debugcheck_after(kmem_cache_t *cachep, 2481 gfp_t flags, void *objp, void *caller) 2482{ 2483 if (!objp) 2484 return objp; 2485 if (cachep->flags & SLAB_POISON) { 2486#ifdef CONFIG_DEBUG_PAGEALLOC 2487 if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 2488 kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1); 2489 else 2490 check_poison_obj(cachep, objp); 2491#else 2492 check_poison_obj(cachep, objp); 2493#endif 2494 poison_obj(cachep, objp, POISON_INUSE); 2495 } 2496 if (cachep->flags & SLAB_STORE_USER) 2497 *dbg_userword(cachep, objp) = caller; 2498 2499 if (cachep->flags & SLAB_RED_ZONE) { 2500 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 2501 slab_error(cachep, "double free, or memory outside" 2502 " object was overwritten"); 2503 printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", 2504 objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); 2505 } 2506 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2507 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2508 } 2509 objp += obj_dbghead(cachep); 2510 if (cachep->ctor && cachep->flags & SLAB_POISON) { 2511 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2512 2513 if (!(flags & __GFP_WAIT)) 2514 ctor_flags |= SLAB_CTOR_ATOMIC; 2515 2516 cachep->ctor(objp, cachep, ctor_flags); 2517 } 2518 return objp; 2519} 2520#else 2521#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2522#endif 2523 2524static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2525{ 2526 void* objp; 2527 struct array_cache *ac; 2528 2529 check_irq_off(); 2530 ac = ac_data(cachep); 2531 if 
(likely(ac->avail)) { 2532 STATS_INC_ALLOCHIT(cachep); 2533 ac->touched = 1; 2534 objp = ac->entry[--ac->avail]; 2535 } else { 2536 STATS_INC_ALLOCMISS(cachep); 2537 objp = cache_alloc_refill(cachep, flags); 2538 } 2539 return objp; 2540} 2541 2542static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2543{ 2544 unsigned long save_flags; 2545 void* objp; 2546 2547 cache_alloc_debugcheck_before(cachep, flags); 2548 2549 local_irq_save(save_flags); 2550 objp = ____cache_alloc(cachep, flags); 2551 local_irq_restore(save_flags); 2552 objp = cache_alloc_debugcheck_after(cachep, flags, objp, 2553 __builtin_return_address(0)); 2554 prefetchw(objp); 2555 return objp; 2556} 2557 2558#ifdef CONFIG_NUMA 2559/* 2560 * A interface to enable slab creation on nodeid 2561 */ 2562static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2563{ 2564 struct list_head *entry; 2565 struct slab *slabp; 2566 struct kmem_list3 *l3; 2567 void *obj; 2568 kmem_bufctl_t next; 2569 int x; 2570 2571 l3 = cachep->nodelists[nodeid]; 2572 BUG_ON(!l3); 2573 2574retry: 2575 spin_lock(&l3->list_lock); 2576 entry = l3->slabs_partial.next; 2577 if (entry == &l3->slabs_partial) { 2578 l3->free_touched = 1; 2579 entry = l3->slabs_free.next; 2580 if (entry == &l3->slabs_free) 2581 goto must_grow; 2582 } 2583 2584 slabp = list_entry(entry, struct slab, list); 2585 check_spinlock_acquired_node(cachep, nodeid); 2586 check_slabp(cachep, slabp); 2587 2588 STATS_INC_NODEALLOCS(cachep); 2589 STATS_INC_ACTIVE(cachep); 2590 STATS_SET_HIGH(cachep); 2591 2592 BUG_ON(slabp->inuse == cachep->num); 2593 2594 /* get obj pointer */ 2595 obj = slabp->s_mem + slabp->free*cachep->objsize; 2596 slabp->inuse++; 2597 next = slab_bufctl(slabp)[slabp->free]; 2598#if DEBUG 2599 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2600#endif 2601 slabp->free = next; 2602 check_slabp(cachep, slabp); 2603 l3->free_objects--; 2604 /* move slabp to correct slabp list: */ 2605 list_del(&slabp->list); 2606 2607 if (slabp->free == BUFCTL_END) { 2608 list_add(&slabp->list, &l3->slabs_full); 2609 } else { 2610 list_add(&slabp->list, &l3->slabs_partial); 2611 } 2612 2613 spin_unlock(&l3->list_lock); 2614 goto done; 2615 2616must_grow: 2617 spin_unlock(&l3->list_lock); 2618 x = cache_grow(cachep, flags, nodeid); 2619 2620 if (!x) 2621 return NULL; 2622 2623 goto retry; 2624done: 2625 return obj; 2626} 2627#endif 2628 2629/* 2630 * Caller needs to acquire correct kmem_list's list_lock 2631 */ 2632static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node) 2633{ 2634 int i; 2635 struct kmem_list3 *l3; 2636 2637 for (i = 0; i < nr_objects; i++) { 2638 void *objp = objpp[i]; 2639 struct slab *slabp; 2640 unsigned int objnr; 2641 2642 slabp = page_get_slab(virt_to_page(objp)); 2643 l3 = cachep->nodelists[node]; 2644 list_del(&slabp->list); 2645 objnr = (objp - slabp->s_mem) / cachep->objsize; 2646 check_spinlock_acquired_node(cachep, node); 2647 check_slabp(cachep, slabp); 2648 2649#if DEBUG 2650 /* Verify that the slab belongs to the intended node */ 2651 WARN_ON(slabp->nodeid != node); 2652 2653 if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { 2654 printk(KERN_ERR "slab: double free detected in cache " 2655 "'%s', objp %p\n", cachep->name, objp); 2656 BUG(); 2657 } 2658#endif 2659 slab_bufctl(slabp)[objnr] = slabp->free; 2660 slabp->free = objnr; 2661 STATS_DEC_ACTIVE(cachep); 2662 slabp->inuse--; 2663 l3->free_objects++; 2664 check_slabp(cachep, slabp); 2665 2666 /* fixup slab chains */ 2667 if (slabp->inuse == 0) { 
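			/* Slab is now entirely free: give its pages back to
			 * the page allocator if the node already holds more
			 * than free_limit free objects, otherwise keep the
			 * slab cached on the slabs_free list for reuse.
			 */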
2668 if (l3->free_objects > l3->free_limit) { 2669 l3->free_objects -= cachep->num; 2670 slab_destroy(cachep, slabp); 2671 } else { 2672 list_add(&slabp->list, &l3->slabs_free); 2673 } 2674 } else { 2675 /* Unconditionally move a slab to the end of the 2676 * partial list on free - maximum time for the 2677 * other objects to be freed, too. 2678 */ 2679 list_add_tail(&slabp->list, &l3->slabs_partial); 2680 } 2681 } 2682} 2683 2684static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) 2685{ 2686 int batchcount; 2687 struct kmem_list3 *l3; 2688 int node = numa_node_id(); 2689 2690 batchcount = ac->batchcount; 2691#if DEBUG 2692 BUG_ON(!batchcount || batchcount > ac->avail); 2693#endif 2694 check_irq_off(); 2695 l3 = cachep->nodelists[node]; 2696 spin_lock(&l3->list_lock); 2697 if (l3->shared) { 2698 struct array_cache *shared_array = l3->shared; 2699 int max = shared_array->limit-shared_array->avail; 2700 if (max) { 2701 if (batchcount > max) 2702 batchcount = max; 2703 memcpy(&(shared_array->entry[shared_array->avail]), 2704 ac->entry, 2705 sizeof(void*)*batchcount); 2706 shared_array->avail += batchcount; 2707 goto free_done; 2708 } 2709 } 2710 2711 free_block(cachep, ac->entry, batchcount, node); 2712free_done: 2713#if STATS 2714 { 2715 int i = 0; 2716 struct list_head *p; 2717 2718 p = l3->slabs_free.next; 2719 while (p != &(l3->slabs_free)) { 2720 struct slab *slabp; 2721 2722 slabp = list_entry(p, struct slab, list); 2723 BUG_ON(slabp->inuse); 2724 2725 i++; 2726 p = p->next; 2727 } 2728 STATS_SET_FREEABLE(cachep, i); 2729 } 2730#endif 2731 spin_unlock(&l3->list_lock); 2732 ac->avail -= batchcount; 2733 memmove(ac->entry, &(ac->entry[batchcount]), 2734 sizeof(void*)*ac->avail); 2735} 2736 2737 2738/* 2739 * __cache_free 2740 * Release an obj back to its cache. If the obj has a constructed 2741 * state, it must be in this state _before_ it is released. 2742 * 2743 * Called with disabled ints. 2744 */ 2745static inline void __cache_free(kmem_cache_t *cachep, void *objp) 2746{ 2747 struct array_cache *ac = ac_data(cachep); 2748 2749 check_irq_off(); 2750 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 2751 2752 /* Make sure we are not freeing a object from another 2753 * node to the array cache on this cpu. 2754 */ 2755#ifdef CONFIG_NUMA 2756 { 2757 struct slab *slabp; 2758 slabp = page_get_slab(virt_to_page(objp)); 2759 if (unlikely(slabp->nodeid != numa_node_id())) { 2760 struct array_cache *alien = NULL; 2761 int nodeid = slabp->nodeid; 2762 struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()]; 2763 2764 STATS_INC_NODEFREES(cachep); 2765 if (l3->alien && l3->alien[nodeid]) { 2766 alien = l3->alien[nodeid]; 2767 spin_lock(&alien->lock); 2768 if (unlikely(alien->avail == alien->limit)) 2769 __drain_alien_cache(cachep, 2770 alien, nodeid); 2771 alien->entry[alien->avail++] = objp; 2772 spin_unlock(&alien->lock); 2773 } else { 2774 spin_lock(&(cachep->nodelists[nodeid])-> 2775 list_lock); 2776 free_block(cachep, &objp, 1, nodeid); 2777 spin_unlock(&(cachep->nodelists[nodeid])-> 2778 list_lock); 2779 } 2780 return; 2781 } 2782 } 2783#endif 2784 if (likely(ac->avail < ac->limit)) { 2785 STATS_INC_FREEHIT(cachep); 2786 ac->entry[ac->avail++] = objp; 2787 return; 2788 } else { 2789 STATS_INC_FREEMISS(cachep); 2790 cache_flusharray(cachep, ac); 2791 ac->entry[ac->avail++] = objp; 2792 } 2793} 2794 2795/** 2796 * kmem_cache_alloc - Allocate an object 2797 * @cachep: The cache to allocate from. 2798 * @flags: See kmalloc(). 
2799 * 2800 * Allocate an object from this cache. The flags are only relevant 2801 * if the cache has no available objects. 2802 */ 2803void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2804{ 2805 return __cache_alloc(cachep, flags); 2806} 2807EXPORT_SYMBOL(kmem_cache_alloc); 2808 2809/** 2810 * kmem_ptr_validate - check if an untrusted pointer might 2811 * be a slab entry. 2812 * @cachep: the cache we're checking against 2813 * @ptr: pointer to validate 2814 * 2815 * This verifies that the untrusted pointer looks sane: 2816 * it is _not_ a guarantee that the pointer is actually 2817 * part of the slab cache in question, but it at least 2818 * validates that the pointer can be dereferenced and 2819 * looks half-way sane. 2820 * 2821 * Currently only used for dentry validation. 2822 */ 2823int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) 2824{ 2825 unsigned long addr = (unsigned long) ptr; 2826 unsigned long min_addr = PAGE_OFFSET; 2827 unsigned long align_mask = BYTES_PER_WORD-1; 2828 unsigned long size = cachep->objsize; 2829 struct page *page; 2830 2831 if (unlikely(addr < min_addr)) 2832 goto out; 2833 if (unlikely(addr > (unsigned long)high_memory - size)) 2834 goto out; 2835 if (unlikely(addr & align_mask)) 2836 goto out; 2837 if (unlikely(!kern_addr_valid(addr))) 2838 goto out; 2839 if (unlikely(!kern_addr_valid(addr + size - 1))) 2840 goto out; 2841 page = virt_to_page(ptr); 2842 if (unlikely(!PageSlab(page))) 2843 goto out; 2844 if (unlikely(page_get_cache(page) != cachep)) 2845 goto out; 2846 return 1; 2847out: 2848 return 0; 2849} 2850 2851#ifdef CONFIG_NUMA 2852/** 2853 * kmem_cache_alloc_node - Allocate an object on the specified node 2854 * @cachep: The cache to allocate from. 2855 * @flags: See kmalloc(). 2856 * @nodeid: node number of the target node. 2857 * 2858 * Identical to kmem_cache_alloc, except that this function is slow 2859 * and can sleep. And it will allocate memory on the given node, which 2860 * can improve the performance for cpu bound structures. 2861 * New and improved: it will now make sure that the object gets 2862 * put on the correct node list so that there is no false sharing. 2863 */ 2864void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2865{ 2866 unsigned long save_flags; 2867 void *ptr; 2868 2869 if (nodeid == -1) 2870 return __cache_alloc(cachep, flags); 2871 2872 if (unlikely(!cachep->nodelists[nodeid])) { 2873 /* Fall back to __cache_alloc if we run into trouble */ 2874 printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name); 2875 return __cache_alloc(cachep,flags); 2876 } 2877 2878 cache_alloc_debugcheck_before(cachep, flags); 2879 local_irq_save(save_flags); 2880 if (nodeid == numa_node_id()) 2881 ptr = ____cache_alloc(cachep, flags); 2882 else 2883 ptr = __cache_alloc_node(cachep, flags, nodeid); 2884 local_irq_restore(save_flags); 2885 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); 2886 2887 return ptr; 2888} 2889EXPORT_SYMBOL(kmem_cache_alloc_node); 2890 2891void *kmalloc_node(size_t size, gfp_t flags, int node) 2892{ 2893 kmem_cache_t *cachep; 2894 2895 cachep = kmem_find_general_cachep(size, flags); 2896 if (unlikely(cachep == NULL)) 2897 return NULL; 2898 return kmem_cache_alloc_node(cachep, flags, node); 2899} 2900EXPORT_SYMBOL(kmalloc_node); 2901#endif 2902 2903/** 2904 * kmalloc - allocate memory 2905 * @size: how many bytes of memory are required. 2906 * @flags: the type of memory to allocate. 
2907 * 2908 * kmalloc is the normal method of allocating memory 2909 * in the kernel. 2910 * 2911 * The @flags argument may be one of: 2912 * 2913 * %GFP_USER - Allocate memory on behalf of user. May sleep. 2914 * 2915 * %GFP_KERNEL - Allocate normal kernel ram. May sleep. 2916 * 2917 * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers. 2918 * 2919 * Additionally, the %GFP_DMA flag may be set to indicate the memory 2920 * must be suitable for DMA. This can mean different things on different 2921 * platforms. For example, on i386, it means that the memory must come 2922 * from the first 16MB. 2923 */ 2924void *__kmalloc(size_t size, gfp_t flags) 2925{ 2926 kmem_cache_t *cachep; 2927 2928 /* If you want to save a few bytes .text space: replace 2929 * __ with kmem_. 2930 * Then kmalloc uses the uninlined functions instead of the inline 2931 * functions. 2932 */ 2933 cachep = __find_general_cachep(size, flags); 2934 if (unlikely(cachep == NULL)) 2935 return NULL; 2936 return __cache_alloc(cachep, flags); 2937} 2938EXPORT_SYMBOL(__kmalloc); 2939 2940#ifdef CONFIG_SMP 2941/** 2942 * __alloc_percpu - allocate one copy of the object for every present 2943 * cpu in the system, zeroing them. 2944 * Objects should be dereferenced using the per_cpu_ptr macro only. 2945 * 2946 * @size: how many bytes of memory are required. 2947 * @align: the alignment, which can't be greater than SMP_CACHE_BYTES. 2948 */ 2949void *__alloc_percpu(size_t size, size_t align) 2950{ 2951 int i; 2952 struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL); 2953 2954 if (!pdata) 2955 return NULL; 2956 2957 /* 2958 * Cannot use for_each_online_cpu since a cpu may come online 2959 * and we have no way of figuring out how to fix the array 2960 * that we have allocated then.... 2961 */ 2962 for_each_cpu(i) { 2963 int node = cpu_to_node(i); 2964 2965 if (node_online(node)) 2966 pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node); 2967 else 2968 pdata->ptrs[i] = kmalloc(size, GFP_KERNEL); 2969 2970 if (!pdata->ptrs[i]) 2971 goto unwind_oom; 2972 memset(pdata->ptrs[i], 0, size); 2973 } 2974 2975 /* Catch derefs w/o wrappers */ 2976 return (void *) (~(unsigned long) pdata); 2977 2978unwind_oom: 2979 while (--i >= 0) { 2980 if (!cpu_possible(i)) 2981 continue; 2982 kfree(pdata->ptrs[i]); 2983 } 2984 kfree(pdata); 2985 return NULL; 2986} 2987EXPORT_SYMBOL(__alloc_percpu); 2988#endif 2989 2990/** 2991 * kmem_cache_free - Deallocate an object 2992 * @cachep: The cache the allocation was from. 2993 * @objp: The previously allocated object. 2994 * 2995 * Free an object which was previously allocated from this 2996 * cache. 2997 */ 2998void kmem_cache_free(kmem_cache_t *cachep, void *objp) 2999{ 3000 unsigned long flags; 3001 3002 local_irq_save(flags); 3003 __cache_free(cachep, objp); 3004 local_irq_restore(flags); 3005} 3006EXPORT_SYMBOL(kmem_cache_free); 3007 3008/** 3009 * kzalloc - allocate memory. The memory is set to zero. 3010 * @size: how many bytes of memory are required. 3011 * @flags: the type of memory to allocate. 3012 */ 3013void *kzalloc(size_t size, gfp_t flags) 3014{ 3015 void *ret = kmalloc(size, flags); 3016 if (ret) 3017 memset(ret, 0, size); 3018 return ret; 3019} 3020EXPORT_SYMBOL(kzalloc); 3021 3022/** 3023 * kfree - free previously allocated memory 3024 * @objp: pointer returned by kmalloc. 3025 * 3026 * If @objp is NULL, no operation is performed. 3027 * 3028 * Don't free memory not originally allocated by kmalloc() 3029 * or you will run into trouble. 
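 *
 * A minimal illustrative pairing with kmalloc() (editor's sketch, not part
 * of the original source):
 *
 *	char *buf = kmalloc(64, GFP_KERNEL);
 *	if (buf) {
 *		... use buf ...
 *		kfree(buf);
 *	}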
3030 */ 3031void kfree(const void *objp) 3032{ 3033 kmem_cache_t *c; 3034 unsigned long flags; 3035 3036 if (unlikely(!objp)) 3037 return; 3038 local_irq_save(flags); 3039 kfree_debugcheck(objp); 3040 c = page_get_cache(virt_to_page(objp)); 3041 __cache_free(c, (void*)objp); 3042 local_irq_restore(flags); 3043} 3044EXPORT_SYMBOL(kfree); 3045 3046#ifdef CONFIG_SMP 3047/** 3048 * free_percpu - free previously allocated percpu memory 3049 * @objp: pointer returned by alloc_percpu. 3050 * 3051 * Don't free memory not originally allocated by alloc_percpu() 3052 * The complemented objp is to check for that. 3053 */ 3054void 3055free_percpu(const void *objp) 3056{ 3057 int i; 3058 struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp); 3059 3060 /* 3061 * We allocate for all cpus so we cannot use for online cpu here. 3062 */ 3063 for_each_cpu(i) 3064 kfree(p->ptrs[i]); 3065 kfree(p); 3066} 3067EXPORT_SYMBOL(free_percpu); 3068#endif 3069 3070unsigned int kmem_cache_size(kmem_cache_t *cachep) 3071{ 3072 return obj_reallen(cachep); 3073} 3074EXPORT_SYMBOL(kmem_cache_size); 3075 3076const char *kmem_cache_name(kmem_cache_t *cachep) 3077{ 3078 return cachep->name; 3079} 3080EXPORT_SYMBOL_GPL(kmem_cache_name); 3081 3082/* 3083 * This initializes kmem_list3 for all nodes. 3084 */ 3085static int alloc_kmemlist(kmem_cache_t *cachep) 3086{ 3087 int node; 3088 struct kmem_list3 *l3; 3089 int err = 0; 3090 3091 for_each_online_node(node) { 3092 struct array_cache *nc = NULL, *new; 3093 struct array_cache **new_alien = NULL; 3094#ifdef CONFIG_NUMA 3095 if (!(new_alien = alloc_alien_cache(node, cachep->limit))) 3096 goto fail; 3097#endif 3098 if (!(new = alloc_arraycache(node, (cachep->shared* 3099 cachep->batchcount), 0xbaadf00d))) 3100 goto fail; 3101 if ((l3 = cachep->nodelists[node])) { 3102 3103 spin_lock_irq(&l3->list_lock); 3104 3105 if ((nc = cachep->nodelists[node]->shared)) 3106 free_block(cachep, nc->entry, 3107 nc->avail, node); 3108 3109 l3->shared = new; 3110 if (!cachep->nodelists[node]->alien) { 3111 l3->alien = new_alien; 3112 new_alien = NULL; 3113 } 3114 l3->free_limit = (1 + nr_cpus_node(node))* 3115 cachep->batchcount + cachep->num; 3116 spin_unlock_irq(&l3->list_lock); 3117 kfree(nc); 3118 free_alien_cache(new_alien); 3119 continue; 3120 } 3121 if (!(l3 = kmalloc_node(sizeof(struct kmem_list3), 3122 GFP_KERNEL, node))) 3123 goto fail; 3124 3125 kmem_list3_init(l3); 3126 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3127 ((unsigned long)cachep)%REAPTIMEOUT_LIST3; 3128 l3->shared = new; 3129 l3->alien = new_alien; 3130 l3->free_limit = (1 + nr_cpus_node(node))* 3131 cachep->batchcount + cachep->num; 3132 cachep->nodelists[node] = l3; 3133 } 3134 return err; 3135fail: 3136 err = -ENOMEM; 3137 return err; 3138} 3139 3140struct ccupdate_struct { 3141 kmem_cache_t *cachep; 3142 struct array_cache *new[NR_CPUS]; 3143}; 3144 3145static void do_ccupdate_local(void *info) 3146{ 3147 struct ccupdate_struct *new = (struct ccupdate_struct *)info; 3148 struct array_cache *old; 3149 3150 check_irq_off(); 3151 old = ac_data(new->cachep); 3152 3153 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3154 new->new[smp_processor_id()] = old; 3155} 3156 3157 3158static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, 3159 int shared) 3160{ 3161 struct ccupdate_struct new; 3162 int i, err; 3163 3164 memset(&new.new,0,sizeof(new.new)); 3165 for_each_online_cpu(i) { 3166 new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); 3167 if 
(!new.new[i]) { 3168 for (i--; i >= 0; i--) kfree(new.new[i]); 3169 return -ENOMEM; 3170 } 3171 } 3172 new.cachep = cachep; 3173 3174 smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); 3175 3176 check_irq_on(); 3177 spin_lock_irq(&cachep->spinlock); 3178 cachep->batchcount = batchcount; 3179 cachep->limit = limit; 3180 cachep->shared = shared; 3181 spin_unlock_irq(&cachep->spinlock); 3182 3183 for_each_online_cpu(i) { 3184 struct array_cache *ccold = new.new[i]; 3185 if (!ccold) 3186 continue; 3187 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3188 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3189 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3190 kfree(ccold); 3191 } 3192 3193 err = alloc_kmemlist(cachep); 3194 if (err) { 3195 printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", 3196 cachep->name, -err); 3197 BUG(); 3198 } 3199 return 0; 3200} 3201 3202 3203static void enable_cpucache(kmem_cache_t *cachep) 3204{ 3205 int err; 3206 int limit, shared; 3207 3208 /* The head array serves three purposes: 3209 * - create a LIFO ordering, i.e. return objects that are cache-warm 3210 * - reduce the number of spinlock operations. 3211 * - reduce the number of linked list operations on the slab and 3212 * bufctl chains: array operations are cheaper. 3213 * The numbers are guessed, we should auto-tune as described by 3214 * Bonwick. 3215 */ 3216 if (cachep->objsize > 131072) 3217 limit = 1; 3218 else if (cachep->objsize > PAGE_SIZE) 3219 limit = 8; 3220 else if (cachep->objsize > 1024) 3221 limit = 24; 3222 else if (cachep->objsize > 256) 3223 limit = 54; 3224 else 3225 limit = 120; 3226 3227 /* Cpu bound tasks (e.g. network routing) can exhibit cpu bound 3228 * allocation behaviour: Most allocs on one cpu, most free operations 3229 * on another cpu. For these cases, an efficient object passing between 3230 * cpus is necessary. This is provided by a shared array. The array 3231 * replaces Bonwick's magazine layer. 3232 * On uniprocessor, it's functionally equivalent (but less efficient) 3233 * to a larger limit. Thus disabled by default. 3234 */ 3235 shared = 0; 3236#ifdef CONFIG_SMP 3237 if (cachep->objsize <= PAGE_SIZE) 3238 shared = 8; 3239#endif 3240 3241#if DEBUG 3242 /* With debugging enabled, large batchcount lead to excessively 3243 * long periods with disabled local interrupts. Limit the 3244 * batchcount 3245 */ 3246 if (limit > 32) 3247 limit = 32; 3248#endif 3249 err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared); 3250 if (err) 3251 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3252 cachep->name, -err); 3253} 3254 3255static void drain_array_locked(kmem_cache_t *cachep, 3256 struct array_cache *ac, int force, int node) 3257{ 3258 int tofree; 3259 3260 check_spinlock_acquired_node(cachep, node); 3261 if (ac->touched && !force) { 3262 ac->touched = 0; 3263 } else if (ac->avail) { 3264 tofree = force ? ac->avail : (ac->limit+4)/5; 3265 if (tofree > ac->avail) { 3266 tofree = (ac->avail+1)/2; 3267 } 3268 free_block(cachep, ac->entry, tofree, node); 3269 ac->avail -= tofree; 3270 memmove(ac->entry, &(ac->entry[tofree]), 3271 sizeof(void*)*ac->avail); 3272 } 3273} 3274 3275/** 3276 * cache_reap - Reclaim memory from caches. 3277 * @unused: unused parameter 3278 * 3279 * Called from workqueue/eventd every few seconds. 3280 * Purpose: 3281 * - clear the per-cpu caches for this CPU. 3282 * - return freeable pages to the main free memory pool. 
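 *
 * The work item reschedules itself with a REAPTIMEOUT_CPUC delay on every
 * path out of this function, so reaping repeats periodically on each cpu.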
3283 * 3284 * If we cannot acquire the cache chain semaphore then just give up - we'll 3285 * try again on the next iteration. 3286 */ 3287static void cache_reap(void *unused) 3288{ 3289 struct list_head *walk; 3290 struct kmem_list3 *l3; 3291 3292 if (down_trylock(&cache_chain_sem)) { 3293 /* Give up. Setup the next iteration. */ 3294 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 3295 return; 3296 } 3297 3298 list_for_each(walk, &cache_chain) { 3299 kmem_cache_t *searchp; 3300 struct list_head* p; 3301 int tofree; 3302 struct slab *slabp; 3303 3304 searchp = list_entry(walk, kmem_cache_t, next); 3305 3306 if (searchp->flags & SLAB_NO_REAP) 3307 goto next; 3308 3309 check_irq_on(); 3310 3311 l3 = searchp->nodelists[numa_node_id()]; 3312 if (l3->alien) 3313 drain_alien_cache(searchp, l3); 3314 spin_lock_irq(&l3->list_lock); 3315 3316 drain_array_locked(searchp, ac_data(searchp), 0, 3317 numa_node_id()); 3318 3319 if (time_after(l3->next_reap, jiffies)) 3320 goto next_unlock; 3321 3322 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 3323 3324 if (l3->shared) 3325 drain_array_locked(searchp, l3->shared, 0, 3326 numa_node_id()); 3327 3328 if (l3->free_touched) { 3329 l3->free_touched = 0; 3330 goto next_unlock; 3331 } 3332 3333 tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num); 3334 do { 3335 p = l3->slabs_free.next; 3336 if (p == &(l3->slabs_free)) 3337 break; 3338 3339 slabp = list_entry(p, struct slab, list); 3340 BUG_ON(slabp->inuse); 3341 list_del(&slabp->list); 3342 STATS_INC_REAPED(searchp); 3343 3344 /* Safe to drop the lock. The slab is no longer 3345 * linked to the cache. 3346 * searchp cannot disappear, we hold 3347 * cache_chain_lock 3348 */ 3349 l3->free_objects -= searchp->num; 3350 spin_unlock_irq(&l3->list_lock); 3351 slab_destroy(searchp, slabp); 3352 spin_lock_irq(&l3->list_lock); 3353 } while(--tofree > 0); 3354next_unlock: 3355 spin_unlock_irq(&l3->list_lock); 3356next: 3357 cond_resched(); 3358 } 3359 check_irq_on(); 3360 up(&cache_chain_sem); 3361 drain_remote_pages(); 3362 /* Setup the next iteration */ 3363 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 3364} 3365 3366#ifdef CONFIG_PROC_FS 3367 3368static void *s_start(struct seq_file *m, loff_t *pos) 3369{ 3370 loff_t n = *pos; 3371 struct list_head *p; 3372 3373 down(&cache_chain_sem); 3374 if (!n) { 3375 /* 3376 * Output format version, so at least we can change it 3377 * without _too_ many complaints. 3378 */ 3379#if STATS 3380 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); 3381#else 3382 seq_puts(m, "slabinfo - version: 2.1\n"); 3383#endif 3384 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); 3385 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 3386 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 3387#if STATS 3388 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>" 3389 " <error> <maxfreeable> <nodeallocs> <remotefrees>"); 3390 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); 3391#endif 3392 seq_putc(m, '\n'); 3393 } 3394 p = cache_chain.next; 3395 while (n--) { 3396 p = p->next; 3397 if (p == &cache_chain) 3398 return NULL; 3399 } 3400 return list_entry(p, kmem_cache_t, next); 3401} 3402 3403static void *s_next(struct seq_file *m, void *p, loff_t *pos) 3404{ 3405 kmem_cache_t *cachep = p; 3406 ++*pos; 3407 return cachep->next.next == &cache_chain ? 
NULL 3408 : list_entry(cachep->next.next, kmem_cache_t, next); 3409} 3410 3411static void s_stop(struct seq_file *m, void *p) 3412{ 3413 up(&cache_chain_sem); 3414} 3415 3416static int s_show(struct seq_file *m, void *p) 3417{ 3418 kmem_cache_t *cachep = p; 3419 struct list_head *q; 3420 struct slab *slabp; 3421 unsigned long active_objs; 3422 unsigned long num_objs; 3423 unsigned long active_slabs = 0; 3424 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 3425 const char *name; 3426 char *error = NULL; 3427 int node; 3428 struct kmem_list3 *l3; 3429 3430 check_irq_on(); 3431 spin_lock_irq(&cachep->spinlock); 3432 active_objs = 0; 3433 num_slabs = 0; 3434 for_each_online_node(node) { 3435 l3 = cachep->nodelists[node]; 3436 if (!l3) 3437 continue; 3438 3439 spin_lock(&l3->list_lock); 3440 3441 list_for_each(q,&l3->slabs_full) { 3442 slabp = list_entry(q, struct slab, list); 3443 if (slabp->inuse != cachep->num && !error) 3444 error = "slabs_full accounting error"; 3445 active_objs += cachep->num; 3446 active_slabs++; 3447 } 3448 list_for_each(q,&l3->slabs_partial) { 3449 slabp = list_entry(q, struct slab, list); 3450 if (slabp->inuse == cachep->num && !error) 3451 error = "slabs_partial inuse accounting error"; 3452 if (!slabp->inuse && !error) 3453 error = "slabs_partial/inuse accounting error"; 3454 active_objs += slabp->inuse; 3455 active_slabs++; 3456 } 3457 list_for_each(q,&l3->slabs_free) { 3458 slabp = list_entry(q, struct slab, list); 3459 if (slabp->inuse && !error) 3460 error = "slabs_free/inuse accounting error"; 3461 num_slabs++; 3462 } 3463 free_objects += l3->free_objects; 3464 shared_avail += l3->shared->avail; 3465 3466 spin_unlock(&l3->list_lock); 3467 } 3468 num_slabs+=active_slabs; 3469 num_objs = num_slabs*cachep->num; 3470 if (num_objs - active_objs != free_objects && !error) 3471 error = "free_objects accounting error"; 3472 3473 name = cachep->name; 3474 if (error) 3475 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 3476 3477 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", 3478 name, active_objs, num_objs, cachep->objsize, 3479 cachep->num, (1<<cachep->gfporder)); 3480 seq_printf(m, " : tunables %4u %4u %4u", 3481 cachep->limit, cachep->batchcount, 3482 cachep->shared); 3483 seq_printf(m, " : slabdata %6lu %6lu %6lu", 3484 active_slabs, num_slabs, shared_avail); 3485#if STATS 3486 { /* list3 stats */ 3487 unsigned long high = cachep->high_mark; 3488 unsigned long allocs = cachep->num_allocations; 3489 unsigned long grown = cachep->grown; 3490 unsigned long reaped = cachep->reaped; 3491 unsigned long errors = cachep->errors; 3492 unsigned long max_freeable = cachep->max_freeable; 3493 unsigned long node_allocs = cachep->node_allocs; 3494 unsigned long node_frees = cachep->node_frees; 3495 3496 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ 3497 %4lu %4lu %4lu %4lu", 3498 allocs, high, grown, reaped, errors, 3499 max_freeable, node_allocs, node_frees); 3500 } 3501 /* cpu stats */ 3502 { 3503 unsigned long allochit = atomic_read(&cachep->allochit); 3504 unsigned long allocmiss = atomic_read(&cachep->allocmiss); 3505 unsigned long freehit = atomic_read(&cachep->freehit); 3506 unsigned long freemiss = atomic_read(&cachep->freemiss); 3507 3508 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", 3509 allochit, allocmiss, freehit, freemiss); 3510 } 3511#endif 3512 seq_putc(m, '\n'); 3513 spin_unlock_irq(&cachep->spinlock); 3514 return 0; 3515} 3516 3517/* 3518 * slabinfo_op - iterator that generates /proc/slabinfo 3519 * 3520 * Output layout: 3521 * 
cache-name 3522 * num-active-objs 3523 * total-objs 3524 * object size 3525 * num-active-slabs 3526 * total-slabs 3527 * num-pages-per-slab 3528 * + further values on SMP and with statistics enabled 3529 */ 3530 3531struct seq_operations slabinfo_op = { 3532 .start = s_start, 3533 .next = s_next, 3534 .stop = s_stop, 3535 .show = s_show, 3536}; 3537 3538#define MAX_SLABINFO_WRITE 128 3539/** 3540 * slabinfo_write - Tuning for the slab allocator 3541 * @file: unused 3542 * @buffer: user buffer 3543 * @count: data length 3544 * @ppos: unused 3545 */ 3546ssize_t slabinfo_write(struct file *file, const char __user *buffer, 3547 size_t count, loff_t *ppos) 3548{ 3549 char kbuf[MAX_SLABINFO_WRITE+1], *tmp; 3550 int limit, batchcount, shared, res; 3551 struct list_head *p; 3552 3553 if (count > MAX_SLABINFO_WRITE) 3554 return -EINVAL; 3555 if (copy_from_user(&kbuf, buffer, count)) 3556 return -EFAULT; 3557 kbuf[MAX_SLABINFO_WRITE] = '\0'; 3558 3559 tmp = strchr(kbuf, ' '); 3560 if (!tmp) 3561 return -EINVAL; 3562 *tmp = '\0'; 3563 tmp++; 3564 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) 3565 return -EINVAL; 3566 3567 /* Find the cache in the chain of caches. */ 3568 down(&cache_chain_sem); 3569 res = -EINVAL; 3570 list_for_each(p,&cache_chain) { 3571 kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); 3572 3573 if (!strcmp(cachep->name, kbuf)) { 3574 if (limit < 1 || 3575 batchcount < 1 || 3576 batchcount > limit || 3577 shared < 0) { 3578 res = 0; 3579 } else { 3580 res = do_tune_cpucache(cachep, limit, 3581 batchcount, shared); 3582 } 3583 break; 3584 } 3585 } 3586 up(&cache_chain_sem); 3587 if (res >= 0) 3588 res = count; 3589 return res; 3590} 3591#endif 3592 3593/** 3594 * ksize - get the actual amount of memory allocated for a given object 3595 * @objp: Pointer to the object 3596 * 3597 * kmalloc may internally round up allocations and return more memory 3598 * than requested. ksize() can be used to determine the actual amount of 3599 * memory allocated. The caller may use this additional memory, even though 3600 * a smaller amount of memory was initially specified with the kmalloc call. 3601 * The caller must guarantee that objp points to a valid object previously 3602 * allocated with either kmalloc() or kmem_cache_alloc(). The object 3603 * must not be freed during the duration of the call. 3604 */ 3605unsigned int ksize(const void *objp) 3606{ 3607 if (unlikely(objp == NULL)) 3608 return 0; 3609 3610 return obj_reallen(page_get_cache(virt_to_page(objp))); 3611} 3612 3613 3614/* 3615 * kstrdup - allocate space for and copy an existing string 3616 * 3617 * @s: the string to duplicate 3618 * @gfp: the GFP mask used in the kmalloc() call when allocating memory 3619 */ 3620char *kstrdup(const char *s, gfp_t gfp) 3621{ 3622 size_t len; 3623 char *buf; 3624 3625 if (!s) 3626 return NULL; 3627 3628 len = strlen(s) + 1; 3629 buf = kmalloc(len, gfp); 3630 if (buf) 3631 memcpy(buf, s, len); 3632 return buf; 3633} 3634EXPORT_SYMBOL(kstrdup);
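
/*
 * Editor's note: an illustrative, hypothetical use of the cache API defined
 * above (not part of the original source; struct foo and foo_cache are made
 * up for the example).  A subsystem typically creates one cache per object
 * type at init time, allocates and frees objects from it at run time, and
 * destroys the cache on module unload (see the example in the
 * kmem_cache_destroy() documentation above):
 *
 *	struct foo { int bar; };
 *	static kmem_cache_t *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					SLAB_HWCACHE_ALIGN, NULL, NULL);
 *		return foo_cache ? 0 : -ENOMEM;
 *	}
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */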