Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/slob.c at v2.6.32 (699 lines, 18 kB)
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
        slobidx_t units;
};
typedef struct slob_block slob_t;
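
/*
 * Worked example of the encoding above (illustrative values, assuming
 * 4 KB pages): PAGE_SIZE = 4096 is <= 32767 * 2, so slobidx_t is s16
 * and one SLOB_UNIT is sizeof(slob_t) = 2 bytes.  A 100-byte payload
 * then occupies SLOB_UNITS(100) = 50 units.  A free block of 3 units at
 * unit index 100 whose next free block sits at unit index 200 is stored
 * as { s[0].units = 3, s[1].units = 200 }; were it a single unit, it
 * would be stored as { s[0].units = -200 } instead.
 */
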
/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
        union {
                struct {
                        unsigned long flags;    /* mandatory */
                        atomic_t _count;        /* mandatory */
                        slobidx_t units;        /* free units left in page */
                        unsigned long pad[2];
                        slob_t *free;           /* first free slob_t in page */
                        struct list_head list;  /* linked list of free pages */
                };
                struct page page;
        };
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
        reset_page_mapcount(&sp->page);
        sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct slob_page *sp)
{
        return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
        __SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
        __ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
        return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
        return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
        list_add(&sp->list, list);
        __SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
        list_del(&sp->list);
        __ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
        struct rcu_head head;
        int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {
                s[0].units = size;
                s[1].units = offset;
        } else
                s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
        if (s->units > 0)
                return s->units;
        return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t next;

        if (s[0].units < 0)
                next = -s[0].units;
        else
                next = s[1].units;
        return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
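
/*
 * A short sketch of how the helpers above round-trip (illustrative,
 * assuming the 2-byte units from the example earlier).  With b the
 * first slob_t of a page:
 *
 *      set_slob(b + 10, 3, b + 20);
 *
 * writes b[10].units = 3 and b[11].units = 20, since the offset is
 * measured from the page base.  slob_next(b + 10) masks the address
 * back to the page base and returns base + 20, and slob_units(b + 10)
 * returns 3.  slob_last() is true only when the encoded next offset is
 * 0, i.e. when "next" wraps around to the page base itself.
 */
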
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
        void *page;

#ifdef CONFIG_NUMA
        if (node != -1)
                page = alloc_pages_exact_node(node, gfp, order);
        else
#endif
                page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
        free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);

        for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);

                if (align) {
                        aligned = (slob_t *)ALIGN((unsigned long)cur, align);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
                        slob_t *next;

                        if (delta) { /* need to fragment head to align? */
                                next = slob_next(cur);
                                set_slob(aligned, avail - delta, next);
                                set_slob(cur, delta, aligned);
                                prev = cur;
                                cur = aligned;
                                avail = slob_units(cur);
                        }

                        next = slob_next(cur);
                        if (avail == units) { /* exact fit? unlink. */
                                if (prev)
                                        set_slob(prev, slob_units(prev), next);
                                else
                                        sp->free = next;
                        } else { /* fragment */
                                if (prev)
                                        set_slob(prev, slob_units(prev), cur + units);
                                else
                                        sp->free = cur + units;
                                set_slob(cur + units, avail - units, next);
                        }

                        sp->units -= units;
                        if (!sp->units)
                                clear_slob_page_free(sp);
                        return cur;
                }
                if (slob_last(cur))
                        return NULL;
        }
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
        struct slob_page *sp;
        struct list_head *prev;
        struct list_head *slob_list;
        slob_t *b = NULL;
        unsigned long flags;

        if (size < SLOB_BREAK1)
                slob_list = &free_slob_small;
        else if (size < SLOB_BREAK2)
                slob_list = &free_slob_medium;
        else
                slob_list = &free_slob_large;

        spin_lock_irqsave(&slob_lock, flags);
        /* Iterate through each partially free page, try to find room */
        list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
                /*
                 * If there's a node specification, search for a partial
                 * page with a matching node id in the freelist.
                 */
                if (node != -1 && page_to_nid(&sp->page) != node)
                        continue;
#endif
                /* Enough room on this page? */
                if (sp->units < SLOB_UNITS(size))
                        continue;

                /* Attempt to alloc */
                prev = sp->list.prev;
                b = slob_page_alloc(sp, size, align);
                if (!b)
                        continue;

                /* Improve fragment distribution and reduce our average
                 * search time by starting our next search here. (see
                 * Knuth vol 1, sec 2.5, pg 449) */
                if (prev != slob_list->prev &&
                                slob_list->next != prev->next)
                        list_move_tail(slob_list, prev->next);
                break;
        }
        spin_unlock_irqrestore(&slob_lock, flags);

        /* Not enough space: must allocate a new page */
        if (!b) {
                b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
                if (!b)
                        return NULL;
                sp = slob_page(b);
                set_slob_page(sp);

                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->free = b;
                INIT_LIST_HEAD(&sp->list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
                b = slob_page_alloc(sp, size, align);
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
        if (unlikely((gfp & __GFP_ZERO) && b))
                memset(b, 0, size);
        return b;
}
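
/*
 * Example of the alignment path in slob_page_alloc() (illustrative
 * numbers, 2-byte units assumed): a request for units = 30 with
 * align = 8 that lands on a free block at byte offset 6 within the page
 * computes aligned = byte offset 8, so delta = 1 unit.  If the block
 * has avail >= 31 units, a 1-unit head fragment is split off via
 * set_slob() and the allocation proceeds from the aligned address; an
 * exact fit then unlinks the block, otherwise the tail remainder is
 * re-linked into the page's free list.
 */
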
/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
        struct slob_page *sp;
        slob_t *prev, *next, *b = (slob_t *)block;
        slobidx_t units;
        unsigned long flags;

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
        BUG_ON(!size);

        sp = slob_page(block);
        units = SLOB_UNITS(size);

        spin_lock_irqsave(&slob_lock, flags);

        if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
                /* Go directly to page allocator. Do not pass slob allocator */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                spin_unlock_irqrestore(&slob_lock, flags);
                clear_slob_page(sp);
                free_slob_page(sp);
                slob_free_pages(b, 0);
                return;
        }

        if (!slob_page_free(sp)) {
                /* This slob page is about to become partially free. Easy! */
                sp->units = units;
                sp->free = b;
                set_slob(b, units,
                        (void *)((unsigned long)(b +
                                        SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
                set_slob_page_free(sp, &free_slob_small);
                goto out;
        }

        /*
         * Otherwise the page is already partially free, so find reinsertion
         * point.
         */
        sp->units += units;

        if (b < sp->free) {
                if (b + units == sp->free) {
                        units += slob_units(sp->free);
                        sp->free = slob_next(sp->free);
                }
                set_slob(b, units, sp->free);
                sp->free = b;
        } else {
                prev = sp->free;
                next = slob_next(prev);
                while (b > next) {
                        prev = next;
                        next = slob_next(prev);
                }

                if (!slob_last(prev) && b + units == next) {
                        units += slob_units(next);
                        set_slob(b, units, slob_next(next));
                } else
                        set_slob(b, units, next);

                if (prev + slob_units(prev) == b) {
                        units = slob_units(b) + slob_units(prev);
                        set_slob(prev, units, slob_next(b));
                } else
                        set_slob(prev, slob_units(prev), b);
        }
out:
        spin_unlock_irqrestore(&slob_lock, flags);
}
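
/*
 * Freeing walks the address-ordered free list and merges with the
 * neighbours (a worked case with illustrative values): freeing b of
 * 10 units when the next free block starts exactly at b + 10 absorbs
 * that block (units += slob_units(next)), and if the previous free
 * block ends exactly at b, the two are merged again through prev.  A
 * page whose free units would return to SLOB_UNITS(PAGE_SIZE) is
 * handed straight back to the page allocator instead.
 */
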
/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
        unsigned int *m;
        int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        void *ret;

        lockdep_trace_alloc(gfp);

        if (size < PAGE_SIZE - align) {
                if (!size)
                        return ZERO_SIZE_PTR;

                m = slob_alloc(size + align, gfp, align, node);

                if (!m)
                        return NULL;
                *m = size;
                ret = (void *)m + align;

                trace_kmalloc_node(_RET_IP_, ret,
                                   size, size + align, gfp, node);
        } else {
                unsigned int order = get_order(size);

                ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
                if (ret) {
                        struct page *page;
                        page = virt_to_page(ret);
                        page->private = size;
                }

                trace_kmalloc_node(_RET_IP_, ret,
                                   size, PAGE_SIZE << order, gfp, node);
        }

        kmemleak_alloc(ret, size, 1, gfp);
        return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
        struct slob_page *sp;

        trace_kfree(_RET_IP_, block);

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
        kmemleak_free(block);

        sp = slob_page(block);
        if (is_slob_page(sp)) {
                int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
        } else
                put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
        struct slob_page *sp;

        BUG_ON(!block);
        if (unlikely(block == ZERO_SIZE_PTR))
                return 0;

        sp = slob_page(block);
        if (is_slob_page(sp)) {
                int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                return SLOB_UNITS(*m) * SLOB_UNIT;
        } else
                return sp->page.private;
}
EXPORT_SYMBOL(ksize);
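
/*
 * kmalloc header layout (a sketch, assuming a 64-bit build where
 * ARCH_KMALLOC_MINALIGN = __alignof__(unsigned long) = 8): kmalloc(100)
 * asks slob_alloc() for 108 bytes, stores 100 in the unsigned int at
 * the block start, and returns the address 8 bytes in.  kfree() steps
 * back those 8 bytes to recover the stored size and frees all 108
 * bytes, while ksize() reports SLOB_UNITS(100) * SLOB_UNIT.  Requests
 * of PAGE_SIZE - align and up bypass this header entirely and go to the
 * page allocator with __GFP_COMP, the size being kept in page->private.
 */
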
struct kmem_cache {
        unsigned int size, align;
        unsigned long flags;
        const char *name;
        void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        size_t align, unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *c;

        c = slob_alloc(sizeof(struct kmem_cache),
                GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

        if (c) {
                c->name = name;
                c->size = size;
                if (flags & SLAB_DESTROY_BY_RCU) {
                        /* leave room for rcu footer at the end of object */
                        c->size += sizeof(struct slob_rcu);
                }
                c->flags = flags;
                c->ctor = ctor;
                /* ignore alignment unless it's forced */
                c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
                if (c->align < ARCH_SLAB_MINALIGN)
                        c->align = ARCH_SLAB_MINALIGN;
                if (c->align < align)
                        c->align = align;
        } else if (flags & SLAB_PANIC)
                panic("Cannot create slab cache %s\n", name);

        kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
        return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
        kmemleak_free(c);
        if (c->flags & SLAB_DESTROY_BY_RCU)
                rcu_barrier();
        slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
        void *b;

        if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
                                            SLOB_UNITS(c->size) * SLOB_UNIT,
                                            flags, node);
        } else {
                b = slob_new_pages(flags, get_order(c->size), node);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
                                            PAGE_SIZE << get_order(c->size),
                                            flags, node);
        }

        if (c->ctor)
                c->ctor(b);

        kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
        return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
        if (size < PAGE_SIZE)
                slob_free(b, size);
        else
                slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

        __kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
        kmemleak_free_recursive(b, c->flags);
        if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
                INIT_RCU_HEAD(&slob_rcu->head);
                slob_rcu->size = c->size;
                call_rcu(&slob_rcu->head, kmem_rcu_free);
        } else {
                __kmem_cache_free(b, c->size);
        }

        trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
        return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
        return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
        return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
        return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
        return slob_ready;
}

void __init kmem_cache_init(void)
{
        slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
        /* Nothing to do */
}
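
/*
 * Usage example (a minimal sketch, not from the kernel tree): how the
 * kmem_cache frontend above is typically driven by a client.  The names
 * `struct foo` and `foo_ctor` are hypothetical, for illustration only.
 *
 *      struct foo {
 *              int a;
 *              struct list_head link;
 *      };
 *
 *      static void foo_ctor(void *obj)
 *      {
 *              struct foo *f = obj;
 *
 *              f->a = 0;
 *              INIT_LIST_HEAD(&f->link);
 *      }
 *
 *      static struct kmem_cache *foo_cache;
 *
 *      foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                    SLAB_HWCACHE_ALIGN, foo_ctor);
 *      struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, -1);
 *      ...
 *      kmem_cache_free(foo_cache, f);
 *      kmem_cache_destroy(foo_cache);
 *
 * With SLAB_HWCACHE_ALIGN set, kmem_cache_create() above picks
 * SLOB_ALIGN (L1_CACHE_BYTES); otherwise objects get only the minimal
 * ARCH_SLAB_MINALIGN alignment, and the ctor runs on every allocation.
 */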