Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
"New and noteworthy:

* More SLAB allocator unification patches from Christoph Lameter and
others. This paves the way for slab memcg patches that hopefully
will land in v3.8.

* SLAB tracing improvements from Ezequiel Garcia.

* Kernel tainting upon SLAB corruption from Dave Jones.

* Miscellaneous SLAB allocator bug fixes and improvements from various
people."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (43 commits)
slab: Fix build failure in __kmem_cache_create()
slub: init_kmem_cache_cpus() and put_cpu_partial() can be static
mm/slab: Fix kmem_cache_alloc_node_trace() declaration
Revert "mm/slab: Fix kmem_cache_alloc_node_trace() declaration"
mm, slob: fix build breakage in __kmalloc_node_track_caller
mm/slab: Fix kmem_cache_alloc_node_trace() declaration
mm/slab: Fix typo _RET_IP -> _RET_IP_
mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
mm, slab: Rename __cache_alloc() -> slab_alloc()
mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype
mm, slab: Replace 'caller' type, void* -> unsigned long
mm, slob: Add support for kmalloc_track_caller()
mm, slab: Remove silly function slab_buffer_size()
mm, slob: Use NUMA_NO_NODE instead of -1
mm, sl[au]b: Taint kernel when we detect a corrupted slab
slab: Only define slab_error for DEBUG
slab: fix the DEADLOCK issue on l3 alien lock
slub: Zero initial memory segment for kmem_cache and kmem_cache_node
Revert "mm/sl[aou]b: Move sysfs_slab_add to common"
mm/sl[aou]b: Move kmem_cache refcounting to common code
...

+466 -439
+4 -2
include/linux/slab.h
··· 321 321 * request comes from. 322 322 */ 323 323 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ 324 - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) 324 + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ 325 + (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) 325 326 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); 326 327 #define kmalloc_track_caller(size, flags) \ 327 328 __kmalloc_track_caller(size, flags, _RET_IP_) ··· 341 340 * allocation request comes from. 342 341 */ 343 342 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ 344 - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) 343 + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ 344 + (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) 345 345 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); 346 346 #define kmalloc_node_track_caller(size, flags, node) \ 347 347 __kmalloc_node_track_caller(size, flags, node, \
+10 -17
include/linux/slab_def.h
··· 45 45 unsigned int colour_off; /* colour offset */ 46 46 struct kmem_cache *slabp_cache; 47 47 unsigned int slab_size; 48 - unsigned int dflags; /* dynamic flags */ 49 48 50 49 /* constructor func */ 51 50 void (*ctor)(void *obj); ··· 111 112 void *__kmalloc(size_t size, gfp_t flags); 112 113 113 114 #ifdef CONFIG_TRACING 114 - extern void *kmem_cache_alloc_trace(size_t size, 115 - struct kmem_cache *cachep, gfp_t flags); 116 - extern size_t slab_buffer_size(struct kmem_cache *cachep); 115 + extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); 117 116 #else 118 117 static __always_inline void * 119 - kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags) 118 + kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 120 119 { 121 120 return kmem_cache_alloc(cachep, flags); 122 - } 123 - static inline size_t slab_buffer_size(struct kmem_cache *cachep) 124 - { 125 - return 0; 126 121 } 127 122 #endif 128 123 ··· 147 154 #endif 148 155 cachep = malloc_sizes[i].cs_cachep; 149 156 150 - ret = kmem_cache_alloc_trace(size, cachep, flags); 157 + ret = kmem_cache_alloc_trace(cachep, flags, size); 151 158 152 159 return ret; 153 160 } ··· 159 166 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 160 167 161 168 #ifdef CONFIG_TRACING 162 - extern void *kmem_cache_alloc_node_trace(size_t size, 163 - struct kmem_cache *cachep, 169 + extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 164 170 gfp_t flags, 165 - int nodeid); 171 + int nodeid, 172 + size_t size); 166 173 #else 167 174 static __always_inline void * 168 - kmem_cache_alloc_node_trace(size_t size, 169 - struct kmem_cache *cachep, 175 + kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 170 176 gfp_t flags, 171 - int nodeid) 177 + int nodeid, 178 + size_t size) 172 179 { 173 180 return kmem_cache_alloc_node(cachep, flags, nodeid); 174 181 } ··· 200 207 #endif 201 208 cachep = malloc_sizes[i].cs_cachep; 202 
209 203 - return kmem_cache_alloc_node_trace(size, cachep, flags, node); 210 + return kmem_cache_alloc_node_trace(cachep, flags, node, size); 204 211 } 205 212 return __kmalloc_node(size, flags, node); 206 213 }
+4 -2
include/linux/slob_def.h
··· 1 1 #ifndef __LINUX_SLOB_DEF_H 2 2 #define __LINUX_SLOB_DEF_H 3 3 4 + #include <linux/numa.h> 5 + 4 6 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 5 7 6 8 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, 7 9 gfp_t flags) 8 10 { 9 - return kmem_cache_alloc_node(cachep, flags, -1); 11 + return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE); 10 12 } 11 13 12 14 void *__kmalloc_node(size_t size, gfp_t flags, int node); ··· 28 26 */ 29 27 static __always_inline void *kmalloc(size_t size, gfp_t flags) 30 28 { 31 - return __kmalloc_node(size, flags, -1); 29 + return __kmalloc_node(size, flags, NUMA_NO_NODE); 32 30 } 33 31 34 32 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
+144 -206
mm/slab.c
··· 498 498 499 499 #endif 500 500 501 - #ifdef CONFIG_TRACING 502 - size_t slab_buffer_size(struct kmem_cache *cachep) 503 - { 504 - return cachep->size; 505 - } 506 - EXPORT_SYMBOL(slab_buffer_size); 507 - #endif 508 - 509 501 /* 510 502 * Do not go above this order unless 0 objects fit into the slab or 511 503 * overridden on the command line. ··· 506 514 #define SLAB_MAX_ORDER_LO 0 507 515 static int slab_max_order = SLAB_MAX_ORDER_LO; 508 516 static bool slab_max_order_set __initdata; 509 - 510 - static inline struct kmem_cache *page_get_cache(struct page *page) 511 - { 512 - page = compound_head(page); 513 - BUG_ON(!PageSlab(page)); 514 - return page->slab_cache; 515 - } 516 517 517 518 static inline struct kmem_cache *virt_to_cache(const void *obj) 518 519 { ··· 570 585 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 571 586 572 587 /* internal cache of cache description objs */ 573 - static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES]; 574 - static struct kmem_cache cache_cache = { 575 - .nodelists = cache_cache_nodelists, 588 + static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES]; 589 + static struct kmem_cache kmem_cache_boot = { 590 + .nodelists = kmem_cache_nodelists, 576 591 .batchcount = 1, 577 592 .limit = BOOT_CPUCACHE_ENTRIES, 578 593 .shared = 1, ··· 795 810 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 796 811 } 797 812 813 + #if DEBUG 798 814 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) 799 815 800 816 static void __slab_error(const char *function, struct kmem_cache *cachep, ··· 804 818 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 805 819 function, cachep->name, msg); 806 820 dump_stack(); 821 + add_taint(TAINT_BAD_PAGE); 807 822 } 823 + #endif 808 824 809 825 /* 810 826 * By default on NUMA we use alien caches to stage the freeing of ··· 1589 1601 int order; 1590 1602 int node; 1591 1603 1604 + kmem_cache = &kmem_cache_boot; 1605 + 1592 1606 if (num_possible_nodes() == 1) 1593 1607 
use_alien_caches = 0; 1594 1608 1595 1609 for (i = 0; i < NUM_INIT_LISTS; i++) { 1596 1610 kmem_list3_init(&initkmem_list3[i]); 1597 1611 if (i < MAX_NUMNODES) 1598 - cache_cache.nodelists[i] = NULL; 1612 + kmem_cache->nodelists[i] = NULL; 1599 1613 } 1600 - set_up_list3s(&cache_cache, CACHE_CACHE); 1614 + set_up_list3s(kmem_cache, CACHE_CACHE); 1601 1615 1602 1616 /* 1603 1617 * Fragmentation resistance on low memory - only use bigger ··· 1611 1621 1612 1622 /* Bootstrap is tricky, because several objects are allocated 1613 1623 * from caches that do not exist yet: 1614 - * 1) initialize the cache_cache cache: it contains the struct 1615 - * kmem_cache structures of all caches, except cache_cache itself: 1616 - * cache_cache is statically allocated. 1624 + * 1) initialize the kmem_cache cache: it contains the struct 1625 + * kmem_cache structures of all caches, except kmem_cache itself: 1626 + * kmem_cache is statically allocated. 1617 1627 * Initially an __init data area is used for the head array and the 1618 1628 * kmem_list3 structures, it's replaced with a kmalloc allocated 1619 1629 * array at the end of the bootstrap. ··· 1622 1632 * An __init data area is used for the head array. 1623 1633 * 3) Create the remaining kmalloc caches, with minimally sized 1624 1634 * head arrays. 1625 - * 4) Replace the __init data head arrays for cache_cache and the first 1635 + * 4) Replace the __init data head arrays for kmem_cache and the first 1626 1636 * kmalloc cache with kmalloc allocated arrays. 1627 - * 5) Replace the __init data for kmem_list3 for cache_cache and 1637 + * 5) Replace the __init data for kmem_list3 for kmem_cache and 1628 1638 * the other cache's with kmalloc allocated memory. 1629 1639 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 
1630 1640 */ 1631 1641 1632 1642 node = numa_mem_id(); 1633 1643 1634 - /* 1) create the cache_cache */ 1644 + /* 1) create the kmem_cache */ 1635 1645 INIT_LIST_HEAD(&slab_caches); 1636 - list_add(&cache_cache.list, &slab_caches); 1637 - cache_cache.colour_off = cache_line_size(); 1638 - cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1639 - cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; 1646 + list_add(&kmem_cache->list, &slab_caches); 1647 + kmem_cache->colour_off = cache_line_size(); 1648 + kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; 1649 + kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; 1640 1650 1641 1651 /* 1642 1652 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids 1643 1653 */ 1644 - cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + 1654 + kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + 1645 1655 nr_node_ids * sizeof(struct kmem_list3 *); 1646 - cache_cache.object_size = cache_cache.size; 1647 - cache_cache.size = ALIGN(cache_cache.size, 1656 + kmem_cache->object_size = kmem_cache->size; 1657 + kmem_cache->size = ALIGN(kmem_cache->object_size, 1648 1658 cache_line_size()); 1649 - cache_cache.reciprocal_buffer_size = 1650 - reciprocal_value(cache_cache.size); 1659 + kmem_cache->reciprocal_buffer_size = 1660 + reciprocal_value(kmem_cache->size); 1651 1661 1652 1662 for (order = 0; order < MAX_ORDER; order++) { 1653 - cache_estimate(order, cache_cache.size, 1654 - cache_line_size(), 0, &left_over, &cache_cache.num); 1655 - if (cache_cache.num) 1663 + cache_estimate(order, kmem_cache->size, 1664 + cache_line_size(), 0, &left_over, &kmem_cache->num); 1665 + if (kmem_cache->num) 1656 1666 break; 1657 1667 } 1658 - BUG_ON(!cache_cache.num); 1659 - cache_cache.gfporder = order; 1660 - cache_cache.colour = left_over / cache_cache.colour_off; 1661 - cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1668 + 
BUG_ON(!kmem_cache->num); 1669 + kmem_cache->gfporder = order; 1670 + kmem_cache->colour = left_over / kmem_cache->colour_off; 1671 + kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) + 1662 1672 sizeof(struct slab), cache_line_size()); 1663 1673 1664 1674 /* 2+3) create the kmalloc caches */ ··· 1671 1681 * bug. 1672 1682 */ 1673 1683 1674 - sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name, 1675 - sizes[INDEX_AC].cs_size, 1676 - ARCH_KMALLOC_MINALIGN, 1677 - ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1678 - NULL); 1684 + sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1685 + sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name; 1686 + sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size; 1687 + sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size; 1688 + sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN; 1689 + __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); 1690 + list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); 1679 1691 1680 1692 if (INDEX_AC != INDEX_L3) { 1681 - sizes[INDEX_L3].cs_cachep = 1682 - __kmem_cache_create(names[INDEX_L3].name, 1683 - sizes[INDEX_L3].cs_size, 1684 - ARCH_KMALLOC_MINALIGN, 1685 - ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1686 - NULL); 1693 + sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1694 + sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name; 1695 + sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size; 1696 + sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size; 1697 + sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN; 1698 + __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); 1699 + list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches); 1687 1700 } 1688 1701 1689 1702 slab_early_init = 0; ··· 1700 1707 * allow tighter packing of the smaller caches. 
1701 1708 */ 1702 1709 if (!sizes->cs_cachep) { 1703 - sizes->cs_cachep = __kmem_cache_create(names->name, 1704 - sizes->cs_size, 1705 - ARCH_KMALLOC_MINALIGN, 1706 - ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1707 - NULL); 1710 + sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1711 + sizes->cs_cachep->name = names->name; 1712 + sizes->cs_cachep->size = sizes->cs_size; 1713 + sizes->cs_cachep->object_size = sizes->cs_size; 1714 + sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN; 1715 + __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); 1716 + list_add(&sizes->cs_cachep->list, &slab_caches); 1708 1717 } 1709 1718 #ifdef CONFIG_ZONE_DMA 1710 - sizes->cs_dmacachep = __kmem_cache_create( 1711 - names->name_dma, 1712 - sizes->cs_size, 1713 - ARCH_KMALLOC_MINALIGN, 1714 - ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1715 - SLAB_PANIC, 1716 - NULL); 1719 + sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 1720 + sizes->cs_dmacachep->name = names->name_dma; 1721 + sizes->cs_dmacachep->size = sizes->cs_size; 1722 + sizes->cs_dmacachep->object_size = sizes->cs_size; 1723 + sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN; 1724 + __kmem_cache_create(sizes->cs_dmacachep, 1725 + ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC); 1726 + list_add(&sizes->cs_dmacachep->list, &slab_caches); 1717 1727 #endif 1718 1728 sizes++; 1719 1729 names++; ··· 1727 1731 1728 1732 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); 1729 1733 1730 - BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1731 - memcpy(ptr, cpu_cache_get(&cache_cache), 1734 + BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache); 1735 + memcpy(ptr, cpu_cache_get(kmem_cache), 1732 1736 sizeof(struct arraycache_init)); 1733 1737 /* 1734 1738 * Do not assume that spinlocks can be initialized via memcpy: 1735 1739 */ 1736 1740 spin_lock_init(&ptr->lock); 1737 1741 1738 - cache_cache.array[smp_processor_id()] = ptr; 1742 + kmem_cache->array[smp_processor_id()] = ptr; 1739 
1743 1740 1744 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); 1741 1745 ··· 1756 1760 int nid; 1757 1761 1758 1762 for_each_online_node(nid) { 1759 - init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid); 1763 + init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid); 1760 1764 1761 1765 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1762 1766 &initkmem_list3[SIZE_AC + nid], nid); ··· 1777 1781 1778 1782 slab_state = UP; 1779 1783 1780 - /* Annotate slab for lockdep -- annotate the malloc caches */ 1781 - init_lock_keys(); 1782 - 1783 1784 /* 6) resize the head arrays to their final sizes */ 1784 1785 mutex_lock(&slab_mutex); 1785 1786 list_for_each_entry(cachep, &slab_caches, list) 1786 1787 if (enable_cpucache(cachep, GFP_NOWAIT)) 1787 1788 BUG(); 1788 1789 mutex_unlock(&slab_mutex); 1790 + 1791 + /* Annotate slab for lockdep -- annotate the malloc caches */ 1792 + init_lock_keys(); 1789 1793 1790 1794 /* Done! */ 1791 1795 slab_state = FULL; ··· 2205 2209 } 2206 2210 } 2207 2211 2208 - static void __kmem_cache_destroy(struct kmem_cache *cachep) 2209 - { 2210 - int i; 2211 - struct kmem_list3 *l3; 2212 - 2213 - for_each_online_cpu(i) 2214 - kfree(cachep->array[i]); 2215 - 2216 - /* NUMA: free the list3 structures */ 2217 - for_each_online_node(i) { 2218 - l3 = cachep->nodelists[i]; 2219 - if (l3) { 2220 - kfree(l3->shared); 2221 - free_alien_cache(l3->alien); 2222 - kfree(l3); 2223 - } 2224 - } 2225 - kmem_cache_free(&cache_cache, cachep); 2226 - } 2227 - 2228 - 2229 2212 /** 2230 2213 * calculate_slab_order - calculate size (page order) of slabs 2231 2214 * @cachep: pointer to the cache that is being created ··· 2341 2366 * Cannot be called within a int, but can be interrupted. 2342 2367 * The @ctor is run when new pages are allocated by the cache. 2343 2368 * 2344 - * @name must be valid until the cache is destroyed. This implies that 2345 - * the module calling this has to destroy the cache before getting unloaded. 
2346 - * 2347 2369 * The flags are 2348 2370 * 2349 2371 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) ··· 2353 2381 * cacheline. This can be beneficial if you're counting cycles as closely 2354 2382 * as davem. 2355 2383 */ 2356 - struct kmem_cache * 2357 - __kmem_cache_create (const char *name, size_t size, size_t align, 2358 - unsigned long flags, void (*ctor)(void *)) 2384 + int 2385 + __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) 2359 2386 { 2360 2387 size_t left_over, slab_size, ralign; 2361 - struct kmem_cache *cachep = NULL; 2362 2388 gfp_t gfp; 2389 + int err; 2390 + size_t size = cachep->size; 2363 2391 2364 2392 #if DEBUG 2365 2393 #if FORCED_DEBUG ··· 2431 2459 ralign = ARCH_SLAB_MINALIGN; 2432 2460 } 2433 2461 /* 3) caller mandated alignment */ 2434 - if (ralign < align) { 2435 - ralign = align; 2462 + if (ralign < cachep->align) { 2463 + ralign = cachep->align; 2436 2464 } 2437 2465 /* disable debug if necessary */ 2438 2466 if (ralign > __alignof__(unsigned long long)) ··· 2440 2468 /* 2441 2469 * 4) Store it. 2442 2470 */ 2443 - align = ralign; 2471 + cachep->align = ralign; 2444 2472 2445 2473 if (slab_is_available()) 2446 2474 gfp = GFP_KERNEL; 2447 2475 else 2448 2476 gfp = GFP_NOWAIT; 2449 2477 2450 - /* Get cache's description obj. 
*/ 2451 - cachep = kmem_cache_zalloc(&cache_cache, gfp); 2452 - if (!cachep) 2453 - return NULL; 2454 - 2455 2478 cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; 2456 - cachep->object_size = size; 2457 - cachep->align = align; 2458 2479 #if DEBUG 2459 2480 2460 2481 /* ··· 2471 2506 } 2472 2507 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2473 2508 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2474 - && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { 2475 - cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); 2509 + && cachep->object_size > cache_line_size() 2510 + && ALIGN(size, cachep->align) < PAGE_SIZE) { 2511 + cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); 2476 2512 size = PAGE_SIZE; 2477 2513 } 2478 2514 #endif ··· 2493 2527 */ 2494 2528 flags |= CFLGS_OFF_SLAB; 2495 2529 2496 - size = ALIGN(size, align); 2530 + size = ALIGN(size, cachep->align); 2497 2531 2498 - left_over = calculate_slab_order(cachep, size, align, flags); 2532 + left_over = calculate_slab_order(cachep, size, cachep->align, flags); 2499 2533 2500 - if (!cachep->num) { 2501 - printk(KERN_ERR 2502 - "kmem_cache_create: couldn't create cache %s.\n", name); 2503 - kmem_cache_free(&cache_cache, cachep); 2504 - return NULL; 2505 - } 2534 + if (!cachep->num) 2535 + return -E2BIG; 2536 + 2506 2537 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2507 - + sizeof(struct slab), align); 2538 + + sizeof(struct slab), cachep->align); 2508 2539 2509 2540 /* 2510 2541 * If the slab has been placed off-slab, and we have enough space then ··· 2529 2566 2530 2567 cachep->colour_off = cache_line_size(); 2531 2568 /* Offset must be a multiple of the alignment. 
*/ 2532 - if (cachep->colour_off < align) 2533 - cachep->colour_off = align; 2569 + if (cachep->colour_off < cachep->align) 2570 + cachep->colour_off = cachep->align; 2534 2571 cachep->colour = left_over / cachep->colour_off; 2535 2572 cachep->slab_size = slab_size; 2536 2573 cachep->flags = flags; ··· 2551 2588 */ 2552 2589 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); 2553 2590 } 2554 - cachep->ctor = ctor; 2555 - cachep->name = name; 2556 2591 2557 - if (setup_cpu_cache(cachep, gfp)) { 2558 - __kmem_cache_destroy(cachep); 2559 - return NULL; 2592 + err = setup_cpu_cache(cachep, gfp); 2593 + if (err) { 2594 + __kmem_cache_shutdown(cachep); 2595 + return err; 2560 2596 } 2561 2597 2562 2598 if (flags & SLAB_DEBUG_OBJECTS) { ··· 2568 2606 slab_set_debugobj_lock_classes(cachep); 2569 2607 } 2570 2608 2571 - /* cache setup completed, link it into the list */ 2572 - list_add(&cachep->list, &slab_caches); 2573 - return cachep; 2609 + return 0; 2574 2610 } 2575 2611 2576 2612 #if DEBUG ··· 2727 2767 } 2728 2768 EXPORT_SYMBOL(kmem_cache_shrink); 2729 2769 2730 - /** 2731 - * kmem_cache_destroy - delete a cache 2732 - * @cachep: the cache to destroy 2733 - * 2734 - * Remove a &struct kmem_cache object from the slab cache. 2735 - * 2736 - * It is expected this function will be called by a module when it is 2737 - * unloaded. This will remove the cache completely, and avoid a duplicate 2738 - * cache being allocated each time a module is loaded and unloaded, if the 2739 - * module doesn't have persistent in-kernel storage across loads and unloads. 2740 - * 2741 - * The cache must be empty before calling this function. 2742 - * 2743 - * The caller must guarantee that no one will allocate memory from the cache 2744 - * during the kmem_cache_destroy(). 
2745 - */ 2746 - void kmem_cache_destroy(struct kmem_cache *cachep) 2770 + int __kmem_cache_shutdown(struct kmem_cache *cachep) 2747 2771 { 2748 - BUG_ON(!cachep || in_interrupt()); 2772 + int i; 2773 + struct kmem_list3 *l3; 2774 + int rc = __cache_shrink(cachep); 2749 2775 2750 - /* Find the cache in the chain of caches. */ 2751 - get_online_cpus(); 2752 - mutex_lock(&slab_mutex); 2753 - /* 2754 - * the chain is never empty, cache_cache is never destroyed 2755 - */ 2756 - list_del(&cachep->list); 2757 - if (__cache_shrink(cachep)) { 2758 - slab_error(cachep, "Can't free all objects"); 2759 - list_add(&cachep->list, &slab_caches); 2760 - mutex_unlock(&slab_mutex); 2761 - put_online_cpus(); 2762 - return; 2776 + if (rc) 2777 + return rc; 2778 + 2779 + for_each_online_cpu(i) 2780 + kfree(cachep->array[i]); 2781 + 2782 + /* NUMA: free the list3 structures */ 2783 + for_each_online_node(i) { 2784 + l3 = cachep->nodelists[i]; 2785 + if (l3) { 2786 + kfree(l3->shared); 2787 + free_alien_cache(l3->alien); 2788 + kfree(l3); 2789 + } 2763 2790 } 2764 - 2765 - if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2766 - rcu_barrier(); 2767 - 2768 - __kmem_cache_destroy(cachep); 2769 - mutex_unlock(&slab_mutex); 2770 - put_online_cpus(); 2791 + return 0; 2771 2792 } 2772 - EXPORT_SYMBOL(kmem_cache_destroy); 2773 2793 2774 2794 /* 2775 2795 * Get the memory for a slab management obj. 
··· 3038 3098 } 3039 3099 3040 3100 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 3041 - void *caller) 3101 + unsigned long caller) 3042 3102 { 3043 3103 struct page *page; 3044 3104 unsigned int objnr; ··· 3058 3118 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 3059 3119 } 3060 3120 if (cachep->flags & SLAB_STORE_USER) 3061 - *dbg_userword(cachep, objp) = caller; 3121 + *dbg_userword(cachep, objp) = (void *)caller; 3062 3122 3063 3123 objnr = obj_to_index(cachep, slabp, objp); 3064 3124 ··· 3071 3131 if (cachep->flags & SLAB_POISON) { 3072 3132 #ifdef CONFIG_DEBUG_PAGEALLOC 3073 3133 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 3074 - store_stackinfo(cachep, objp, (unsigned long)caller); 3134 + store_stackinfo(cachep, objp, caller); 3075 3135 kernel_map_pages(virt_to_page(objp), 3076 3136 cachep->size / PAGE_SIZE, 0); 3077 3137 } else { ··· 3225 3285 3226 3286 #if DEBUG 3227 3287 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3228 - gfp_t flags, void *objp, void *caller) 3288 + gfp_t flags, void *objp, unsigned long caller) 3229 3289 { 3230 3290 if (!objp) 3231 3291 return objp; ··· 3242 3302 poison_obj(cachep, objp, POISON_INUSE); 3243 3303 } 3244 3304 if (cachep->flags & SLAB_STORE_USER) 3245 - *dbg_userword(cachep, objp) = caller; 3305 + *dbg_userword(cachep, objp) = (void *)caller; 3246 3306 3247 3307 if (cachep->flags & SLAB_RED_ZONE) { 3248 3308 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || ··· 3283 3343 3284 3344 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) 3285 3345 { 3286 - if (cachep == &cache_cache) 3346 + if (cachep == kmem_cache) 3287 3347 return false; 3288 3348 3289 3349 return should_failslab(cachep->object_size, flags, cachep->flags); ··· 3516 3576 * Fallback to other node is possible if __GFP_THISNODE is not set. 
3517 3577 */ 3518 3578 static __always_inline void * 3519 - __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3520 - void *caller) 3579 + slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3580 + unsigned long caller) 3521 3581 { 3522 3582 unsigned long save_flags; 3523 3583 void *ptr; ··· 3603 3663 #endif /* CONFIG_NUMA */ 3604 3664 3605 3665 static __always_inline void * 3606 - __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) 3666 + slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3607 3667 { 3608 3668 unsigned long save_flags; 3609 3669 void *objp; ··· 3739 3799 * be in this state _before_ it is released. Called with disabled ints. 3740 3800 */ 3741 3801 static inline void __cache_free(struct kmem_cache *cachep, void *objp, 3742 - void *caller) 3802 + unsigned long caller) 3743 3803 { 3744 3804 struct array_cache *ac = cpu_cache_get(cachep); 3745 3805 ··· 3779 3839 */ 3780 3840 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3781 3841 { 3782 - void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); 3842 + void *ret = slab_alloc(cachep, flags, _RET_IP_); 3783 3843 3784 3844 trace_kmem_cache_alloc(_RET_IP_, ret, 3785 3845 cachep->object_size, cachep->size, flags); ··· 3790 3850 3791 3851 #ifdef CONFIG_TRACING 3792 3852 void * 3793 - kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags) 3853 + kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 3794 3854 { 3795 3855 void *ret; 3796 3856 3797 - ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); 3857 + ret = slab_alloc(cachep, flags, _RET_IP_); 3798 3858 3799 3859 trace_kmalloc(_RET_IP_, ret, 3800 - size, slab_buffer_size(cachep), flags); 3860 + size, cachep->size, flags); 3801 3861 return ret; 3802 3862 } 3803 3863 EXPORT_SYMBOL(kmem_cache_alloc_trace); ··· 3806 3866 #ifdef CONFIG_NUMA 3807 3867 void *kmem_cache_alloc_node(struct kmem_cache 
*cachep, gfp_t flags, int nodeid) 3808 3868 { 3809 - void *ret = __cache_alloc_node(cachep, flags, nodeid, 3810 - __builtin_return_address(0)); 3869 + void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3811 3870 3812 3871 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3813 3872 cachep->object_size, cachep->size, ··· 3817 3878 EXPORT_SYMBOL(kmem_cache_alloc_node); 3818 3879 3819 3880 #ifdef CONFIG_TRACING 3820 - void *kmem_cache_alloc_node_trace(size_t size, 3821 - struct kmem_cache *cachep, 3881 + void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 3822 3882 gfp_t flags, 3823 - int nodeid) 3883 + int nodeid, 3884 + size_t size) 3824 3885 { 3825 3886 void *ret; 3826 3887 3827 - ret = __cache_alloc_node(cachep, flags, nodeid, 3828 - __builtin_return_address(0)); 3888 + ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3889 + 3829 3890 trace_kmalloc_node(_RET_IP_, ret, 3830 - size, slab_buffer_size(cachep), 3891 + size, cachep->size, 3831 3892 flags, nodeid); 3832 3893 return ret; 3833 3894 } ··· 3835 3896 #endif 3836 3897 3837 3898 static __always_inline void * 3838 - __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3899 + __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) 3839 3900 { 3840 3901 struct kmem_cache *cachep; 3841 3902 3842 3903 cachep = kmem_find_general_cachep(size, flags); 3843 3904 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3844 3905 return cachep; 3845 - return kmem_cache_alloc_node_trace(size, cachep, flags, node); 3906 + return kmem_cache_alloc_node_trace(cachep, flags, node, size); 3846 3907 } 3847 3908 3848 3909 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) 3849 3910 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3850 3911 { 3851 - return __do_kmalloc_node(size, flags, node, 3852 - __builtin_return_address(0)); 3912 + return __do_kmalloc_node(size, flags, node, _RET_IP_); 3853 3913 } 3854 3914 EXPORT_SYMBOL(__kmalloc_node); 3855 3915 3856 3916 void 
*__kmalloc_node_track_caller(size_t size, gfp_t flags, 3857 3917 int node, unsigned long caller) 3858 3918 { 3859 - return __do_kmalloc_node(size, flags, node, (void *)caller); 3919 + return __do_kmalloc_node(size, flags, node, caller); 3860 3920 } 3861 3921 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3862 3922 #else 3863 3923 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3864 3924 { 3865 - return __do_kmalloc_node(size, flags, node, NULL); 3925 + return __do_kmalloc_node(size, flags, node, 0); 3866 3926 } 3867 3927 EXPORT_SYMBOL(__kmalloc_node); 3868 3928 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ ··· 3874 3936 * @caller: function caller for debug tracking of the caller 3875 3937 */ 3876 3938 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3877 - void *caller) 3939 + unsigned long caller) 3878 3940 { 3879 3941 struct kmem_cache *cachep; 3880 3942 void *ret; ··· 3887 3949 cachep = __find_general_cachep(size, flags); 3888 3950 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3889 3951 return cachep; 3890 - ret = __cache_alloc(cachep, flags, caller); 3952 + ret = slab_alloc(cachep, flags, caller); 3891 3953 3892 - trace_kmalloc((unsigned long) caller, ret, 3954 + trace_kmalloc(caller, ret, 3893 3955 size, cachep->size, flags); 3894 3956 3895 3957 return ret; ··· 3899 3961 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) 3900 3962 void *__kmalloc(size_t size, gfp_t flags) 3901 3963 { 3902 - return __do_kmalloc(size, flags, __builtin_return_address(0)); 3964 + return __do_kmalloc(size, flags, _RET_IP_); 3903 3965 } 3904 3966 EXPORT_SYMBOL(__kmalloc); 3905 3967 3906 3968 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3907 3969 { 3908 - return __do_kmalloc(size, flags, (void *)caller); 3970 + return __do_kmalloc(size, flags, caller); 3909 3971 } 3910 3972 EXPORT_SYMBOL(__kmalloc_track_caller); 3911 3973 3912 3974 #else 3913 3975 void *__kmalloc(size_t size, gfp_t flags) 3914 3976 { 3915 - return 
__do_kmalloc(size, flags, NULL); 3977 + return __do_kmalloc(size, flags, 0); 3916 3978 } 3917 3979 EXPORT_SYMBOL(__kmalloc); 3918 3980 #endif ··· 3933 3995 debug_check_no_locks_freed(objp, cachep->object_size); 3934 3996 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3935 3997 debug_check_no_obj_freed(objp, cachep->object_size); 3936 - __cache_free(cachep, objp, __builtin_return_address(0)); 3998 + __cache_free(cachep, objp, _RET_IP_); 3937 3999 local_irq_restore(flags); 3938 4000 3939 4001 trace_kmem_cache_free(_RET_IP_, objp); ··· 3964 4026 debug_check_no_locks_freed(objp, c->object_size); 3965 4027 3966 4028 debug_check_no_obj_freed(objp, c->object_size); 3967 - __cache_free(c, (void *)objp, __builtin_return_address(0)); 4029 + __cache_free(c, (void *)objp, _RET_IP_); 3968 4030 local_irq_restore(flags); 3969 4031 } 3970 4032 EXPORT_SYMBOL(kfree);
+18 -1
mm/slab.h
··· 25 25 26 26 /* The slab cache mutex protects the management structures during changes */ 27 27 extern struct mutex slab_mutex; 28 + 29 + /* The list of all slab caches on the system */ 28 30 extern struct list_head slab_caches; 29 31 30 - struct kmem_cache *__kmem_cache_create(const char *name, size_t size, 32 + /* The slab cache that manages slab cache information */ 33 + extern struct kmem_cache *kmem_cache; 34 + 35 + /* Functions provided by the slab allocators */ 36 + extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); 37 + 38 + #ifdef CONFIG_SLUB 39 + struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 31 40 size_t align, unsigned long flags, void (*ctor)(void *)); 41 + #else 42 + static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 43 + size_t align, unsigned long flags, void (*ctor)(void *)) 44 + { return NULL; } 45 + #endif 46 + 47 + 48 + int __kmem_cache_shutdown(struct kmem_cache *); 32 49 33 50 #endif
+116 -45
mm/slab_common.c
··· 22 22 enum slab_state slab_state; 23 23 LIST_HEAD(slab_caches); 24 24 DEFINE_MUTEX(slab_mutex); 25 + struct kmem_cache *kmem_cache; 26 + 27 + #ifdef CONFIG_DEBUG_VM 28 + static int kmem_cache_sanity_check(const char *name, size_t size) 29 + { 30 + struct kmem_cache *s = NULL; 31 + 32 + if (!name || in_interrupt() || size < sizeof(void *) || 33 + size > KMALLOC_MAX_SIZE) { 34 + pr_err("kmem_cache_create(%s) integrity check failed\n", name); 35 + return -EINVAL; 36 + } 37 + 38 + list_for_each_entry(s, &slab_caches, list) { 39 + char tmp; 40 + int res; 41 + 42 + /* 43 + * This happens when the module gets unloaded and doesn't 44 + * destroy its slab cache and no-one else reuses the vmalloc 45 + * area of the module. Print a warning. 46 + */ 47 + res = probe_kernel_address(s->name, tmp); 48 + if (res) { 49 + pr_err("Slab cache with size %d has lost its name\n", 50 + s->object_size); 51 + continue; 52 + } 53 + 54 + if (!strcmp(s->name, name)) { 55 + pr_err("%s (%s): Cache name already exists.\n", 56 + __func__, name); 57 + dump_stack(); 58 + s = NULL; 59 + return -EINVAL; 60 + } 61 + } 62 + 63 + WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 64 + return 0; 65 + } 66 + #else 67 + static inline int kmem_cache_sanity_check(const char *name, size_t size) 68 + { 69 + return 0; 70 + } 71 + #endif 25 72 26 73 /* 27 74 * kmem_cache_create - Create a cache. 
··· 99 52 unsigned long flags, void (*ctor)(void *)) 100 53 { 101 54 struct kmem_cache *s = NULL; 102 - 103 - #ifdef CONFIG_DEBUG_VM 104 - if (!name || in_interrupt() || size < sizeof(void *) || 105 - size > KMALLOC_MAX_SIZE) { 106 - printk(KERN_ERR "kmem_cache_create(%s) integrity check" 107 - " failed\n", name); 108 - goto out; 109 - } 110 - #endif 55 + int err = 0; 111 56 112 57 get_online_cpus(); 113 58 mutex_lock(&slab_mutex); 114 59 115 - #ifdef CONFIG_DEBUG_VM 116 - list_for_each_entry(s, &slab_caches, list) { 117 - char tmp; 118 - int res; 60 + if (!kmem_cache_sanity_check(name, size) == 0) 61 + goto out_locked; 119 62 120 - /* 121 - * This happens when the module gets unloaded and doesn't 122 - * destroy its slab cache and no-one else reuses the vmalloc 123 - * area of the module. Print a warning. 124 - */ 125 - res = probe_kernel_address(s->name, tmp); 126 - if (res) { 127 - printk(KERN_ERR 128 - "Slab cache with size %d has lost its name\n", 129 - s->object_size); 130 - continue; 63 + 64 + s = __kmem_cache_alias(name, size, align, flags, ctor); 65 + if (s) 66 + goto out_locked; 67 + 68 + s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); 69 + if (s) { 70 + s->object_size = s->size = size; 71 + s->align = align; 72 + s->ctor = ctor; 73 + s->name = kstrdup(name, GFP_KERNEL); 74 + if (!s->name) { 75 + kmem_cache_free(kmem_cache, s); 76 + err = -ENOMEM; 77 + goto out_locked; 131 78 } 132 79 133 - if (!strcmp(s->name, name)) { 134 - printk(KERN_ERR "kmem_cache_create(%s): Cache name" 135 - " already exists.\n", 136 - name); 137 - dump_stack(); 138 - s = NULL; 139 - goto oops; 80 + err = __kmem_cache_create(s, flags); 81 + if (!err) { 82 + 83 + s->refcount = 1; 84 + list_add(&s->list, &slab_caches); 85 + 86 + } else { 87 + kfree(s->name); 88 + kmem_cache_free(kmem_cache, s); 140 89 } 141 - } 90 + } else 91 + err = -ENOMEM; 142 92 143 - WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 144 - #endif 145 - 146 - s = __kmem_cache_create(name, size, align, flags, 
ctor); 147 - 148 - #ifdef CONFIG_DEBUG_VM 149 - oops: 150 - #endif 93 + out_locked: 151 94 mutex_unlock(&slab_mutex); 152 95 put_online_cpus(); 153 96 154 - #ifdef CONFIG_DEBUG_VM 155 - out: 156 - #endif 157 - if (!s && (flags & SLAB_PANIC)) 158 - panic("kmem_cache_create: Failed to create slab '%s'\n", name); 97 + if (err) { 98 + 99 + if (flags & SLAB_PANIC) 100 + panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", 101 + name, err); 102 + else { 103 + printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d", 104 + name, err); 105 + dump_stack(); 106 + } 107 + 108 + return NULL; 109 + } 159 110 160 111 return s; 161 112 } 162 113 EXPORT_SYMBOL(kmem_cache_create); 114 + 115 + void kmem_cache_destroy(struct kmem_cache *s) 116 + { 117 + get_online_cpus(); 118 + mutex_lock(&slab_mutex); 119 + s->refcount--; 120 + if (!s->refcount) { 121 + list_del(&s->list); 122 + 123 + if (!__kmem_cache_shutdown(s)) { 124 + if (s->flags & SLAB_DESTROY_BY_RCU) 125 + rcu_barrier(); 126 + 127 + kfree(s->name); 128 + kmem_cache_free(kmem_cache, s); 129 + } else { 130 + list_add(&s->list, &slab_caches); 131 + printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n", 132 + s->name); 133 + dump_stack(); 134 + } 135 + } 136 + mutex_unlock(&slab_mutex); 137 + put_online_cpus(); 138 + } 139 + EXPORT_SYMBOL(kmem_cache_destroy); 163 140 164 141 int slab_is_available(void) 165 142 {
+53 -38
mm/slob.c
··· 194 194 void *page; 195 195 196 196 #ifdef CONFIG_NUMA 197 - if (node != -1) 197 + if (node != NUMA_NO_NODE) 198 198 page = alloc_pages_exact_node(node, gfp, order); 199 199 else 200 200 #endif ··· 290 290 * If there's a node specification, search for a partial 291 291 * page with a matching node id in the freelist. 292 292 */ 293 - if (node != -1 && page_to_nid(sp) != node) 293 + if (node != NUMA_NO_NODE && page_to_nid(sp) != node) 294 294 continue; 295 295 #endif 296 296 /* Enough room on this page? */ ··· 425 425 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. 426 426 */ 427 427 428 - void *__kmalloc_node(size_t size, gfp_t gfp, int node) 428 + static __always_inline void * 429 + __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) 429 430 { 430 431 unsigned int *m; 431 432 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ··· 447 446 *m = size; 448 447 ret = (void *)m + align; 449 448 450 - trace_kmalloc_node(_RET_IP_, ret, 449 + trace_kmalloc_node(caller, ret, 451 450 size, size + align, gfp, node); 452 451 } else { 453 452 unsigned int order = get_order(size); ··· 461 460 page->private = size; 462 461 } 463 462 464 - trace_kmalloc_node(_RET_IP_, ret, 463 + trace_kmalloc_node(caller, ret, 465 464 size, PAGE_SIZE << order, gfp, node); 466 465 } 467 466 468 467 kmemleak_alloc(ret, size, 1, gfp); 469 468 return ret; 470 469 } 470 + 471 + void *__kmalloc_node(size_t size, gfp_t gfp, int node) 472 + { 473 + return __do_kmalloc_node(size, gfp, node, _RET_IP_); 474 + } 471 475 EXPORT_SYMBOL(__kmalloc_node); 476 + 477 + #ifdef CONFIG_TRACING 478 + void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) 479 + { 480 + return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); 481 + } 482 + 483 + #ifdef CONFIG_NUMA 484 + void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, 485 + int node, unsigned long caller) 486 + { 487 + return __do_kmalloc_node(size, gfp, node, caller); 488 + } 
489 + #endif 490 + #endif 472 491 473 492 void kfree(const void *block) 474 493 { ··· 529 508 } 530 509 EXPORT_SYMBOL(ksize); 531 510 532 - struct kmem_cache *__kmem_cache_create(const char *name, size_t size, 533 - size_t align, unsigned long flags, void (*ctor)(void *)) 511 + int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) 534 512 { 535 - struct kmem_cache *c; 513 + size_t align = c->size; 536 514 537 - c = slob_alloc(sizeof(struct kmem_cache), 538 - GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); 539 - 540 - if (c) { 541 - c->name = name; 542 - c->size = size; 543 - if (flags & SLAB_DESTROY_BY_RCU) { 544 - /* leave room for rcu footer at the end of object */ 545 - c->size += sizeof(struct slob_rcu); 546 - } 547 - c->flags = flags; 548 - c->ctor = ctor; 549 - /* ignore alignment unless it's forced */ 550 - c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; 551 - if (c->align < ARCH_SLAB_MINALIGN) 552 - c->align = ARCH_SLAB_MINALIGN; 553 - if (c->align < align) 554 - c->align = align; 555 - 556 - kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL); 557 - c->refcount = 1; 515 + if (flags & SLAB_DESTROY_BY_RCU) { 516 + /* leave room for rcu footer at the end of object */ 517 + c->size += sizeof(struct slob_rcu); 558 518 } 559 - return c; 560 - } 519 + c->flags = flags; 520 + /* ignore alignment unless it's forced */ 521 + c->align = (flags & SLAB_HWCACHE_ALIGN) ? 
SLOB_ALIGN : 0; 522 + if (c->align < ARCH_SLAB_MINALIGN) 523 + c->align = ARCH_SLAB_MINALIGN; 524 + if (c->align < align) 525 + c->align = align; 561 526 562 - void kmem_cache_destroy(struct kmem_cache *c) 563 - { 564 - kmemleak_free(c); 565 - if (c->flags & SLAB_DESTROY_BY_RCU) 566 - rcu_barrier(); 567 - slob_free(c, sizeof(struct kmem_cache)); 527 + return 0; 568 528 } 569 - EXPORT_SYMBOL(kmem_cache_destroy); 570 529 571 530 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) 572 531 { ··· 614 613 } 615 614 EXPORT_SYMBOL(kmem_cache_size); 616 615 616 + int __kmem_cache_shutdown(struct kmem_cache *c) 617 + { 618 + /* No way to check for remaining objects */ 619 + return 0; 620 + } 621 + 617 622 int kmem_cache_shrink(struct kmem_cache *d) 618 623 { 619 624 return 0; 620 625 } 621 626 EXPORT_SYMBOL(kmem_cache_shrink); 622 627 628 + struct kmem_cache kmem_cache_boot = { 629 + .name = "kmem_cache", 630 + .size = sizeof(struct kmem_cache), 631 + .flags = SLAB_PANIC, 632 + .align = ARCH_KMALLOC_MINALIGN, 633 + }; 634 + 623 635 void __init kmem_cache_init(void) 624 636 { 637 + kmem_cache = &kmem_cache_boot; 625 638 slab_state = UP; 626 639 } 627 640
+95 -113
mm/slub.c
··· 210 210 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 211 211 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 212 212 { return 0; } 213 - static inline void sysfs_slab_remove(struct kmem_cache *s) 214 - { 215 - kfree(s->name); 216 - kfree(s); 217 - } 213 + static inline void sysfs_slab_remove(struct kmem_cache *s) { } 218 214 219 215 #endif 220 216 ··· 564 568 printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); 565 569 printk(KERN_ERR "----------------------------------------" 566 570 "-------------------------------------\n\n"); 571 + 572 + add_taint(TAINT_BAD_PAGE); 567 573 } 568 574 569 575 static void slab_fix(struct kmem_cache *s, char *fmt, ...) ··· 622 624 print_trailer(s, page, object); 623 625 } 624 626 625 - static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 627 + static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...) 626 628 { 627 629 va_list args; 628 630 char buf[100]; ··· 1067 1069 return 0; 1068 1070 } 1069 1071 1070 - static noinline int free_debug_processing(struct kmem_cache *s, 1071 - struct page *page, void *object, unsigned long addr) 1072 + static noinline struct kmem_cache_node *free_debug_processing( 1073 + struct kmem_cache *s, struct page *page, void *object, 1074 + unsigned long addr, unsigned long *flags) 1072 1075 { 1073 - unsigned long flags; 1074 - int rc = 0; 1076 + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1075 1077 1076 - local_irq_save(flags); 1078 + spin_lock_irqsave(&n->list_lock, *flags); 1077 1079 slab_lock(page); 1078 1080 1079 1081 if (!check_slab(s, page)) ··· 1111 1113 set_track(s, object, TRACK_FREE, addr); 1112 1114 trace(s, page, object, 0); 1113 1115 init_object(s, object, SLUB_RED_INACTIVE); 1114 - rc = 1; 1115 1116 out: 1116 1117 slab_unlock(page); 1117 - local_irq_restore(flags); 1118 - return rc; 1118 + /* 1119 + * Keep node_lock to preserve integrity 1120 + * until the 
object is actually freed 1121 + */ 1122 + return n; 1119 1123 1120 1124 fail: 1125 + slab_unlock(page); 1126 + spin_unlock_irqrestore(&n->list_lock, *flags); 1121 1127 slab_fix(s, "Object at 0x%p not freed", object); 1122 - goto out; 1128 + return NULL; 1123 1129 } 1124 1130 1125 1131 static int __init setup_slub_debug(char *str) ··· 1216 1214 static inline int alloc_debug_processing(struct kmem_cache *s, 1217 1215 struct page *page, void *object, unsigned long addr) { return 0; } 1218 1216 1219 - static inline int free_debug_processing(struct kmem_cache *s, 1220 - struct page *page, void *object, unsigned long addr) { return 0; } 1217 + static inline struct kmem_cache_node *free_debug_processing( 1218 + struct kmem_cache *s, struct page *page, void *object, 1219 + unsigned long addr, unsigned long *flags) { return NULL; } 1221 1220 1222 1221 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1223 1222 { return 1; } ··· 1717 1714 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 1718 1715 } 1719 1716 1720 - void init_kmem_cache_cpus(struct kmem_cache *s) 1717 + static void init_kmem_cache_cpus(struct kmem_cache *s) 1721 1718 { 1722 1719 int cpu; 1723 1720 ··· 1942 1939 * If we did not find a slot then simply move all the partials to the 1943 1940 * per node partial list. 1944 1941 */ 1945 - int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 1942 + static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 1946 1943 { 1947 1944 struct page *oldpage; 1948 1945 int pages; ··· 1965 1962 local_irq_save(flags); 1966 1963 unfreeze_partials(s); 1967 1964 local_irq_restore(flags); 1965 + oldpage = NULL; 1968 1966 pobjects = 0; 1969 1967 pages = 0; 1970 1968 stat(s, CPU_PARTIAL_DRAIN); ··· 2314 2310 * 2315 2311 * Otherwise we can simply pick the next object from the lockless free list. 
2316 2312 */ 2317 - static __always_inline void *slab_alloc(struct kmem_cache *s, 2313 + static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2318 2314 gfp_t gfpflags, int node, unsigned long addr) 2319 2315 { 2320 2316 void **object; ··· 2384 2380 return object; 2385 2381 } 2386 2382 2383 + static __always_inline void *slab_alloc(struct kmem_cache *s, 2384 + gfp_t gfpflags, unsigned long addr) 2385 + { 2386 + return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); 2387 + } 2388 + 2387 2389 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2388 2390 { 2389 - void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); 2391 + void *ret = slab_alloc(s, gfpflags, _RET_IP_); 2390 2392 2391 2393 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); 2392 2394 ··· 2403 2393 #ifdef CONFIG_TRACING 2404 2394 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 2405 2395 { 2406 - void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); 2396 + void *ret = slab_alloc(s, gfpflags, _RET_IP_); 2407 2397 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 2408 2398 return ret; 2409 2399 } ··· 2421 2411 #ifdef CONFIG_NUMA 2422 2412 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 2423 2413 { 2424 - void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); 2414 + void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); 2425 2415 2426 2416 trace_kmem_cache_alloc_node(_RET_IP_, ret, 2427 2417 s->object_size, s->size, gfpflags, node); ··· 2435 2425 gfp_t gfpflags, 2436 2426 int node, size_t size) 2437 2427 { 2438 - void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); 2428 + void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); 2439 2429 2440 2430 trace_kmalloc_node(_RET_IP_, ret, 2441 2431 size, s->size, gfpflags, node); ··· 2467 2457 2468 2458 stat(s, FREE_SLOWPATH); 2469 2459 2470 - if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) 2460 + if 
(kmem_cache_debug(s) && 2461 + !(n = free_debug_processing(s, page, x, addr, &flags))) 2471 2462 return; 2472 2463 2473 2464 do { ··· 2622 2611 struct page *page; 2623 2612 2624 2613 page = virt_to_head_page(x); 2614 + 2615 + if (kmem_cache_debug(s) && page->slab != s) { 2616 + pr_err("kmem_cache_free: Wrong slab cache. %s but object" 2617 + " is from %s\n", page->slab->name, s->name); 2618 + WARN_ON_ONCE(1); 2619 + return; 2620 + } 2625 2621 2626 2622 slab_free(s, page, x, _RET_IP_); 2627 2623 ··· 3044 3026 3045 3027 } 3046 3028 3047 - static int kmem_cache_open(struct kmem_cache *s, 3048 - const char *name, size_t size, 3049 - size_t align, unsigned long flags, 3050 - void (*ctor)(void *)) 3029 + static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) 3051 3030 { 3052 - memset(s, 0, kmem_size); 3053 - s->name = name; 3054 - s->ctor = ctor; 3055 - s->object_size = size; 3056 - s->align = align; 3057 - s->flags = kmem_cache_flags(size, flags, name, ctor); 3031 + s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); 3058 3032 s->reserved = 0; 3059 3033 3060 3034 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) ··· 3108 3098 else 3109 3099 s->cpu_partial = 30; 3110 3100 3111 - s->refcount = 1; 3112 3101 #ifdef CONFIG_NUMA 3113 3102 s->remote_node_defrag_ratio = 1000; 3114 3103 #endif ··· 3115 3106 goto error; 3116 3107 3117 3108 if (alloc_kmem_cache_cpus(s)) 3118 - return 1; 3109 + return 0; 3119 3110 3120 3111 free_kmem_cache_nodes(s); 3121 3112 error: 3122 3113 if (flags & SLAB_PANIC) 3123 3114 panic("Cannot create slab %s size=%lu realsize=%u " 3124 3115 "order=%u offset=%u flags=%lx\n", 3125 - s->name, (unsigned long)size, s->size, oo_order(s->oo), 3116 + s->name, (unsigned long)s->size, s->size, oo_order(s->oo), 3126 3117 s->offset, flags); 3127 - return 0; 3118 + return -EINVAL; 3128 3119 } 3129 3120 3130 3121 /* ··· 3146 3137 sizeof(long), GFP_ATOMIC); 3147 3138 if (!map) 3148 3139 return; 3149 - slab_err(s, page, "%s", 
text); 3140 + slab_err(s, page, text, s->name); 3150 3141 slab_lock(page); 3151 3142 3152 3143 get_map(s, page, map); ··· 3178 3169 discard_slab(s, page); 3179 3170 } else { 3180 3171 list_slab_objects(s, page, 3181 - "Objects remaining on kmem_cache_close()"); 3172 + "Objects remaining in %s on kmem_cache_close()"); 3182 3173 } 3183 3174 } 3184 3175 } ··· 3191 3182 int node; 3192 3183 3193 3184 flush_all(s); 3194 - free_percpu(s->cpu_slab); 3195 3185 /* Attempt to free all objects */ 3196 3186 for_each_node_state(node, N_NORMAL_MEMORY) { 3197 3187 struct kmem_cache_node *n = get_node(s, node); ··· 3199 3191 if (n->nr_partial || slabs_node(s, node)) 3200 3192 return 1; 3201 3193 } 3194 + free_percpu(s->cpu_slab); 3202 3195 free_kmem_cache_nodes(s); 3203 3196 return 0; 3204 3197 } 3205 3198 3206 - /* 3207 - * Close a cache and release the kmem_cache structure 3208 - * (must be used for caches created using kmem_cache_create) 3209 - */ 3210 - void kmem_cache_destroy(struct kmem_cache *s) 3199 + int __kmem_cache_shutdown(struct kmem_cache *s) 3211 3200 { 3212 - mutex_lock(&slab_mutex); 3213 - s->refcount--; 3214 - if (!s->refcount) { 3215 - list_del(&s->list); 3216 - mutex_unlock(&slab_mutex); 3217 - if (kmem_cache_close(s)) { 3218 - printk(KERN_ERR "SLUB %s: %s called for cache that " 3219 - "still has objects.\n", s->name, __func__); 3220 - dump_stack(); 3221 - } 3222 - if (s->flags & SLAB_DESTROY_BY_RCU) 3223 - rcu_barrier(); 3201 + int rc = kmem_cache_close(s); 3202 + 3203 + if (!rc) 3224 3204 sysfs_slab_remove(s); 3225 - } else 3226 - mutex_unlock(&slab_mutex); 3205 + 3206 + return rc; 3227 3207 } 3228 - EXPORT_SYMBOL(kmem_cache_destroy); 3229 3208 3230 3209 /******************************************************************** 3231 3210 * Kmalloc subsystem ··· 3220 3225 3221 3226 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; 3222 3227 EXPORT_SYMBOL(kmalloc_caches); 3223 - 3224 - static struct kmem_cache *kmem_cache; 3225 3228 3226 3229 #ifdef 
CONFIG_ZONE_DMA 3227 3230 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; ··· 3266 3273 { 3267 3274 struct kmem_cache *s; 3268 3275 3269 - s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3276 + s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 3277 + 3278 + s->name = name; 3279 + s->size = s->object_size = size; 3280 + s->align = ARCH_KMALLOC_MINALIGN; 3270 3281 3271 3282 /* 3272 3283 * This function is called with IRQs disabled during early-boot on 3273 3284 * single CPU so there's no need to take slab_mutex here. 3274 3285 */ 3275 - if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, 3276 - flags, NULL)) 3286 + if (kmem_cache_open(s, flags)) 3277 3287 goto panic; 3278 3288 3279 3289 list_add(&s->list, &slab_caches); ··· 3358 3362 if (unlikely(ZERO_OR_NULL_PTR(s))) 3359 3363 return s; 3360 3364 3361 - ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); 3365 + ret = slab_alloc(s, flags, _RET_IP_); 3362 3366 3363 3367 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 3364 3368 ··· 3401 3405 if (unlikely(ZERO_OR_NULL_PTR(s))) 3402 3406 return s; 3403 3407 3404 - ret = slab_alloc(s, flags, node, _RET_IP_); 3408 + ret = slab_alloc_node(s, flags, node, _RET_IP_); 3405 3409 3406 3410 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 3407 3411 ··· 3478 3482 if (unlikely(!PageSlab(page))) { 3479 3483 BUG_ON(!PageCompound(page)); 3480 3484 kmemleak_free(x); 3481 - put_page(page); 3485 + __free_pages(page, compound_order(page)); 3482 3486 return; 3483 3487 } 3484 3488 slab_free(page->slab, page, object, _RET_IP_); ··· 3715 3719 slub_max_order = 0; 3716 3720 3717 3721 kmem_size = offsetof(struct kmem_cache, node) + 3718 - nr_node_ids * sizeof(struct kmem_cache_node *); 3722 + nr_node_ids * sizeof(struct kmem_cache_node *); 3719 3723 3720 3724 /* Allocate two kmem_caches from the page allocator */ 3721 3725 kmalloc_size = ALIGN(kmem_size, cache_line_size()); 3722 3726 order = get_order(2 * kmalloc_size); 3723 - kmem_cache = (void 
*)__get_free_pages(GFP_NOWAIT, order); 3727 + kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order); 3724 3728 3725 3729 /* 3726 3730 * Must first have the slab cache available for the allocations of the ··· 3729 3733 */ 3730 3734 kmem_cache_node = (void *)kmem_cache + kmalloc_size; 3731 3735 3732 - kmem_cache_open(kmem_cache_node, "kmem_cache_node", 3733 - sizeof(struct kmem_cache_node), 3734 - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3736 + kmem_cache_node->name = "kmem_cache_node"; 3737 + kmem_cache_node->size = kmem_cache_node->object_size = 3738 + sizeof(struct kmem_cache_node); 3739 + kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); 3735 3740 3736 3741 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3737 3742 ··· 3740 3743 slab_state = PARTIAL; 3741 3744 3742 3745 temp_kmem_cache = kmem_cache; 3743 - kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, 3744 - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3746 + kmem_cache->name = "kmem_cache"; 3747 + kmem_cache->size = kmem_cache->object_size = kmem_size; 3748 + kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC); 3749 + 3745 3750 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3746 3751 memcpy(kmem_cache, temp_kmem_cache, kmem_size); 3747 3752 ··· 3932 3933 return NULL; 3933 3934 } 3934 3935 3935 - struct kmem_cache *__kmem_cache_create(const char *name, size_t size, 3936 + struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 3936 3937 size_t align, unsigned long flags, void (*ctor)(void *)) 3937 3938 { 3938 3939 struct kmem_cache *s; 3939 - char *n; 3940 3940 3941 3941 s = find_mergeable(size, align, flags, name, ctor); 3942 3942 if (s) { ··· 3949 3951 3950 3952 if (sysfs_slab_alias(s, name)) { 3951 3953 s->refcount--; 3952 - return NULL; 3954 + s = NULL; 3953 3955 } 3954 - return s; 3955 3956 } 3956 3957 3957 - n = kstrdup(name, GFP_KERNEL); 3958 - if (!n) 3959 - return NULL; 3958 + return s; 3959 + } 3960 3960 3961 - s = 
kmalloc(kmem_size, GFP_KERNEL); 3962 - if (s) { 3963 - if (kmem_cache_open(s, n, 3964 - size, align, flags, ctor)) { 3965 - int r; 3961 + int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) 3962 + { 3963 + int err; 3966 3964 3967 - list_add(&s->list, &slab_caches); 3968 - mutex_unlock(&slab_mutex); 3969 - r = sysfs_slab_add(s); 3970 - mutex_lock(&slab_mutex); 3965 + err = kmem_cache_open(s, flags); 3966 + if (err) 3967 + return err; 3971 3968 3972 - if (!r) 3973 - return s; 3969 + mutex_unlock(&slab_mutex); 3970 + err = sysfs_slab_add(s); 3971 + mutex_lock(&slab_mutex); 3974 3972 3975 - list_del(&s->list); 3976 - kmem_cache_close(s); 3977 - } 3978 - kfree(s); 3979 - } 3980 - kfree(n); 3981 - return NULL; 3973 + if (err) 3974 + kmem_cache_close(s); 3975 + 3976 + return err; 3982 3977 } 3983 3978 3984 3979 #ifdef CONFIG_SMP ··· 4024 4033 if (unlikely(ZERO_OR_NULL_PTR(s))) 4025 4034 return s; 4026 4035 4027 - ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); 4036 + ret = slab_alloc(s, gfpflags, caller); 4028 4037 4029 4038 /* Honor the call site pointer we received. */ 4030 4039 trace_kmalloc(caller, ret, size, s->size, gfpflags); ··· 4054 4063 if (unlikely(ZERO_OR_NULL_PTR(s))) 4055 4064 return s; 4056 4065 4057 - ret = slab_alloc(s, gfpflags, node, caller); 4066 + ret = slab_alloc_node(s, gfpflags, node, caller); 4058 4067 4059 4068 /* Honor the call site pointer we received. 
*/ 4060 4069 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); ··· 5201 5210 return err; 5202 5211 } 5203 5212 5204 - static void kmem_cache_release(struct kobject *kobj) 5205 - { 5206 - struct kmem_cache *s = to_slab(kobj); 5207 - 5208 - kfree(s->name); 5209 - kfree(s); 5210 - } 5211 - 5212 5213 static const struct sysfs_ops slab_sysfs_ops = { 5213 5214 .show = slab_attr_show, 5214 5215 .store = slab_attr_store, ··· 5208 5225 5209 5226 static struct kobj_type slab_ktype = { 5210 5227 .sysfs_ops = &slab_sysfs_ops, 5211 - .release = kmem_cache_release 5212 5228 }; 5213 5229 5214 5230 static int uevent_filter(struct kset *kset, struct kobject *kobj)
+22 -15
mm/util.c
··· 105 105 } 106 106 EXPORT_SYMBOL(memdup_user); 107 107 108 - /** 109 - * __krealloc - like krealloc() but don't free @p. 110 - * @p: object to reallocate memory for. 111 - * @new_size: how many bytes of memory are required. 112 - * @flags: the type of memory to allocate. 113 - * 114 - * This function is like krealloc() except it never frees the originally 115 - * allocated buffer. Use this if you don't want to free the buffer immediately 116 - * like, for example, with RCU. 117 - */ 118 - void *__krealloc(const void *p, size_t new_size, gfp_t flags) 108 + static __always_inline void *__do_krealloc(const void *p, size_t new_size, 109 + gfp_t flags) 119 110 { 120 111 void *ret; 121 112 size_t ks = 0; 122 - 123 - if (unlikely(!new_size)) 124 - return ZERO_SIZE_PTR; 125 113 126 114 if (p) 127 115 ks = ksize(p); ··· 122 134 memcpy(ret, p, ks); 123 135 124 136 return ret; 137 + } 138 + 139 + /** 140 + * __krealloc - like krealloc() but don't free @p. 141 + * @p: object to reallocate memory for. 142 + * @new_size: how many bytes of memory are required. 143 + * @flags: the type of memory to allocate. 144 + * 145 + * This function is like krealloc() except it never frees the originally 146 + * allocated buffer. Use this if you don't want to free the buffer immediately 147 + * like, for example, with RCU. 148 + */ 149 + void *__krealloc(const void *p, size_t new_size, gfp_t flags) 150 + { 151 + if (unlikely(!new_size)) 152 + return ZERO_SIZE_PTR; 153 + 154 + return __do_krealloc(p, new_size, flags); 155 + 125 156 } 126 157 EXPORT_SYMBOL(__krealloc); 127 158 ··· 164 157 return ZERO_SIZE_PTR; 165 158 } 166 159 167 - ret = __krealloc(p, new_size, flags); 160 + ret = __do_krealloc(p, new_size, flags); 168 161 if (ret && p != ret) 169 162 kfree(p); 170 163