Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
"Random bug fixes that have accumulated in my inbox over the past few
months"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
mm: Fix warning on make htmldocs caused by slab.c
mm: slub: work around unneeded lockdep warning
mm: sl[uo]b: fix misleading comments
slub: Fix possible format string bug.
slub: use lockdep_assert_held
slub: Fix calculation of cpu slabs
slab.h: remove duplicate kmalloc declaration and fix kernel-doc warnings

Changed files (+39 -27):

include/linux/slab.h (+4 -4):
···
 #ifdef CONFIG_SLUB
 /*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH   (PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX    (MAX_ORDER + PAGE_SHIFT)
···
 #ifdef CONFIG_SLOB
 /*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
  * No kmalloc array is necessary since objects of different sizes can
  * be allocated from the same page.
  */
-#define KMALLOC_SHIFT_MAX    30
 #define KMALLOC_SHIFT_HIGH   PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX    30
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW    3
 #endif
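As an aside, the size cutoffs described by the reworded comments are easy to see numerically. A minimal user-space sketch, assuming 4 KiB pages (PAGE_SHIFT = 12) instead of the kernel's per-architecture value; the SLUB_/SLOB_ prefixes exist only to keep both definitions in one file:

#include <stdio.h>

/* Illustrative stand-in; PAGE_SHIFT is per-architecture in the kernel. */
#define PAGE_SHIFT              12

/* SLUB: the largest kmalloc slab covers an order-1 page, PAGE_SIZE * 2. */
#define SLUB_KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)

/* SLOB: anything larger than one page goes straight to the page allocator. */
#define SLOB_KMALLOC_SHIFT_HIGH PAGE_SHIFT

int main(void)
{
        printf("SLUB serves kmalloc() from slab caches up to %lu bytes\n",
               1UL << SLUB_KMALLOC_SHIFT_HIGH);        /* 8192 */
        printf("SLOB serves kmalloc() from its own pages up to %lu bytes\n",
               1UL << SLOB_KMALLOC_SHIFT_HIGH);        /* 4096 */
        return 0;
}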

mm/slab.c (+1 -1):
···
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
  *
  * Destroy all the objs in a slab, and release the mem back to the system.
  * Before calling the slab must have been unlinked from the cache. The
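The mm/slab.c hunk is purely a kernel-doc fix: every @name line in a /** ... */ block has to match an actual parameter of the function it documents, otherwise "make htmldocs" warns, and the parameter of slab_destroy() is now a struct page *. A minimal sketch of the convention, using a made-up user-space function rather than the real slab_destroy():

#include <stdio.h>

/**
 * pair_sum - add two integers
 * @a: first addend
 * @b: second addend
 *
 * Each parameter in the signature must have a matching @name line above;
 * a stale name (like the old @slabp) is what triggers the htmldocs warning.
 */
static int pair_sum(int a, int b)
{
        return a + b;
}

int main(void)
{
        printf("%d\n", pair_sum(2, 3));
        return 0;
}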

mm/slub.c (+34 -22):
···
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
         struct kmem_cache_node *n, struct page *page)
 {
+        lockdep_assert_held(&n->list_lock);
+
         if (!(s->flags & SLAB_STORE_USER))
                 return;

         list_add(&page->lru, &n->full);
 }

-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+        lockdep_assert_held(&n->list_lock);
+
         if (!(s->flags & SLAB_STORE_USER))
                 return;
···
                                         void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                         struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+                                        struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
         unsigned long flags, const char *name,
         void (*ctor)(void *))
···
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
                                 struct page *page, int tail)
 {
+        lockdep_assert_held(&n->list_lock);
+
         n->nr_partial++;
         if (tail == DEACTIVATE_TO_TAIL)
                 list_add_tail(&page->lru, &n->partial);
···
                 list_add(&page->lru, &n->partial);
 }

-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
                                         struct page *page)
 {
+        lockdep_assert_held(&n->list_lock);
+
         list_del(&page->lru);
         n->nr_partial--;
 }
···
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                 struct kmem_cache_node *n, struct page *page,
···
         void *freelist;
         unsigned long counters;
         struct page new;
+
+        lockdep_assert_held(&n->list_lock);

         /*
          * Zap the freelist and set the frozen bit.
···
                 else if (l == M_FULL)

-                        remove_full(s, page);
+                        remove_full(s, n, page);

                 if (m == M_PARTIAL) {
···
                 new.inuse--;
                 if ((!new.inuse || !prior) && !was_frozen) {

-                        if (kmem_cache_has_cpu_partial(s) && !prior)
+                        if (kmem_cache_has_cpu_partial(s) && !prior) {

                                 /*
                                  * Slab was on no list before and will be
···
                                  */
                                 new.frozen = 1;

-                        else { /* Needs to be taken off a list */
+                        } else { /* Needs to be taken off a list */

                                 n = get_node(s, page_to_nid(page));
                                 /*
···
          */
         if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
                 if (kmem_cache_debug(s))
-                        remove_full(s, page);
+                        remove_full(s, n, page);
                 add_partial(n, page, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
···
                  */
                 remove_partial(n, page);
                 stat(s, FREE_REMOVE_PARTIAL);
-        } else
+        } else {
                 /* Slab must be on the full list */
-                remove_full(s, page);
+                remove_full(s, n, page);
+        }

         spin_unlock_irqrestore(&n->list_lock, flags);
         stat(s, FREE_SLAB);
···
         init_kmem_cache_node(n);
         inc_slabs_node(kmem_cache_node, node, page->objects);

+        /*
+         * the lock is for lockdep's sake, not for any actual
+         * race protection
+         */
+        spin_lock(&n->list_lock);
         add_partial(n, page, DEACTIVATE_TO_HEAD);
+        spin_unlock(&n->list_lock);
 }

 static void free_kmem_cache_nodes(struct kmem_cache *s)
···
                         page = ACCESS_ONCE(c->partial);
                         if (page) {
-                                x = page->pobjects;
+                                node = page_to_nid(page);
+                                if (flags & SO_TOTAL)
+                                        WARN_ON_ONCE(1);
+                                else if (flags & SO_OBJECTS)
+                                        WARN_ON_ONCE(1);
+                                else
+                                        x = page->pages;
                                 total += x;
                                 nodes[node] += x;
                         }
···
         }

         s->kobj.kset = slab_kset;
-        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
         if (err) {
                 kobject_put(&s->kobj);
                 return err;
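The last hunk is the "possible format string bug" fix from the shortlog: the cache name was passed to kobject_init_and_add() in the position of a printf-style format string, so a '%' in a cache name would be interpreted as a conversion specifier. A minimal user-space sketch of the same bug and fix, using printf() in place of the kobject API; the cache name here is made up:

#include <stdio.h>

int main(void)
{
        /* Hypothetical cache name that happens to contain a conversion spec. */
        const char *name = "my-cache-%s";

        /*
         * Buggy pattern (what the old slub.c line did via the kobject API):
         * the name itself becomes the format string, so the "%s" above would
         * make printf() read a garbage pointer argument.
         *
         *      printf(name);
         */

        /* Fixed pattern, the same shape as passing "%s", name in the hunk. */
        printf("%s\n", name);
        return 0;
}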