Move count_partial before kmem_cache_shrink

Move count_partial(), which counts the objects in use on a node's partial
slabs, so that it is defined before kmem_cache_shrink.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

+13 -13
mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2607,6 +2607,19 @@
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3089,19 +3076,6 @@
 		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
-}
-
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
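
For context, count_partial() sums page->inuse over one node's partial list
while holding that node's list_lock. A caller that wants a cache-wide figure
would invoke it once per node and add up the results, roughly as in the sketch
below. The sketch is illustrative only and is not part of this patch: it
assumes the get_node() helper and for_each_node_state() iterator already used
elsewhere in mm/slub.c, and count_partial_objects() is a made-up name for the
example.

/*
 * Illustrative sketch, not part of this patch: sum the in-use object
 * counts that count_partial() reports for each node's partial list.
 * get_node() and for_each_node_state() are existing helpers assumed
 * here; count_partial_objects() itself is a hypothetical name.
 */
static unsigned long count_partial_objects(struct kmem_cache *s)
{
	unsigned long total = 0;
	int node;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		if (n)
			total += count_partial(n);
	}
	return total;
}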