Merge branch 'slub-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm

* 'slub-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
Explain kmem_cache_cpu fields
SLUB: Do not upset lockdep
SLUB: Fix coding style violations
Add parameter to add_partial to avoid having two functions
SLUB: rename defrag to remote_node_defrag_ratio
Move count_partial before kmem_cache_shrink
SLUB: Fix sysfs refcounting
slub: fix shadowed variable sparse warnings
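
Of these, the add_partial change is the one that reshapes an internal API: add_partial() and add_partial_tail() are folded into a single helper that takes a tail flag, so callers choose between queueing a slab at the head of a node's partial list (cache-hot objects reused first) or at the tail (cold or nearly empty slabs filled last). The helper below is lifted from the mm/slub.c diff further down, with explanatory comments added; treat it as a reading aid rather than a standalone implementation.

/*
 * Consolidated partial-list insertion (see the mm/slub.c hunks below).
 * tail == 0: put the slab at the front of n->partial so its cache-hot
 *            objects are handed out again first.
 * tail == 1: put it at the back so slabs that already have objects in
 *            use get filled first (the cold/nearly-empty case in
 *            unfreeze_slab() and __slab_free()).
 */
static void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	if (tail)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}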

 include/linux/slub_def.h |  +9  -6
 mm/slub.c                | +99 -83
 2 files changed, 108 insertions(+), 89 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
···
 #include <linux/kobject.h>
 
 struct kmem_cache_cpu {
-	void **freelist;
-	struct page *page;
-	int node;
-	unsigned int offset;
-	unsigned int objsize;
+	void **freelist;	/* Pointer to first free per cpu object */
+	struct page *page;	/* The slab from which we are allocating */
+	int node;		/* The node of the page (or -1 for debug) */
+	unsigned int offset;	/* Freepointer offset (in word units) */
+	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 };
 
 struct kmem_cache_node {
···
 #endif
 
 #ifdef CONFIG_NUMA
-	int defrag_ratio;
+	/*
+	 * Defragmentation by allocating from a remote node.
+	 */
+	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
 #ifdef CONFIG_SMP
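
The rename above is mechanical; the field keeps its old meaning of how eagerly a cache may pull partial slabs from remote NUMA nodes. As a reading aid, here is a hedged sketch of the gate that consumes it in get_any_partial() (see the mm/slub.c diff below). The wrapper name defrag_remote_node_allowed() is hypothetical and exists only to frame the condition; the condition itself and the numbers come from the diff.

/*
 * Hypothetical wrapper around the check in get_any_partial()
 * (mm/slub.c diff below).  remote_node_defrag_ratio is compared with
 * get_cycles() % 1024, a cheap pseudo-random draw, so a larger value
 * makes a remote-node search for a partial slab more likely.
 * kmem_cache_open() defaults the ratio to 100 (roughly a 10% chance),
 * and the sysfs attribute scales user input by 10 (writing 10 stores
 * 100; values of 100 and above are ignored by the store handler).
 */
static inline int defrag_remote_node_allowed(struct kmem_cache *s)
{
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return 0;	/* stay on the local node this time */
	return 1;
}
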
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
···
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+	kfree(s);
+}
 #endif
 
 /********************************************************************
···
 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
-		printk(" %02x", addr[i]);
+		printk(KERN_CONT " %02x", addr[i]);
 		offset = i % 16;
 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
 		if (offset == 15) {
-			printk(" %s\n",ascii);
+			printk(KERN_CONT " %s\n", ascii);
 			newline = 1;
 		}
 	}
 	if (!newline) {
 		i %= 16;
 		while (i < 16) {
-			printk("   ");
+			printk(KERN_CONT "   ");
 			ascii[i] = ' ';
 			i++;
 		}
-		printk(" %s\n", ascii);
+		printk(KERN_CONT " %s\n", ascii);
 	}
 }
 
···
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
···
 
 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 			u8 *object, char *what,
-			u8* start, unsigned int value, unsigned int bytes)
+			u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
···
 		(!check_bytes_and_report(s, page, p, "Poison", p,
 			POISON_FREE, s->objsize - 1) ||
 		 !check_bytes_and_report(s, page, p, "Poison",
-			p + s->objsize -1, POISON_END, 1)))
+			p + s->objsize - 1, POISON_END, 1)))
 		return 0;
 	/*
 	 * check_pad_bytes cleans up on its own.
···
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
 			dump_stack();
-		}
-		else
+		} else
 			object_err(s, page, object,
 					"page slab pointer corrupt.");
 		goto fail;
···
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for ( ;*str && *str != ','; str++) {
+	for (; *str && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_DEBUG_FREE;
···
 			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n",*str);
+				"unknown. skipped\n", *str);
 		}
 	}
 
···
  */
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page * page;
+	struct page *page;
 	int pages = 1 << s->order;
 
 	if (s->order)
···
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		-pages);
 
 	__free_pages(page, s->order);
 }
···
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add(&page->lru, &n->partial);
+	if (tail)
+		list_add_tail(&page->lru, &n->partial);
+	else
+		list_add(&page->lru, &n->partial);
 	spin_unlock(&n->list_lock);
 }
 
···
 	 * expensive if we do it every time we are trying to find a slab
 	 * with available objects.
 	 */
-	if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
+	if (!s->remote_node_defrag_ratio ||
+			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
···
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
···
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page);
+			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 		slab_unlock(page);
···
 			 * partial list stays small. kmem_cache_shrink can
 			 * reclaim empty slabs from the partial list.
 			 */
-			add_partial_tail(n, page);
+			add_partial(n, page, 1);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
···
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	int tail = 1;
 	/*
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
···
 	 */
 	while (unlikely(c->freelist)) {
 		void **object;
+
+		tail = 0;	/* Hot objects. Put the slab first */
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
···
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page);
+	unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
···
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
···
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial_tail(get_node(s, page_to_nid(page)), page);
+		add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
 	slab_unlock(page);
···
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
  */
-static void __always_inline slab_free(struct kmem_cache *s,
+static __always_inline void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
···
 {
 	struct page *page;
 	struct kmem_cache_node *n;
+	unsigned long flags;
 
 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
···
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page);
+	/*
+	 * lockdep requires consistent irq usage for each lock
+	 * so even though there cannot be a race this early in
+	 * the boot sequence, we still disable irqs.
+	 */
+	local_irq_save(flags);
+	add_partial(n, page, 0);
+	local_irq_restore(flags);
 	return n;
 }
 
···
 
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
-	s->defrag_ratio = 100;
+	s->remote_node_defrag_ratio = 100;
 #endif
 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
 		goto error;
···
  */
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
-	struct page * page;
+	struct page *page;
 
 	page = get_object_page(object);
 
···
 		if (kmem_cache_close(s))
 			WARN_ON(1);
 		sysfs_slab_remove(s);
-		kfree(s);
 	} else
 		up_write(&slub_lock);
 }
···
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slub_min_order);
+	get_option(&str, &slub_min_order);
 
 	return 1;
 }
···
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option (&str, &slub_max_order);
+	get_option(&str, &slub_max_order);
 
 	return 1;
 }
···
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option (&str, &slub_min_objects);
+	get_option(&str, &slub_min_objects);
 
 	return 1;
 }
···
 	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
+
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
···
 		 * Check if alignment is compatible.
 		 * Courtesy of Adrian Drzewiecki
 		 */
-		if ((s->size & ~(align -1)) != s->size)
+		if ((s->size & ~(align - 1)) != s->size)
 			continue;
 
 		if (s->size - size >= sizeof(void *))
···
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
 
 #endif
 
···
 		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
-}
-
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
···
 static int list_locations(struct kmem_cache *s, char *buf,
 					enum track_item alloc)
 {
-	int n = 0;
+	int len = 0;
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
···
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (n > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - 100)
 			break;
-		n += sprintf(buf + n, "%7ld ", l->count);
+		len += sprintf(buf + len, "%7ld ", l->count);
 
 		if (l->addr)
-			n += sprint_symbol(buf + n, (unsigned long)l->addr);
+			len += sprint_symbol(buf + len, (unsigned long)l->addr);
 		else
-			n += sprintf(buf + n, "<not-available>");
+			len += sprintf(buf + len, "<not-available>");
 
 		if (l->sum_time != l->min_time) {
 			unsigned long remainder;
 
-			n += sprintf(buf + n, " age=%ld/%ld/%ld",
+			len += sprintf(buf + len, " age=%ld/%ld/%ld",
 			l->min_time,
 			div_long_long_rem(l->sum_time, l->count, &remainder),
 			l->max_time);
 		} else
-			n += sprintf(buf + n, " age=%ld",
+			len += sprintf(buf + len, " age=%ld",
 				l->min_time);
 
 		if (l->min_pid != l->max_pid)
-			n += sprintf(buf + n, " pid=%ld-%ld",
+			len += sprintf(buf + len, " pid=%ld-%ld",
 				l->min_pid, l->max_pid);
 		else
-			n += sprintf(buf + n, " pid=%ld",
+			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
 		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
-				n < PAGE_SIZE - 60) {
-			n += sprintf(buf + n, " cpus=");
-			n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+				len < PAGE_SIZE - 60) {
+			len += sprintf(buf + len, " cpus=");
+			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
 					l->cpus);
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
-				n < PAGE_SIZE - 60) {
-			n += sprintf(buf + n, " nodes=");
-			n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+				len < PAGE_SIZE - 60) {
+			len += sprintf(buf + len, " nodes=");
+			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
 					l->nodes);
 		}
 
-		n += sprintf(buf + n, "\n");
+		len += sprintf(buf + len, "\n");
 	}
 
 	free_loc_track(&t);
 	if (!t.count)
-		n += sprintf(buf, "No data\n");
-	return n;
+		len += sprintf(buf, "No data\n");
+	return len;
 }
···
 
 	for_each_possible_cpu(cpu) {
 		struct page *page;
-		int node;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (!c)
···
 			continue;
 		if (page) {
 			if (flags & SO_CPU) {
-				int x = 0;
-
 				if (flags & SO_OBJECTS)
 					x = page->inuse;
 				else
···
 SLAB_ATTR_RO(free_calls);
 
 #ifdef CONFIG_NUMA
-static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
+static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->defrag_ratio / 10);
+	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
 }
 
-static ssize_t defrag_ratio_store(struct kmem_cache *s,
+static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
 	int n = simple_strtoul(buf, NULL, 10);
 
 	if (n < 100)
-		s->defrag_ratio = n * 10;
+		s->remote_node_defrag_ratio = n * 10;
 	return length;
 }
-SLAB_ATTR(defrag_ratio);
+SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
-static struct attribute * slab_attrs[] = {
+static struct attribute *slab_attrs[] = {
 	&slab_size_attr.attr,
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
···
 	&cache_dma_attr.attr,
 #endif
 #ifdef CONFIG_NUMA
-	&defrag_ratio_attr.attr,
+	&remote_node_defrag_ratio_attr.attr,
 #endif
 	NULL
 };
···
 	return err;
 }
 
+static void kmem_cache_release(struct kobject *kobj)
+{
+	struct kmem_cache *s = to_slab(kobj);
+
+	kfree(s);
+}
+
 static struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
···
 
 static struct kobj_type slab_ktype = {
 	.sysfs_ops = &slab_sysfs_ops,
+	.release = kmem_cache_release
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)
···
 {
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
 }
 
 /*
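
The sysfs refcounting fix visible in the last few hunks changes who frees a kmem_cache: kmem_cache_destroy() no longer calls kfree() directly, the removal path drops the sysfs kobject reference instead, and the kobject's release callback does the final kfree(). The function containing kobject_uevent()/kobject_del() is shown above without its signature; it pairs with the sysfs_slab_remove() stub at the top of the diff, which now frees immediately since no kobject exists when SLUB's sysfs support is compiled out. A condensed sketch of the resulting lifetime, assembled from those hunks:

/*
 * Sketch of the new teardown path (condensed from the hunks above).
 * With sysfs support built in, the cache is only kfree()d once the
 * last reference to its kobject is dropped.
 */
static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);			/* final free happens here */
}

static struct kobj_type slab_ktype = {
	.sysfs_ops	= &slab_sysfs_ops,
	.release	= kmem_cache_release	/* called on the last put */
};

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);		/* may trigger kmem_cache_release() */
}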