Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

SLUB: Simplify debug code

Consolidate functionality into the #ifdef section.

Extract tracing into one subroutine.

Move object debug processing into the #ifdef section so that the
code in __slab_alloc and __slab_free becomes minimal.

Reduce the number of functions we need to provide stubs for in the !SLUB_DEBUG case.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Lameter and committed by Linus Torvalds
3ec09742 a35afb83

+57 -55
+57 -55
mm/slub.c
··· 742 742 return search == NULL; 743 743 } 744 744 745 + static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 746 + { 747 + if (s->flags & SLAB_TRACE) { 748 + printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 749 + s->name, 750 + alloc ? "alloc" : "free", 751 + object, page->inuse, 752 + page->freelist); 753 + 754 + if (!alloc) 755 + print_section("Object", (void *)object, s->objsize); 756 + 757 + dump_stack(); 758 + } 759 + } 760 + 745 761 /* 746 762 * Tracking of fully allocated slabs for debugging purposes. 747 763 */ ··· 782 766 spin_unlock(&n->list_lock); 783 767 } 784 768 785 - static int alloc_object_checks(struct kmem_cache *s, struct page *page, 786 - void *object) 769 + static void setup_object_debug(struct kmem_cache *s, struct page *page, 770 + void *object) 771 + { 772 + if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 773 + return; 774 + 775 + init_object(s, object, 0); 776 + init_tracking(s, object); 777 + } 778 + 779 + static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 780 + void *object, void *addr) 787 781 { 788 782 if (!check_slab(s, page)) 789 783 goto bad; ··· 808 782 goto bad; 809 783 } 810 784 811 - if (!object) 812 - return 1; 813 - 814 - if (!check_object(s, page, object, 0)) 785 + if (object && !check_object(s, page, object, 0)) 815 786 goto bad; 816 787 788 + /* Success perform special debug activities for allocs */ 789 + if (s->flags & SLAB_STORE_USER) 790 + set_track(s, object, TRACK_ALLOC, addr); 791 + trace(s, page, object, 1); 792 + init_object(s, object, 1); 817 793 return 1; 794 + 818 795 bad: 819 796 if (PageSlab(page)) { 820 797 /* ··· 835 806 return 0; 836 807 } 837 808 838 - static int free_object_checks(struct kmem_cache *s, struct page *page, 839 - void *object) 809 + static int free_debug_processing(struct kmem_cache *s, struct page *page, 810 + void *object, void *addr) 840 811 { 841 812 if (!check_slab(s, page)) 842 813 goto fail; ··· 870 841 
"to slab %s", object, page->slab->name); 871 842 goto fail; 872 843 } 844 + 845 + /* Special debug activities for freeing objects */ 846 + if (!SlabFrozen(page) && !page->freelist) 847 + remove_full(s, page); 848 + if (s->flags & SLAB_STORE_USER) 849 + set_track(s, object, TRACK_FREE, addr); 850 + trace(s, page, object, 0); 851 + init_object(s, object, 0); 873 852 return 1; 853 + 874 854 fail: 875 855 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 876 856 s->name, page, object); 877 857 return 0; 878 - } 879 - 880 - static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 881 - { 882 - if (s->flags & SLAB_TRACE) { 883 - printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 884 - s->name, 885 - alloc ? "alloc" : "free", 886 - object, page->inuse, 887 - page->freelist); 888 - 889 - if (!alloc) 890 - print_section("Object", (void *)object, s->objsize); 891 - 892 - dump_stack(); 893 - } 894 858 } 895 859 896 860 static int __init setup_slub_debug(char *str) ··· 954 932 s->flags |= slub_debug; 955 933 } 956 934 #else 935 + static inline void setup_object_debug(struct kmem_cache *s, 936 + struct page *page, void *object) {} 957 937 958 - static inline int alloc_object_checks(struct kmem_cache *s, 959 - struct page *page, void *object) { return 0; } 938 + static inline int alloc_debug_processing(struct kmem_cache *s, 939 + struct page *page, void *object, void *addr) { return 0; } 960 940 961 - static inline int free_object_checks(struct kmem_cache *s, 962 - struct page *page, void *object) { return 0; } 941 + static inline int free_debug_processing(struct kmem_cache *s, 942 + struct page *page, void *object, void *addr) { return 0; } 963 943 964 - static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 965 - static inline void remove_full(struct kmem_cache *s, struct page *page) {} 966 - static inline void trace(struct kmem_cache *s, struct page *page, 967 - void *object, int alloc) {} 968 - static 
inline void init_object(struct kmem_cache *s, 969 - void *object, int active) {} 970 - static inline void init_tracking(struct kmem_cache *s, void *object) {} 971 944 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 972 945 { return 1; } 973 946 static inline int check_object(struct kmem_cache *s, struct page *page, 974 947 void *object, int active) { return 1; } 975 - static inline void set_track(struct kmem_cache *s, void *object, 976 - enum track_item alloc, void *addr) {} 948 + static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 977 949 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} 978 950 #define slub_debug 0 979 951 #endif ··· 1004 988 static void setup_object(struct kmem_cache *s, struct page *page, 1005 989 void *object) 1006 990 { 1007 - if (SlabDebug(page)) { 1008 - init_object(s, object, 0); 1009 - init_tracking(s, object); 1010 - } 1011 - 991 + setup_object_debug(s, page, object); 1012 992 if (unlikely(s->ctor)) 1013 993 s->ctor(object, s, 0); 1014 994 } ··· 1461 1449 return NULL; 1462 1450 debug: 1463 1451 object = page->freelist; 1464 - if (!alloc_object_checks(s, page, object)) 1452 + if (!alloc_debug_processing(s, page, object, addr)) 1465 1453 goto another_slab; 1466 - if (s->flags & SLAB_STORE_USER) 1467 - set_track(s, object, TRACK_ALLOC, addr); 1468 - trace(s, page, object, 1); 1469 - init_object(s, object, 1); 1470 1454 1471 1455 page->inuse++; 1472 1456 page->freelist = object[page->offset]; ··· 1569 1561 return; 1570 1562 1571 1563 debug: 1572 - if (!free_object_checks(s, page, x)) 1564 + if (!free_debug_processing(s, page, x, addr)) 1573 1565 goto out_unlock; 1574 - if (!SlabFrozen(page) && !page->freelist) 1575 - remove_full(s, page); 1576 - if (s->flags & SLAB_STORE_USER) 1577 - set_track(s, x, TRACK_FREE, addr); 1578 - trace(s, page, object, 0); 1579 - init_object(s, object, 0); 1580 1566 goto checks_ok; 1581 1567 } 1582 1568 ··· 1807 1805 page->freelist = 
get_freepointer(kmalloc_caches, n); 1808 1806 page->inuse++; 1809 1807 kmalloc_caches->node[node] = n; 1810 - init_object(kmalloc_caches, n, 1); 1808 + setup_object_debug(kmalloc_caches, page, n); 1811 1809 init_kmem_cache_node(n); 1812 1810 atomic_long_inc(&n->nr_slabs); 1813 1811 add_partial(n, page);