Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Fix a crash during slabinfo -v
  tracing/slab: Move kmalloc tracepoint out of inline code
  slub: Fix slub_lock down/up imbalance
  slub: Fix build breakage in Documentation/vm
  slub tracing: move trace calls out of always inlined functions to reduce kernel code size
  slub: move slabinfo.c to tools/slub/slabinfo.c
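
The common thread in the tracing patches is code size: kmalloc() and friends are __always_inline, so a tracepoint placed in the inline body was expanded at every call site in the kernel. The fix routes allocations through a single out-of-line *_trace() helper that allocates and fires the tracepoint in one place, leaving the inline wrapper tiny. A minimal userspace sketch of the pattern (hypothetical names, with printf standing in for the tracepoint; not the kernel code itself):

#include <stdio.h>
#include <stdlib.h>

/* Out-of-line helper: allocates and fires the (stand-in) tracepoint once,
 * here, instead of expanding the trace call at every inlined call site. */
__attribute__((noinline))
static void *my_alloc_trace(size_t size)
{
        void *ret = malloc(size);
        printf("trace: alloc ptr=%p size=%zu\n", ret, size); /* ~ trace_kmalloc() */
        return ret;
}

/* Inline wrapper: stays small because it no longer carries the tracepoint. */
static inline void *my_alloc(size_t size)
{
        return my_alloc_trace(size);
}

int main(void)
{
        free(my_alloc(32));
        return 0;
}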

+89 -75
+1 -1
Documentation/vm/Makefile
···
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
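
Note: with slabinfo dropped from hostprogs-y it is no longer built by the Documentation/vm kbuild rules. After the move it can be compiled straight from its new location, e.g. gcc -o slabinfo tools/slub/slabinfo.c (assuming no dedicated tools/ Makefile exists at this point).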
+3 -3
Documentation/vm/slabinfo.c → tools/slub/slabinfo.c
···
         }
 
         for ( ; *opt; opt++)
-                switch (*opt) {
+                switch (*opt) {
                 case 'F' : case 'f':
                         if (sanity)
                                 return 0;
···
                         continue;
                 switch (de->d_type) {
                    case DT_LNK:
-                        alias->name = strdup(de->d_name);
+                        alias->name = strdup(de->d_name);
                         count = readlink(de->d_name, buffer, sizeof(buffer));
 
                         if (count < 0)
···
                    case DT_DIR:
                         if (chdir(de->d_name))
                                 fatal("Unable to access slab %s\n", slab->name);
-                        slab->name = strdup(de->d_name);
+                        slab->name = strdup(de->d_name);
                         slab->alias = 0;
                         slab->refs = 0;
                         slab->aliases = get_obj("aliases");

(the removed/added pairs above differ only in indentation whitespace, cleaned up as part of the move)
+13 -20
include/linux/slab_def.h
···
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+                                    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
         return kmem_cache_alloc(cachep, flags);
 }
···
 #endif
         cachep = malloc_sizes[i].cs_cachep;
 
-        ret = kmem_cache_alloc_notrace(cachep, flags);
-
-        trace_kmalloc(_THIS_IP_, ret,
-                      size, slab_buffer_size(cachep), flags);
+        ret = kmem_cache_alloc_trace(size, cachep, flags);
 
         return ret;
 }
···
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-                                           gfp_t flags,
-                                           int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+                                         struct kmem_cache *cachep,
+                                         gfp_t flags,
+                                         int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-                              gfp_t flags,
-                              int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+                            struct kmem_cache *cachep,
+                            gfp_t flags,
+                            int nodeid)
 {
         return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
···
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *cachep;
-        void *ret;
 
         if (__builtin_constant_p(size)) {
                 int i = 0;
···
 #endif
         cachep = malloc_sizes[i].cs_cachep;
 
-        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-        trace_kmalloc_node(_THIS_IP_, ret,
-                           size, slab_buffer_size(cachep),
-                           flags, node);
-
-        return ret;
+        return kmem_cache_alloc_node_trace(size, cachep, flags, node);
         }
         return __kmalloc_node(size, flags, node);
 }
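
Design note on the slab side: passing size down into the out-of-line kmem_cache_alloc_trace()/kmem_cache_alloc_node_trace() helpers lets the tracepoint report both the requested size and slab_buffer_size(cachep) from one location, while the !CONFIG_TRACING stubs stay __always_inline and collapse back to plain kmem_cache_alloc()/kmem_cache_alloc_node(), so the untraced fast path is unchanged.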
+26 -29
include/linux/slub_def.h
···
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
         ALLOC_FASTPATH,         /* Allocation from cpu slab */
···
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+        kmemleak_alloc(ret, size, 1, flags);
+        return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
         return kmem_cache_alloc(s, gfpflags);
+}
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+        return kmalloc_order(size, flags, order);
 }
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
         unsigned int order = get_order(size);
-        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-        kmemleak_alloc(ret, size, 1, flags);
-        trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-        return ret;
+        return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-        void *ret;
-
         if (__builtin_constant_p(size)) {
                 if (size > SLUB_MAX_SIZE)
                         return kmalloc_large(size, flags);
···
                 if (!s)
                         return ZERO_SIZE_PTR;
 
-                ret = kmem_cache_alloc_notrace(s, flags);
-
-                trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-                return ret;
+                return kmem_cache_alloc_trace(s, flags, size);
                 }
         }
         return __kmalloc(size, flags);
···
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                            gfp_t gfpflags,
-                                           int node);
+                                           int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
                               gfp_t gfpflags,
-                              int node)
+                              int node, size_t size)
 {
         return kmem_cache_alloc_node(s, gfpflags, node);
 }
···
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        void *ret;
-
         if (__builtin_constant_p(size) &&
                 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
                         struct kmem_cache *s = kmalloc_slab(size);
···
                 if (!s)
                         return ZERO_SIZE_PTR;
 
-                ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-                trace_kmalloc_node(_THIS_IP_, ret,
-                                   size, s->size, flags, node);
-
-                return ret;
+                return kmem_cache_alloc_node_trace(s, flags, node, size);
         }
         return __kmalloc_node(size, flags, node);
 }
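
For SLUB the header change goes one step further: kmalloc_large() now defers to kmalloc_order_trace(), so the large-allocation (page allocator) path is traced out of line as well. The new inline kmalloc_order() keeps only the __get_free_pages() and kmemleak_alloc() work in the header, which is what allows <trace/events/kmem.h> to be dropped from slub_def.h entirely.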
+23 -15
mm/slab.c
···
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-        return __cache_alloc(cachep, flags, __builtin_return_address(0));
+        void *ret;
+
+        ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+        trace_kmalloc(_RET_IP_, ret,
+                      size, slab_buffer_size(cachep), flags);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
···
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-                                    gfp_t flags,
-                                    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+                                  struct kmem_cache *cachep,
+                                  gfp_t flags,
+                                  int nodeid)
 {
-        return __cache_alloc_node(cachep, flags, nodeid,
+        void *ret;
+
+        ret = __cache_alloc_node(cachep, flags, nodeid,
                                   __builtin_return_address(0));
+        trace_kmalloc_node(_RET_IP_, ret,
+                           size, slab_buffer_size(cachep),
+                           flags, nodeid);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
         struct kmem_cache *cachep;
-        void *ret;
 
         cachep = kmem_find_general_cachep(size, flags);
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
-        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-        trace_kmalloc_node((unsigned long) caller, ret,
-                           size, cachep->buffer_size, flags, node);
-
-        return ret;
+        return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
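
With the helpers out of line, _RET_IP_ replaces the _THIS_IP_/explicit-caller plumbing. Because the kmalloc()/kmalloc_node() wrappers are __always_inline, the return address recorded inside the helper still points at (or very near) the original call site. A small userspace sketch of why that works (hypothetical names; __builtin_return_address(0) plays the role of _RET_IP_):

#include <stdio.h>
#include <stdlib.h>

/* Out-of-line traced allocator: records its own return address, which,
 * because the wrapper below is always inlined, lands in the caller. */
__attribute__((noinline))
static void *demo_alloc_trace(size_t size)
{
        void *ret = malloc(size);
        /* stand-in for trace_kmalloc(_RET_IP_, ...) */
        printf("alloc %zu bytes at %p, call site ~%p\n",
               size, ret, __builtin_return_address(0));
        return ret;
}

/* Always-inline wrapper, like the kmalloc() fast path in slab_def.h. */
static inline __attribute__((always_inline))
void *demo_alloc(size_t size)
{
        return demo_alloc_trace(size);
}

int main(void)
{
        free(demo_alloc(64));
        return 0;
}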
+23 -7
mm/slub.c
···
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
···
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-        return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+        trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+        void *ret = kmalloc_order(size, flags, order);
+        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+        return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
···
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                     gfp_t gfpflags,
-                                    int node)
+                                    int node, size_t size)
 {
-        return slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+        trace_kmalloc_node(_RET_IP_, ret,
+                           size, s->size, gfpflags, node);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
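
mm/slub.c now includes <trace/events/kmem.h> directly, since after this series the SLUB tracepoints fire only here rather than in every translation unit that pulls in slub_def.h; kmalloc_order_trace() gets an EXPORT_SYMBOL so the inline kmalloc_large() in modules can still reach it when CONFIG_TRACING is set.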