Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm

* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
slub: fix typo in Documentation/vm/slub.txt
slab: NUMA slab allocator migration bugfix
slub: Do not cross cacheline boundaries for very small objects
slab - use angle brackets for include of kmalloc_sizes.h
slab numa fallback logic: Do not pass unfiltered flags to page allocator
slub statistics: Fix check for DEACTIVATE_REMOTE_FREES

+17 -15
+2 -2
Documentation/vm/slub.txt
@@ -50,14 +50,14 @@
 
 Trying to find an issue in the dentry cache? Try
 
-	slub_debug=,dentry_cache
+	slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab. We can just apply sanity checks
 to the dentry cache with
 
-	slub_debug=F,dentry_cache
+	slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
+2 -2
include/linux/slab_def.h
@@ -41,7 +41,7 @@
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
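kmalloc_sizes.h is not an ordinary header: it is an X-macro table that slab_def.h expands several times under different definitions of CACHE(), which is why the include sits between a CACHE() definition and an #undef, and why naming it by its canonical <linux/...> path matters wherever it is expanded from. A minimal user-space sketch of the same pattern (the size table is inlined here rather than kept in a separate header, and the sizes are illustrative only):

#include <stdio.h>

/* Stand-in for the body of kmalloc_sizes.h: one CACHE() entry per size. */
#define SIZES CACHE(32) CACHE(64) CACHE(128) CACHE(256)

static int size_index(unsigned long size)
{
	int i = 0;
#define CACHE(x) if (size <= (x)) return i; else i++;
	SIZES
#undef CACHE
	return -1;	/* larger than any table entry */
}

int main(void)
{
	printf("%d\n", size_index(100));	/* 100 fits the 128 slot: prints 2 */
	return 0;
}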
+5 -6
mm/slab.c
@@ -333,7 +333,7 @@
 			return i; \
 		else \
 			i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		__bad_size();
 	} else
@@ -2964,11 +2964,10 @@
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
-	check_irq_off();
-	ac = cpu_cache_get(cachep);
 retry:
+	check_irq_off();
+	node = numa_node_id();
+	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -3279,7 +3280,7 @@
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 	kmem_flagcheck(cache, flags);
-	obj = kmem_getpages(cache, flags, -1);
+	obj = kmem_getpages(cache, local_flags, -1);
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	if (obj) {
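Two separate fixes above. In cache_alloc_refill(), the node and array-cache lookups move below the retry label: the allocator can drop the IRQ-off section on the way to a retry, the task may be migrated to another CPU/node in the meantime, and stale node/ac values would then feed the wrong per-node lists. In fallback_alloc(), kmem_getpages() now receives local_flags, the copy of flags already masked down to bits the page allocator understands, rather than the unfiltered flags. A user-space sketch of the first pattern, assuming glibc's sched_getcpu() as a stand-in for numa_node_id()/cpu_cache_get() (the pool and helpers here are hypothetical):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

static void *pool[1024];		/* hypothetical per-CPU object slot */

static void *try_take(int cpu)
{
	void *obj = pool[cpu];
	pool[cpu] = NULL;
	return obj;
}

static int refill(int cpu)
{
	pool[cpu] = malloc(32);		/* pretend a fresh slab arrived */
	return pool[cpu] != NULL;
}

static void *alloc_obj(void)
{
	int cpu;
retry:
	cpu = sched_getcpu();		/* re-derive per-CPU state on every pass */
	if (cpu < 0)
		return NULL;
	void *obj = try_take(cpu);
	if (obj)
		return obj;
	if (refill(cpu))
		goto retry;		/* we may run on a different CPU by now */
	return NULL;
}

int main(void)
{
	printf("got %p\n", alloc_obj());
	return 0;
}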
+8 -5
mm/slub.c
@@ -1368,7 +1368,7 @@
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
@@ -1856,12 +1856,15 @@
 	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
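Also two fixes. The statistics hunk counts DEACTIVATE_REMOTE_FREES only when the slab page's own freelist is non-empty, i.e. when remote CPUs freed objects back to the page while it was the cpu slab; c->freelist is the local cpu freelist and says nothing about remote frees. The alignment hunk replaces the old all-or-nothing rule (full cache-line alignment only for objects bigger than half a line) with a loop that halves the line size until the object no longer fits in half of it, giving very small objects the largest power-of-two alignment at which none of them straddles a cache line. A standalone sketch of the new calculation, with hwcache_align() as a hypothetical wrapper and a 64-byte line assumed (cache_line_size() is per-arch in the kernel):

#include <stdio.h>

#define CACHE_LINE_SIZE 64UL

/* Mirrors the SLAB_HWCACHE_ALIGN branch above: shrink ralign until the
 * object no longer fits in half of it, then take the larger of that and
 * the caller-requested alignment. */
static unsigned long hwcache_align(unsigned long size, unsigned long align)
{
	unsigned long ralign = CACHE_LINE_SIZE;

	while (size <= ralign / 2)
		ralign /= 2;
	return align > ralign ? align : ralign;
}

int main(void)
{
	/* 20-byte objects: 20 <= 32 halves ralign to 32, 20 > 16 stops the
	 * loop. Two 32-byte slots per 64-byte line, none crossing it. */
	printf("%lu\n", hwcache_align(20, 8));	/* prints 32 */
	/* Objects over half a line keep full-line alignment as before. */
	printf("%lu\n", hwcache_align(100, 8));	/* prints 64 */
	return 0;
}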