Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm

* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: fix typo in Documentation/vm/slub.txt
  slab: NUMA slab allocator migration bugfix
  slub: Do not cross cacheline boundaries for very small objects
  slab - use angle brackets for include of kmalloc_sizes.h
  slab numa fallback logic: Do not pass unfiltered flags to page allocator
  slub statistics: Fix check for DEACTIVATE_REMOTE_FREES

 Documentation/vm/slub.txt |  4 ++--
 include/linux/slab_def.h  |  4 ++--
 mm/slab.c                 | 11 +++++------
 mm/slub.c                 | 13 ++++++++-----
 4 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -50,14 +50,14 @@
 
 Trying to find an issue in the dentry cache? Try
 
-slub_debug=,dentry_cache
+slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab. We can just apply sanity checks
 to the dentry cache with
 
-slub_debug=F,dentry_cache
+slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -41,7 +41,7 @@
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -333,7 +333,7 @@
 		return i; \
 	else \
 		i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	__bad_size();
 	} else
@@ -2964,11 +2964,10 @@
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
-	check_irq_off();
-	ac = cpu_cache_get(cachep);
 retry:
+	check_irq_off();
+	node = numa_node_id();
+	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -3279,7 +3280,7 @@
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, flags, -1);
+		obj = kmem_getpages(cache, local_flags, -1);
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
 		if (obj) {
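The cache_alloc_refill() hunk above matters because the slow path (cache_grow() with __GFP_WAIT) can re-enable interrupts and let the scheduler migrate the task to another CPU, possibly on another NUMA node; the array_cache pointer and node id fetched before the retry label would then be stale. Below is a minimal user-space sketch of that retry pattern, not kernel code: current_cpu, node_of() and slow_path() are hypothetical stand-ins for the CPU id, numa_node_id() and cache_grow().

#include <stdio.h>
#include <stdlib.h>

static int current_cpu;				/* stand-in for the CPU we run on */

static int node_of(int cpu) { return cpu / 2; }	/* pretend: two CPUs per node */
static void slow_path(void) { current_cpu = rand() % 4; } /* may "migrate" us */

int main(void)
{
	int cpu, node;

retry:
	/* Like the fix: re-read per-CPU/per-node state on every pass. */
	cpu = current_cpu;
	node = node_of(cpu);

	/* The kernel's cache_grow() may enable interrupts here; reading
	 * cpu/node before the retry label would leave them stale. */
	slow_path();
	if (current_cpu != cpu) {
		printf("migrated away from cpu %d (node %d), retrying\n", cpu, node);
		goto retry;
	}

	printf("refilling on cpu %d, node %d\n", cpu, node);
	return 0;
}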
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1368,7 +1368,7 @@
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
@@ -1856,12 +1856,15 @@
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
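The calculate_alignment() hunk keeps very small objects from straddling cache lines: instead of only cacheline-aligning objects larger than half a line, it halves the line size while the object still fits in that half, yielding the largest power-of-two alignment at which padded objects tile within one line. A minimal user-space sketch of that loop, assuming a 64-byte cache line (the kernel queries cache_line_size()):

#include <stdio.h>

#define CACHE_LINE 64UL		/* assumption; the kernel uses cache_line_size() */

/* Mirrors the new SLAB_HWCACHE_ALIGN branch in calculate_alignment(). */
static unsigned long hwcache_align(unsigned long size)
{
	unsigned long ralign = CACHE_LINE;

	while (size <= ralign / 2)
		ralign /= 2;
	return ralign;
}

int main(void)
{
	unsigned long sizes[] = { 8, 12, 24, 40, 64, 96 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object size %3lu -> align %2lu\n",
		       sizes[i], hwcache_align(sizes[i]));
	return 0;
}

With a 64-byte line, a 24-byte object now gets 32-byte alignment (two per line, none crossing a boundary), whereas the old check applied no cacheline alignment to anything at or below half a line and so let such objects span two lines.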