Merge branch 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  Update Pekka's email address in MAINTAINERS
  mm/slab.c: make local symbols static
  slub: Avoid use of slub_lock in show_slab_objects()
  memory hotplug: one more lock on memory hotplug

 5 files changed, 17 insertions(+), 7 deletions(-)
+2 -2
MAINTAINERS
···
 
 KMEMCHECK
 M:	Vegard Nossum <vegardno@ifi.uio.no>
-M:	Pekka Enberg <penberg@cs.helsinki.fi>
+M:	Pekka Enberg <penberg@kernel.org>
 S:	Maintained
 F:	Documentation/kmemcheck.txt
 F:	arch/x86/include/asm/kmemcheck.h
···
 
 SLAB ALLOCATOR
 M:	Christoph Lameter <cl@linux-foundation.org>
-M:	Pekka Enberg <penberg@cs.helsinki.fi>
+M:	Pekka Enberg <penberg@kernel.org>
 M:	Matt Mackall <mpm@selenic.com>
 L:	linux-mm@kvack.org
 S:	Maintained
+6
include/linux/memory_hotplug.h
···
 extern void put_page_bootmem(struct page *page);
 #endif
 
+/*
+ * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
+ * notifier will be called under this. 2) offline/online/add/remove memory
+ * will not run simultaneously.
+ */
+
 void lock_memory_hotplug(void);
 void unlock_memory_hotplug(void);
 
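The comment added above documents the locking contract the rest of this merge relies on: the mm/memory_hotplug.c hunks below take the lock for the duration of online_pages(), and mm/slub.c switches show_slab_objects() over to it. As a rough illustration of the contract only (this sketch is not part of the merge; the helper name and the printout are made up), a walker of node-indexed state would bracket its loop like this, so that memory cannot be onlined or offlined, and therefore no node can appear or disappear, while the loop runs:

#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/printk.h>

/*
 * Hypothetical example, not from the patch: with the hotplug lock held,
 * offline/online/add/remove memory cannot run, so the set of online
 * nodes observed by the loop stays stable.
 */
static void dump_online_nodes(void)
{
	int nid;

	lock_memory_hotplug();
	for_each_online_node(nid)
		pr_info("node %d is online\n", nid);
	unlock_memory_hotplug();
}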
+4
mm/memory_hotplug.c
···
 	int ret;
 	struct memory_notify arg;
 
+	lock_memory_hotplug();
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
 	arg.status_change_nid = -1;
···
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 	/*
···
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 			nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 
···
 
 	if (onlined_pages)
 		memory_notify(MEM_ONLINE, &arg);
+	unlock_memory_hotplug();
 
 	return 0;
 }
+3 -3
mm/slab.c
···
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
 #define SIZE_AC MAX_NUMNODES
 #define SIZE_L3 (2 * MAX_NUMNODES)
···
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
···
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
+2 -2
mm/slub.c
···
 		}
 	}
 
-	down_read(&slub_lock);
+	lock_memory_hotplug();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
···
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
 #endif
-	up_read(&slub_lock);
+	unlock_memory_hotplug();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
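On the mm/slub.c side, show_slab_objects() backs the per-cache files under /sys/kernel/slab/ (objects, slabs, partial, ...), so this hunk changes what a sysfs read takes: instead of down_read(&slub_lock), the per-node counting loops now run under the memory hotplug lock. The merge log carries only the subject line, but the header comment added above states the property this read path actually needs, namely that nodes cannot be onlined or offlined while the loops walk N_NORMAL_MEMORY and sum the per-node counters; lock_memory_hotplug() provides exactly that without involving slub_lock at all.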