Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] memory hotplug: move section_mem_map alloc to sparse.c

This basically keeps us from having to extern __kmalloc_section_memmap().

The vaddr_in_vmalloc_area() helper could go in a vmalloc header, but that
header gets hard to work with, because it needs some arch-specific macros.
Just stick it in here for now, instead of creating another header.

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Lion Vollnhals <webmaster@schiggl.de>
Signed-off-by: Jiri Slaby <xslaby@fi.muni.cz>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Dave Hansen, committed by Linus Torvalds

0b0acbec 3947be19

+75 -57
+2 -3
drivers/acpi/acpi_memhotplug.c
···
 	 * Note: Assume that this function returns zero on success
 	 */
 	result = add_memory(mem_device->start_addr,
-			(mem_device->end_addr - mem_device->start_addr) + 1,
-			mem_device->read_write_attribute);
+			(mem_device->end_addr - mem_device->start_addr) + 1);
 	if (result) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
 		mem_device->state = MEMORY_INVALID_STATE;
···
 	 * Ask the VM to offline this memory range.
 	 * Note: Assume that this function returns zero on success
 	 */
-	result = remove_memory(start, len, attr);
+	result = remove_memory(start, len);
 	if (result) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n"));
 		return_VALUE(result);
+1 -4
drivers/base/memory.c
···
 static int add_memory_block(unsigned long node_id, struct mem_section *section,
 			unsigned long state, int phys_device)
 {
-	size_t size = sizeof(struct memory_block);
-	struct memory_block *mem = kmalloc(size, GFP_KERNEL);
+	struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	int ret = 0;
 
 	if (!mem)
 		return -ENOMEM;
-
-	memset(mem, 0, size);
 
 	mem->phys_index = __section_nr(section);
 	mem->state = state;
+3 -45
mm/memory_hotplug.c
···
 
 #include <asm/tlbflush.h>
 
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
-{
-	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * nr_pages;
-
-	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
-	if (page)
-		goto got_map_page;
-
-	ret = vmalloc(memmap_size);
-	if (ret)
-		goto got_map_ptr;
-
-	return NULL;
-got_map_page:
-	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-	memset(ret, 0, memmap_size);
-
-	return ret;
-}
-
 extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
 			unsigned long size);
 static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
···
 	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
 }
 
-extern int sparse_add_one_section(struct zone *, unsigned long,
-				struct page *mem_map);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+				int nr_pages);
 static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
-	struct page *memmap;
 	int ret;
 
-	/*
-	 * This can potentially allocate memory, and does its own
-	 * internal locking.
-	 */
-	sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);
-
-	pgdat_resize_lock(pgdat, &flags);
-	memmap = __kmalloc_section_memmap(nr_pages);
-	ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
-	pgdat_resize_unlock(pgdat, &flags);
-
-	if (ret <= 0) {
-		/* the mem_map didn't get used */
-		if (memmap >= (struct page *)VMALLOC_START &&
-		    memmap < (struct page *)VMALLOC_END)
-			vfree(memmap);
-		else
-			free_pages((unsigned long)memmap,
-				get_order(sizeof(struct page) * nr_pages));
-	}
+	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
 	if (ret < 0)
 		return ret;
+69 -5
mm/sparse.c
···
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <asm/dma.h>
 
 /*
···
 	return NULL;
 }
 
+static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+{
+	struct page *page, *ret;
+	unsigned long memmap_size = sizeof(struct page) * nr_pages;
+
+	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
+	if (page)
+		goto got_map_page;
+
+	ret = vmalloc(memmap_size);
+	if (ret)
+		goto got_map_ptr;
+
+	return NULL;
+got_map_page:
+	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
+got_map_ptr:
+	memset(ret, 0, memmap_size);
+
+	return ret;
+}
+
+static int vaddr_in_vmalloc_area(void *addr)
+{
+	if (addr >= (void *)VMALLOC_START &&
+	    addr < (void *)VMALLOC_END)
+		return 1;
+	return 0;
+}
+
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+	if (vaddr_in_vmalloc_area(memmap))
+		vfree(memmap);
+	else
+		free_pages((unsigned long)memmap,
+			get_order(sizeof(struct page) * nr_pages));
+}
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
···
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
+int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+		int nr_pages)
 {
-	struct mem_section *ms = __pfn_to_section(start_pfn);
+	unsigned long section_nr = pfn_to_section_nr(start_pfn);
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct mem_section *ms;
+	struct page *memmap;
+	unsigned long flags;
+	int ret;
 
-	if (ms->section_mem_map & SECTION_MARKED_PRESENT)
-		return -EEXIST;
+	/*
+	 * no locking for this, because it does its own
+	 * plus, it does a kmalloc
+	 */
+	sparse_index_init(section_nr, pgdat->node_id);
+	memmap = __kmalloc_section_memmap(nr_pages);
 
+	pgdat_resize_lock(pgdat, &flags);
+
+	ms = __pfn_to_section(start_pfn);
+	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
+		ret = -EEXIST;
+		goto out;
+	}
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
-	return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
+	ret = sparse_init_one_section(ms, section_nr, memmap);
+
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
+out:
+	pgdat_resize_unlock(pgdat, &flags);
+	return ret;
 }