Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] vmalloc_node

This patch adds

vmalloc_node(size, node) -> allocate the necessary memory on the specified NUMA node

and

get_vm_area_node(size, flags, node)

and the other functions that it depends on.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Christoph Lameter and committed by Linus Torvalds
(commit 930fc45a, parent be15cd72)

+64 -17
+7 -1
include/linux/vmalloc.h
··· 32 32 * Highlevel APIs for driver use 33 33 */ 34 34 extern void *vmalloc(unsigned long size); 35 + extern void *vmalloc_node(unsigned long size, int node); 35 36 extern void *vmalloc_exec(unsigned long size); 36 37 extern void *vmalloc_32(unsigned long size); 37 38 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 38 - extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot); 39 + extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, 40 + pgprot_t prot); 41 + extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, 42 + pgprot_t prot, int node); 39 43 extern void vfree(void *addr); 40 44 41 45 extern void *vmap(struct page **pages, unsigned int count, ··· 52 48 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); 53 49 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 54 50 unsigned long start, unsigned long end); 51 + extern struct vm_struct *get_vm_area_node(unsigned long size, 52 + unsigned long flags, int node); 55 53 extern struct vm_struct *remove_vm_area(void *addr); 56 54 extern struct vm_struct *__remove_vm_area(void *addr); 57 55 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
+57 -16
mm/vmalloc.c
··· 5 5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 6 6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 7 7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 8 + * Numa awareness, Christoph Lameter, SGI, June 2005 8 9 */ 9 10 10 11 #include <linux/mm.h> ··· 159 158 return err; 160 159 } 161 160 162 - struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 163 - unsigned long start, unsigned long end) 161 + struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags, 162 + unsigned long start, unsigned long end, int node) 164 163 { 165 164 struct vm_struct **p, *tmp, *area; 166 165 unsigned long align = 1; ··· 179 178 addr = ALIGN(start, align); 180 179 size = PAGE_ALIGN(size); 181 180 182 - area = kmalloc(sizeof(*area), GFP_KERNEL); 181 + area = kmalloc_node(sizeof(*area), GFP_KERNEL, node); 183 182 if (unlikely(!area)) 184 183 return NULL; 185 184 ··· 232 231 return NULL; 233 232 } 234 233 234 + struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 235 + unsigned long start, unsigned long end) 236 + { 237 + return __get_vm_area_node(size, flags, start, end, -1); 238 + } 239 + 235 240 /** 236 241 * get_vm_area - reserve a contingous kernel virtual area 237 242 * ··· 251 244 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 252 245 { 253 246 return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END); 247 + } 248 + 249 + struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node) 250 + { 251 + return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node); 254 252 } 255 253 256 254 /* Caller must hold vmlist_lock */ ··· 354 342 BUG_ON(in_interrupt()); 355 343 __vunmap(addr, 1); 356 344 } 357 - 358 345 EXPORT_SYMBOL(vfree); 359 346 360 347 /** ··· 371 360 BUG_ON(in_interrupt()); 372 361 __vunmap(addr, 0); 373 362 } 374 - 375 363 EXPORT_SYMBOL(vunmap); 376 364 377 365 /** ··· 
402 392 403 393 return area->addr; 404 394 } 405 - 406 395 EXPORT_SYMBOL(vmap); 407 396 408 - void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 397 + void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 398 + pgprot_t prot, int node) 409 399 { 410 400 struct page **pages; 411 401 unsigned int nr_pages, array_size, i; ··· 416 406 area->nr_pages = nr_pages; 417 407 /* Please note that the recursion is strictly bounded. */ 418 408 if (array_size > PAGE_SIZE) 419 - pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL); 409 + pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node); 420 410 else 421 - pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM)); 411 + pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node); 422 412 area->pages = pages; 423 413 if (!area->pages) { 424 414 remove_vm_area(area->addr); ··· 428 418 memset(area->pages, 0, array_size); 429 419 430 420 for (i = 0; i < area->nr_pages; i++) { 431 - area->pages[i] = alloc_page(gfp_mask); 421 + if (node < 0) 422 + area->pages[i] = alloc_page(gfp_mask); 423 + else 424 + area->pages[i] = alloc_pages_node(node, gfp_mask, 0); 432 425 if (unlikely(!area->pages[i])) { 433 426 /* Successfully allocated i pages, free them in __vunmap() */ 434 427 area->nr_pages = i; ··· 448 435 return NULL; 449 436 } 450 437 438 + void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 439 + { 440 + return __vmalloc_area_node(area, gfp_mask, prot, -1); 441 + } 442 + 451 443 /** 452 - * __vmalloc - allocate virtually contiguous memory 444 + * __vmalloc_node - allocate virtually contiguous memory 453 445 * 454 446 * @size: allocation size 455 447 * @gfp_mask: flags for the page level allocator 456 448 * @prot: protection mask for the allocated pages 449 + * @node node to use for allocation or -1 457 450 * 458 451 * Allocate enough pages to cover @size from the page level 459 452 * allocator with @gfp_mask flags. 
Map them into contiguous 460 453 * kernel virtual space, using a pagetable protection of @prot. 461 454 */ 462 - void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 455 + void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 456 + int node) 463 457 { 464 458 struct vm_struct *area; 465 459 ··· 474 454 if (!size || (size >> PAGE_SHIFT) > num_physpages) 475 455 return NULL; 476 456 477 - area = get_vm_area(size, VM_ALLOC); 457 + area = get_vm_area_node(size, VM_ALLOC, node); 478 458 if (!area) 479 459 return NULL; 480 460 481 - return __vmalloc_area(area, gfp_mask, prot); 461 + return __vmalloc_area_node(area, gfp_mask, prot, node); 482 462 } 463 + EXPORT_SYMBOL(__vmalloc_node); 483 464 465 + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 466 + { 467 + return __vmalloc_node(size, gfp_mask, prot, -1); 468 + } 484 469 EXPORT_SYMBOL(__vmalloc); 485 470 486 471 /** ··· 503 478 { 504 479 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); 505 480 } 506 - 507 481 EXPORT_SYMBOL(vmalloc); 482 + 483 + /** 484 + * vmalloc_node - allocate memory on a specific node 485 + * 486 + * @size: allocation size 487 + * @node; numa node 488 + * 489 + * Allocate enough pages to cover @size from the page level 490 + * allocator and map them into contiguous kernel virtual space. 491 + * 492 + * For tight cotrol over page level allocator and protection flags 493 + * use __vmalloc() instead. 494 + */ 495 + void *vmalloc_node(unsigned long size, int node) 496 + { 497 + return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node); 498 + } 499 + EXPORT_SYMBOL(vmalloc_node); 508 500 509 501 #ifndef PAGE_KERNEL_EXEC 510 502 # define PAGE_KERNEL_EXEC PAGE_KERNEL ··· 557 515 { 558 516 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); 559 517 } 560 - 561 518 EXPORT_SYMBOL(vmalloc_32); 562 519 563 520 long vread(char *buf, char *addr, unsigned long count)