Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/vmalloc.c: clean up map_vm_area third argument

Currently map_vm_area() takes (struct page ***pages) as its third argument,
and after mapping, it moves (*pages) to point to (*pages +
nr_mapped_pages).

It looks like this kind of increment is useless to its caller these
days. The callers don't care about the increments and actually they're
trying to avoid this by passing another copy to map_vm_area().

The caller can always guarantee all the pages can be mapped into vm_area
as specified in first argument and the caller only cares about whether
map_vm_area() fails or not.

This patch cleans up the pointer movement in map_vm_area() and updates
its callers accordingly.

Signed-off-by: WANG Chao <chaowang@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

WANG Chao and committed by
Linus Torvalds
f6f8ed47 21bda264

+11 -20
+1 -1
arch/tile/kernel/module.c
··· 58 58 area->nr_pages = npages; 59 59 area->pages = pages; 60 60 61 - if (map_vm_area(area, prot_rwx, &pages)) { 61 + if (map_vm_area(area, prot_rwx, pages)) { 62 62 vunmap(area->addr); 63 63 goto error; 64 64 }
+2 -5
drivers/lguest/core.c
··· 42 42 static __init int map_switcher(void) 43 43 { 44 44 int i, err; 45 - struct page **pagep; 46 45 47 46 /* 48 47 * Map the Switcher in to high memory. ··· 109 110 * This code actually sets up the pages we've allocated to appear at 110 111 * switcher_addr. map_vm_area() takes the vma we allocated above, the 111 112 * kind of pages we're mapping (kernel pages), and a pointer to our 112 - * array of struct pages. It increments that pointer, but we don't 113 - * care. 113 + * array of struct pages. 114 114 */ 115 - pagep = lg_switcher_pages; 116 - err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); 115 + err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages); 117 116 if (err) { 118 117 printk("lguest: map_vm_area failed: %i\n", err); 119 118 goto free_vma;
+1 -3
drivers/staging/android/binder.c
··· 585 585 586 586 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 587 587 int ret; 588 - struct page **page_array_ptr; 589 588 590 589 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 591 590 ··· 597 598 } 598 599 tmp_area.addr = page_addr; 599 600 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; 600 - page_array_ptr = page; 601 - ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); 601 + ret = map_vm_area(&tmp_area, PAGE_KERNEL, page); 602 602 if (ret) { 603 603 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", 604 604 proc->pid, page_addr);
+1 -1
include/linux/vmalloc.h
··· 113 113 extern struct vm_struct *find_vm_area(const void *addr); 114 114 115 115 extern int map_vm_area(struct vm_struct *area, pgprot_t prot, 116 - struct page ***pages); 116 + struct page **pages); 117 117 #ifdef CONFIG_MMU 118 118 extern int map_kernel_range_noflush(unsigned long start, unsigned long size, 119 119 pgprot_t prot, struct page **pages);
+5 -9
mm/vmalloc.c
··· 1270 1270 } 1271 1271 EXPORT_SYMBOL_GPL(unmap_kernel_range); 1272 1272 1273 - int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages) 1273 + int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) 1274 1274 { 1275 1275 unsigned long addr = (unsigned long)area->addr; 1276 1276 unsigned long end = addr + get_vm_area_size(area); 1277 1277 int err; 1278 1278 1279 - err = vmap_page_range(addr, end, prot, *pages); 1280 - if (err > 0) { 1281 - *pages += err; 1282 - err = 0; 1283 - } 1279 + err = vmap_page_range(addr, end, prot, pages); 1284 1280 1285 - return err; 1281 + return err > 0 ? 0 : err; 1286 1282 } 1287 1283 EXPORT_SYMBOL_GPL(map_vm_area); 1288 1284 ··· 1544 1548 if (!area) 1545 1549 return NULL; 1546 1550 1547 - if (map_vm_area(area, prot, &pages)) { 1551 + if (map_vm_area(area, prot, pages)) { 1548 1552 vunmap(area->addr); 1549 1553 return NULL; 1550 1554 } ··· 1602 1606 cond_resched(); 1603 1607 } 1604 1608 1605 - if (map_vm_area(area, prot, &pages)) 1609 + if (map_vm_area(area, prot, pages)) 1606 1610 goto fail; 1607 1611 return area->addr; 1608 1612
+1 -1
mm/zsmalloc.c
··· 690 690 static inline void *__zs_map_object(struct mapping_area *area, 691 691 struct page *pages[2], int off, int size) 692 692 { 693 - BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages)); 693 + BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); 694 694 area->vm_addr = area->vm->addr; 695 695 return area->vm_addr + off; 696 696 }