Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/gup: Switch all callers of get_user_pages() to not pass tsk/mm

We will soon modify the vanilla get_user_pages() so it can no
longer be used on mm/tasks other than 'current/current->mm',
which is by far the most common way it is called. For now,
we allow the old-style calls, but warn when they are used.
(implemented in previous patch)

This patch switches all callers of:

get_user_pages()
get_user_pages_unlocked()
get_user_pages_locked()

to stop passing tsk/mm so they will no longer see the warnings.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: jack@suse.cz
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210156.113E9407@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Dave Hansen; committed by Ingo Molnar
d4edcf0d cde70140

+44 -64
+2 -6
arch/cris/arch-v32/drivers/cryptocop.c
··· 2719 2719 /* Acquire the mm page semaphore. */ 2720 2720 down_read(&current->mm->mmap_sem); 2721 2721 2722 - err = get_user_pages(current, 2723 - current->mm, 2724 - (unsigned long int)(oper.indata + prev_ix), 2722 + err = get_user_pages((unsigned long int)(oper.indata + prev_ix), 2725 2723 noinpages, 2726 2724 0, /* read access only for in data */ 2727 2725 0, /* no force */ ··· 2734 2736 } 2735 2737 noinpages = err; 2736 2738 if (oper.do_cipher){ 2737 - err = get_user_pages(current, 2738 - current->mm, 2739 - (unsigned long int)oper.cipher_outdata, 2739 + err = get_user_pages((unsigned long int)oper.cipher_outdata, 2740 2740 nooutpages, 2741 2741 1, /* write access for out data */ 2742 2742 0, /* no force */
+1 -2
arch/ia64/kernel/err_inject.c
··· 142 142 u64 virt_addr=simple_strtoull(buf, NULL, 16); 143 143 int ret; 144 144 145 - ret = get_user_pages(current, current->mm, virt_addr, 146 - 1, VM_READ, 0, NULL, NULL); 145 + ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL); 147 146 if (ret<=0) { 148 147 #ifdef ERR_INJ_DEBUG 149 148 printk("Virtual address %lx is not existing.\n",virt_addr);
+1 -2
arch/mips/mm/gup.c
··· 286 286 start += nr << PAGE_SHIFT; 287 287 pages += nr; 288 288 289 - ret = get_user_pages_unlocked(current, mm, start, 290 - (end - start) >> PAGE_SHIFT, 289 + ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, 291 290 write, 0, pages); 292 291 293 292 /* Have to be a bit careful with return values */
+1 -3
arch/s390/mm/gup.c
··· 210 210 int get_user_pages_fast(unsigned long start, int nr_pages, int write, 211 211 struct page **pages) 212 212 { 213 - struct mm_struct *mm = current->mm; 214 213 int nr, ret; 215 214 216 215 might_sleep(); ··· 221 222 /* Try to get the remaining pages with get_user_pages */ 222 223 start += nr << PAGE_SHIFT; 223 224 pages += nr; 224 - ret = get_user_pages_unlocked(current, mm, start, 225 - nr_pages - nr, write, 0, pages); 225 + ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); 226 226 /* Have to be a bit careful with return values */ 227 227 if (nr > 0) 228 228 ret = (ret < 0) ? nr : ret + nr;
+1 -1
arch/sh/mm/gup.c
··· 257 257 start += nr << PAGE_SHIFT; 258 258 pages += nr; 259 259 260 - ret = get_user_pages_unlocked(current, mm, start, 260 + ret = get_user_pages_unlocked(start, 261 261 (end - start) >> PAGE_SHIFT, write, 0, pages); 262 262 263 263 /* Have to be a bit careful with return values */
+1 -1
arch/sparc/mm/gup.c
··· 237 237 start += nr << PAGE_SHIFT; 238 238 pages += nr; 239 239 240 - ret = get_user_pages_unlocked(current, mm, start, 240 + ret = get_user_pages_unlocked(start, 241 241 (end - start) >> PAGE_SHIFT, write, 0, pages); 242 242 243 243 /* Have to be a bit careful with return values */
+1 -1
arch/x86/mm/gup.c
··· 422 422 start += nr << PAGE_SHIFT; 423 423 pages += nr; 424 424 425 - ret = get_user_pages_unlocked(current, mm, start, 425 + ret = get_user_pages_unlocked(start, 426 426 (end - start) >> PAGE_SHIFT, 427 427 write, 0, pages); 428 428
+2 -2
arch/x86/mm/mpx.c
··· 546 546 int nr_pages = 1; 547 547 int force = 0; 548 548 549 - gup_ret = get_user_pages(current, current->mm, (unsigned long)addr, 550 - nr_pages, write, force, NULL, NULL); 549 + gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, 550 + force, NULL, NULL); 551 551 /* 552 552 * get_user_pages() returns number of pages gotten. 553 553 * 0 means we failed to fault in and get anything,
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 518 518 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; 519 519 struct page **pages = ttm->pages + pinned; 520 520 521 - r = get_user_pages(current, current->mm, userptr, num_pages, 522 - write, 0, pages, NULL); 521 + r = get_user_pages(userptr, num_pages, write, 0, pages, NULL); 523 522 if (r < 0) 524 523 goto release_pages; 525 524
+1 -2
drivers/gpu/drm/radeon/radeon_ttm.c
··· 554 554 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; 555 555 struct page **pages = ttm->pages + pinned; 556 556 557 - r = get_user_pages(current, current->mm, userptr, num_pages, 558 - write, 0, pages, NULL); 557 + r = get_user_pages(userptr, num_pages, write, 0, pages, NULL); 559 558 if (r < 0) 560 559 goto release_pages; 561 560
+1 -2
drivers/gpu/drm/via/via_dmablit.c
··· 239 239 if (NULL == vsg->pages) 240 240 return -ENOMEM; 241 241 down_read(&current->mm->mmap_sem); 242 - ret = get_user_pages(current, current->mm, 243 - (unsigned long)xfer->mem_addr, 242 + ret = get_user_pages((unsigned long)xfer->mem_addr, 244 243 vsg->num_pages, 245 244 (vsg->direction == DMA_FROM_DEVICE), 246 245 0, vsg->pages, NULL);
+1 -1
drivers/infiniband/core/umem.c
··· 188 188 sg_list_start = umem->sg_head.sgl; 189 189 190 190 while (npages) { 191 - ret = get_user_pages(current, current->mm, cur_base, 191 + ret = get_user_pages(cur_base, 192 192 min_t(unsigned long, npages, 193 193 PAGE_SIZE / sizeof (struct page *)), 194 194 1, !umem->writable, page_list, vma_list);
+1 -2
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 472 472 goto out; 473 473 } 474 474 475 - ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0, 476 - pages, NULL); 475 + ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL); 477 476 if (ret < 0) 478 477 goto out; 479 478
+1 -2
drivers/infiniband/hw/qib/qib_user_pages.c
··· 66 66 } 67 67 68 68 for (got = 0; got < num_pages; got += ret) { 69 - ret = get_user_pages(current, current->mm, 70 - start_page + got * PAGE_SIZE, 69 + ret = get_user_pages(start_page + got * PAGE_SIZE, 71 70 num_pages - got, 1, 1, 72 71 p + got, NULL); 73 72 if (ret < 0)
+1 -1
drivers/infiniband/hw/usnic/usnic_uiom.c
··· 144 144 ret = 0; 145 145 146 146 while (npages) { 147 - ret = get_user_pages(current, current->mm, cur_base, 147 + ret = get_user_pages(cur_base, 148 148 min_t(unsigned long, npages, 149 149 PAGE_SIZE / sizeof(struct page *)), 150 150 1, !writable, page_list, NULL);
+2 -2
drivers/media/pci/ivtv/ivtv-udma.c
··· 124 124 } 125 125 126 126 /* Get user pages for DMA Xfer */ 127 - err = get_user_pages_unlocked(current, current->mm, 128 - user_dma.uaddr, user_dma.page_count, 0, 1, dma->map); 127 + err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0, 128 + 1, dma->map); 129 129 130 130 if (user_dma.page_count != err) { 131 131 IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
+4 -6
drivers/media/pci/ivtv/ivtv-yuv.c
··· 75 75 ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height); 76 76 77 77 /* Get user pages for DMA Xfer */ 78 - y_pages = get_user_pages_unlocked(current, current->mm, 79 - y_dma.uaddr, y_dma.page_count, 0, 1, 80 - &dma->map[0]); 78 + y_pages = get_user_pages_unlocked(y_dma.uaddr, 79 + y_dma.page_count, 0, 1, &dma->map[0]); 81 80 uv_pages = 0; /* silence gcc. value is set and consumed only if: */ 82 81 if (y_pages == y_dma.page_count) { 83 - uv_pages = get_user_pages_unlocked(current, current->mm, 84 - uv_dma.uaddr, uv_dma.page_count, 0, 1, 85 - &dma->map[y_pages]); 82 + uv_pages = get_user_pages_unlocked(uv_dma.uaddr, 83 + uv_dma.page_count, 0, 1, &dma->map[y_pages]); 86 84 } 87 85 88 86 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
+1 -2
drivers/media/v4l2-core/videobuf-dma-sg.c
··· 181 181 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", 182 182 data, size, dma->nr_pages); 183 183 184 - err = get_user_pages(current, current->mm, 185 - data & PAGE_MASK, dma->nr_pages, 184 + err = get_user_pages(data & PAGE_MASK, dma->nr_pages, 186 185 rw == READ, 1, /* force */ 187 186 dma->pages, NULL); 188 187
-2
drivers/misc/mic/scif/scif_rma.c
··· 1394 1394 } 1395 1395 1396 1396 pinned_pages->nr_pages = get_user_pages( 1397 - current, 1398 - mm, 1399 1397 (u64)addr, 1400 1398 nr_pages, 1401 1399 !!(prot & SCIF_PROT_WRITE),
+1 -2
drivers/misc/sgi-gru/grufault.c
··· 198 198 #else 199 199 *pageshift = PAGE_SHIFT; 200 200 #endif 201 - if (get_user_pages 202 - (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) 201 + if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0) 203 202 return -EFAULT; 204 203 *paddr = page_to_phys(page); 205 204 put_page(page);
-2
drivers/scsi/st.c
··· 4817 4817 /* Try to fault in all of the necessary pages */ 4818 4818 /* rw==READ means read from drive, write into memory area */ 4819 4819 res = get_user_pages_unlocked( 4820 - current, 4821 - current->mm, 4822 4820 uaddr, 4823 4821 nr_pages, 4824 4822 rw == READ,
+2 -2
drivers/video/fbdev/pvr2fb.c
··· 686 686 if (!pages) 687 687 return -ENOMEM; 688 688 689 - ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf, 690 - nr_pages, WRITE, 0, pages); 689 + ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE, 690 + 0, pages); 691 691 692 692 if (ret < nr_pages) { 693 693 nr_pages = ret;
+2 -3
drivers/virt/fsl_hypervisor.c
··· 244 244 245 245 /* Get the physical addresses of the source buffer */ 246 246 down_read(&current->mm->mmap_sem); 247 - num_pinned = get_user_pages(current, current->mm, 248 - param.local_vaddr - lb_offset, num_pages, 249 - (param.source == -1) ? READ : WRITE, 247 + num_pinned = get_user_pages(param.local_vaddr - lb_offset, 248 + num_pages, (param.source == -1) ? READ : WRITE, 250 249 0, pages, NULL); 251 250 up_read(&current->mm->mmap_sem); 252 251
+1 -1
mm/frame_vector.c
··· 58 58 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { 59 59 vec->got_ref = true; 60 60 vec->is_pfns = false; 61 - ret = get_user_pages_locked(current, mm, start, nr_frames, 61 + ret = get_user_pages_locked(start, nr_frames, 62 62 write, force, (struct page **)(vec->ptrs), &locked); 63 63 goto out; 64 64 }
+4 -2
mm/gup.c
··· 936 936 EXPORT_SYMBOL(get_user_pages_remote); 937 937 938 938 /* 939 - * This is the same as get_user_pages_remote() for the time 940 - * being. 939 + * This is the same as get_user_pages_remote(), just with a 940 + * less-flexible calling convention where we assume that the task 941 + * and mm being operated on are the current task's. We also 942 + * obviously don't pass FOLL_REMOTE in here. 941 943 */ 942 944 long get_user_pages6(unsigned long start, unsigned long nr_pages, 943 945 int write, int force, struct page **pages,
+1 -1
mm/ksm.c
··· 352 352 /* 353 353 * We use break_ksm to break COW on a ksm page: it's a stripped down 354 354 * 355 - * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1) 355 + * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1) 356 356 * put_page(page); 357 357 * 358 358 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
+3 -3
mm/mempolicy.c
··· 844 844 } 845 845 } 846 846 847 - static int lookup_node(struct mm_struct *mm, unsigned long addr) 847 + static int lookup_node(unsigned long addr) 848 848 { 849 849 struct page *p; 850 850 int err; 851 851 852 - err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL); 852 + err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL); 853 853 if (err >= 0) { 854 854 err = page_to_nid(p); 855 855 put_page(p); ··· 904 904 905 905 if (flags & MPOL_F_NODE) { 906 906 if (flags & MPOL_F_ADDR) { 907 - err = lookup_node(mm, addr); 907 + err = lookup_node(addr); 908 908 if (err < 0) 909 909 goto out; 910 910 *policy = err;
+1 -1
net/ceph/pagevec.c
··· 24 24 return ERR_PTR(-ENOMEM); 25 25 26 26 while (got < num_pages) { 27 - rc = get_user_pages_unlocked(current, current->mm, 27 + rc = get_user_pages_unlocked( 28 28 (unsigned long)data + ((unsigned long)got * PAGE_SIZE), 29 29 num_pages - got, write_page, 0, pages + got); 30 30 if (rc < 0)
+5 -5
virt/kvm/kvm_main.c
··· 1264 1264 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1265 1265 } 1266 1266 1267 - static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, 1268 - unsigned long start, int write, struct page **page) 1267 + static int get_user_page_nowait(unsigned long start, int write, 1268 + struct page **page) 1269 1269 { 1270 1270 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1271 1271 1272 1272 if (write) 1273 1273 flags |= FOLL_WRITE; 1274 1274 1275 - return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); 1275 + return __get_user_pages(current, current->mm, start, 1, flags, page, 1276 + NULL, NULL); 1276 1277 } 1277 1278 1278 1279 static inline int check_user_page_hwpoison(unsigned long addr) ··· 1335 1334 1336 1335 if (async) { 1337 1336 down_read(&current->mm->mmap_sem); 1338 - npages = get_user_page_nowait(current, current->mm, 1339 - addr, write_fault, page); 1337 + npages = get_user_page_nowait(addr, write_fault, page); 1340 1338 up_read(&current->mm->mmap_sem); 1341 1339 } else 1342 1340 npages = __get_user_pages_unlocked(current, current->mm, addr, 1,