Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/gup: change GUP fast to use flags rather than a write 'bool'

To facilitate additional options to get_user_pages_fast(), change the
singular 'write' parameter to be 'gup_flags'.

This patch does not change any functionality. New functionality will
follow in subsequent patches.

Some of the get_user_pages_fast() call sites were unchanged because they
already passed FOLL_WRITE or 0 for the write parameter.

NOTE: It was suggested to change the ordering of the get_user_pages_fast()
arguments to ensure that callers were converted. This breaks the current
GUP call site convention of having the returned pages be the final
parameter. So the suggestion was rejected.

Link: http://lkml.kernel.org/r/20190328084422.29911-4-ira.weiny@intel.com
Link: http://lkml.kernel.org/r/20190317183438.2057-4-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marshall <hubcap@omnibond.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Ira Weiny and committed by Linus Torvalds.
73b0140b b798bec4

+73 -57
+6 -5
arch/mips/mm/gup.c
··· 235 235 * get_user_pages_fast() - pin user pages in memory 236 236 * @start: starting user address 237 237 * @nr_pages: number of pages from start to pin 238 - * @write: whether pages will be written to 238 + * @gup_flags: flags modifying pin behaviour 239 239 * @pages: array that receives pointers to the pages pinned. 240 240 * Should be at least nr_pages long. 241 241 * ··· 247 247 * requested. If nr_pages is 0 or negative, returns 0. If no pages 248 248 * were pinned, returns -errno. 249 249 */ 250 - int get_user_pages_fast(unsigned long start, int nr_pages, int write, 251 - struct page **pages) 250 + int get_user_pages_fast(unsigned long start, int nr_pages, 251 + unsigned int gup_flags, struct page **pages) 252 252 { 253 253 struct mm_struct *mm = current->mm; 254 254 unsigned long addr, len, end; ··· 273 273 next = pgd_addr_end(addr, end); 274 274 if (pgd_none(pgd)) 275 275 goto slow; 276 - if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) 276 + if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE, 277 + pages, &nr)) 277 278 goto slow; 278 279 } while (pgdp++, addr = next, addr != end); 279 280 local_irq_enable(); ··· 290 289 pages += nr; 291 290 292 291 ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, 293 - pages, write ? FOLL_WRITE : 0); 292 + pages, gup_flags); 294 293 295 294 /* Have to be a bit careful with return values */ 296 295 if (nr > 0) {
+2 -2
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 600 600 /* If writing != 0, then the HPTE must allow writing, if we get here */ 601 601 write_ok = writing; 602 602 hva = gfn_to_hva_memslot(memslot, gfn); 603 - npages = get_user_pages_fast(hva, 1, writing, pages); 603 + npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages); 604 604 if (npages < 1) { 605 605 /* Check if it's an I/O mapping */ 606 606 down_read(&current->mm->mmap_sem); ··· 1193 1193 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) 1194 1194 goto err; 1195 1195 hva = gfn_to_hva_memslot(memslot, gfn); 1196 - npages = get_user_pages_fast(hva, 1, 1, pages); 1196 + npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages); 1197 1197 if (npages < 1) 1198 1198 goto err; 1199 1199 page = pages[0];
+1 -1
arch/powerpc/kvm/e500_mmu.c
··· 783 783 if (!pages) 784 784 return -ENOMEM; 785 785 786 - ret = get_user_pages_fast(cfg->array, num_pages, 1, pages); 786 + ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages); 787 787 if (ret < 0) 788 788 goto free_pages; 789 789
+1 -1
arch/s390/kvm/interrupt.c
··· 2376 2376 ret = -EFAULT; 2377 2377 goto out; 2378 2378 } 2379 - ret = get_user_pages_fast(map->addr, 1, 1, &map->page); 2379 + ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page); 2380 2380 if (ret < 0) 2381 2381 goto out; 2382 2382 BUG_ON(ret != 1);
+6 -5
arch/sh/mm/gup.c
··· 204 204 * get_user_pages_fast() - pin user pages in memory 205 205 * @start: starting user address 206 206 * @nr_pages: number of pages from start to pin 207 - * @write: whether pages will be written to 207 + * @gup_flags: flags modifying pin behaviour 208 208 * @pages: array that receives pointers to the pages pinned. 209 209 * Should be at least nr_pages long. 210 210 * ··· 216 216 * requested. If nr_pages is 0 or negative, returns 0. If no pages 217 217 * were pinned, returns -errno. 218 218 */ 219 - int get_user_pages_fast(unsigned long start, int nr_pages, int write, 220 - struct page **pages) 219 + int get_user_pages_fast(unsigned long start, int nr_pages, 220 + unsigned int gup_flags, struct page **pages) 221 221 { 222 222 struct mm_struct *mm = current->mm; 223 223 unsigned long addr, len, end; ··· 241 241 next = pgd_addr_end(addr, end); 242 242 if (pgd_none(pgd)) 243 243 goto slow; 244 - if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) 244 + if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE, 245 + pages, &nr)) 245 246 goto slow; 246 247 } while (pgdp++, addr = next, addr != end); 247 248 local_irq_enable(); ··· 262 261 263 262 ret = get_user_pages_unlocked(start, 264 263 (end - start) >> PAGE_SHIFT, pages, 265 - write ? FOLL_WRITE : 0); 264 + gup_flags); 266 265 267 266 /* Have to be a bit careful with return values */ 268 267 if (nr > 0) {
+5 -4
arch/sparc/mm/gup.c
··· 245 245 return nr; 246 246 } 247 247 248 - int get_user_pages_fast(unsigned long start, int nr_pages, int write, 249 - struct page **pages) 248 + int get_user_pages_fast(unsigned long start, int nr_pages, 249 + unsigned int gup_flags, struct page **pages) 250 250 { 251 251 struct mm_struct *mm = current->mm; 252 252 unsigned long addr, len, end; ··· 303 303 next = pgd_addr_end(addr, end); 304 304 if (pgd_none(pgd)) 305 305 goto slow; 306 - if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) 306 + if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE, 307 + pages, &nr)) 307 308 goto slow; 308 309 } while (pgdp++, addr = next, addr != end); 309 310 ··· 325 324 326 325 ret = get_user_pages_unlocked(start, 327 326 (end - start) >> PAGE_SHIFT, pages, 328 - write ? FOLL_WRITE : 0); 327 + gup_flags); 329 328 330 329 /* Have to be a bit careful with return values */ 331 330 if (nr > 0) {
+1 -1
arch/x86/kvm/paging_tmpl.h
··· 140 140 pt_element_t *table; 141 141 struct page *page; 142 142 143 - npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page); 143 + npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); 144 144 /* Check if the user is doing something meaningless. */ 145 145 if (unlikely(npages != 1)) 146 146 return -EFAULT;
+1 -1
arch/x86/kvm/svm.c
··· 1805 1805 return NULL; 1806 1806 1807 1807 /* Pin the user virtual address. */ 1808 - npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); 1808 + npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); 1809 1809 if (npinned != npages) { 1810 1810 pr_err("SEV: Failure locking %lu pages.\n", npages); 1811 1811 goto err;
+1 -1
drivers/fpga/dfl-afu-dma-region.c
··· 102 102 goto unlock_vm; 103 103 } 104 104 105 - pinned = get_user_pages_fast(region->user_addr, npages, 1, 105 + pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE, 106 106 region->pages); 107 107 if (pinned < 0) { 108 108 ret = pinned;
+2 -1
drivers/gpu/drm/via/via_dmablit.c
··· 243 243 if (NULL == vsg->pages) 244 244 return -ENOMEM; 245 245 ret = get_user_pages_fast((unsigned long)xfer->mem_addr, 246 - vsg->num_pages, vsg->direction == DMA_FROM_DEVICE, 246 + vsg->num_pages, 247 + vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0, 247 248 vsg->pages); 248 249 if (ret != vsg->num_pages) { 249 250 if (ret < 0)
+2 -1
drivers/infiniband/hw/hfi1/user_pages.c
··· 105 105 { 106 106 int ret; 107 107 108 - ret = get_user_pages_fast(vaddr, npages, writable, pages); 108 + ret = get_user_pages_fast(vaddr, npages, writable ? FOLL_WRITE : 0, 109 + pages); 109 110 if (ret < 0) 110 111 return ret; 111 112
+1 -1
drivers/misc/genwqe/card_utils.c
··· 603 603 /* pin user pages in memory */ 604 604 rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */ 605 605 m->nr_pages, 606 - m->write, /* readable/writable */ 606 + m->write ? FOLL_WRITE : 0, /* readable/writable */ 607 607 m->page_list); /* ptrs to pages */ 608 608 if (rc < 0) 609 609 goto fail_get_user_pages;
+1 -1
drivers/misc/vmw_vmci/vmci_host.c
··· 242 242 /* 243 243 * Lock physical page backing a given user VA. 244 244 */ 245 - retval = get_user_pages_fast(uva, 1, 1, &context->notify_page); 245 + retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page); 246 246 if (retval != 1) { 247 247 context->notify_page = NULL; 248 248 return VMCI_ERROR_GENERIC;
+4 -2
drivers/misc/vmw_vmci/vmci_queue_pair.c
··· 659 659 int err = VMCI_SUCCESS; 660 660 661 661 retval = get_user_pages_fast((uintptr_t) produce_uva, 662 - produce_q->kernel_if->num_pages, 1, 662 + produce_q->kernel_if->num_pages, 663 + FOLL_WRITE, 663 664 produce_q->kernel_if->u.h.header_page); 664 665 if (retval < (int)produce_q->kernel_if->num_pages) { 665 666 pr_debug("get_user_pages_fast(produce) failed (retval=%d)", ··· 672 671 } 673 672 674 673 retval = get_user_pages_fast((uintptr_t) consume_uva, 675 - consume_q->kernel_if->num_pages, 1, 674 + consume_q->kernel_if->num_pages, 675 + FOLL_WRITE, 676 676 consume_q->kernel_if->u.h.header_page); 677 677 if (retval < (int)consume_q->kernel_if->num_pages) { 678 678 pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
+2 -1
drivers/platform/goldfish/goldfish_pipe.c
··· 274 274 *iter_last_page_size = last_page_size; 275 275 } 276 276 277 - ret = get_user_pages_fast(first_page, requested_pages, !is_write, 277 + ret = get_user_pages_fast(first_page, requested_pages, 278 + !is_write ? FOLL_WRITE : 0, 278 279 pages); 279 280 if (ret <= 0) 280 281 return -EFAULT;
+3 -1
drivers/rapidio/devices/rio_mport_cdev.c
··· 868 868 869 869 pinned = get_user_pages_fast( 870 870 (unsigned long)xfer->loc_addr & PAGE_MASK, 871 - nr_pages, dir == DMA_FROM_DEVICE, page_list); 871 + nr_pages, 872 + dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0, 873 + page_list); 872 874 873 875 if (pinned != nr_pages) { 874 876 if (pinned < 0) {
+1 -1
drivers/sbus/char/oradax.c
··· 437 437 438 438 dax_dbg("uva %p", va); 439 439 440 - ret = get_user_pages_fast((unsigned long)va, 1, 1, p); 440 + ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p); 441 441 if (ret == 1) { 442 442 dax_dbg("locked page %p, for VA %p", *p, va); 443 443 return 0;
+2 -1
drivers/scsi/st.c
··· 4922 4922 4923 4923 /* Try to fault in all of the necessary pages */ 4924 4924 /* rw==READ means read from drive, write into memory area */ 4925 - res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages); 4925 + res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0, 4926 + pages); 4926 4927 4927 4928 /* Errors and no page mapped should return here */ 4928 4929 if (res < nr_pages)
+2 -2
drivers/staging/gasket/gasket_page_table.c
··· 486 486 ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr + 487 487 off + i * PAGE_SIZE; 488 488 } else { 489 - ret = get_user_pages_fast(page_addr - offset, 1, 1, 490 - &page); 489 + ret = get_user_pages_fast(page_addr - offset, 1, 490 + FOLL_WRITE, &page); 491 491 492 492 if (ret <= 0) { 493 493 dev_err(pg_tbl->device,
+1 -1
drivers/tee/tee_shm.c
··· 273 273 goto err; 274 274 } 275 275 276 - rc = get_user_pages_fast(start, num_pages, 1, shm->pages); 276 + rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages); 277 277 if (rc > 0) 278 278 shm->num_pages = rc; 279 279 if (rc != num_pages) {
+2 -1
drivers/vfio/vfio_iommu_spapr_tce.c
··· 532 532 enum dma_data_direction direction = iommu_tce_direction(tce); 533 533 534 534 if (get_user_pages_fast(tce & PAGE_MASK, 1, 535 - direction != DMA_TO_DEVICE, &page) != 1) 535 + direction != DMA_TO_DEVICE ? FOLL_WRITE : 0, 536 + &page) != 1) 536 537 return -EFAULT; 537 538 538 539 *hpa = __pa((unsigned long) page_address(page));
+1 -1
drivers/vhost/vhost.c
··· 1704 1704 int bit = nr + (log % PAGE_SIZE) * 8; 1705 1705 int r; 1706 1706 1707 - r = get_user_pages_fast(log, 1, 1, &page); 1707 + r = get_user_pages_fast(log, 1, FOLL_WRITE, &page); 1708 1708 if (r < 0) 1709 1709 return r; 1710 1710 BUG_ON(r != 1);
+1 -1
drivers/video/fbdev/pvr2fb.c
··· 686 686 if (!pages) 687 687 return -ENOMEM; 688 688 689 - ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages); 689 + ret = get_user_pages_fast((unsigned long)buf, nr_pages, FOLL_WRITE, pages); 690 690 if (ret < nr_pages) { 691 691 nr_pages = ret; 692 692 ret = -EINVAL;
+1 -1
drivers/virt/fsl_hypervisor.c
··· 244 244 245 245 /* Get the physical addresses of the source buffer */ 246 246 num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset, 247 - num_pages, param.source != -1, pages); 247 + num_pages, param.source != -1 ? FOLL_WRITE : 0, pages); 248 248 249 249 if (num_pinned != num_pages) { 250 250 /* get_user_pages() failed */
+1 -1
drivers/xen/gntdev.c
··· 852 852 unsigned long xen_pfn; 853 853 int ret; 854 854 855 - ret = get_user_pages_fast(addr, 1, writeable, &page); 855 + ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page); 856 856 if (ret < 0) 857 857 return ret; 858 858
+1 -1
fs/orangefs/orangefs-bufmap.c
··· 269 269 270 270 /* map the pages */ 271 271 ret = get_user_pages_fast((unsigned long)user_desc->ptr, 272 - bufmap->page_count, 1, bufmap->page_array); 272 + bufmap->page_count, FOLL_WRITE, bufmap->page_array); 273 273 274 274 if (ret < 0) 275 275 return ret;
+2 -2
include/linux/mm.h
··· 1505 1505 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 1506 1506 struct page **pages, unsigned int gup_flags); 1507 1507 1508 - int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1509 - struct page **pages); 1508 + int get_user_pages_fast(unsigned long start, int nr_pages, 1509 + unsigned int gup_flags, struct page **pages); 1510 1510 1511 1511 /* Container for pinned pfns / pages */ 1512 1512 struct frame_vector {
+1 -1
kernel/futex.c
··· 543 543 if (unlikely(should_fail_futex(fshared))) 544 544 return -EFAULT; 545 545 546 - err = get_user_pages_fast(address, 1, 1, &page); 546 + err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); 547 547 /* 548 548 * If write access is not required (eg. FUTEX_WAIT), try 549 549 * and get read-only access.
+5 -2
lib/iov_iter.c
··· 1293 1293 len = maxpages * PAGE_SIZE; 1294 1294 addr &= ~(PAGE_SIZE - 1); 1295 1295 n = DIV_ROUND_UP(len, PAGE_SIZE); 1296 - res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages); 1296 + res = get_user_pages_fast(addr, n, 1297 + iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, 1298 + pages); 1297 1299 if (unlikely(res < 0)) 1298 1300 return res; 1299 1301 return (res == n ? len : res * PAGE_SIZE) - *start; ··· 1376 1374 p = get_pages_array(n); 1377 1375 if (!p) 1378 1376 return -ENOMEM; 1379 - res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p); 1377 + res = get_user_pages_fast(addr, n, 1378 + iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p); 1380 1379 if (unlikely(res < 0)) { 1381 1380 kvfree(p); 1382 1381 return res;
+5 -5
mm/gup.c
··· 2062 2062 * get_user_pages_fast() - pin user pages in memory 2063 2063 * @start: starting user address 2064 2064 * @nr_pages: number of pages from start to pin 2065 - * @write: whether pages will be written to 2065 + * @gup_flags: flags modifying pin behaviour 2066 2066 * @pages: array that receives pointers to the pages pinned. 2067 2067 * Should be at least nr_pages long. 2068 2068 * ··· 2074 2074 * requested. If nr_pages is 0 or negative, returns 0. If no pages 2075 2075 * were pinned, returns -errno. 2076 2076 */ 2077 - int get_user_pages_fast(unsigned long start, int nr_pages, int write, 2078 - struct page **pages) 2077 + int get_user_pages_fast(unsigned long start, int nr_pages, 2078 + unsigned int gup_flags, struct page **pages) 2079 2079 { 2080 2080 unsigned long addr, len, end; 2081 2081 int nr = 0, ret = 0; ··· 2093 2093 2094 2094 if (gup_fast_permitted(start, nr_pages)) { 2095 2095 local_irq_disable(); 2096 - gup_pgd_range(addr, end, write ? FOLL_WRITE : 0, pages, &nr); 2096 + gup_pgd_range(addr, end, gup_flags, pages, &nr); 2097 2097 local_irq_enable(); 2098 2098 ret = nr; 2099 2099 } ··· 2104 2104 pages += nr; 2105 2105 2106 2106 ret = get_user_pages_unlocked(start, nr_pages - nr, pages, 2107 - write ? FOLL_WRITE : 0); 2107 + gup_flags); 2108 2108 2109 2109 /* Have to be a bit careful with return values */ 2110 2110 if (nr > 0) {
+4 -4
mm/util.c
··· 318 318 * get_user_pages_fast() - pin user pages in memory 319 319 * @start: starting user address 320 320 * @nr_pages: number of pages from start to pin 321 - * @write: whether pages will be written to 321 + * @gup_flags: flags modifying pin behaviour 322 322 * @pages: array that receives pointers to the pages pinned. 323 323 * Should be at least nr_pages long. 324 324 * ··· 339 339 * were pinned, returns -errno. 340 340 */ 341 341 int __weak get_user_pages_fast(unsigned long start, 342 - int nr_pages, int write, struct page **pages) 342 + int nr_pages, unsigned int gup_flags, 343 + struct page **pages) 343 344 { 344 - return get_user_pages_unlocked(start, nr_pages, pages, 345 - write ? FOLL_WRITE : 0); 345 + return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); 346 346 } 347 347 EXPORT_SYMBOL_GPL(get_user_pages_fast); 348 348
+1 -1
net/ceph/pagevec.c
··· 27 27 while (got < num_pages) { 28 28 rc = get_user_pages_fast( 29 29 (unsigned long)data + ((unsigned long)got * PAGE_SIZE), 30 - num_pages - got, write_page, pages + got); 30 + num_pages - got, write_page ? FOLL_WRITE : 0, pages + got); 31 31 if (rc < 0) 32 32 break; 33 33 BUG_ON(rc == 0);
+1 -1
net/rds/info.c
··· 193 193 ret = -ENOMEM; 194 194 goto out; 195 195 } 196 - ret = get_user_pages_fast(start, nr_pages, 1, pages); 196 + ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); 197 197 if (ret != nr_pages) { 198 198 if (ret > 0) 199 199 nr_pages = ret;
+2 -1
net/rds/rdma.c
··· 158 158 { 159 159 int ret; 160 160 161 - ret = get_user_pages_fast(user_addr, nr_pages, write, pages); 161 + ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0, 162 + pages); 162 163 163 164 if (ret >= 0 && ret < nr_pages) { 164 165 while (ret--)