Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: replace get_user_pages() write/force parameters with gup_flags

This removes the 'write' and 'force' parameters from get_user_pages() and
replaces them with a single 'gup_flags' argument, making any use of
FOLL_FORCE explicit in callers, since use of this flag can result in
surprising behaviour (and hence bugs) within the mm subsystem.

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Lorenzo Stoakes; committed by Linus Torvalds.
Commit 768ae309 (parent 7f23b350).

Overall diffstat: +49 −54
+1 -3
arch/cris/arch-v32/drivers/cryptocop.c
··· 2722 2722 err = get_user_pages((unsigned long int)(oper.indata + prev_ix), 2723 2723 noinpages, 2724 2724 0, /* read access only for in data */ 2725 - 0, /* no force */ 2726 2725 inpages, 2727 2726 NULL); 2728 2727 ··· 2735 2736 if (oper.do_cipher){ 2736 2737 err = get_user_pages((unsigned long int)oper.cipher_outdata, 2737 2738 nooutpages, 2738 - 1, /* write access for out data */ 2739 - 0, /* no force */ 2739 + FOLL_WRITE, /* write access for out data */ 2740 2740 outpages, 2741 2741 NULL); 2742 2742 up_read(&current->mm->mmap_sem);
+1 -1
arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
+2 -3
arch/x86/mm/mpx.c
··· 544 544 { 545 545 long gup_ret; 546 546 int nr_pages = 1; 547 - int force = 0; 548 547 549 - gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, 550 - force, NULL, NULL); 548 + gup_ret = get_user_pages((unsigned long)addr, nr_pages, 549 + write ? FOLL_WRITE : 0, NULL, NULL); 551 550 /* 552 551 * get_user_pages() returns number of pages gotten. 553 552 * 0 means we failed to fault in and get anything,
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 555 555 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) 556 556 { 557 557 struct amdgpu_ttm_tt *gtt = (void *)ttm; 558 - int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); 558 + unsigned int flags = 0; 559 559 unsigned pinned = 0; 560 560 int r; 561 + 562 + if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) 563 + flags |= FOLL_WRITE; 561 564 562 565 if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { 563 566 /* check that we only use anonymous memory ··· 584 581 list_add(&guptask.list, &gtt->guptasks); 585 582 spin_unlock(&gtt->guptasklock); 586 583 587 - r = get_user_pages(userptr, num_pages, write, 0, p, NULL); 584 + r = get_user_pages(userptr, num_pages, flags, p, NULL); 588 585 589 586 spin_lock(&gtt->guptasklock); 590 587 list_del(&guptask.list);
+2 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 566 566 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; 567 567 struct page **pages = ttm->pages + pinned; 568 568 569 - r = get_user_pages(userptr, num_pages, write, 0, pages, NULL); 569 + r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, 570 + pages, NULL); 570 571 if (r < 0) 571 572 goto release_pages; 572 573
+2 -2
drivers/gpu/drm/via/via_dmablit.c
··· 241 241 down_read(&current->mm->mmap_sem); 242 242 ret = get_user_pages((unsigned long)xfer->mem_addr, 243 243 vsg->num_pages, 244 - (vsg->direction == DMA_FROM_DEVICE), 245 - 0, vsg->pages, NULL); 244 + (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0, 245 + vsg->pages, NULL); 246 246 247 247 up_read(&current->mm->mmap_sem); 248 248 if (ret != vsg->num_pages) {
+5 -1
drivers/infiniband/core/umem.c
··· 94 94 unsigned long dma_attrs = 0; 95 95 struct scatterlist *sg, *sg_list_start; 96 96 int need_release = 0; 97 + unsigned int gup_flags = FOLL_WRITE; 97 98 98 99 if (dmasync) 99 100 dma_attrs |= DMA_ATTR_WRITE_BARRIER; ··· 184 183 if (ret) 185 184 goto out; 186 185 186 + if (!umem->writable) 187 + gup_flags |= FOLL_FORCE; 188 + 187 189 need_release = 1; 188 190 sg_list_start = umem->sg_head.sgl; 189 191 ··· 194 190 ret = get_user_pages(cur_base, 195 191 min_t(unsigned long, npages, 196 192 PAGE_SIZE / sizeof (struct page *)), 197 - 1, !umem->writable, page_list, vma_list); 193 + gup_flags, page_list, vma_list); 198 194 199 195 if (ret < 0) 200 196 goto out;
+1 -1
drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@
 		goto out;
 	}
 
-	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
 	if (ret < 0)
 		goto out;
 
+2 -1
drivers/infiniband/hw/qib/qib_user_pages.c
··· 67 67 68 68 for (got = 0; got < num_pages; got += ret) { 69 69 ret = get_user_pages(start_page + got * PAGE_SIZE, 70 - num_pages - got, 1, 1, 70 + num_pages - got, 71 + FOLL_WRITE | FOLL_FORCE, 71 72 p + got, NULL); 72 73 if (ret < 0) 73 74 goto bail_release;
+4 -1
drivers/infiniband/hw/usnic/usnic_uiom.c
··· 111 111 int i; 112 112 int flags; 113 113 dma_addr_t pa; 114 + unsigned int gup_flags; 114 115 115 116 if (!can_do_mlock()) 116 117 return -EPERM; ··· 136 135 137 136 flags = IOMMU_READ | IOMMU_CACHE; 138 137 flags |= (writable) ? IOMMU_WRITE : 0; 138 + gup_flags = FOLL_WRITE; 139 + gup_flags |= (writable) ? 0 : FOLL_FORCE; 139 140 cur_base = addr & PAGE_MASK; 140 141 ret = 0; 141 142 ··· 145 142 ret = get_user_pages(cur_base, 146 143 min_t(unsigned long, npages, 147 144 PAGE_SIZE / sizeof(struct page *)), 148 - 1, !writable, page_list, NULL); 145 + gup_flags, page_list, NULL); 149 146 150 147 if (ret < 0) 151 148 goto out;
+5 -2
drivers/media/v4l2-core/videobuf-dma-sg.c
··· 156 156 { 157 157 unsigned long first, last; 158 158 int err, rw = 0; 159 + unsigned int flags = FOLL_FORCE; 159 160 160 161 dma->direction = direction; 161 162 switch (dma->direction) { ··· 179 178 if (NULL == dma->pages) 180 179 return -ENOMEM; 181 180 181 + if (rw == READ) 182 + flags |= FOLL_WRITE; 183 + 182 184 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", 183 185 data, size, dma->nr_pages); 184 186 185 187 err = get_user_pages(data & PAGE_MASK, dma->nr_pages, 186 - rw == READ, 1, /* force */ 187 - dma->pages, NULL); 188 + flags, dma->pages, NULL); 188 189 189 190 if (err != dma->nr_pages) { 190 191 dma->nr_pages = (err >= 0) ? err : 0;
+1 -2
drivers/misc/mic/scif/scif_rma.c
··· 1396 1396 pinned_pages->nr_pages = get_user_pages( 1397 1397 (u64)addr, 1398 1398 nr_pages, 1399 - !!(prot & SCIF_PROT_WRITE), 1400 - 0, 1399 + (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0, 1401 1400 pinned_pages->pages, 1402 1401 NULL); 1403 1402 up_write(&mm->mmap_sem);
+1 -1
drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@
 #else
 	*pageshift = PAGE_SHIFT;
 #endif
-	if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
 		return -EFAULT;
 	*paddr = page_to_phys(page);
 	put_page(page);
+2 -1
drivers/platform/goldfish/goldfish_pipe.c
··· 309 309 * much memory to the process. 310 310 */ 311 311 down_read(&current->mm->mmap_sem); 312 - ret = get_user_pages(address, 1, !is_write, 0, &page, NULL); 312 + ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE, 313 + &page, NULL); 313 314 up_read(&current->mm->mmap_sem); 314 315 if (ret < 0) 315 316 break;
+2 -1
drivers/rapidio/devices/rio_mport_cdev.c
··· 892 892 down_read(&current->mm->mmap_sem); 893 893 pinned = get_user_pages( 894 894 (unsigned long)xfer->loc_addr & PAGE_MASK, 895 - nr_pages, dir == DMA_FROM_DEVICE, 0, 895 + nr_pages, 896 + dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0, 896 897 page_list, NULL); 897 898 up_read(&current->mm->mmap_sem); 898 899
+1 -2
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
··· 423 423 actual_pages = get_user_pages(task, task->mm, 424 424 (unsigned long)buf & ~(PAGE_SIZE - 1), 425 425 num_pages, 426 - (type == PAGELIST_READ) /*Write */ , 427 - 0 /*Force */ , 426 + (type == PAGELIST_READ) ? FOLL_WRITE : 0, 428 427 pages, 429 428 NULL /*vmas */); 430 429 up_read(&task->mm->mmap_sem);
+1 -2
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
··· 1477 1477 current->mm, /* mm */ 1478 1478 (unsigned long)virt_addr, /* start */ 1479 1479 num_pages, /* len */ 1480 - 0, /* write */ 1481 - 0, /* force */ 1480 + 0, /* gup_flags */ 1482 1481 pages, /* pages (array of page pointers) */ 1483 1482 NULL); /* vmas */ 1484 1483 up_read(&current->mm->mmap_sem);
+2 -2
drivers/virt/fsl_hypervisor.c
··· 245 245 /* Get the physical addresses of the source buffer */ 246 246 down_read(&current->mm->mmap_sem); 247 247 num_pinned = get_user_pages(param.local_vaddr - lb_offset, 248 - num_pages, (param.source == -1) ? READ : WRITE, 249 - 0, pages, NULL); 248 + num_pages, (param.source == -1) ? 0 : FOLL_WRITE, 249 + pages, NULL); 250 250 up_read(&current->mm->mmap_sem); 251 251 252 252 if (num_pinned != num_pages) {
+1 -1
include/linux/mm.h
@@ -1279,7 +1279,7 @@
 		int write, int force, struct page **pages,
 		struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
+		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages, int *locked);
+3 -9
mm/gup.c
··· 987 987 * obviously don't pass FOLL_REMOTE in here. 988 988 */ 989 989 long get_user_pages(unsigned long start, unsigned long nr_pages, 990 - int write, int force, struct page **pages, 990 + unsigned int gup_flags, struct page **pages, 991 991 struct vm_area_struct **vmas) 992 992 { 993 - unsigned int flags = FOLL_TOUCH; 994 - 995 - if (write) 996 - flags |= FOLL_WRITE; 997 - if (force) 998 - flags |= FOLL_FORCE; 999 - 1000 993 return __get_user_pages_locked(current, current->mm, start, nr_pages, 1001 - pages, vmas, NULL, false, flags); 994 + pages, vmas, NULL, false, 995 + gup_flags | FOLL_TOUCH); 1002 996 } 1003 997 EXPORT_SYMBOL(get_user_pages); 1004 998
+1 -1
mm/mempolicy.c
@@ -850,7 +850,7 @@
 		struct page *p;
 		int err;
 
-		err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+		err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
 		if (err >= 0) {
 			err = page_to_nid(p);
 			put_page(p);
+4 -14
mm/nommu.c
··· 160 160 * - don't permit access to VMAs that don't support it, such as I/O mappings 161 161 */ 162 162 long get_user_pages(unsigned long start, unsigned long nr_pages, 163 - int write, int force, struct page **pages, 163 + unsigned int gup_flags, struct page **pages, 164 164 struct vm_area_struct **vmas) 165 165 { 166 - int flags = 0; 167 - 168 - if (write) 169 - flags |= FOLL_WRITE; 170 - if (force) 171 - flags |= FOLL_FORCE; 172 - 173 - return __get_user_pages(current, current->mm, start, nr_pages, flags, 174 - pages, vmas, NULL); 166 + return __get_user_pages(current, current->mm, start, nr_pages, 167 + gup_flags, pages, vmas, NULL); 175 168 } 176 169 EXPORT_SYMBOL(get_user_pages); 177 170 ··· 172 179 unsigned int gup_flags, struct page **pages, 173 180 int *locked) 174 181 { 175 - int write = gup_flags & FOLL_WRITE; 176 - int force = gup_flags & FOLL_FORCE; 177 - 178 - return get_user_pages(start, nr_pages, write, force, pages, NULL); 182 + return get_user_pages(start, nr_pages, gup_flags, pages, NULL); 179 183 } 180 184 EXPORT_SYMBOL(get_user_pages_locked); 181 185