Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media

Pull media updates from Mauro Carvalho Chehab:
"A series of patches that move part of the code used to allocate memory
from the media subsystem to the mm subsystem"

[ The mm parts have been acked by VM people, and the series was
apparently in -mm for a while - Linus ]

* tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media:
[media] drm/exynos: Convert g2d_userptr_get_dma_addr() to use get_vaddr_frames()
[media] media: vb2: Remove unused functions
[media] media: vb2: Convert vb2_dc_get_userptr() to use frame vector
[media] media: vb2: Convert vb2_vmalloc_get_userptr() to use frame vector
[media] media: vb2: Convert vb2_dma_sg_get_userptr() to use frame vector
[media] vb2: Provide helpers for mapping virtual addresses
[media] media: omap_vout: Convert omap_vout_uservirt_to_phys() to use get_vaddr_pfns()
[media] mm: Provide new get_vaddr_frames() helper
[media] vb2: Push mmap_sem down to memops

+484 -615
+1
drivers/gpu/drm/exynos/Kconfig
··· 77 77 config DRM_EXYNOS_G2D 78 78 bool "Exynos DRM G2D" 79 79 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D 80 + select FRAME_VECTOR 80 81 help 81 82 Choose this option if you want to use Exynos G2D for DRM. 82 83
+31 -62
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 194 194 dma_addr_t dma_addr; 195 195 unsigned long userptr; 196 196 unsigned long size; 197 - struct page **pages; 198 - unsigned int npages; 197 + struct frame_vector *vec; 199 198 struct sg_table *sgt; 200 - struct vm_area_struct *vma; 201 199 atomic_t refcount; 202 200 bool in_pool; 203 201 bool out_of_list; ··· 365 367 { 366 368 struct g2d_cmdlist_userptr *g2d_userptr = 367 369 (struct g2d_cmdlist_userptr *)obj; 370 + struct page **pages; 368 371 369 372 if (!obj) 370 373 return; ··· 385 386 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt, 386 387 DMA_BIDIRECTIONAL); 387 388 388 - exynos_gem_put_pages_to_userptr(g2d_userptr->pages, 389 - g2d_userptr->npages, 390 - g2d_userptr->vma); 389 + pages = frame_vector_pages(g2d_userptr->vec); 390 + if (!IS_ERR(pages)) { 391 + int i; 391 392 392 - exynos_gem_put_vma(g2d_userptr->vma); 393 + for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++) 394 + set_page_dirty_lock(pages[i]); 395 + } 396 + put_vaddr_frames(g2d_userptr->vec); 397 + frame_vector_destroy(g2d_userptr->vec); 393 398 394 399 if (!g2d_userptr->out_of_list) 395 400 list_del_init(&g2d_userptr->list); 396 401 397 402 sg_free_table(g2d_userptr->sgt); 398 403 kfree(g2d_userptr->sgt); 399 - 400 - drm_free_large(g2d_userptr->pages); 401 404 kfree(g2d_userptr); 402 405 } 403 406 ··· 413 412 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 414 413 struct g2d_cmdlist_userptr *g2d_userptr; 415 414 struct g2d_data *g2d; 416 - struct page **pages; 417 415 struct sg_table *sgt; 418 - struct vm_area_struct *vma; 419 416 unsigned long start, end; 420 417 unsigned int npages, offset; 421 418 int ret; ··· 459 460 return ERR_PTR(-ENOMEM); 460 461 461 462 atomic_set(&g2d_userptr->refcount, 1); 463 + g2d_userptr->size = size; 462 464 463 465 start = userptr & PAGE_MASK; 464 466 offset = userptr & ~PAGE_MASK; 465 467 end = PAGE_ALIGN(userptr + size); 466 468 npages = (end - start) >> PAGE_SHIFT; 467 - g2d_userptr->npages = npages; 468 - 469 - pages 
= drm_calloc_large(npages, sizeof(struct page *)); 470 - if (!pages) { 471 - DRM_ERROR("failed to allocate pages.\n"); 469 + g2d_userptr->vec = frame_vector_create(npages); 470 + if (!g2d_userptr->vec) { 472 471 ret = -ENOMEM; 473 472 goto err_free; 474 473 } 475 474 476 - down_read(&current->mm->mmap_sem); 477 - vma = find_vma(current->mm, userptr); 478 - if (!vma) { 479 - up_read(&current->mm->mmap_sem); 480 - DRM_ERROR("failed to get vm region.\n"); 481 - ret = -EFAULT; 482 - goto err_free_pages; 483 - } 484 - 485 - if (vma->vm_end < userptr + size) { 486 - up_read(&current->mm->mmap_sem); 487 - DRM_ERROR("vma is too small.\n"); 488 - ret = -EFAULT; 489 - goto err_free_pages; 490 - } 491 - 492 - g2d_userptr->vma = exynos_gem_get_vma(vma); 493 - if (!g2d_userptr->vma) { 494 - up_read(&current->mm->mmap_sem); 495 - DRM_ERROR("failed to copy vma.\n"); 496 - ret = -ENOMEM; 497 - goto err_free_pages; 498 - } 499 - 500 - g2d_userptr->size = size; 501 - 502 - ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK, 503 - npages, pages, vma); 504 - if (ret < 0) { 505 - up_read(&current->mm->mmap_sem); 475 + ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec); 476 + if (ret != npages) { 506 477 DRM_ERROR("failed to get user pages from userptr.\n"); 507 - goto err_put_vma; 478 + if (ret < 0) 479 + goto err_destroy_framevec; 480 + ret = -EFAULT; 481 + goto err_put_framevec; 508 482 } 509 - 510 - up_read(&current->mm->mmap_sem); 511 - g2d_userptr->pages = pages; 483 + if (frame_vector_to_pages(g2d_userptr->vec) < 0) { 484 + ret = -EFAULT; 485 + goto err_put_framevec; 486 + } 512 487 513 488 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 514 489 if (!sgt) { 515 490 ret = -ENOMEM; 516 - goto err_free_userptr; 491 + goto err_put_framevec; 517 492 } 518 493 519 - ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, 520 - size, GFP_KERNEL); 494 + ret = sg_alloc_table_from_pages(sgt, 495 + frame_vector_pages(g2d_userptr->vec), 496 + npages, offset, size, 
GFP_KERNEL); 521 497 if (ret < 0) { 522 498 DRM_ERROR("failed to get sgt from pages.\n"); 523 499 goto err_free_sgt; ··· 527 553 err_free_sgt: 528 554 kfree(sgt); 529 555 530 - err_free_userptr: 531 - exynos_gem_put_pages_to_userptr(g2d_userptr->pages, 532 - g2d_userptr->npages, 533 - g2d_userptr->vma); 556 + err_put_framevec: 557 + put_vaddr_frames(g2d_userptr->vec); 534 558 535 - err_put_vma: 536 - exynos_gem_put_vma(g2d_userptr->vma); 537 - 538 - err_free_pages: 539 - drm_free_large(pages); 559 + err_destroy_framevec: 560 + frame_vector_destroy(g2d_userptr->vec); 540 561 541 562 err_free: 542 563 kfree(g2d_userptr);
-97
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 366 366 return 0; 367 367 } 368 368 369 - struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma) 370 - { 371 - struct vm_area_struct *vma_copy; 372 - 373 - vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL); 374 - if (!vma_copy) 375 - return NULL; 376 - 377 - if (vma->vm_ops && vma->vm_ops->open) 378 - vma->vm_ops->open(vma); 379 - 380 - if (vma->vm_file) 381 - get_file(vma->vm_file); 382 - 383 - memcpy(vma_copy, vma, sizeof(*vma)); 384 - 385 - vma_copy->vm_mm = NULL; 386 - vma_copy->vm_next = NULL; 387 - vma_copy->vm_prev = NULL; 388 - 389 - return vma_copy; 390 - } 391 - 392 - void exynos_gem_put_vma(struct vm_area_struct *vma) 393 - { 394 - if (!vma) 395 - return; 396 - 397 - if (vma->vm_ops && vma->vm_ops->close) 398 - vma->vm_ops->close(vma); 399 - 400 - if (vma->vm_file) 401 - fput(vma->vm_file); 402 - 403 - kfree(vma); 404 - } 405 - 406 - int exynos_gem_get_pages_from_userptr(unsigned long start, 407 - unsigned int npages, 408 - struct page **pages, 409 - struct vm_area_struct *vma) 410 - { 411 - int get_npages; 412 - 413 - /* the memory region mmaped with VM_PFNMAP. 
*/ 414 - if (vma_is_io(vma)) { 415 - unsigned int i; 416 - 417 - for (i = 0; i < npages; ++i, start += PAGE_SIZE) { 418 - unsigned long pfn; 419 - int ret = follow_pfn(vma, start, &pfn); 420 - if (ret) 421 - return ret; 422 - 423 - pages[i] = pfn_to_page(pfn); 424 - } 425 - 426 - if (i != npages) { 427 - DRM_ERROR("failed to get user_pages.\n"); 428 - return -EINVAL; 429 - } 430 - 431 - return 0; 432 - } 433 - 434 - get_npages = get_user_pages(current, current->mm, start, 435 - npages, 1, 1, pages, NULL); 436 - get_npages = max(get_npages, 0); 437 - if (get_npages != npages) { 438 - DRM_ERROR("failed to get user_pages.\n"); 439 - while (get_npages) 440 - put_page(pages[--get_npages]); 441 - return -EFAULT; 442 - } 443 - 444 - return 0; 445 - } 446 - 447 - void exynos_gem_put_pages_to_userptr(struct page **pages, 448 - unsigned int npages, 449 - struct vm_area_struct *vma) 450 - { 451 - if (!vma_is_io(vma)) { 452 - unsigned int i; 453 - 454 - for (i = 0; i < npages; i++) { 455 - set_page_dirty_lock(pages[i]); 456 - 457 - /* 458 - * undo the reference we took when populating 459 - * the table. 460 - */ 461 - put_page(pages[i]); 462 - } 463 - } 464 - } 465 - 466 369 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, 467 370 struct sg_table *sgt, 468 371 enum dma_data_direction dir)
+1
drivers/media/platform/omap/Kconfig
··· 10 10 select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS 11 11 select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 12 12 select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB 13 + select FRAME_VECTOR 13 14 default n 14 15 ---help--- 15 16 V4L2 Display driver support for OMAP2/3 based boards.
+33 -40
drivers/media/platform/omap/omap_vout.c
··· 195 195 } 196 196 197 197 /* 198 - * omap_vout_uservirt_to_phys: This inline function is used to convert user 199 - * space virtual address to physical address. 198 + * omap_vout_get_userptr: Convert user space virtual address to physical 199 + * address. 200 200 */ 201 - static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp) 201 + static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp, 202 + u32 *physp) 202 203 { 203 - unsigned long physp = 0; 204 - struct vm_area_struct *vma; 205 - struct mm_struct *mm = current->mm; 204 + struct frame_vector *vec; 205 + int ret; 206 206 207 207 /* For kernel direct-mapped memory, take the easy way */ 208 - if (virtp >= PAGE_OFFSET) 209 - return virt_to_phys((void *) virtp); 210 - 211 - down_read(&current->mm->mmap_sem); 212 - vma = find_vma(mm, virtp); 213 - if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) { 214 - /* this will catch, kernel-allocated, mmaped-to-usermode 215 - addresses */ 216 - physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start); 217 - up_read(&current->mm->mmap_sem); 218 - } else { 219 - /* otherwise, use get_user_pages() for general userland pages */ 220 - int res, nr_pages = 1; 221 - struct page *pages; 222 - 223 - res = get_user_pages(current, current->mm, virtp, nr_pages, 1, 224 - 0, &pages, NULL); 225 - up_read(&current->mm->mmap_sem); 226 - 227 - if (res == nr_pages) { 228 - physp = __pa(page_address(&pages[0]) + 229 - (virtp & ~PAGE_MASK)); 230 - } else { 231 - printk(KERN_WARNING VOUT_NAME 232 - "get_user_pages failed\n"); 233 - return 0; 234 - } 208 + if (virtp >= PAGE_OFFSET) { 209 + *physp = virt_to_phys((void *)virtp); 210 + return 0; 235 211 } 236 212 237 - return physp; 213 + vec = frame_vector_create(1); 214 + if (!vec) 215 + return -ENOMEM; 216 + 217 + ret = get_vaddr_frames(virtp, 1, true, false, vec); 218 + if (ret != 1) { 219 + frame_vector_destroy(vec); 220 + return -EINVAL; 221 + } 222 + *physp = __pfn_to_phys(frame_vector_pfns(vec)[0]); 223 
+ vb->priv = vec; 224 + 225 + return 0; 238 226 } 239 227 240 228 /* ··· 772 784 * address of the buffer 773 785 */ 774 786 if (V4L2_MEMORY_USERPTR == vb->memory) { 787 + int ret; 788 + 775 789 if (0 == vb->baddr) 776 790 return -EINVAL; 777 791 /* Physical address */ 778 - vout->queued_buf_addr[vb->i] = (u8 *) 779 - omap_vout_uservirt_to_phys(vb->baddr); 792 + ret = omap_vout_get_userptr(vb, vb->baddr, 793 + (u32 *)&vout->queued_buf_addr[vb->i]); 794 + if (ret < 0) 795 + return ret; 780 796 } else { 781 797 unsigned long addr, dma_addr; 782 798 unsigned long size; ··· 826 834 static void omap_vout_buffer_release(struct videobuf_queue *q, 827 835 struct videobuf_buffer *vb) 828 836 { 829 - struct omap_vout_device *vout = q->priv_data; 830 - 831 837 vb->state = VIDEOBUF_NEEDS_INIT; 838 + if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) { 839 + struct frame_vector *vec = vb->priv; 832 840 833 - if (V4L2_MEMORY_MMAP != vout->memory) 834 - return; 841 + put_vaddr_frames(vec); 842 + frame_vector_destroy(vec); 843 + } 835 844 } 836 845 837 846 /*
+1
drivers/media/v4l2-core/Kconfig
··· 84 84 85 85 config VIDEOBUF2_MEMOPS 86 86 tristate 87 + select FRAME_VECTOR 87 88 88 89 config VIDEOBUF2_DMA_CONTIG 89 90 tristate
-2
drivers/media/v4l2-core/videobuf2-core.c
··· 1691 1691 ret = __qbuf_mmap(vb, b); 1692 1692 break; 1693 1693 case V4L2_MEMORY_USERPTR: 1694 - down_read(&current->mm->mmap_sem); 1695 1694 ret = __qbuf_userptr(vb, b); 1696 - up_read(&current->mm->mmap_sem); 1697 1695 break; 1698 1696 case V4L2_MEMORY_DMABUF: 1699 1697 ret = __qbuf_dmabuf(vb, b);
+34 -173
drivers/media/v4l2-core/videobuf2-dma-contig.c
··· 32 32 dma_addr_t dma_addr; 33 33 enum dma_data_direction dma_dir; 34 34 struct sg_table *dma_sgt; 35 + struct frame_vector *vec; 35 36 36 37 /* MMAP related */ 37 38 struct vb2_vmarea_handler handler; 38 39 atomic_t refcount; 39 40 struct sg_table *sgt_base; 40 - 41 - /* USERPTR related */ 42 - struct vm_area_struct *vma; 43 41 44 42 /* DMABUF related */ 45 43 struct dma_buf_attachment *db_attach; ··· 46 48 /*********************************************/ 47 49 /* scatterlist table functions */ 48 50 /*********************************************/ 49 - 50 - 51 - static void vb2_dc_sgt_foreach_page(struct sg_table *sgt, 52 - void (*cb)(struct page *pg)) 53 - { 54 - struct scatterlist *s; 55 - unsigned int i; 56 - 57 - for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { 58 - struct page *page = sg_page(s); 59 - unsigned int n_pages = PAGE_ALIGN(s->offset + s->length) 60 - >> PAGE_SHIFT; 61 - unsigned int j; 62 - 63 - for (j = 0; j < n_pages; ++j, ++page) 64 - cb(page); 65 - } 66 - } 67 51 68 52 static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt) 69 53 { ··· 409 429 /* callbacks for USERPTR buffers */ 410 430 /*********************************************/ 411 431 412 - static inline int vma_is_io(struct vm_area_struct *vma) 413 - { 414 - return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 415 - } 416 - 417 - static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, 418 - struct vm_area_struct *vma, unsigned long *res) 419 - { 420 - unsigned long pfn, start_pfn, prev_pfn; 421 - unsigned int i; 422 - int ret; 423 - 424 - if (!vma_is_io(vma)) 425 - return -EFAULT; 426 - 427 - ret = follow_pfn(vma, start, &pfn); 428 - if (ret) 429 - return ret; 430 - 431 - start_pfn = pfn; 432 - start += PAGE_SIZE; 433 - 434 - for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) { 435 - prev_pfn = pfn; 436 - ret = follow_pfn(vma, start, &pfn); 437 - 438 - if (ret) { 439 - pr_err("no page for address %lu\n", start); 440 - return ret; 441 - } 442 - if (pfn != prev_pfn + 1) 
443 - return -EINVAL; 444 - } 445 - 446 - *res = start_pfn; 447 - return 0; 448 - } 449 - 450 - static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, 451 - int n_pages, struct vm_area_struct *vma, 452 - enum dma_data_direction dma_dir) 453 - { 454 - if (vma_is_io(vma)) { 455 - unsigned int i; 456 - 457 - for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) { 458 - unsigned long pfn; 459 - int ret = follow_pfn(vma, start, &pfn); 460 - 461 - if (!pfn_valid(pfn)) 462 - return -EINVAL; 463 - 464 - if (ret) { 465 - pr_err("no page for address %lu\n", start); 466 - return ret; 467 - } 468 - pages[i] = pfn_to_page(pfn); 469 - } 470 - } else { 471 - int n; 472 - 473 - n = get_user_pages(current, current->mm, start & PAGE_MASK, 474 - n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL); 475 - /* negative error means that no page was pinned */ 476 - n = max(n, 0); 477 - if (n != n_pages) { 478 - pr_err("got only %d of %d user pages\n", n, n_pages); 479 - while (n) 480 - put_page(pages[--n]); 481 - return -EFAULT; 482 - } 483 - } 484 - 485 - return 0; 486 - } 487 - 488 - static void vb2_dc_put_dirty_page(struct page *page) 489 - { 490 - set_page_dirty_lock(page); 491 - put_page(page); 492 - } 493 - 494 432 static void vb2_dc_put_userptr(void *buf_priv) 495 433 { 496 434 struct vb2_dc_buf *buf = buf_priv; 497 435 struct sg_table *sgt = buf->dma_sgt; 436 + int i; 437 + struct page **pages; 498 438 499 439 if (sgt) { 500 440 DEFINE_DMA_ATTRS(attrs); ··· 426 526 */ 427 527 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 428 528 buf->dma_dir, &attrs); 429 - if (!vma_is_io(buf->vma)) 430 - vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 431 - 529 + pages = frame_vector_pages(buf->vec); 530 + /* sgt should exist only if vector contains pages... 
*/ 531 + BUG_ON(IS_ERR(pages)); 532 + for (i = 0; i < frame_vector_count(buf->vec); i++) 533 + set_page_dirty_lock(pages[i]); 432 534 sg_free_table(sgt); 433 535 kfree(sgt); 434 536 } 435 - vb2_put_vma(buf->vma); 537 + vb2_destroy_framevec(buf->vec); 436 538 kfree(buf); 437 539 } 438 540 ··· 474 572 { 475 573 struct vb2_dc_conf *conf = alloc_ctx; 476 574 struct vb2_dc_buf *buf; 477 - unsigned long start; 478 - unsigned long end; 575 + struct frame_vector *vec; 479 576 unsigned long offset; 480 - struct page **pages; 481 - int n_pages; 577 + int n_pages, i; 482 578 int ret = 0; 483 - struct vm_area_struct *vma; 484 579 struct sg_table *sgt; 485 580 unsigned long contig_size; 486 581 unsigned long dma_align = dma_get_cache_alignment(); ··· 503 604 buf->dev = conf->dev; 504 605 buf->dma_dir = dma_dir; 505 606 506 - start = vaddr & PAGE_MASK; 507 607 offset = vaddr & ~PAGE_MASK; 508 - end = PAGE_ALIGN(vaddr + size); 509 - n_pages = (end - start) >> PAGE_SHIFT; 510 - 511 - pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL); 512 - if (!pages) { 513 - ret = -ENOMEM; 514 - pr_err("failed to allocate pages table\n"); 608 + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); 609 + if (IS_ERR(vec)) { 610 + ret = PTR_ERR(vec); 515 611 goto fail_buf; 516 612 } 613 + buf->vec = vec; 614 + n_pages = frame_vector_count(vec); 615 + ret = frame_vector_to_pages(vec); 616 + if (ret < 0) { 617 + unsigned long *nums = frame_vector_pfns(vec); 517 618 518 - /* current->mm->mmap_sem is taken by videobuf2 core */ 519 - vma = find_vma(current->mm, vaddr); 520 - if (!vma) { 521 - pr_err("no vma for address %lu\n", vaddr); 522 - ret = -EFAULT; 523 - goto fail_pages; 524 - } 525 - 526 - if (vma->vm_end < vaddr + size) { 527 - pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size); 528 - ret = -EFAULT; 529 - goto fail_pages; 530 - } 531 - 532 - buf->vma = vb2_get_vma(vma); 533 - if (!buf->vma) { 534 - pr_err("failed to copy vma\n"); 535 - ret = -ENOMEM; 536 - goto 
fail_pages; 537 - } 538 - 539 - /* extract page list from userspace mapping */ 540 - ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir); 541 - if (ret) { 542 - unsigned long pfn; 543 - if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { 544 - buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn); 545 - buf->size = size; 546 - kfree(pages); 547 - return buf; 548 - } 549 - 550 - pr_err("failed to get user pages\n"); 551 - goto fail_vma; 619 + /* 620 + * Failed to convert to pages... Check the memory is physically 621 + * contiguous and use direct mapping 622 + */ 623 + for (i = 1; i < n_pages; i++) 624 + if (nums[i-1] + 1 != nums[i]) 625 + goto fail_pfnvec; 626 + buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]); 627 + goto out; 552 628 } 553 629 554 630 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 555 631 if (!sgt) { 556 632 pr_err("failed to allocate sg table\n"); 557 633 ret = -ENOMEM; 558 - goto fail_get_user_pages; 634 + goto fail_pfnvec; 559 635 } 560 636 561 - ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 637 + ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages, 562 638 offset, size, GFP_KERNEL); 563 639 if (ret) { 564 640 pr_err("failed to initialize sg table\n"); 565 641 goto fail_sgt; 566 642 } 567 - 568 - /* pages are no longer needed */ 569 - kfree(pages); 570 - pages = NULL; 571 643 572 644 /* 573 645 * No need to sync to the device, this will happen later when the ··· 561 691 } 562 692 563 693 buf->dma_addr = sg_dma_address(sgt->sgl); 564 - buf->size = size; 565 694 buf->dma_sgt = sgt; 695 + out: 696 + buf->size = size; 566 697 567 698 return buf; 568 699 ··· 572 701 buf->dma_dir, &attrs); 573 702 574 703 fail_sgt_init: 575 - if (!vma_is_io(buf->vma)) 576 - vb2_dc_sgt_foreach_page(sgt, put_page); 577 704 sg_free_table(sgt); 578 705 579 706 fail_sgt: 580 707 kfree(sgt); 581 708 582 - fail_get_user_pages: 583 - if (pages && !vma_is_io(buf->vma)) 584 - while (n_pages) 585 - put_page(pages[--n_pages]); 586 - 587 - 
fail_vma: 588 - vb2_put_vma(buf->vma); 589 - 590 - fail_pages: 591 - kfree(pages); /* kfree is NULL-proof */ 709 + fail_pfnvec: 710 + vb2_destroy_framevec(vec); 592 711 593 712 fail_buf: 594 713 kfree(buf);
+15 -76
drivers/media/v4l2-core/videobuf2-dma-sg.c
··· 38 38 struct device *dev; 39 39 void *vaddr; 40 40 struct page **pages; 41 + struct frame_vector *vec; 41 42 int offset; 42 43 enum dma_data_direction dma_dir; 43 44 struct sg_table sg_table; ··· 52 51 unsigned int num_pages; 53 52 atomic_t refcount; 54 53 struct vb2_vmarea_handler handler; 55 - struct vm_area_struct *vma; 56 54 57 55 struct dma_buf_attachment *db_attach; 58 56 }; ··· 225 225 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); 226 226 } 227 227 228 - static inline int vma_is_io(struct vm_area_struct *vma) 229 - { 230 - return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 231 - } 232 - 233 228 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, 234 229 unsigned long size, 235 230 enum dma_data_direction dma_dir) 236 231 { 237 232 struct vb2_dma_sg_conf *conf = alloc_ctx; 238 233 struct vb2_dma_sg_buf *buf; 239 - unsigned long first, last; 240 - int num_pages_from_user; 241 - struct vm_area_struct *vma; 242 234 struct sg_table *sgt; 243 235 DEFINE_DMA_ATTRS(attrs); 236 + struct frame_vector *vec; 244 237 245 238 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 246 - 247 239 buf = kzalloc(sizeof *buf, GFP_KERNEL); 248 240 if (!buf) 249 241 return NULL; ··· 246 254 buf->offset = vaddr & ~PAGE_MASK; 247 255 buf->size = size; 248 256 buf->dma_sgt = &buf->sg_table; 257 + vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE); 258 + if (IS_ERR(vec)) 259 + goto userptr_fail_pfnvec; 260 + buf->vec = vec; 249 261 250 - first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; 251 - last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; 252 - buf->num_pages = last - first + 1; 253 - 254 - buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), 255 - GFP_KERNEL); 256 - if (!buf->pages) 257 - goto userptr_fail_alloc_pages; 258 - 259 - vma = find_vma(current->mm, vaddr); 260 - if (!vma) { 261 - dprintk(1, "no vma for address %lu\n", vaddr); 262 - goto userptr_fail_find_vma; 263 - } 264 - 265 - if (vma->vm_end < vaddr + 
size) { 266 - dprintk(1, "vma at %lu is too small for %lu bytes\n", 267 - vaddr, size); 268 - goto userptr_fail_find_vma; 269 - } 270 - 271 - buf->vma = vb2_get_vma(vma); 272 - if (!buf->vma) { 273 - dprintk(1, "failed to copy vma\n"); 274 - goto userptr_fail_find_vma; 275 - } 276 - 277 - if (vma_is_io(buf->vma)) { 278 - for (num_pages_from_user = 0; 279 - num_pages_from_user < buf->num_pages; 280 - ++num_pages_from_user, vaddr += PAGE_SIZE) { 281 - unsigned long pfn; 282 - 283 - if (follow_pfn(vma, vaddr, &pfn)) { 284 - dprintk(1, "no page for address %lu\n", vaddr); 285 - break; 286 - } 287 - buf->pages[num_pages_from_user] = pfn_to_page(pfn); 288 - } 289 - } else 290 - num_pages_from_user = get_user_pages(current, current->mm, 291 - vaddr & PAGE_MASK, 292 - buf->num_pages, 293 - buf->dma_dir == DMA_FROM_DEVICE, 294 - 1, /* force */ 295 - buf->pages, 296 - NULL); 297 - 298 - if (num_pages_from_user != buf->num_pages) 299 - goto userptr_fail_get_user_pages; 262 + buf->pages = frame_vector_pages(vec); 263 + if (IS_ERR(buf->pages)) 264 + goto userptr_fail_sgtable; 265 + buf->num_pages = frame_vector_count(vec); 300 266 301 267 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, 302 268 buf->num_pages, buf->offset, size, 0)) 303 - goto userptr_fail_alloc_table_from_pages; 269 + goto userptr_fail_sgtable; 304 270 305 271 sgt = &buf->sg_table; 306 272 /* ··· 274 324 275 325 userptr_fail_map: 276 326 sg_free_table(&buf->sg_table); 277 - userptr_fail_alloc_table_from_pages: 278 - userptr_fail_get_user_pages: 279 - dprintk(1, "get_user_pages requested/got: %d/%d]\n", 280 - buf->num_pages, num_pages_from_user); 281 - if (!vma_is_io(buf->vma)) 282 - while (--num_pages_from_user >= 0) 283 - put_page(buf->pages[num_pages_from_user]); 284 - vb2_put_vma(buf->vma); 285 - userptr_fail_find_vma: 286 - kfree(buf->pages); 287 - userptr_fail_alloc_pages: 327 + userptr_fail_sgtable: 328 + vb2_destroy_framevec(vec); 329 + userptr_fail_pfnvec: 288 330 kfree(buf); 289 331 return 
NULL; 290 332 } ··· 304 362 while (--i >= 0) { 305 363 if (buf->dma_dir == DMA_FROM_DEVICE) 306 364 set_page_dirty_lock(buf->pages[i]); 307 - if (!vma_is_io(buf->vma)) 308 - put_page(buf->pages[i]); 309 365 } 310 - kfree(buf->pages); 311 - vb2_put_vma(buf->vma); 366 + vb2_destroy_framevec(buf->vec); 312 367 kfree(buf); 313 368 } 314 369
+50 -106
drivers/media/v4l2-core/videobuf2-memops.c
··· 23 23 #include <media/videobuf2-memops.h> 24 24 25 25 /** 26 - * vb2_get_vma() - acquire and lock the virtual memory area 27 - * @vma: given virtual memory area 26 + * vb2_create_framevec() - map virtual addresses to pfns 27 + * @start: Virtual user address where we start mapping 28 + * @length: Length of a range to map 29 + * @write: Should we map for writing into the area 28 30 * 29 - * This function attempts to acquire an area mapped in the userspace for 30 - * the duration of a hardware operation. The area is "locked" by performing 31 - * the same set of operation that are done when process calls fork() and 32 - * memory areas are duplicated. 33 - * 34 - * Returns a copy of a virtual memory region on success or NULL. 31 + * This function allocates and fills in a vector with pfns corresponding to 32 + * virtual address range passed in arguments. If pfns have corresponding pages, 33 + * page references are also grabbed to pin pages in memory. The function 34 + * returns pointer to the vector on success and error pointer in case of 35 + * failure. Returned vector needs to be freed via vb2_destroy_pfnvec(). 
35 36 */ 36 - struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma) 37 + struct frame_vector *vb2_create_framevec(unsigned long start, 38 + unsigned long length, 39 + bool write) 37 40 { 38 - struct vm_area_struct *vma_copy; 41 + int ret; 42 + unsigned long first, last; 43 + unsigned long nr; 44 + struct frame_vector *vec; 39 45 40 - vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL); 41 - if (vma_copy == NULL) 42 - return NULL; 43 - 44 - if (vma->vm_ops && vma->vm_ops->open) 45 - vma->vm_ops->open(vma); 46 - 47 - if (vma->vm_file) 48 - get_file(vma->vm_file); 49 - 50 - memcpy(vma_copy, vma, sizeof(*vma)); 51 - 52 - vma_copy->vm_mm = NULL; 53 - vma_copy->vm_next = NULL; 54 - vma_copy->vm_prev = NULL; 55 - 56 - return vma_copy; 57 - } 58 - EXPORT_SYMBOL_GPL(vb2_get_vma); 59 - 60 - /** 61 - * vb2_put_userptr() - release a userspace virtual memory area 62 - * @vma: virtual memory region associated with the area to be released 63 - * 64 - * This function releases the previously acquired memory area after a hardware 65 - * operation. 66 - */ 67 - void vb2_put_vma(struct vm_area_struct *vma) 68 - { 69 - if (!vma) 70 - return; 71 - 72 - if (vma->vm_ops && vma->vm_ops->close) 73 - vma->vm_ops->close(vma); 74 - 75 - if (vma->vm_file) 76 - fput(vma->vm_file); 77 - 78 - kfree(vma); 79 - } 80 - EXPORT_SYMBOL_GPL(vb2_put_vma); 81 - 82 - /** 83 - * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory 84 - * @vaddr: starting virtual address of the area to be verified 85 - * @size: size of the area 86 - * @res_paddr: will return physical address for the given vaddr 87 - * @res_vma: will return locked copy of struct vm_area for the given area 88 - * 89 - * This function will go through memory area of size @size mapped at @vaddr and 90 - * verify that the underlying physical pages are contiguous. 
If they are 91 - * contiguous the virtual memory area is locked and a @res_vma is filled with 92 - * the copy and @res_pa set to the physical address of the buffer. 93 - * 94 - * Returns 0 on success. 95 - */ 96 - int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size, 97 - struct vm_area_struct **res_vma, dma_addr_t *res_pa) 98 - { 99 - struct mm_struct *mm = current->mm; 100 - struct vm_area_struct *vma; 101 - unsigned long offset, start, end; 102 - unsigned long this_pfn, prev_pfn; 103 - dma_addr_t pa = 0; 104 - 105 - start = vaddr; 106 - offset = start & ~PAGE_MASK; 107 - end = start + size; 108 - 109 - vma = find_vma(mm, start); 110 - 111 - if (vma == NULL || vma->vm_end < end) 112 - return -EFAULT; 113 - 114 - for (prev_pfn = 0; start < end; start += PAGE_SIZE) { 115 - int ret = follow_pfn(vma, start, &this_pfn); 116 - if (ret) 117 - return ret; 118 - 119 - if (prev_pfn == 0) 120 - pa = this_pfn << PAGE_SHIFT; 121 - else if (this_pfn != prev_pfn + 1) 122 - return -EFAULT; 123 - 124 - prev_pfn = this_pfn; 46 + first = start >> PAGE_SHIFT; 47 + last = (start + length - 1) >> PAGE_SHIFT; 48 + nr = last - first + 1; 49 + vec = frame_vector_create(nr); 50 + if (!vec) 51 + return ERR_PTR(-ENOMEM); 52 + ret = get_vaddr_frames(start, nr, write, 1, vec); 53 + if (ret < 0) 54 + goto out_destroy; 55 + /* We accept only complete set of PFNs */ 56 + if (ret != nr) { 57 + ret = -EFAULT; 58 + goto out_release; 125 59 } 126 - 127 - /* 128 - * Memory is contiguous, lock vma and return to the caller 129 - */ 130 - *res_vma = vb2_get_vma(vma); 131 - if (*res_vma == NULL) 132 - return -ENOMEM; 133 - 134 - *res_pa = pa + offset; 135 - return 0; 60 + return vec; 61 + out_release: 62 + put_vaddr_frames(vec); 63 + out_destroy: 64 + frame_vector_destroy(vec); 65 + return ERR_PTR(ret); 136 66 } 137 - EXPORT_SYMBOL_GPL(vb2_get_contig_userptr); 67 + EXPORT_SYMBOL(vb2_create_framevec); 68 + 69 + /** 70 + * vb2_destroy_framevec() - release vector of mapped pfns 71 + * @vec: 
vector of pfns / pages to release 72 + * 73 + * This releases references to all pages in the vector @vec (if corresponding 74 + * pfns are backed by pages) and frees the passed vector. 75 + */ 76 + void vb2_destroy_framevec(struct frame_vector *vec) 77 + { 78 + put_vaddr_frames(vec); 79 + frame_vector_destroy(vec); 80 + } 81 + EXPORT_SYMBOL(vb2_destroy_framevec); 138 82 139 83 /** 140 84 * vb2_common_vm_open() - increase refcount of the vma
+35 -53
drivers/media/v4l2-core/videobuf2-vmalloc.c
··· 23 23 24 24 struct vb2_vmalloc_buf { 25 25 void *vaddr; 26 - struct page **pages; 27 - struct vm_area_struct *vma; 26 + struct frame_vector *vec; 28 27 enum dma_data_direction dma_dir; 29 28 unsigned long size; 30 - unsigned int n_pages; 31 29 atomic_t refcount; 32 30 struct vb2_vmarea_handler handler; 33 31 struct dma_buf *dbuf; ··· 74 76 enum dma_data_direction dma_dir) 75 77 { 76 78 struct vb2_vmalloc_buf *buf; 77 - unsigned long first, last; 78 - int n_pages, offset; 79 - struct vm_area_struct *vma; 80 - dma_addr_t physp; 79 + struct frame_vector *vec; 80 + int n_pages, offset, i; 81 81 82 82 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 83 83 if (!buf) ··· 84 88 buf->dma_dir = dma_dir; 85 89 offset = vaddr & ~PAGE_MASK; 86 90 buf->size = size; 91 + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); 92 + if (IS_ERR(vec)) 93 + goto fail_pfnvec_create; 94 + buf->vec = vec; 95 + n_pages = frame_vector_count(vec); 96 + if (frame_vector_to_pages(vec) < 0) { 97 + unsigned long *nums = frame_vector_pfns(vec); 87 98 88 - 89 - vma = find_vma(current->mm, vaddr); 90 - if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) { 91 - if (vb2_get_contig_userptr(vaddr, size, &vma, &physp)) 92 - goto fail_pages_array_alloc; 93 - buf->vma = vma; 94 - buf->vaddr = (__force void *)ioremap_nocache(physp, size); 95 - if (!buf->vaddr) 96 - goto fail_pages_array_alloc; 99 + /* 100 + * We cannot get page pointers for these pfns. Check memory is 101 + * physically contiguous and use direct mapping. 
102 + */ 103 + for (i = 1; i < n_pages; i++) 104 + if (nums[i-1] + 1 != nums[i]) 105 + goto fail_map; 106 + buf->vaddr = (__force void *) 107 + ioremap_nocache(nums[0] << PAGE_SHIFT, size); 97 108 } else { 98 - first = vaddr >> PAGE_SHIFT; 99 - last = (vaddr + size - 1) >> PAGE_SHIFT; 100 - buf->n_pages = last - first + 1; 101 - buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), 102 - GFP_KERNEL); 103 - if (!buf->pages) 104 - goto fail_pages_array_alloc; 105 - 106 - /* current->mm->mmap_sem is taken by videobuf2 core */ 107 - n_pages = get_user_pages(current, current->mm, 108 - vaddr & PAGE_MASK, buf->n_pages, 109 - dma_dir == DMA_FROM_DEVICE, 110 - 1, /* force */ 111 - buf->pages, NULL); 112 - if (n_pages != buf->n_pages) 113 - goto fail_get_user_pages; 114 - 115 - buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1, 109 + buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, 116 110 PAGE_KERNEL); 117 - if (!buf->vaddr) 118 - goto fail_get_user_pages; 119 111 } 120 112 113 + if (!buf->vaddr) 114 + goto fail_map; 121 115 buf->vaddr += offset; 122 116 return buf; 123 117 124 - fail_get_user_pages: 125 - pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages, 126 - buf->n_pages); 127 - while (--n_pages >= 0) 128 - put_page(buf->pages[n_pages]); 129 - kfree(buf->pages); 130 - 131 - fail_pages_array_alloc: 118 + fail_map: 119 + vb2_destroy_framevec(vec); 120 + fail_pfnvec_create: 132 121 kfree(buf); 133 122 134 123 return NULL; ··· 124 143 struct vb2_vmalloc_buf *buf = buf_priv; 125 144 unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; 126 145 unsigned int i; 146 + struct page **pages; 147 + unsigned int n_pages; 127 148 128 - if (buf->pages) { 149 + if (!buf->vec->is_pfns) { 150 + n_pages = frame_vector_count(buf->vec); 151 + pages = frame_vector_pages(buf->vec); 129 152 if (vaddr) 130 - vm_unmap_ram((void *)vaddr, buf->n_pages); 131 - for (i = 0; i < buf->n_pages; ++i) { 132 - if (buf->dma_dir == DMA_FROM_DEVICE) 133 - 
set_page_dirty_lock(buf->pages[i]); 134 - put_page(buf->pages[i]); 135 - } 136 - kfree(buf->pages); 153 + vm_unmap_ram((void *)vaddr, n_pages); 154 + if (buf->dma_dir == DMA_FROM_DEVICE) 155 + for (i = 0; i < n_pages; i++) 156 + set_page_dirty_lock(pages[i]); 137 157 } else { 138 - vb2_put_vma(buf->vma); 139 158 iounmap((__force void __iomem *)buf->vaddr); 140 159 } 160 + vb2_destroy_framevec(buf->vec); 141 161 kfree(buf); 142 162 } 143 163
+44
include/linux/mm.h
··· 20 20 #include <linux/shrinker.h> 21 21 #include <linux/resource.h> 22 22 #include <linux/page_ext.h> 23 + #include <linux/err.h> 23 24 24 25 struct mempolicy; 25 26 struct anon_vma; ··· 1215 1214 int write, int force, struct page **pages); 1216 1215 int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1217 1216 struct page **pages); 1217 + 1218 + /* Container for pinned pfns / pages */ 1219 + struct frame_vector { 1220 + unsigned int nr_allocated; /* Number of frames we have space for */ 1221 + unsigned int nr_frames; /* Number of frames stored in ptrs array */ 1222 + bool got_ref; /* Did we pin pages by getting page ref? */ 1223 + bool is_pfns; /* Does array contain pages or pfns? */ 1224 + void *ptrs[0]; /* Array of pinned pfns / pages. Use 1225 + * pfns_vector_pages() or pfns_vector_pfns() 1226 + * for access */ 1227 + }; 1228 + 1229 + struct frame_vector *frame_vector_create(unsigned int nr_frames); 1230 + void frame_vector_destroy(struct frame_vector *vec); 1231 + int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, 1232 + bool write, bool force, struct frame_vector *vec); 1233 + void put_vaddr_frames(struct frame_vector *vec); 1234 + int frame_vector_to_pages(struct frame_vector *vec); 1235 + void frame_vector_to_pfns(struct frame_vector *vec); 1236 + 1237 + static inline unsigned int frame_vector_count(struct frame_vector *vec) 1238 + { 1239 + return vec->nr_frames; 1240 + } 1241 + 1242 + static inline struct page **frame_vector_pages(struct frame_vector *vec) 1243 + { 1244 + if (vec->is_pfns) { 1245 + int err = frame_vector_to_pages(vec); 1246 + 1247 + if (err) 1248 + return ERR_PTR(err); 1249 + } 1250 + return (struct page **)(vec->ptrs); 1251 + } 1252 + 1253 + static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) 1254 + { 1255 + if (!vec->is_pfns) 1256 + frame_vector_to_pfns(vec); 1257 + return (unsigned long *)(vec->ptrs); 1258 + } 1259 + 1218 1260 struct kvec; 1219 1261 int get_kernel_pages(const 
struct kvec *iov, int nr_pages, int write, 1220 1262 struct page **pages);
+5 -6
include/media/videobuf2-memops.h
··· 15 15 #define _MEDIA_VIDEOBUF2_MEMOPS_H 16 16 17 17 #include <media/videobuf2-core.h> 18 + #include <linux/mm.h> 18 19 19 20 /** 20 21 * struct vb2_vmarea_handler - common vma refcount tracking handler ··· 32 31 33 32 extern const struct vm_operations_struct vb2_common_vm_ops; 34 33 35 - int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size, 36 - struct vm_area_struct **res_vma, dma_addr_t *res_pa); 37 - 38 - struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma); 39 - void vb2_put_vma(struct vm_area_struct *vma); 40 - 34 + struct frame_vector *vb2_create_framevec(unsigned long start, 35 + unsigned long length, 36 + bool write); 37 + void vb2_destroy_framevec(struct frame_vector *vec); 41 38 42 39 #endif
+3
mm/Kconfig
··· 677 677 mapping in an O_DIRECT operation, among other things. 678 678 679 679 If FS_DAX is enabled, then say Y. 680 + 681 + config FRAME_VECTOR 682 + bool
+1
mm/Makefile
··· 80 80 obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o 81 81 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o 82 82 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o 83 + obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
+230
mm/frame_vector.c
··· 1 + #include <linux/kernel.h> 2 + #include <linux/errno.h> 3 + #include <linux/err.h> 4 + #include <linux/mm.h> 5 + #include <linux/slab.h> 6 + #include <linux/vmalloc.h> 7 + #include <linux/pagemap.h> 8 + #include <linux/sched.h> 9 + 10 + /** 11 + * get_vaddr_frames() - map virtual addresses to pfns 12 + * @start: starting user address 13 + * @nr_frames: number of pages / pfns from start to map 14 + * @write: whether pages will be written to by the caller 15 + * @force: whether to force write access even if user mapping is 16 + * readonly. See description of the same argument of 17 + * get_user_pages(). 18 + * @vec: structure which receives pages / pfns of the addresses mapped. 19 + * It should have space for at least nr_frames entries. 20 + * 21 + * This function maps virtual addresses from @start and fills @vec structure 22 + * with page frame numbers or page pointers to corresponding pages (choice 23 + * depends on the type of the vma underlying the virtual address). If @start 24 + * belongs to a normal vma, the function grabs reference to each of the pages 25 + * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't 26 + * touch page structures and the caller must make sure pfns aren't reused for 27 + * anything else while the caller is using them. 28 + * 29 + * The function returns number of pages mapped which may be less than 30 + * @nr_frames. In particular we stop mapping if there are more vmas of 31 + * different type underlying the specified range of virtual addresses. 32 + * When the function isn't able to map a single page, it returns error. 33 + * 34 + * This function takes care of grabbing mmap_sem as necessary. 
35 + */ 36 + int get_vaddr_frames(unsigned long start, unsigned int nr_frames, 37 + bool write, bool force, struct frame_vector *vec) 38 + { 39 + struct mm_struct *mm = current->mm; 40 + struct vm_area_struct *vma; 41 + int ret = 0; 42 + int err; 43 + int locked; 44 + 45 + if (nr_frames == 0) 46 + return 0; 47 + 48 + if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) 49 + nr_frames = vec->nr_allocated; 50 + 51 + down_read(&mm->mmap_sem); 52 + locked = 1; 53 + vma = find_vma_intersection(mm, start, start + 1); 54 + if (!vma) { 55 + ret = -EFAULT; 56 + goto out; 57 + } 58 + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { 59 + vec->got_ref = true; 60 + vec->is_pfns = false; 61 + ret = get_user_pages_locked(current, mm, start, nr_frames, 62 + write, force, (struct page **)(vec->ptrs), &locked); 63 + goto out; 64 + } 65 + 66 + vec->got_ref = false; 67 + vec->is_pfns = true; 68 + do { 69 + unsigned long *nums = frame_vector_pfns(vec); 70 + 71 + while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { 72 + err = follow_pfn(vma, start, &nums[ret]); 73 + if (err) { 74 + if (ret == 0) 75 + ret = err; 76 + goto out; 77 + } 78 + start += PAGE_SIZE; 79 + ret++; 80 + } 81 + /* 82 + * We stop if we have enough pages or if VMA doesn't completely 83 + * cover the tail page. 84 + */ 85 + if (ret >= nr_frames || start < vma->vm_end) 86 + break; 87 + vma = find_vma_intersection(mm, start, start + 1); 88 + } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); 89 + out: 90 + if (locked) 91 + up_read(&mm->mmap_sem); 92 + if (!ret) 93 + ret = -EFAULT; 94 + if (ret > 0) 95 + vec->nr_frames = ret; 96 + return ret; 97 + } 98 + EXPORT_SYMBOL(get_vaddr_frames); 99 + 100 + /** 101 + * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired 102 + * them 103 + * @vec: frame vector to put 104 + * 105 + * Drop references to pages if get_vaddr_frames() acquired them. 
We also 106 + * invalidate the frame vector so that it is prepared for the next call into 107 + * get_vaddr_frames(). 108 + */ 109 + void put_vaddr_frames(struct frame_vector *vec) 110 + { 111 + int i; 112 + struct page **pages; 113 + 114 + if (!vec->got_ref) 115 + goto out; 116 + pages = frame_vector_pages(vec); 117 + /* 118 + * frame_vector_pages() might need to do a conversion when 119 + * get_vaddr_frames() got pages but vec was later converted to pfns. 120 + * But it shouldn't really fail to convert pfns back... 121 + */ 122 + if (WARN_ON(IS_ERR(pages))) 123 + goto out; 124 + for (i = 0; i < vec->nr_frames; i++) 125 + put_page(pages[i]); 126 + vec->got_ref = false; 127 + out: 128 + vec->nr_frames = 0; 129 + } 130 + EXPORT_SYMBOL(put_vaddr_frames); 131 + 132 + /** 133 + * frame_vector_to_pages() - convert frame vector to contain page pointers 134 + * @vec: frame vector to convert 135 + * 136 + * Convert @vec to contain array of page pointers. If the conversion is 137 + * successful, return 0. Otherwise return an error. Note that we do not grab 138 + * page references for the page structures. 139 + */ 140 + int frame_vector_to_pages(struct frame_vector *vec) 141 + { 142 + int i; 143 + unsigned long *nums; 144 + struct page **pages; 145 + 146 + if (!vec->is_pfns) 147 + return 0; 148 + nums = frame_vector_pfns(vec); 149 + for (i = 0; i < vec->nr_frames; i++) 150 + if (!pfn_valid(nums[i])) 151 + return -EINVAL; 152 + pages = (struct page **)nums; 153 + for (i = 0; i < vec->nr_frames; i++) 154 + pages[i] = pfn_to_page(nums[i]); 155 + vec->is_pfns = false; 156 + return 0; 157 + } 158 + EXPORT_SYMBOL(frame_vector_to_pages); 159 + 160 + /** 161 + * frame_vector_to_pfns() - convert frame vector to contain pfns 162 + * @vec: frame vector to convert 163 + * 164 + * Convert @vec to contain array of pfns. 
165 + */ 166 + void frame_vector_to_pfns(struct frame_vector *vec) 167 + { 168 + int i; 169 + unsigned long *nums; 170 + struct page **pages; 171 + 172 + if (vec->is_pfns) 173 + return; 174 + pages = (struct page **)(vec->ptrs); 175 + nums = (unsigned long *)pages; 176 + for (i = 0; i < vec->nr_frames; i++) 177 + nums[i] = page_to_pfn(pages[i]); 178 + vec->is_pfns = true; 179 + } 180 + EXPORT_SYMBOL(frame_vector_to_pfns); 181 + 182 + /** 183 + * frame_vector_create() - allocate & initialize structure for pinned pfns 184 + * @nr_frames: number of pfns slots we should reserve 185 + * 186 + * Allocate and initialize struct frame_vector to be able to hold @nr_frames 187 + * pfns. 188 + */ 189 + struct frame_vector *frame_vector_create(unsigned int nr_frames) 190 + { 191 + struct frame_vector *vec; 192 + int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames; 193 + 194 + if (WARN_ON_ONCE(nr_frames == 0)) 195 + return NULL; 196 + /* 197 + * This is absurdly high. It's here just to avoid strange effects when 198 + * arithmetic overflows. 199 + */ 200 + if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2)) 201 + return NULL; 202 + /* 203 + * Avoid higher order allocations, use vmalloc instead. It should 204 + * be rare anyway. 205 + */ 206 + if (size <= PAGE_SIZE) 207 + vec = kmalloc(size, GFP_KERNEL); 208 + else 209 + vec = vmalloc(size); 210 + if (!vec) 211 + return NULL; 212 + vec->nr_allocated = nr_frames; 213 + vec->nr_frames = 0; 214 + return vec; 215 + } 216 + EXPORT_SYMBOL(frame_vector_create); 217 + 218 + /** 219 + * frame_vector_destroy() - free memory allocated to carry frame vector 220 + * @vec: Frame vector to free 221 + * 222 + * Free structure allocated by frame_vector_create() to carry frames. 223 + */ 224 + void frame_vector_destroy(struct frame_vector *vec) 225 + { 226 + /* Make sure put_vaddr_frames() got called properly... */ 227 + VM_BUG_ON(vec->nr_frames > 0); 228 + kvfree(vec); 229 + } 230 + EXPORT_SYMBOL(frame_vector_destroy);