/* ispqueue.c as captured from a kernel v3.15 source tree (raw view, ~30 kB). */
/*
 * ispqueue.c
 *
 * TI OMAP3 ISP - Video buffers queue handling
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ispqueue.h"

/* -----------------------------------------------------------------------------
 * Video buffers management
 */

/*
 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
 *
 * The typical operation required here is Cache Invalidation across
 * the (user space) buffer address range. And this _must_ be done
 * at QBUF stage (and *only* at QBUF).
 *
 * We try to use optimal cache invalidation function:
 * - dmac_map_area:
 *    - used when the number of pages are _low_.
 *    - it becomes quite slow as the number of pages increase.
 *       - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
 *       - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
 *
 * - flush_cache_all:
 *    - used when the number of pages are _high_.
 *    - time taken in the range of 500-900 us.
 *    - has a higher penalty but, as whole dcache + icache is invalidated
 */
/*
 * FIXME: dmac_inv_range crashes randomly on the user space buffer
 *        address. Fall back to flush_cache_all for now.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX	0

static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
	/* Non-cached or write-combining mappings need no cache maintenance;
	 * skip_cache is set by isp_video_buffer_prepare_vm_flags().
	 */
	if (buf->skip_cache)
		return;

	/* With ISP_CACHE_FLUSH_PAGES_MAX defined to 0, any buffer with at
	 * least one page takes the flush_cache_all() path (see FIXME above).
	 */
	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
		flush_cache_all();
	else {
		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
			      DMA_FROM_DEVICE);
		outer_inv_range(buf->vbuf.m.userptr,
				buf->vbuf.m.userptr + buf->vbuf.length);
	}
}

/*
 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
 *
 * Lock the VMAs underlying the given buffer into memory. This avoids the
 * userspace buffer mapping from being swapped out, making VIPT cache handling
 * easier.
 *
 * Note that the pages will not be freed as the buffers have been locked to
 * memory using by a call to get_user_pages(), but the userspace mapping could
 * still disappear if the VMAs are not locked. This is caused by the memory
 * management code trying to be as lock-less as possible, which results in the
 * userspace mapping manager not finding out that the pages are locked under
 * some conditions.
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
	struct vm_area_struct *vma;
	unsigned long start;
	unsigned long end;
	int ret = 0;

	/* MMAP buffers are kernel-allocated, there is no userspace VMA to
	 * lock here.
	 */
	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
		return 0;

	/* We can be called from workqueue context if the current task dies to
	 * unlock the VMAs. In that case there's no current memory management
	 * context so unlocking can't be performed, but the VMAs have been or
	 * are getting destroyed anyway so it doesn't really matter.
	 */
	if (!current || !current->mm)
		return lock ? -EINVAL : 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_write(&current->mm->mmap_sem);
	spin_lock(&current->mm->page_table_lock);

	/* Walk every VMA covering [start, end] and toggle VM_LOCKED. */
	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL) {
			ret = -EFAULT;
			goto out;
		}

		if (lock)
			vma->vm_flags |= VM_LOCKED;
		else
			vma->vm_flags &= ~VM_LOCKED;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Mirror the lock state in the buffer's cached flags. */
	if (lock)
		buf->vm_flags |= VM_LOCKED;
	else
		buf->vm_flags &= ~VM_LOCKED;

out:
	spin_unlock(&current->mm->page_table_lock);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
 *
 * Iterate over the vmalloc'ed area and create a scatter list entry for every
 * page.
 */
static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int npages;
	unsigned int i;
	void *addr;

	addr = buf->vaddr;
	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;

	sglist = vmalloc(npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, npages);

	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		/* Highmem pages are not supported here. */
		if (page == NULL || PageHighMem(page)) {
			vfree(sglist);
			return -EINVAL;
		}

		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
	}

	buf->sglen = npages;
	buf->sglist = sglist;

	return 0;
}

/*
 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
 *
 * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
 */
static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int offset = buf->offset;
	unsigned int i;

	sglist = vmalloc(buf->npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, buf->npages);

	for (i = 0; i < buf->npages; ++i) {
		/* Highmem pages are not supported here. */
		if (PageHighMem(buf->pages[i])) {
			vfree(sglist);
			return -EINVAL;
		}

		/* Only the first page can start at a non-zero offset. */
		sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
			    offset);
		offset = 0;
	}

	buf->sglen = buf->npages;
	buf->sglist = sglist;

	return 0;
}

/*
 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
 *
 * Create a scatter list of physically contiguous pages starting at the buffer
 * memory physical address.
 */
static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int offset = buf->offset;
	unsigned long pfn = buf->paddr >> PAGE_SHIFT;
	unsigned int i;

	sglist = vmalloc(buf->npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, buf->npages);

	for (i = 0; i < buf->npages; ++i, ++pfn) {
		sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
			    offset);
		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
		 * manually.
		 */
		sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
		offset = 0;
	}

	buf->sglen = buf->npages;
	buf->sglist = sglist;

	return 0;
}

/*
 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 *
 * Release pages locked by a call isp_video_buffer_prepare_user and free the
 * pages table.
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	unsigned int i;

	if (buf->queue->ops->buffer_cleanup)
		buf->queue->ops->buffer_cleanup(buf);

	/* PFNMAP buffers were never DMA-mapped (their DMA addresses are set
	 * manually in isp_video_buffer_sglist_pfnmap), so only unmap others.
	 */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
			     direction);
	}

	vfree(buf->sglist);
	buf->sglist = NULL;
	buf->sglen = 0;

	/* Unlock the VMAs and release the pinned pages. */
	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}

/*
 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
 *
 * This function creates a list of pages for a userspace VMA. The number of
 * pages is first computed based on the buffer size, and pages are then
 * retrieved by a call to get_user_pages.
 *
 * Pages are pinned to memory by get_user_pages, making them available for DMA
 * transfers. However, due to memory management optimization, it seems the
 * get_user_pages doesn't guarantee that the pinned pages will not be written
 * to swap and removed from the userspace mapping(s). When this happens, a page
 * fault can be generated when accessing those unmapped pages.
 *
 * If the fault is triggered by a page table walk caused by VIPT cache
 * management operations, the page fault handler might oops if the MM semaphore
 * is held, as it can't handle kernel page faults in that case. To fix that, a
 * fixup entry needs to be added to the cache management code, or the userspace
 * VMA must be locked to avoid removing pages from the userspace mapping in the
 * first place.
 *
 * If the number of pages retrieved is smaller than the number required by the
 * buffer size, the function returns -EFAULT.
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
	unsigned long data;
	unsigned int first;
	unsigned int last;
	int ret;

	data = buf->vbuf.m.userptr;
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;

	buf->offset = data & ~PAGE_MASK;
	buf->npages = last - first + 1;
	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
	if (buf->pages == NULL)
		return -ENOMEM;

	/* The device writes to capture buffers, hence the write flag passed
	 * to get_user_pages() for the capture buffer type.
	 */
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
			     buf->npages,
			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
			     buf->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret != buf->npages) {
		/* Release the pages that did get pinned before failing. */
		buf->npages = ret < 0 ? 0 : ret;
		isp_video_buffer_cleanup(buf);
		return -EFAULT;
	}

	ret = isp_video_buffer_lock_vma(buf, 1);
	if (ret < 0)
		isp_video_buffer_cleanup(buf);

	return ret;
}

/*
 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
 *
 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 * memory and if they span a single VMA.
 *
 * Return 0 if the buffer is valid, or -EFAULT otherwise.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	unsigned long prev_pfn;
	unsigned long this_pfn;
	unsigned long start;
	unsigned long end;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	buf->offset = start & ~PAGE_MASK;
	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	buf->pages = NULL;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	/* The whole buffer must fit in a single VMA. */
	if (vma == NULL || vma->vm_end < end)
		goto done;

	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			/* Physically non-contiguous, reject the buffer. */
			ret = -EFAULT;
			goto done;
		}

		prev_pfn = this_pfn;
	}

	buf->paddr = pa + buf->offset;
	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 *
 * This function locates the VMAs for the buffer's userspace address and checks
 * that their flags match. The only flag that we need to care for at the moment
 * is VM_PFNMAP.
 *
 * The buffer vm_flags field is set to the first VMA flags.
 *
 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 * have incompatible flags.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	pgprot_t uninitialized_var(vm_page_prot);
	unsigned long start;
	unsigned long end;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_read(&current->mm->mmap_sem);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL)
			goto done;

		/* Remember the flags and protection of the first VMA and
		 * require all subsequent VMAs to match them.
		 */
		if (start == buf->vbuf.m.userptr) {
			buf->vm_flags = vma->vm_flags;
			vm_page_prot = vma->vm_page_prot;
		}

		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
			goto done;

		if (vm_page_prot != vma->vm_page_prot)
			goto done;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Skip cache management to enhance performances for non-cached or
	 * write-combining buffers.
	 */
	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
	    vm_page_prot == pgprot_writecombine(vm_page_prot))
		buf->skip_cache = true;

	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 *
 * Preparing a buffer involves:
 *
 * - validating VMAs (userspace buffers only)
 * - locking pages and VMAs into memory (userspace buffers only)
 * - building page and scatter-gather lists
 * - mapping buffers for DMA operation
 * - performing driver-specific preparation
 *
 * The function must be called in userspace context with a valid mm context
 * (this excludes cleanup paths such as sys_close when the userspace process
 * segfaults).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_sglist_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP) {
			ret = isp_video_buffer_prepare_pfnmap(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_pfnmap(buf);
		} else {
			ret = isp_video_buffer_prepare_user(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_user(buf);
		}
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	/* DMA-map the scatterlist, except for PFNMAP buffers whose DMA
	 * addresses have already been set manually.
	 */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
				 direction);
		if (ret != buf->sglen) {
			ret = -EFAULT;
			goto done;
		}
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0) {
		isp_video_buffer_cleanup(buf);
		return ret;
	}

	return ret;
}

/*
 * isp_video_buffer_query - Query the status of a given buffer
 *
 * Fill the v4l2_buffer flags from the buffer state.
 *
 * Locking: must be called with the queue lock held.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
				   struct v4l2_buffer *vbuf)
{
	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

	if (buf->vma_use_count)
		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case ISP_BUF_STATE_ERROR:
		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
		/* Fallthrough */
	case ISP_BUF_STATE_DONE:
		vbuf->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case ISP_BUF_STATE_QUEUED:
	case ISP_BUF_STATE_ACTIVE:
		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case ISP_BUF_STATE_IDLE:
	default:
		break;
	}
}

/*
 * isp_video_buffer_wait - Wait for a buffer to be ready
 *
 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 *
 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
 * queue using the same condition.
 */
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != ISP_BUF_STATE_QUEUED &&
			buf->state != ISP_BUF_STATE_ACTIVE)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != ISP_BUF_STATE_QUEUED &&
		buf->state != ISP_BUF_STATE_ACTIVE);
}

/* -----------------------------------------------------------------------------
 * Queue management
 */

/*
 * isp_video_queue_free - Free video buffers memory
 *
 * Buffers can only be freed if the queue isn't streaming and if no buffer is
 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 *
 * This function must be called with the queue lock held.
603 */ 604static int isp_video_queue_free(struct isp_video_queue *queue) 605{ 606 unsigned int i; 607 608 if (queue->streaming) 609 return -EBUSY; 610 611 for (i = 0; i < queue->count; ++i) { 612 if (queue->buffers[i]->vma_use_count != 0) 613 return -EBUSY; 614 } 615 616 for (i = 0; i < queue->count; ++i) { 617 struct isp_video_buffer *buf = queue->buffers[i]; 618 619 isp_video_buffer_cleanup(buf); 620 621 vfree(buf->vaddr); 622 buf->vaddr = NULL; 623 624 kfree(buf); 625 queue->buffers[i] = NULL; 626 } 627 628 INIT_LIST_HEAD(&queue->queue); 629 queue->count = 0; 630 return 0; 631} 632 633/* 634 * isp_video_queue_alloc - Allocate video buffers memory 635 * 636 * This function must be called with the queue lock held. 637 */ 638static int isp_video_queue_alloc(struct isp_video_queue *queue, 639 unsigned int nbuffers, 640 unsigned int size, enum v4l2_memory memory) 641{ 642 struct isp_video_buffer *buf; 643 unsigned int i; 644 void *mem; 645 int ret; 646 647 /* Start by freeing the buffers. */ 648 ret = isp_video_queue_free(queue); 649 if (ret < 0) 650 return ret; 651 652 /* Bail out if no buffers should be allocated. */ 653 if (nbuffers == 0) 654 return 0; 655 656 /* Initialize the allocated buffers. */ 657 for (i = 0; i < nbuffers; ++i) { 658 buf = kzalloc(queue->bufsize, GFP_KERNEL); 659 if (buf == NULL) 660 break; 661 662 if (memory == V4L2_MEMORY_MMAP) { 663 /* Allocate video buffers memory for mmap mode. Align 664 * the size to the page size. 
665 */ 666 mem = vmalloc_32_user(PAGE_ALIGN(size)); 667 if (mem == NULL) { 668 kfree(buf); 669 break; 670 } 671 672 buf->vbuf.m.offset = i * PAGE_ALIGN(size); 673 buf->vaddr = mem; 674 } 675 676 buf->vbuf.index = i; 677 buf->vbuf.length = size; 678 buf->vbuf.type = queue->type; 679 buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 680 buf->vbuf.field = V4L2_FIELD_NONE; 681 buf->vbuf.memory = memory; 682 683 buf->queue = queue; 684 init_waitqueue_head(&buf->wait); 685 686 queue->buffers[i] = buf; 687 } 688 689 if (i == 0) 690 return -ENOMEM; 691 692 queue->count = i; 693 return nbuffers; 694} 695 696/** 697 * omap3isp_video_queue_cleanup - Clean up the video buffers queue 698 * @queue: Video buffers queue 699 * 700 * Free all allocated resources and clean up the video buffers queue. The queue 701 * must not be busy (no ongoing video stream) and buffers must have been 702 * unmapped. 703 * 704 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been 705 * unmapped. 706 */ 707int omap3isp_video_queue_cleanup(struct isp_video_queue *queue) 708{ 709 return isp_video_queue_free(queue); 710} 711 712/** 713 * omap3isp_video_queue_init - Initialize the video buffers queue 714 * @queue: Video buffers queue 715 * @type: V4L2 buffer type (capture or output) 716 * @ops: Driver-specific queue operations 717 * @dev: Device used for DMA operations 718 * @bufsize: Size of the driver-specific buffer structure 719 * 720 * Initialize the video buffers queue with the supplied parameters. 721 * 722 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or 723 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet. 724 * 725 * Buffer objects will be allocated using the given buffer size to allow room 726 * for driver-specific fields. Driver-specific buffer structures must start 727 * with a struct isp_video_buffer field. 
Drivers with no driver-specific buffer 728 * structure must pass the size of the isp_video_buffer structure in the bufsize 729 * parameter. 730 * 731 * Return 0 on success. 732 */ 733int omap3isp_video_queue_init(struct isp_video_queue *queue, 734 enum v4l2_buf_type type, 735 const struct isp_video_queue_operations *ops, 736 struct device *dev, unsigned int bufsize) 737{ 738 INIT_LIST_HEAD(&queue->queue); 739 mutex_init(&queue->lock); 740 spin_lock_init(&queue->irqlock); 741 742 queue->type = type; 743 queue->ops = ops; 744 queue->dev = dev; 745 queue->bufsize = bufsize; 746 747 return 0; 748} 749 750/* ----------------------------------------------------------------------------- 751 * V4L2 operations 752 */ 753 754/** 755 * omap3isp_video_queue_reqbufs - Allocate video buffers memory 756 * 757 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It 758 * allocated video buffer objects and, for MMAP buffers, buffer memory. 759 * 760 * If the number of buffers is 0, all buffers are freed and the function returns 761 * without performing any allocation. 762 * 763 * If the number of buffers is not 0, currently allocated buffers (if any) are 764 * freed and the requested number of buffers are allocated. Depending on 765 * driver-specific requirements and on memory availability, a number of buffer 766 * smaller or bigger than requested can be allocated. This isn't considered as 767 * an error. 
 *
 * Return 0 on success or one of the following error codes:
 *
 * -EINVAL if the buffer type or index are invalid
 * -EBUSY if the queue is busy (streaming or buffers mapped)
 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	/* Let the driver compute the buffer size and adjust the count. */
	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 *
 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 * returns the status of a given video buffer.
 *
 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
 */
int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
				  struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	int ret = 0;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	buf = queue->buffers[vbuf->index];
	isp_video_buffer_query(buf, vbuf);

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_qbuf - Queue a buffer
 *
 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 *
 * The v4l2_buffer structure passed from userspace is first sanity tested.
If 843 * sane, the buffer is then processed and added to the main queue and, if the 844 * queue is streaming, to the IRQ queue. 845 * 846 * Before being enqueued, USERPTR buffers are checked for address changes. If 847 * the buffer has a different userspace address, the old memory area is unlocked 848 * and the new memory area is locked. 849 */ 850int omap3isp_video_queue_qbuf(struct isp_video_queue *queue, 851 struct v4l2_buffer *vbuf) 852{ 853 struct isp_video_buffer *buf; 854 unsigned long flags; 855 int ret = -EINVAL; 856 857 if (vbuf->type != queue->type) 858 goto done; 859 860 mutex_lock(&queue->lock); 861 862 if (vbuf->index >= queue->count) 863 goto done; 864 865 buf = queue->buffers[vbuf->index]; 866 867 if (vbuf->memory != buf->vbuf.memory) 868 goto done; 869 870 if (buf->state != ISP_BUF_STATE_IDLE) 871 goto done; 872 873 if (vbuf->memory == V4L2_MEMORY_USERPTR && 874 vbuf->length < buf->vbuf.length) 875 goto done; 876 877 if (vbuf->memory == V4L2_MEMORY_USERPTR && 878 vbuf->m.userptr != buf->vbuf.m.userptr) { 879 isp_video_buffer_cleanup(buf); 880 buf->vbuf.m.userptr = vbuf->m.userptr; 881 buf->prepared = 0; 882 } 883 884 if (!buf->prepared) { 885 ret = isp_video_buffer_prepare(buf); 886 if (ret < 0) 887 goto done; 888 buf->prepared = 1; 889 } 890 891 isp_video_buffer_cache_sync(buf); 892 893 buf->state = ISP_BUF_STATE_QUEUED; 894 list_add_tail(&buf->stream, &queue->queue); 895 896 if (queue->streaming) { 897 spin_lock_irqsave(&queue->irqlock, flags); 898 queue->ops->buffer_queue(buf); 899 spin_unlock_irqrestore(&queue->irqlock, flags); 900 } 901 902 ret = 0; 903 904done: 905 mutex_unlock(&queue->lock); 906 return ret; 907} 908 909/** 910 * omap3isp_video_queue_dqbuf - Dequeue a buffer 911 * 912 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler. 913 * 914 * Wait until a buffer is ready to be dequeued, remove it from the queue and 915 * copy its information to the v4l2_buffer structure. 
 *
 * If the nonblocking argument is not zero and no buffer is ready, return
 * -EAGAIN immediately instead of waiting.
 *
 * If no buffer has been enqueued, or if the requested buffer type doesn't match
 * the queue type, return -EINVAL.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Wait on the buffer at the front of the queue. */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_streamon - Start streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 * starts streaming on the queue and calls the buffer_queue operation for all
 * queued buffers.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;

	mutex_lock(&queue->lock);

	if (queue->streaming)
		goto done;

	queue->streaming = 1;

	/* Hand all already-queued buffers over to the driver. */
	spin_lock_irqsave(&queue->irqlock, flags);
	list_for_each_entry(buf, &queue->queue, stream)
		queue->ops->buffer_queue(buf);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->lock);
	return 0;
}

/**
 * omap3isp_video_queue_streamoff - Stop streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 * stops streaming on the queue and wakes up all the buffers.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	/* Return all buffers to the idle state, waking up waiters blocked on
	 * active buffers.
	 */
	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}

/**
 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_DONE)
			buf->state = ISP_BUF_STATE_ERROR;
	}

done:
	mutex_unlock(&queue->lock);
}

/* Track userspace mappings of a buffer through the VMA open/close hooks; the
 * vma_use_count reference count prevents freeing mapped buffers (see
 * isp_video_queue_free).
 */
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}

static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}

static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};

/**
 * omap3isp_video_queue_mmap - Map buffers to userspace
 *
 * This function is intended to be used as an mmap() file operation handler. It
 * maps a buffer to userspace based on the VMA offset.
 *
 * Only buffers of memory type MMAP are supported.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	/* Locate the buffer whose mmap offset matches the VMA offset. */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	/* The mapping must cover the whole page-aligned buffer. */
	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	/* Account for this initial mapping. */
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_poll - Poll video queue state
 *
 * This function is intended to be used as a poll() file operation handler. It
 * polls the state of the video buffer at the front of the queue and returns an
 * events mask.
 *
 * If no buffer is present at the front of the queue, POLLERR is returned.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);
	if (list_empty(&queue->queue)) {
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}