/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#include "uvc.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
 * uvc_free_buffers respectively. The former acquires the video queue lock,
 * while the latter must be called with the lock held (so that allocation can
 * free previously allocated buffers). Trying to free buffers that are mapped
 * to user space will return -EBUSY.
 *
 * Video buffers are managed using two queues. However, unlike most USB video
 * drivers that use an in queue and an out queue, we use a main queue to hold
 * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
 * hold empty buffers. This design (copied from video-buf) minimizes locking
 * in interrupt, as only one queue is shared between interrupt and user
 * contexts.
 *
 * Use cases
 * ---------
 *
 * Unless stated otherwise, all operations that modify the irq buffers queue
 * are protected by the irq spinlock.
 *
 * 1. The user queues the buffers, starts streaming and dequeues a buffer.
52 * 53 * The buffers are added to the main and irq queues. Both operations are 54 * protected by the queue lock, and the later is protected by the irq 55 * spinlock as well. 56 * 57 * The completion handler fetches a buffer from the irq queue and fills it 58 * with video data. If no buffer is available (irq queue empty), the handler 59 * returns immediately. 60 * 61 * When the buffer is full, the completion handler removes it from the irq 62 * queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue. 63 * At that point, any process waiting on the buffer will be woken up. If a 64 * process tries to dequeue a buffer after it has been marked ready, the 65 * dequeing will succeed immediately. 66 * 67 * 2. Buffers are queued, user is waiting on a buffer and the device gets 68 * disconnected. 69 * 70 * When the device is disconnected, the kernel calls the completion handler 71 * with an appropriate status code. The handler marks all buffers in the 72 * irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so 73 * that any process waiting on a buffer gets woken up. 74 * 75 * Waking up up the first buffer on the irq list is not enough, as the 76 * process waiting on the buffer might restart the dequeue operation 77 * immediately. 78 * 79 */ 80 81static void 82uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type) 83{ 84 mutex_init(&queue->mutex); 85 spin_lock_init(&queue->irqlock); 86 INIT_LIST_HEAD(&queue->mainqueue); 87 INIT_LIST_HEAD(&queue->irqqueue); 88 queue->type = type; 89} 90 91/* 92 * Free the video buffers. 93 * 94 * This function must be called with the queue lock held. 95 */ 96static int uvc_free_buffers(struct uvc_video_queue *queue) 97{ 98 unsigned int i; 99 100 for (i = 0; i < queue->count; ++i) { 101 if (queue->buffer[i].vma_use_count != 0) 102 return -EBUSY; 103 } 104 105 if (queue->count) { 106 vfree(queue->mem); 107 queue->count = 0; 108 } 109 110 return 0; 111} 112 113/* 114 * Allocate the video buffers. 
115 * 116 * Pages are reserved to make sure they will not be swapped, as they will be 117 * filled in the URB completion handler. 118 * 119 * Buffers will be individually mapped, so they must all be page aligned. 120 */ 121static int 122uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers, 123 unsigned int buflength) 124{ 125 unsigned int bufsize = PAGE_ALIGN(buflength); 126 unsigned int i; 127 void *mem = NULL; 128 int ret; 129 130 if (nbuffers > UVC_MAX_VIDEO_BUFFERS) 131 nbuffers = UVC_MAX_VIDEO_BUFFERS; 132 133 mutex_lock(&queue->mutex); 134 135 if ((ret = uvc_free_buffers(queue)) < 0) 136 goto done; 137 138 /* Bail out if no buffers should be allocated. */ 139 if (nbuffers == 0) 140 goto done; 141 142 /* Decrement the number of buffers until allocation succeeds. */ 143 for (; nbuffers > 0; --nbuffers) { 144 mem = vmalloc_32(nbuffers * bufsize); 145 if (mem != NULL) 146 break; 147 } 148 149 if (mem == NULL) { 150 ret = -ENOMEM; 151 goto done; 152 } 153 154 for (i = 0; i < nbuffers; ++i) { 155 memset(&queue->buffer[i], 0, sizeof queue->buffer[i]); 156 queue->buffer[i].buf.index = i; 157 queue->buffer[i].buf.m.offset = i * bufsize; 158 queue->buffer[i].buf.length = buflength; 159 queue->buffer[i].buf.type = queue->type; 160 queue->buffer[i].buf.sequence = 0; 161 queue->buffer[i].buf.field = V4L2_FIELD_NONE; 162 queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP; 163 queue->buffer[i].buf.flags = 0; 164 init_waitqueue_head(&queue->buffer[i].wait); 165 } 166 167 queue->mem = mem; 168 queue->count = nbuffers; 169 queue->buf_size = bufsize; 170 ret = nbuffers; 171 172done: 173 mutex_unlock(&queue->mutex); 174 return ret; 175} 176 177static void __uvc_query_buffer(struct uvc_buffer *buf, 178 struct v4l2_buffer *v4l2_buf) 179{ 180 memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf); 181 182 if (buf->vma_use_count) 183 v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED; 184 185 switch (buf->state) { 186 case UVC_BUF_STATE_ERROR: 187 case UVC_BUF_STATE_DONE: 188 
v4l2_buf->flags |= V4L2_BUF_FLAG_DONE; 189 break; 190 case UVC_BUF_STATE_QUEUED: 191 case UVC_BUF_STATE_ACTIVE: 192 v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED; 193 break; 194 case UVC_BUF_STATE_IDLE: 195 default: 196 break; 197 } 198} 199 200static int 201uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf) 202{ 203 int ret = 0; 204 205 mutex_lock(&queue->mutex); 206 if (v4l2_buf->index >= queue->count) { 207 ret = -EINVAL; 208 goto done; 209 } 210 211 __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf); 212 213done: 214 mutex_unlock(&queue->mutex); 215 return ret; 216} 217 218/* 219 * Queue a video buffer. Attempting to queue a buffer that has already been 220 * queued will return -EINVAL. 221 */ 222static int 223uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf) 224{ 225 struct uvc_buffer *buf; 226 unsigned long flags; 227 int ret = 0; 228 229 uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index); 230 231 if (v4l2_buf->type != queue->type || 232 v4l2_buf->memory != V4L2_MEMORY_MMAP) { 233 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) " 234 "and/or memory (%u).\n", v4l2_buf->type, 235 v4l2_buf->memory); 236 return -EINVAL; 237 } 238 239 mutex_lock(&queue->mutex); 240 if (v4l2_buf->index >= queue->count) { 241 uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n"); 242 ret = -EINVAL; 243 goto done; 244 } 245 246 buf = &queue->buffer[v4l2_buf->index]; 247 if (buf->state != UVC_BUF_STATE_IDLE) { 248 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state " 249 "(%u).\n", buf->state); 250 ret = -EINVAL; 251 goto done; 252 } 253 254 if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && 255 v4l2_buf->bytesused > buf->buf.length) { 256 uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); 257 ret = -EINVAL; 258 goto done; 259 } 260 261 if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 262 buf->buf.bytesused = 0; 263 else 264 buf->buf.bytesused = v4l2_buf->bytesused; 265 
266 spin_lock_irqsave(&queue->irqlock, flags); 267 if (queue->flags & UVC_QUEUE_DISCONNECTED) { 268 spin_unlock_irqrestore(&queue->irqlock, flags); 269 ret = -ENODEV; 270 goto done; 271 } 272 buf->state = UVC_BUF_STATE_QUEUED; 273 274 ret = (queue->flags & UVC_QUEUE_PAUSED) != 0; 275 queue->flags &= ~UVC_QUEUE_PAUSED; 276 277 list_add_tail(&buf->stream, &queue->mainqueue); 278 list_add_tail(&buf->queue, &queue->irqqueue); 279 spin_unlock_irqrestore(&queue->irqlock, flags); 280 281done: 282 mutex_unlock(&queue->mutex); 283 return ret; 284} 285 286static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking) 287{ 288 if (nonblocking) { 289 return (buf->state != UVC_BUF_STATE_QUEUED && 290 buf->state != UVC_BUF_STATE_ACTIVE) 291 ? 0 : -EAGAIN; 292 } 293 294 return wait_event_interruptible(buf->wait, 295 buf->state != UVC_BUF_STATE_QUEUED && 296 buf->state != UVC_BUF_STATE_ACTIVE); 297} 298 299/* 300 * Dequeue a video buffer. If nonblocking is false, block until a buffer is 301 * available. 
302 */ 303static int 304uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf, 305 int nonblocking) 306{ 307 struct uvc_buffer *buf; 308 int ret = 0; 309 310 if (v4l2_buf->type != queue->type || 311 v4l2_buf->memory != V4L2_MEMORY_MMAP) { 312 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) " 313 "and/or memory (%u).\n", v4l2_buf->type, 314 v4l2_buf->memory); 315 return -EINVAL; 316 } 317 318 mutex_lock(&queue->mutex); 319 if (list_empty(&queue->mainqueue)) { 320 uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n"); 321 ret = -EINVAL; 322 goto done; 323 } 324 325 buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream); 326 if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0) 327 goto done; 328 329 uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n", 330 buf->buf.index, buf->state, buf->buf.bytesused); 331 332 switch (buf->state) { 333 case UVC_BUF_STATE_ERROR: 334 uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data " 335 "(transmission error).\n"); 336 ret = -EIO; 337 case UVC_BUF_STATE_DONE: 338 buf->state = UVC_BUF_STATE_IDLE; 339 break; 340 341 case UVC_BUF_STATE_IDLE: 342 case UVC_BUF_STATE_QUEUED: 343 case UVC_BUF_STATE_ACTIVE: 344 default: 345 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u " 346 "(driver bug?).\n", buf->state); 347 ret = -EINVAL; 348 goto done; 349 } 350 351 list_del(&buf->stream); 352 __uvc_query_buffer(buf, v4l2_buf); 353 354done: 355 mutex_unlock(&queue->mutex); 356 return ret; 357} 358 359/* 360 * Poll the video queue. 361 * 362 * This function implements video queue polling and is intended to be used by 363 * the device poll handler. 
364 */ 365static unsigned int 366uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, 367 poll_table *wait) 368{ 369 struct uvc_buffer *buf; 370 unsigned int mask = 0; 371 372 mutex_lock(&queue->mutex); 373 if (list_empty(&queue->mainqueue)) 374 goto done; 375 376 buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream); 377 378 poll_wait(file, &buf->wait, wait); 379 if (buf->state == UVC_BUF_STATE_DONE || 380 buf->state == UVC_BUF_STATE_ERROR) 381 mask |= POLLOUT | POLLWRNORM; 382 383done: 384 mutex_unlock(&queue->mutex); 385 return mask; 386} 387 388/* 389 * VMA operations. 390 */ 391static void uvc_vm_open(struct vm_area_struct *vma) 392{ 393 struct uvc_buffer *buffer = vma->vm_private_data; 394 buffer->vma_use_count++; 395} 396 397static void uvc_vm_close(struct vm_area_struct *vma) 398{ 399 struct uvc_buffer *buffer = vma->vm_private_data; 400 buffer->vma_use_count--; 401} 402 403static struct vm_operations_struct uvc_vm_ops = { 404 .open = uvc_vm_open, 405 .close = uvc_vm_close, 406}; 407 408/* 409 * Memory-map a buffer. 410 * 411 * This function implements video buffer memory mapping and is intended to be 412 * used by the device mmap handler. 413 */ 414static int 415uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) 416{ 417 struct uvc_buffer *uninitialized_var(buffer); 418 struct page *page; 419 unsigned long addr, start, size; 420 unsigned int i; 421 int ret = 0; 422 423 start = vma->vm_start; 424 size = vma->vm_end - vma->vm_start; 425 426 mutex_lock(&queue->mutex); 427 428 for (i = 0; i < queue->count; ++i) { 429 buffer = &queue->buffer[i]; 430 if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) 431 break; 432 } 433 434 if (i == queue->count || size != queue->buf_size) { 435 ret = -EINVAL; 436 goto done; 437 } 438 439 /* 440 * VM_IO marks the area as being an mmaped region for I/O to a 441 * device. It also prevents the region from being core dumped. 
442 */ 443 vma->vm_flags |= VM_IO; 444 445 addr = (unsigned long)queue->mem + buffer->buf.m.offset; 446 while (size > 0) { 447 page = vmalloc_to_page((void *)addr); 448 if ((ret = vm_insert_page(vma, start, page)) < 0) 449 goto done; 450 451 start += PAGE_SIZE; 452 addr += PAGE_SIZE; 453 size -= PAGE_SIZE; 454 } 455 456 vma->vm_ops = &uvc_vm_ops; 457 vma->vm_private_data = buffer; 458 uvc_vm_open(vma); 459 460done: 461 mutex_unlock(&queue->mutex); 462 return ret; 463} 464 465/* 466 * Cancel the video buffers queue. 467 * 468 * Cancelling the queue marks all buffers on the irq queue as erroneous, 469 * wakes them up and removes them from the queue. 470 * 471 * If the disconnect parameter is set, further calls to uvc_queue_buffer will 472 * fail with -ENODEV. 473 * 474 * This function acquires the irq spinlock and can be called from interrupt 475 * context. 476 */ 477static void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect) 478{ 479 struct uvc_buffer *buf; 480 unsigned long flags; 481 482 spin_lock_irqsave(&queue->irqlock, flags); 483 while (!list_empty(&queue->irqqueue)) { 484 buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, 485 queue); 486 list_del(&buf->queue); 487 buf->state = UVC_BUF_STATE_ERROR; 488 wake_up(&buf->wait); 489 } 490 /* This must be protected by the irqlock spinlock to avoid race 491 * conditions between uvc_queue_buffer and the disconnection event that 492 * could result in an interruptible wait in uvc_dequeue_buffer. Do not 493 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED 494 * state outside the queue code. 495 */ 496 if (disconnect) 497 queue->flags |= UVC_QUEUE_DISCONNECTED; 498 spin_unlock_irqrestore(&queue->irqlock, flags); 499} 500 501/* 502 * Enable or disable the video buffers queue. 503 * 504 * The queue must be enabled before starting video acquisition and must be 505 * disabled after stopping it. 
This ensures that the video buffers queue 506 * state can be properly initialized before buffers are accessed from the 507 * interrupt handler. 508 * 509 * Enabling the video queue initializes parameters (such as sequence number, 510 * sync pattern, ...). If the queue is already enabled, return -EBUSY. 511 * 512 * Disabling the video queue cancels the queue and removes all buffers from 513 * the main queue. 514 * 515 * This function can't be called from interrupt context. Use 516 * uvc_queue_cancel() instead. 517 */ 518static int uvc_queue_enable(struct uvc_video_queue *queue, int enable) 519{ 520 unsigned int i; 521 int ret = 0; 522 523 mutex_lock(&queue->mutex); 524 if (enable) { 525 if (uvc_queue_streaming(queue)) { 526 ret = -EBUSY; 527 goto done; 528 } 529 queue->sequence = 0; 530 queue->flags |= UVC_QUEUE_STREAMING; 531 queue->buf_used = 0; 532 } else { 533 uvc_queue_cancel(queue, 0); 534 INIT_LIST_HEAD(&queue->mainqueue); 535 536 for (i = 0; i < queue->count; ++i) 537 queue->buffer[i].state = UVC_BUF_STATE_IDLE; 538 539 queue->flags &= ~UVC_QUEUE_STREAMING; 540 } 541 542done: 543 mutex_unlock(&queue->mutex); 544 return ret; 545} 546 547static struct uvc_buffer * 548uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) 549{ 550 struct uvc_buffer *nextbuf; 551 unsigned long flags; 552 553 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && 554 buf->buf.length != buf->buf.bytesused) { 555 buf->state = UVC_BUF_STATE_QUEUED; 556 buf->buf.bytesused = 0; 557 return buf; 558 } 559 560 spin_lock_irqsave(&queue->irqlock, flags); 561 list_del(&buf->queue); 562 if (!list_empty(&queue->irqqueue)) 563 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, 564 queue); 565 else 566 nextbuf = NULL; 567 spin_unlock_irqrestore(&queue->irqlock, flags); 568 569 buf->buf.sequence = queue->sequence++; 570 do_gettimeofday(&buf->buf.timestamp); 571 572 wake_up(&buf->wait); 573 return nextbuf; 574} 575 576static struct uvc_buffer *uvc_queue_head(struct 
uvc_video_queue *queue) 577{ 578 struct uvc_buffer *buf = NULL; 579 580 if (!list_empty(&queue->irqqueue)) 581 buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, 582 queue); 583 else 584 queue->flags |= UVC_QUEUE_PAUSED; 585 586 return buf; 587} 588