Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
media-request.c at v4.20-rc7, 504 lines, 13 kB
// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

	poll_wait(filp, &req->poll_wait, wait);

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}

static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct file *filp;
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EACCES);

	filp = fget(request_fd);
	if (!filp)
		goto err_no_req_fd;

	if (filp->f_op != &request_fops)
		goto err_fput;
	req = filp->private_data;
	if (req->mdev != mdev)
		goto err_fput;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fget() above ensures that
	 * even if userspace closes the request filehandle, the release()
	 * fop won't be called, so the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);
	fput(filp);

	return req;

err_fput:
	fput(filp);

err_no_req_fd:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_fput;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_fput:
	fput(filp);

err_put_fd:
	put_unused_fd(fd);

	return ret;
}

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EACCES;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
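
/*
 * Usage sketch (a separate userspace program, not part of media-request.c
 * above): a minimal walk through the request fd lifecycle that this file
 * implements. It assumes a media device node at /dev/media0 (hypothetical
 * path) whose driver supports the Request API; <linux/media.h> provides
 * MEDIA_IOC_REQUEST_ALLOC, MEDIA_REQUEST_IOC_QUEUE and
 * MEDIA_REQUEST_IOC_REINIT. Error handling is kept minimal.
 */
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(void)
{
	int media_fd = open("/dev/media0", O_RDWR);	/* hypothetical node */
	int req_fd;

	if (media_fd < 0)
		return 1;

	/* media_request_alloc() runs in the kernel and installs a new fd */
	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
		return 1;

	/*
	 * At this point the application would normally associate controls
	 * (VIDIOC_S_EXT_CTRLS with V4L2_CTRL_WHICH_REQUEST_VAL) and buffers
	 * (VIDIOC_QBUF with V4L2_BUF_FLAG_REQUEST_FD) with req_fd.
	 */

	/* IDLE -> VALIDATING -> QUEUED, via media_request_ioctl_queue() */
	if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE))
		return 1;

	/* poll() reports POLLPRI once the request reaches COMPLETE */
	struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };
	poll(&pfd, 1, -1);

	/* COMPLETE -> CLEANING -> IDLE, so the same fd can be reused */
	ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);

	close(req_fd);	/* final media_request_put() via the release() fop */
	close(media_fd);
	return 0;
}

/*
 * Driver-side wiring (also a sketch): a driver opts in to requests by
 * filling in req_validate and req_queue in its struct media_device_ops;
 * for videobuf2-based drivers these are typically the vb2_request_validate()
 * and vb2_request_queue() helpers. req_alloc and req_free are optional but
 * must be set as a pair, as the WARN_ON in media_request_alloc() enforces.
 * The ops-struct name below is hypothetical.
 */
static const struct media_device_ops my_mdev_ops = {
	.req_validate	= vb2_request_validate,
	.req_queue	= vb2_request_queue,
};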