Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/base/dma-buf.c at v3.12-rc7 (740 lines, 19 kB)

/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 * @priv: [in] Attach private data of allocator to this buffer
 * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
 * @size: [in] Size of the buffer
 * @flags: [in] mode flags for the file.
 * @exp_name: [in] name of the exporting module - useful for debugging.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 *
 */
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags, const char *exp_name)
{
	struct dma_buf *dmabuf;
	struct file *file;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;
	dmabuf->exp_name = exp_name;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export_named);


/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf: [in] pointer to dma_buf for which fd is required.
 * @flags: [in] flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

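/*
 * Example (a minimal sketch, not a definitive recipe): an exporter wraps its
 * backing storage and hands a buffer to userspace. "my_ops" and "my_buf" are
 * hypothetical; dma_buf_export_named() above insists that the ops supply at
 * least map_dma_buf, unmap_dma_buf, release, kmap_atomic, kmap and mmap.
 *
 *	struct dma_buf *buf;
 *	int fd;
 *
 *	buf = dma_buf_export_named(my_buf, &my_ops, my_buf->size,
 *				   O_RDWR | O_CLOEXEC, "my_exporter");
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *
 *	fd = dma_buf_fd(buf, O_CLOEXEC);	// fd to pass to userspace
 *	if (fd < 0) {
 *		dma_buf_put(buf);		// drop the file's reference
 *		return fd;
 *	}
 */
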
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd: [in] fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf: [in] buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

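/*
 * Example (sketch): an importer resolving a file descriptor it received from
 * userspace; "fd" is hypothetical. The reference taken by dma_buf_get() must
 * be balanced by dma_buf_put() once the importer is done with the buffer:
 *
 *	struct dma_buf *buf = dma_buf_get(fd);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ... use the buffer ...
 *	dma_buf_put(buf);
 */
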
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf: [in] buffer to attach device to.
 * @dev: [in] device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return negative
 * error codes.
 *
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf: [in] buffer to detach from.
 * @attach: [in] attachment to be detached; is free'd after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach: [in] attachment whose scatterlist is to be returned
 * @direction: [in] direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; may return NULL
 * or ERR_PTR.
 *
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach: [in] attachment to unmap buffer from
 * @sg_table: [in] scatterlist info of the buffer to unmap
 * @direction: [in] direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

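/*
 * Example (sketch): the typical importer sequence for device DMA, assuming a
 * "buf" obtained via dma_buf_get() and a hypothetical "my_dev":
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(buf, my_dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR_OR_NULL(sgt)) {	// per above, NULL and ERR_PTR both possible
 *		dma_buf_detach(buf, attach);
 *		return sgt ? PTR_ERR(sgt) : -ENOMEM;
 *	}
 *	// ... program the device using the returned sg_table ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, attach);
 */
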
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf: [in] buffer to prepare cpu access for.
 * @start: [in] start of range for cpu access.
 * @len: [in] length of range for cpu access.
 * @direction: [in] direction of cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf: [in] buffer to complete cpu access for.
 * @start: [in] start of range for cpu access.
 * @len: [in] length of range for cpu access.
 * @direction: [in] direction of cpu access.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf: [in] buffer to map page from.
 * @page_num: [in] page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf: [in] buffer to unmap page from.
 * @page_num: [in] page in PAGE_SIZE units to unmap.
 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf: [in] buffer to map page from.
 * @page_num: [in] page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf: [in] buffer to unmap page from.
 * @page_num: [in] page in PAGE_SIZE units to unmap.
 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

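/*
 * Example (sketch): bracketed CPU access to the first page of a buffer,
 * assuming "buf" as above. begin/end must pair up, and the kmap'ed access
 * must stay within the prepared range:
 *
 *	void *vaddr;
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(buf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = dma_buf_kmap(buf, 0);	// page 0; must succeed per above
 *	// ... read up to PAGE_SIZE bytes at vaddr ...
 *	dma_buf_kunmap(buf, 0, vaddr);
 *
 *	dma_buf_end_cpu_access(buf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 */
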
/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf: [in] buffer that should back the vma
 * @vma: [in] vma for the mmap
 * @pgoff: [in] offset in pages where this mmap should start within the
 *		dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

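/*
 * Example (sketch): an importing driver can hand an imported buffer out to
 * userspace by forwarding its own fops->mmap here. "my_file_to_dmabuf()" is
 * a hypothetical lookup helper:
 *
 *	static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dma_buf *buf = my_file_to_dmabuf(file);
 *
 *		return dma_buf_mmap(buf, vma, 0);	// map from page 0
 *	}
 */
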
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf: [in] buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (IS_ERR_OR_NULL(ptr))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf: [in] buffer to vunmap
 * @vaddr: [in] vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

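/*
 * Example (sketch): a long-lived linear kernel mapping, assuming "buf" as
 * above. Note that dma_buf_vmap() returns NULL both when the exporter has no
 * vmap op and on some failures, and may also propagate an ERR_PTR from the
 * exporter, so callers should check with IS_ERR_OR_NULL() and keep a
 * kmap-based fallback:
 *
 *	void *vaddr = dma_buf_vmap(buf);
 *
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		// ... access up to buf->size bytes at vaddr ...
 *		dma_buf_vunmap(buf, vaddr);
 *	}
 */
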
#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_printf(s, "\nDma-buf Objects:\n");
	seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_printf(s,
				   "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "\t");

		seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
			   buf_obj->exp_name, buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   (long)(buf_obj->file->f_count.counter));

		seq_printf(s, "\t\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t\t");

			seq_printf(s, "%s\n", attach_obj->dev->init_name);
			attach_count++;
		}

		seq_printf(s, "\n\t\tTotal %d devices attached\n",
			   attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
	/* the stored callback returns int, so keep the pointer type honest */
	int (*func)(struct seq_file *) = s->private;

	return func(s);
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
	.open		= dma_buf_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	int err = 0;

	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(dma_buf_debugfs_dir)) {
		err = PTR_ERR(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		return err;
	}

	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);

	if (err)
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
				int (*write)(struct seq_file *))
{
	struct dentry *d;

	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
				write, &dma_buf_debug_fops);

	return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);
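
/*
 * With CONFIG_DEBUG_FS enabled (and debugfs mounted at the usual
 * /sys/kernel/debug), the accounting produced by dma_buf_describe() above
 * can be read from userspace:
 *
 *	# cat /sys/kernel/debug/dma_buf/bufinfo
 *
 * It lists every live dma_buf with its exporter name, size, file flags and
 * reference count, plus the devices currently attached to it.
 */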