Source: drivers/base/dma-buf.c from the Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git), at tag v3.4 — 408 lines, 11 kB. Topics: kernel, os, linux.
1/* 2 * Framework for buffer objects that can be shared across devices/subsystems. 3 * 4 * Copyright(C) 2011 Linaro Limited. All rights reserved. 5 * Author: Sumit Semwal <sumit.semwal@ti.com> 6 * 7 * Many thanks to linaro-mm-sig list, and specially 8 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and 9 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and 10 * refining of this idea. 11 * 12 * This program is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU General Public License version 2 as published by 14 * the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but WITHOUT 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 19 * more details. 20 * 21 * You should have received a copy of the GNU General Public License along with 22 * this program. If not, see <http://www.gnu.org/licenses/>. 23 */ 24 25#include <linux/fs.h> 26#include <linux/slab.h> 27#include <linux/dma-buf.h> 28#include <linux/anon_inodes.h> 29#include <linux/export.h> 30 31static inline int is_dma_buf_file(struct file *); 32 33static int dma_buf_release(struct inode *inode, struct file *file) 34{ 35 struct dma_buf *dmabuf; 36 37 if (!is_dma_buf_file(file)) 38 return -EINVAL; 39 40 dmabuf = file->private_data; 41 42 dmabuf->ops->release(dmabuf); 43 kfree(dmabuf); 44 return 0; 45} 46 47static const struct file_operations dma_buf_fops = { 48 .release = dma_buf_release, 49}; 50 51/* 52 * is_dma_buf_file - Check if struct file* is associated with dma_buf 53 */ 54static inline int is_dma_buf_file(struct file *file) 55{ 56 return file->f_op == &dma_buf_fops; 57} 58 59/** 60 * dma_buf_export - Creates a new dma_buf, and associates an anon file 61 * with this buffer, so it can be exported. 62 * Also connect the allocator specific data and ops to the buffer. 
63 * 64 * @priv: [in] Attach private data of allocator to this buffer 65 * @ops: [in] Attach allocator-defined dma buf ops to the new buffer. 66 * @size: [in] Size of the buffer 67 * @flags: [in] mode flags for the file. 68 * 69 * Returns, on success, a newly created dma_buf object, which wraps the 70 * supplied private data and operations for dma_buf_ops. On either missing 71 * ops, or error in allocating struct dma_buf, will return negative error. 72 * 73 */ 74struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops, 75 size_t size, int flags) 76{ 77 struct dma_buf *dmabuf; 78 struct file *file; 79 80 if (WARN_ON(!priv || !ops 81 || !ops->map_dma_buf 82 || !ops->unmap_dma_buf 83 || !ops->release 84 || !ops->kmap_atomic 85 || !ops->kmap)) { 86 return ERR_PTR(-EINVAL); 87 } 88 89 dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); 90 if (dmabuf == NULL) 91 return ERR_PTR(-ENOMEM); 92 93 dmabuf->priv = priv; 94 dmabuf->ops = ops; 95 dmabuf->size = size; 96 97 file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); 98 99 dmabuf->file = file; 100 101 mutex_init(&dmabuf->lock); 102 INIT_LIST_HEAD(&dmabuf->attachments); 103 104 return dmabuf; 105} 106EXPORT_SYMBOL_GPL(dma_buf_export); 107 108 109/** 110 * dma_buf_fd - returns a file descriptor for the given dma_buf 111 * @dmabuf: [in] pointer to dma_buf for which fd is required. 112 * @flags: [in] flags to give to fd 113 * 114 * On success, returns an associated 'fd'. Else, returns error. 
115 */ 116int dma_buf_fd(struct dma_buf *dmabuf, int flags) 117{ 118 int error, fd; 119 120 if (!dmabuf || !dmabuf->file) 121 return -EINVAL; 122 123 error = get_unused_fd_flags(flags); 124 if (error < 0) 125 return error; 126 fd = error; 127 128 fd_install(fd, dmabuf->file); 129 130 return fd; 131} 132EXPORT_SYMBOL_GPL(dma_buf_fd); 133 134/** 135 * dma_buf_get - returns the dma_buf structure related to an fd 136 * @fd: [in] fd associated with the dma_buf to be returned 137 * 138 * On success, returns the dma_buf structure associated with an fd; uses 139 * file's refcounting done by fget to increase refcount. returns ERR_PTR 140 * otherwise. 141 */ 142struct dma_buf *dma_buf_get(int fd) 143{ 144 struct file *file; 145 146 file = fget(fd); 147 148 if (!file) 149 return ERR_PTR(-EBADF); 150 151 if (!is_dma_buf_file(file)) { 152 fput(file); 153 return ERR_PTR(-EINVAL); 154 } 155 156 return file->private_data; 157} 158EXPORT_SYMBOL_GPL(dma_buf_get); 159 160/** 161 * dma_buf_put - decreases refcount of the buffer 162 * @dmabuf: [in] buffer to reduce refcount of 163 * 164 * Uses file's refcounting done implicitly by fput() 165 */ 166void dma_buf_put(struct dma_buf *dmabuf) 167{ 168 if (WARN_ON(!dmabuf || !dmabuf->file)) 169 return; 170 171 fput(dmabuf->file); 172} 173EXPORT_SYMBOL_GPL(dma_buf_put); 174 175/** 176 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, 177 * calls attach() of dma_buf_ops to allow device-specific attach functionality 178 * @dmabuf: [in] buffer to attach device to. 179 * @dev: [in] device to be attached. 180 * 181 * Returns struct dma_buf_attachment * for this attachment; may return negative 182 * error codes. 
183 * 184 */ 185struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, 186 struct device *dev) 187{ 188 struct dma_buf_attachment *attach; 189 int ret; 190 191 if (WARN_ON(!dmabuf || !dev)) 192 return ERR_PTR(-EINVAL); 193 194 attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL); 195 if (attach == NULL) 196 return ERR_PTR(-ENOMEM); 197 198 attach->dev = dev; 199 attach->dmabuf = dmabuf; 200 201 mutex_lock(&dmabuf->lock); 202 203 if (dmabuf->ops->attach) { 204 ret = dmabuf->ops->attach(dmabuf, dev, attach); 205 if (ret) 206 goto err_attach; 207 } 208 list_add(&attach->node, &dmabuf->attachments); 209 210 mutex_unlock(&dmabuf->lock); 211 return attach; 212 213err_attach: 214 kfree(attach); 215 mutex_unlock(&dmabuf->lock); 216 return ERR_PTR(ret); 217} 218EXPORT_SYMBOL_GPL(dma_buf_attach); 219 220/** 221 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list; 222 * optionally calls detach() of dma_buf_ops for device-specific detach 223 * @dmabuf: [in] buffer to detach from. 224 * @attach: [in] attachment to be detached; is free'd after this call. 225 * 226 */ 227void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) 228{ 229 if (WARN_ON(!dmabuf || !attach)) 230 return; 231 232 mutex_lock(&dmabuf->lock); 233 list_del(&attach->node); 234 if (dmabuf->ops->detach) 235 dmabuf->ops->detach(dmabuf, attach); 236 237 mutex_unlock(&dmabuf->lock); 238 kfree(attach); 239} 240EXPORT_SYMBOL_GPL(dma_buf_detach); 241 242/** 243 * dma_buf_map_attachment - Returns the scatterlist table of the attachment; 244 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the 245 * dma_buf_ops. 246 * @attach: [in] attachment whose scatterlist is to be returned 247 * @direction: [in] direction of DMA transfer 248 * 249 * Returns sg_table containing the scatterlist to be returned; may return NULL 250 * or ERR_PTR. 
251 * 252 */ 253struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, 254 enum dma_data_direction direction) 255{ 256 struct sg_table *sg_table = ERR_PTR(-EINVAL); 257 258 might_sleep(); 259 260 if (WARN_ON(!attach || !attach->dmabuf)) 261 return ERR_PTR(-EINVAL); 262 263 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); 264 265 return sg_table; 266} 267EXPORT_SYMBOL_GPL(dma_buf_map_attachment); 268 269/** 270 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might 271 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of 272 * dma_buf_ops. 273 * @attach: [in] attachment to unmap buffer from 274 * @sg_table: [in] scatterlist info of the buffer to unmap 275 * @direction: [in] direction of DMA transfer 276 * 277 */ 278void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, 279 struct sg_table *sg_table, 280 enum dma_data_direction direction) 281{ 282 if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) 283 return; 284 285 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, 286 direction); 287} 288EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); 289 290 291/** 292 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the 293 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific 294 * preparations. Coherency is only guaranteed in the specified range for the 295 * specified access direction. 296 * @dma_buf: [in] buffer to prepare cpu access for. 297 * @start: [in] start of range for cpu access. 298 * @len: [in] length of range for cpu access. 299 * @direction: [in] length of range for cpu access. 300 * 301 * Can return negative error values, returns 0 on success. 
302 */ 303int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, 304 enum dma_data_direction direction) 305{ 306 int ret = 0; 307 308 if (WARN_ON(!dmabuf)) 309 return -EINVAL; 310 311 if (dmabuf->ops->begin_cpu_access) 312 ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction); 313 314 return ret; 315} 316EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); 317 318/** 319 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the 320 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific 321 * actions. Coherency is only guaranteed in the specified range for the 322 * specified access direction. 323 * @dma_buf: [in] buffer to complete cpu access for. 324 * @start: [in] start of range for cpu access. 325 * @len: [in] length of range for cpu access. 326 * @direction: [in] length of range for cpu access. 327 * 328 * This call must always succeed. 329 */ 330void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, 331 enum dma_data_direction direction) 332{ 333 WARN_ON(!dmabuf); 334 335 if (dmabuf->ops->end_cpu_access) 336 dmabuf->ops->end_cpu_access(dmabuf, start, len, direction); 337} 338EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); 339 340/** 341 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address 342 * space. The same restrictions as for kmap_atomic and friends apply. 343 * @dma_buf: [in] buffer to map page from. 344 * @page_num: [in] page in PAGE_SIZE units to map. 345 * 346 * This call must always succeed, any necessary preparations that might fail 347 * need to be done in begin_cpu_access. 348 */ 349void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) 350{ 351 WARN_ON(!dmabuf); 352 353 return dmabuf->ops->kmap_atomic(dmabuf, page_num); 354} 355EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); 356 357/** 358 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic. 359 * @dma_buf: [in] buffer to unmap page from. 
360 * @page_num: [in] page in PAGE_SIZE units to unmap. 361 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic. 362 * 363 * This call must always succeed. 364 */ 365void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, 366 void *vaddr) 367{ 368 WARN_ON(!dmabuf); 369 370 if (dmabuf->ops->kunmap_atomic) 371 dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); 372} 373EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); 374 375/** 376 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The 377 * same restrictions as for kmap and friends apply. 378 * @dma_buf: [in] buffer to map page from. 379 * @page_num: [in] page in PAGE_SIZE units to map. 380 * 381 * This call must always succeed, any necessary preparations that might fail 382 * need to be done in begin_cpu_access. 383 */ 384void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) 385{ 386 WARN_ON(!dmabuf); 387 388 return dmabuf->ops->kmap(dmabuf, page_num); 389} 390EXPORT_SYMBOL_GPL(dma_buf_kmap); 391 392/** 393 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap. 394 * @dma_buf: [in] buffer to unmap page from. 395 * @page_num: [in] page in PAGE_SIZE units to unmap. 396 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap. 397 * 398 * This call must always succeed. 399 */ 400void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, 401 void *vaddr) 402{ 403 WARN_ON(!dmabuf); 404 405 if (dmabuf->ops->kunmap) 406 dmabuf->ops->kunmap(dmabuf, page_num, vaddr); 407} 408EXPORT_SYMBOL_GPL(dma_buf_kunmap);