/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 * &dma_buf_ops implementations for drivers are all individually exported for
 * drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the pair of lookup caches for import and export. These are required to
 * guarantee that any given object will always have only one unique userspace
 * handle. This is required to allow userspace to detect duplicated imports,
 * since some GEM drivers do fail command submissions if a given buffer object
 * is listed more than once. These import and export caches in
 * &drm_prime_file_private only retain a weak reference, which is cleaned up
 * when the corresponding object is released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get an fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return the underlying object from the
 * dma-buf private. For GEM-based drivers this is already handled in
 * drm_gem_prime_import().
 */
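
/*
 * Editor's sketch (not part of the original file): the self-import check
 * described above, as a driver with a custom &drm_driver.gem_prime_import
 * hook would open-code it. The "mydrv" names are hypothetical;
 * drm_gem_prime_import() already performs the equivalent check for GEM-based
 * drivers.
 *
 *	static struct drm_gem_object *
 *	mydrv_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
 *			struct drm_gem_object *obj = dma_buf->priv;
 *
 *			drm_gem_object_get(obj);
 *			return obj;
 *		}
 *
 *		return mydrv_import_foreign(dev, dma_buf);
 *	}
 */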

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

/*
 * Track a dma_buf/handle pair in both per-file lookup caches: one rbtree
 * keyed by dma_buf pointer, the other keyed by handle.
 */
int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
			     struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(member->dma_buf);
			kfree(member);
			break;
		} else if (member->handle < handle) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
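
/*
 * Editor's sketch (not part of the original file): a driver-specific export
 * hook built on drm_gem_dmabuf_export(), with drm_gem_dmabuf_release()
 * assumed as the .release callback of the hypothetical mydrv_dmabuf_ops.
 *
 *	static struct dma_buf *mydrv_gem_export(struct drm_gem_object *obj,
 *						int flags)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &mydrv_dmabuf_ops;
 *		exp_info.size = obj->size;
 *		exp_info.flags = flags;
 *		exp_info.priv = obj;
 *		exp_info.resv = obj->resv;
 *
 *		return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *	}
 */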

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* If the driver attached, we rely on the free-object path to
	 * detach, which seems OK.
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (dev->driver->prime_fd_to_handle) {
		return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
						       &args->handle);
	}

	return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 *
 * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it
 * has created, without attaching it to any file descriptors. The difference
 * between those two is similar to that between anon_inode_getfile() and
 * anon_inode_getfd(); insertion into the descriptor table is something you
 * cannot revert if any cleanup is needed, so the descriptor-returning
 * variants should only be used when you are past the last failure exit
 * and the only thing left is passing the new file descriptor to userland.
 * When all you need is the object itself or when you need to do something
 * else that might fail, use this one instead.
 */
struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
					       struct drm_file *file_priv, uint32_t handle,
					       uint32_t flags)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		dmabuf = ERR_PTR(-ENOENT);
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret) {
		dma_buf_put(dmabuf);
		dmabuf = ERR_PTR(ret);
	}
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);
	return dmabuf;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf);

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct dma_buf *dmabuf;
	int fd = get_unused_fd_flags(flags);

	if (fd < 0)
		return fd;

	dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags);
	if (IS_ERR(dmabuf)) {
		put_unused_fd(fd);
		return PTR_ERR(dmabuf);
	}

	fd_install(fd, dmabuf->file);
	*prime_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	if (dev->driver->prime_handle_to_fd) {
		return dev->driver->prime_handle_to_fd(dev, file_priv,
						       args->handle, args->flags,
						       &args->fd);
	}
	return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
					  args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
 * unimplemented, exports into another device are rejected.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that, similarly to the export helpers, this permanently pins the
 * underlying backing storage, which is OK for scanout but not the best
 * option for sharing lots of buffers for rendering.
 */
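
/*
 * Editor's sketch (not part of the original file): typical wiring of the
 * helpers described above in a GEM driver. All "mydrv" names are
 * hypothetical; the assigned helpers are the ones documented in this file.
 *
 *	static const struct drm_gem_object_funcs mydrv_gem_funcs = {
 *		.free = mydrv_gem_free,
 *		.export = drm_gem_prime_export,
 *		.pin = mydrv_gem_pin,
 *		.unpin = mydrv_gem_unpin,
 *		.get_sg_table = mydrv_gem_get_sg_table,
 *	};
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_import_sg_table = mydrv_gem_prime_import_sg_table,
 *	};
 */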

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	int ret;

	/*
	 * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
	 * that implement their own ->map_dma_buf() do not.
	 */
	if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
	    !obj->funcs->get_sg_table)
		return -ENOSYS;

	if (!obj->funcs->pin)
		return 0;

	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;
	ret = obj->funcs->pin(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	int ret;

	if (!obj->funcs->unpin)
		return;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ON(obj->dev, ret))
		return;
	obj->funcs->unpin(obj);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: the sg_table containing the scatterlist to be returned, or an
 * ERR_PTR on error. May return ERR_PTR(-EINTR) if it is interrupted by a
 * signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to look up the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						(unsigned long)nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sg);
		sg = ERR_PTR(err);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
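
/*
 * Editor's sketch (not part of the original file): a
 * &drm_gem_object_funcs.get_sg_table implementation for a driver that keeps
 * its backing pages in an array. The "mydrv" type, to_mydrv_gem() and the
 * pages field are assumptions.
 *
 *	static struct sg_table *mydrv_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct mydrv_gem_object *bo = to_mydrv_gem(obj);
 *
 *		return drm_prime_pages_to_sg(obj->dev, bo->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 */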

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
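
/*
 * Editor's sketch (not part of the original file): drivers that require
 * physically contiguous buffers typically use this helper in their
 * &drm_driver.gem_prime_import_sg_table hook to reject imports whose mapped
 * scatterlist is not contiguous in DMA address space:
 *
 *	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *		return ERR_PTR(-EINVAL);
 */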

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_is_prime_exported_dma_buf -
 * checks if the DMA-BUF was exported from a GEM object belonging to @dev.
 * @dev: drm_device to check against
 * @dma_buf: dma-buf object to import
 *
 * Return: true if the DMA-BUF was exported from a GEM object belonging
 * to @dev, false otherwise.
 */
bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
				       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
}
EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own gem increases
		 * the refcount on the gem itself instead of the f_count
		 * of the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_array - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of pages.
 *
 * This function is deprecated and its use is strongly discouraged.
 * The page array is only useful for page faults and those can corrupt fields
 * in the struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);

/**
 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of addresses.
 *
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
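
/*
 * Editor's sketch (not part of the original file): a
 * &drm_driver.gem_prime_import_sg_table hook that flattens the mapped table
 * into a per-page DMA address array. The "mydrv" names, mydrv_gem_alloc()
 * and the dma_addrs/sgt fields are assumptions.
 *
 *	struct drm_gem_object *
 *	mydrv_gem_prime_import_sg_table(struct drm_device *dev,
 *					struct dma_buf_attachment *attach,
 *					struct sg_table *sgt)
 *	{
 *		unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *		struct mydrv_gem_object *bo;
 *
 *		bo = mydrv_gem_alloc(dev, attach->dmabuf->size);
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *
 *		if (drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs, npages)) {
 *			mydrv_gem_free(&bo->base);
 *			return ERR_PTR(-EINVAL);
 *		}
 *
 *		bo->sgt = sgt;
 *		return &bo->base;
 *	}
 */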

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
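
/*
 * Editor's sketch (not part of the original file): a
 * &drm_gem_object_funcs.free hook for a driver using the import helpers
 * above, releasing the sg_table that was pinned at import time. The "mydrv"
 * names and the sgt field are assumptions.
 *
 *	static void mydrv_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct mydrv_gem_object *bo = to_mydrv_gem(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */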