// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The DRM GEM/CMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * Despite the name, the DRM GEM/CMA helpers are not hardwired to use the
 * Contiguous Memory Allocator (CMA).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator
 * and may be scattered through physical memory. However, they are
 * contiguous in the IOVA space and so appear contiguous to the devices
 * using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
 * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
 */
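
/*
 * A minimal sketch of how a driver typically wires these helpers up, assuming
 * the DEFINE_DRM_GEM_CMA_FOPS() and DRM_GEM_CMA_DRIVER_OPS convenience macros
 * from <drm/drm_gem_cma_helper.h>; the foo_* names are hypothetical:
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_drv.h>
 *	#include <drm/drm_gem_cma_helper.h>
 *
 *	DEFINE_DRM_GEM_CMA_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.fops = &foo_fops,
 *		// Sets .dumb_create and the PRIME import/export
 *		// callbacks to the CMA helpers in this file.
 *		DRM_GEM_CMA_DRIVER_OPS,
 *		.name = "foo",
 *		.desc = "hypothetical example driver",
 *	};
 */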

static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_object_free,
	.print_info = drm_gem_cma_object_print_info,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = drm_gem_cma_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		cma_obj = to_drm_gem_cma_obj(gem_obj);
	} else {
		cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
		if (!cma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &cma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_cma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		cma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access memory
 * through an IOMMU, the allocated memory is not expected to be physically
 * contiguous, because contiguous IOVAs are sufficient to meet a device's
 * DMA requirements.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size, false);
	if (IS_ERR(cma_obj))
		return cma_obj;

	if (cma_obj->map_noncoherent) {
		cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &cma_obj->paddr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!cma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
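
/*
 * A minimal sketch of allocating and releasing a buffer with this helper from
 * driver code; only functions defined in this file are used:
 *
 * .. code-block:: c
 *
 *	struct drm_gem_cma_object *cma_obj;
 *
 *	cma_obj = drm_gem_cma_create(drm, size);
 *	if (IS_ERR(cma_obj))
 *		return PTR_ERR(cma_obj);
 *
 *	// cma_obj->vaddr is the kernel mapping; cma_obj->paddr is the
 *	// DMA address to program into the device.
 *
 *	// Drop the reference when done; the backing store is released
 *	// through the .free callback (drm_gem_cma_object_free()).
 *	drm_gem_object_put(&cma_obj->base);
 */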

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 * return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects
 * associated with the given file and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_cma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle carries that ID, which is what userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}

/**
 * drm_gem_cma_free - free resources associated with a CMA GEM object
 * @cma_obj: CMA GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *gem_obj = &cma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		if (cma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
					     cma_obj->vaddr, cma_obj->paddr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
				    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
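
/*
 * A sketch of how a driver with stricter pitch requirements might wrap this
 * helper, assuming (hypothetically) hardware that wants pitches aligned to
 * 64 bytes; the foo_ name is not part of this file:
 *
 * .. code-block:: c
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *drm,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *
 *		// Enforce the hardware alignment before the generic
 *		// helper applies its own minimum pitch and size checks.
 *		args->pitch = roundup(min_pitch, 64);
 *
 *		return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *	}
 */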

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
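
/*
 * For reference, a userspace sketch of the ioctl flow this callback serves,
 * using the standard UAPI structures from <drm/drm.h>; fd is assumed to be an
 * open DRM device node:
 *
 * .. code-block:: c
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024,
 *		.height = 768,
 *		.bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	// The kernel fills in create.handle, create.pitch and create.size.
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */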

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer. It's intended to be used as a direct handler for the
 * struct &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * The mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
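
/*
 * A sketch of wiring this handler into a driver's &file_operations on a noMMU
 * platform; the DEFINE_DRM_GEM_CMA_FOPS() macro from
 * <drm/drm_gem_cma_helper.h> is assumed to set this up automatically, so
 * open-coding it (with a hypothetical foo_fops) is rarely necessary:
 *
 * .. code-block:: c
 *
 *	static const struct file_operations foo_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.poll = drm_poll,
 *		.read = drm_read,
 *		.llseek = noop_llseek,
 *		.mmap = drm_gem_mmap,
 *	#ifndef CONFIG_MMU
 *		.get_unmapped_area = drm_gem_cma_get_unmapped_area,
 *	#endif
 *	};
 */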

/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @cma_obj: CMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);

/**
 * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
 * pages for a CMA GEM object
 * @cma_obj: CMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *obj = &cma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 * driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
 * address space
 * @cma_obj: CMA GEM object
 * @map: Returns the kernel virtual address of the CMA GEM object's backing
 * store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the CMA buffers are already mapped into the kernel virtual address
 * space this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, cma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);

/**
 * drm_gem_cma_mmap - memory-map an exported CMA GEM object
 * @cma_obj: CMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &cma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_DONTEXPAND;

	if (cma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(cma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(cma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
				  cma_obj->paddr, vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 * scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a CMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(attach->dmabuf, &map);
	if (ret) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap(attach->dmabuf, &map);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
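
/*
 * A sketch of selecting this import path through the &DRM_GEM_CMA_DRIVER_OPS_VMAP
 * macro mentioned above, for a hypothetical driver that needs imported buffers
 * vmap'ed up front:
 *
 * .. code-block:: c
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		// Like DRM_GEM_CMA_DRIVER_OPS, but wires
 *		// .gem_prime_import_sg_table to
 *		// drm_gem_cma_prime_import_sg_table_vmap().
 *		DRM_GEM_CMA_DRIVER_OPS_VMAP,
 *	};
 */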

MODULE_DESCRIPTION("DRM CMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");