/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
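
/*
 * For orientation, a minimal sketch of the userspace side of this flow using
 * the generic PRIME ioctls from <drm/drm.h> (not amdgpu specific; the fd and
 * handle variables are illustrative):
 *
 *	struct drm_prime_handle args = {
 *		.handle = bo_handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	// export: GEM handle -> shareable DMA-buf file descriptor
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 *	// import: DMA-buf file descriptor -> GEM handle on another device
 *	struct drm_prime_handle imp = { .fd = args.fd };
 *	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &imp);
 */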

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
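
/*
 * Rough userspace counterpart of the mapping path above, assuming the BO was
 * already exported as dmabuf_fd (only uapi from <linux/dma-buf.h> and
 * <sys/mman.h> is used; variable names are illustrative):
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// begin CPU access
 *	// ... CPU reads/writes through ptr ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// end CPU access
 */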

static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
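
/*
 * Purely illustrative: once all fences are folded into the exclusive slot as
 * above, an importer that has to wait for pending work only needs to wait on
 * that single exclusive fence, e.g. (sketch against the dma_resv helpers used
 * in this file; not part of the driver):
 *
 *	long t = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 *					   false, true,
 *					   MAX_SCHEDULE_TIMEOUT);
 *	if (t <= 0)
 *		return t ? t : -ETIME;
 */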

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r) {
		amdgpu_bo_unreserve(bo);
		return r;
	}

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;
}

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}
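
/*
 * Sketch of the importer-side calls that land in the two callbacks above
 * (dynamic importers call these with the reservation lock held; names are
 * illustrative):
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);	// -> amdgpu_dma_buf_pin()
 *	...
 *	dma_buf_unpin(attach);		// -> amdgpu_dma_buf_unpin()
 *	dma_resv_unlock(dmabuf->resv);
 */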

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * If the BO is not pinned it is moved to the GTT domain (or to VRAM for
 * peer-to-peer capable importers), and an sg_table with the resulting DMA
 * addresses is built for the attaching device.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}
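
/*
 * For reference, a dynamic importer ends up driving the callback above
 * roughly like this (sketch only, error handling trimmed; the reservation
 * lock must be held around mapping for dynamic attachments):
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dmabuf->resv);
 */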

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. It tears down the DMA mapping and frees the sg_table that
 * was created by amdgpu_dma_buf_map().
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (sgt->sgl->page_link) {
		/* system memory mapping created from the BO's pages */
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		/* VRAM mapping created by amdgpu_vram_mgr_alloc_sgt() */
		amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
	}
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj;
	int ret;

	dma_resv_lock(resv, NULL);
	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU,
				       0, ttm_bo_type_sg, resv, &gobj);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables,
			 * so we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic and we actually need
			 * to allow page table updates without holding the
			 * lock.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}
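
/*
 * Note: this callback is not called directly by amdgpu; the exporting driver
 * invokes it through the DMA-buf core when the backing storage moves, roughly
 * as follows (sketch, with the exporter holding the reservation lock):
 *
 *	dma_resv_lock(resv, NULL);
 *	// ... evict or move the exported BO ...
 *	dma_buf_move_notify(dmabuf);	// fans out to each attachment's
 *					// importer_ops->move_notify()
 *	dma_resv_unlock(resv);
 */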

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the dmabuf's f_count.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - check if XGMI is available for a P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the DMA-buf is accessible over XGMI, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non-AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}