/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf to attach to
 * @attach: attachment to add
 *
 * Add the attachment as a user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
                                 struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r;

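        /* If there is no usable PCIe P2P path between the exporting GPU and
         * the importing device, disable peer2peer so the buffer gets mapped
         * through system memory instead.
         */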
        if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
                attach->peer2peer = false;

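        /* Keep the GPU powered for as long as the attachment exists; the
         * matching pm_runtime_put_autosuspend() is in amdgpu_dma_buf_detach()
         * (or in the error path below).
         */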
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0)
                goto out;

        return 0;

out:
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        return r;
}

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf to remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int r;

        /* pin buffer into GTT */
        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r)
                return r;

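        /* Wait for any pending move of the BO to finish before peers start
         * accessing it through the pinned mapping.
         */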
        if (bo->tbo.moving) {
                r = dma_fence_wait(bo->tbo.moving, true);
                if (r) {
                        amdgpu_bo_unpin(bo);
                        return r;
                }
        }
        return 0;
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use, or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
                                           enum dma_data_direction dir)
{
        struct dma_buf *dma_buf = attach->dmabuf;
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct sg_table *sgt;
        long r;

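        /* An unpinned BO (dynamic attachment) may still have to be moved into
         * a domain the importer can address: GTT always works, VRAM only when
         * the attachment supports peer-to-peer access. A pinned BO must
         * already be resident in GTT.
         */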
        if (!bo->tbo.pin_count) {
                /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
                unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

                if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
                    attach->peer2peer) {
                        bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                        domains |= AMDGPU_GEM_DOMAIN_VRAM;
                }
                amdgpu_bo_placement_from_domain(bo, domains);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (r)
                        return ERR_PTR(r);

        } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
                     AMDGPU_GEM_DOMAIN_GTT)) {
                return ERR_PTR(-EBUSY);
        }

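        /* Build the sg_table for the BO's current placement: GTT mappings use
         * the CPU pages backing the TTM tt, while VRAM mappings are translated
         * into PCIe bus addresses by the VRAM manager.
         */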
        switch (bo->tbo.resource->mem_type) {
        case TTM_PL_TT:
                sgt = drm_prime_pages_to_sg(obj->dev,
                                            bo->tbo.ttm->pages,
                                            bo->tbo.ttm->num_pages);
                if (IS_ERR(sgt))
                        return sgt;

                if (dma_map_sgtable(attach->dev, sgt, dir,
                                    DMA_ATTR_SKIP_CPU_SYNC))
                        goto error_free;
                break;

        case TTM_PL_VRAM:
                r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
                                              bo->tbo.base.size, attach->dev,
                                              dir, &sgt);
                if (r)
                        return ERR_PTR(r);
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        return sgt;

error_free:
        sg_free_table(sgt);
        kfree(sgt);
        return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
                                 struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
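        /* sg_tables carrying struct page pointers came from the GTT path and
         * were mapped with dma_map_sgtable(); VRAM P2P tables have no pages
         * and are torn down by the VRAM manager instead.
         */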
        if (sgt->sgl->page_link) {
                dma_unmap_sgtable(attach->dev, sgt, dir, 0);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
        }
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                                           enum dma_data_direction direction)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { true, false };
        u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
        int ret;
        bool reads = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_FROM_DEVICE);

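        /* Only CPU reads benefit from migrating the buffer, and only do so
         * when GTT is an acceptable domain for this BO (e.g. the display
         * engine could still scan it out from GTT).
         */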
        if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
                return 0;

        /* move to gtt */
        ret = amdgpu_bo_reserve(bo, false);
        if (unlikely(ret != 0))
                return ret;

        if (!bo->tbo.pin_count &&
            (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }

        amdgpu_bo_unreserve(bo);
        return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_dma_buf_attach,
        .detach = amdgpu_dma_buf_detach,
        .pin = amdgpu_dma_buf_pin,
        .unpin = amdgpu_dma_buf_unpin,
        .map_dma_buf = amdgpu_dma_buf_map,
        .unmap_dma_buf = amdgpu_dma_buf_unmap,
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
                                        int flags)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct dma_buf *buf;

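        /* Userptr BOs and per-VM BOs (AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
         * must not be shared with other drivers.
         */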
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                return ERR_PTR(-EPERM);

        buf = drm_gem_prime_export(gobj, flags);
        if (!IS_ERR(buf))
                buf->ops = &amdgpu_dmabuf_ops;

        return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
        struct dma_resv *resv = dma_buf->resv;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint64_t flags = 0;
        int ret;

        dma_resv_lock(resv, NULL);

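        /* When the exporter is another amdgpu device, preserve its
         * write-combine (USWC) setting on the imported BO.
         */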
        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

                flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        }

        ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
                                       AMDGPU_GEM_DOMAIN_CPU, flags,
                                       ttm_bo_type_sg, resv, &gobj);
        if (ret)
                goto error;

        bo = gem_to_amdgpu_bo(gobj);
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

        dma_resv_unlock(resv);
        return gobj;

error:
        dma_resv_unlock(resv);
        return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->importer_priv;
        struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_placement placement = {};
        struct amdgpu_vm_bo_base *bo_base;
        int r;

        if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
                return;

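        /* Validating against an empty placement makes TTM drop the current
         * backing store, which invalidates the imported mapping.
         */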
        r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
        if (r) {
                DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
                return;
        }

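        /* Every VM that maps this BO now refers to stale addresses; update
         * its page tables where the locking situation allows it.
         */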
        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
                struct amdgpu_vm *vm = bo_base->vm;
                struct dma_resv *resv = vm->root.bo->tbo.base.resv;

                if (ticket) {
                        /* When we get an error here it means that somebody
                         * else is holding the VM lock and updating page tables,
                         * so we can just continue here.
                         */
                        r = dma_resv_lock(resv, ticket);
                        if (r)
                                continue;

                } else {
                        /* TODO: This is more problematic and we actually need
                         * to allow page table updates without holding the
                         * lock.
                         */
                        if (!dma_resv_trylock(resv))
                                continue;
                }

                r = amdgpu_vm_clear_freed(adev, vm, NULL);
                if (!r)
                        r = amdgpu_vm_handle_moved(adev, vm);

                if (r && r != -EBUSY)
                        DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
                                  r);

                dma_resv_unlock(resv);
        }
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
        .allow_peer2peer = true,
        .move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_gem_object *obj;

        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem increases
                         * the refcount on the gem itself instead of the f_count
                         * of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
        if (IS_ERR(obj))
                return obj;

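        /* Attach dynamically so the exporter can move the buffer and tell us
         * about it through move_notify; keep a dma_buf reference for the
         * lifetime of the GEM object.
         */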
        attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
                                        &amdgpu_dma_buf_attach_ops, obj);
        if (IS_ERR(attach)) {
                drm_gem_object_put(obj);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);
        obj->import_attach = attach;
        return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the dmabuf is accessible over XGMI, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
                                      struct amdgpu_bo *bo)
{
        struct drm_gem_object *obj = &bo->tbo.base;
        struct drm_gem_object *gobj;

        if (obj->import_attach) {
                struct dma_buf *dma_buf = obj->import_attach->dmabuf;

                if (dma_buf->ops != &amdgpu_dmabuf_ops)
                        /* No XGMI with non AMD GPUs */
                        return false;

                gobj = dma_buf->priv;
                bo = gem_to_amdgpu_bo(gobj);
        }

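        /* XGMI access only works between devices in the same hive and for
         * BOs that can be placed in VRAM.
         */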
        if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
            (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
                return true;

        return false;
}