/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
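
/*
 * Illustrative sketch, not part of the driver: from userspace a buffer is
 * typically shared through the generic PRIME ioctls, which end up in the
 * export/import callbacks below.  Error handling is omitted and the fd and
 * handle variables are hypothetical.
 *
 *	struct drm_prime_handle args = {
 *		.handle = bo_handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 *	struct drm_prime_handle import = { .fd = args.fd };
 *
 *	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import);
 *	// import.handle is now a GEM handle on the importing device
 */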

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
                                 struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
                attach->peer2peer = false;

        return 0;
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        /* pin buffer into GTT */
        return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        amdgpu_bo_unpin(bo);
}
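
/*
 * Illustrative sketch, not part of the driver: an importer that cannot follow
 * buffer moves pins the attachment through the pin/unpin callbacks above.
 * dma_buf_pin() and dma_buf_unpin() must be called with the DMA-buf
 * reservation lock held; the attach and sgt variables are hypothetical.
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	if (!dma_buf_pin(attach)) {
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *		// ... use the mapping, then tear it down ...
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *		dma_buf_unpin(attach);
 *	}
 *	dma_resv_unlock(attach->dmabuf->resv);
 */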

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * If the buffer is not pinned, it is moved into the GTT domain, or into VRAM
 * when peer-to-peer access is possible, and the resulting DMA addresses are
 * returned.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
                                           enum dma_data_direction dir)
{
        struct dma_buf *dma_buf = attach->dmabuf;
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct sg_table *sgt;
        long r;

        if (!bo->tbo.pin_count) {
                /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
                unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;

                if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
                    attach->peer2peer) {
                        bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                        domains |= AMDGPU_GEM_DOMAIN_VRAM;
                }
                amdgpu_bo_placement_from_domain(bo, domains);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (r)
                        return ERR_PTR(r);

        } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
                     AMDGPU_GEM_DOMAIN_GTT)) {
                return ERR_PTR(-EBUSY);
        }

        switch (bo->tbo.resource->mem_type) {
        case TTM_PL_TT:
                sgt = drm_prime_pages_to_sg(obj->dev,
                                            bo->tbo.ttm->pages,
                                            bo->tbo.ttm->num_pages);
                if (IS_ERR(sgt))
                        return sgt;

                if (dma_map_sgtable(attach->dev, sgt, dir,
                                    DMA_ATTR_SKIP_CPU_SYNC))
                        goto error_free;
                break;

        case TTM_PL_VRAM:
                r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
                                              bo->tbo.base.size, attach->dev,
                                              dir, &sgt);
                if (r)
                        return ERR_PTR(r);
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        return sgt;

error_free:
        sg_free_table(sgt);
        kfree(sgt);
        return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. It unmaps and frees the sg_table created by
 * amdgpu_dma_buf_map().
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
                                 struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
        if (sgt->sgl->page_link) {
                dma_unmap_sgtable(attach->dev, sgt, dir, 0);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
        }
}
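
/*
 * Illustrative sketch, not part of the driver: a dynamic importer (one that
 * supplies a move_notify callback) maps and unmaps the attachment under the
 * reservation lock without pinning, so the exporter stays free to move the
 * buffer between uses.  The attach and sgt variables are hypothetical.
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	if (!IS_ERR(sgt)) {
 *		// program the device with the addresses in sgt
 *		dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *	}
 *	dma_resv_unlock(attach->dmabuf->resv);
 */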

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                                           enum dma_data_direction direction)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { true, false };
        u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
        int ret;
        bool reads = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_FROM_DEVICE);

        if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
                return 0;

        /* move to gtt */
        ret = amdgpu_bo_reserve(bo, false);
        if (unlikely(ret != 0))
                return ret;

        if (!bo->tbo.pin_count &&
            (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }

        amdgpu_bo_unreserve(bo);
        return ret;
}
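
/*
 * Illustrative sketch, not part of the driver: an importer brackets CPU reads
 * of the shared buffer with the begin/end CPU access calls, which reach the
 * callback above for amdgpu exports.  Error handling and locking details are
 * omitted; the dmabuf variable is hypothetical.
 *
 *	struct iosys_map map;
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (!dma_buf_vmap(dmabuf, &map)) {
 *		// read through map.vaddr (or map.vaddr_iomem)
 *		dma_buf_vunmap(dmabuf, &map);
 *	}
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */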

const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_dma_buf_attach,
        .pin = amdgpu_dma_buf_pin,
        .unpin = amdgpu_dma_buf_unpin,
        .map_dma_buf = amdgpu_dma_buf_map,
        .unmap_dma_buf = amdgpu_dma_buf_unmap,
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
                                        int flags)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct dma_buf *buf;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                return ERR_PTR(-EPERM);

        buf = drm_gem_prime_export(gobj, flags);
        if (!IS_ERR(buf))
                buf->ops = &amdgpu_dmabuf_ops;

        return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
        struct dma_resv *resv = dma_buf->resv;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint64_t flags = 0;
        int ret;

        dma_resv_lock(resv, NULL);

        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

                flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                         AMDGPU_GEM_CREATE_COHERENT |
                                         AMDGPU_GEM_CREATE_EXT_COHERENT |
                                         AMDGPU_GEM_CREATE_UNCACHED);
        }

        ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
                                       AMDGPU_GEM_DOMAIN_CPU, flags,
                                       ttm_bo_type_sg, resv, &gobj, 0);
        if (ret)
                goto error;

        bo = gem_to_amdgpu_bo(gobj);
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

        dma_resv_unlock(resv);
        return gobj;

error:
        dma_resv_unlock(resv);
        return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = attach->importer_priv;
        struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_placement placement = {};
        struct amdgpu_vm_bo_base *bo_base;
        int r;

        /* FIXME: This should be after the "if", but needs a fix to make sure
         * DMABuf imports are initialized in the right VM list.
         */
        amdgpu_vm_bo_invalidate(adev, bo, false);
        if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
                return;

        r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
        if (r) {
                DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
                return;
        }

        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
                struct amdgpu_vm *vm = bo_base->vm;
                struct dma_resv *resv = vm->root.bo->tbo.base.resv;

                if (ticket) {
                        /* When we get an error here it means that somebody
                         * else is holding the VM lock and updating page tables.
                         * So we can just continue here.
                         */
                        r = dma_resv_lock(resv, ticket);
                        if (r)
                                continue;

                } else {
                        /* TODO: This is more problematic and we actually need
                         * to allow page table updates without holding the
                         * lock.
                         */
                        if (!dma_resv_trylock(resv))
                                continue;
                }

                /* Reserve fences for two SDMA page table updates */
                r = dma_resv_reserve_fences(resv, 2);
                if (!r)
                        r = amdgpu_vm_clear_freed(adev, vm, NULL);
                if (!r)
                        r = amdgpu_vm_handle_moved(adev, vm, ticket);

                if (r && r != -EBUSY)
                        DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
                                  r);

                dma_resv_unlock(resv);
        }
}
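
/*
 * Illustrative sketch, not part of the driver: the callback above is reached
 * when an exporter calls dma_buf_move_notify() with the reservation lock held
 * before moving the backing storage, e.g. from its eviction path.  The bo
 * variable is hypothetical.
 *
 *	dma_resv_assert_held(bo->base.resv);
 *	dma_buf_move_notify(bo->base.dma_buf);
 *	// ... actually move the buffer ...
 */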

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
        .allow_peer2peer = true,
        .move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_gem_object *obj;

        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem increases
                         * the refcount on the gem itself instead of the f_count
                         * of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
        if (IS_ERR(obj))
                return obj;

        attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
                                        &amdgpu_dma_buf_attach_ops, obj);
        if (IS_ERR(attach)) {
                drm_gem_object_put(obj);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);
        obj->import_attach = attach;
        return obj;
}
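
/*
 * Illustrative sketch, assuming the hook placement of current kernels (the
 * exact wiring varies between versions): the import callback above is plugged
 * into the driver's &drm_driver structure, while the export callback is
 * typically reached through the GEM object functions.
 *
 *	static const struct drm_driver example_driver = {
 *		// ...
 *		.gem_prime_import = amdgpu_gem_prime_import,
 *	};
 */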

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check whether XGMI is available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the DMA-buf is accessible over XGMI, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
                                      struct amdgpu_bo *bo)
{
        struct drm_gem_object *obj = &bo->tbo.base;
        struct drm_gem_object *gobj;

        if (obj->import_attach) {
                struct dma_buf *dma_buf = obj->import_attach->dmabuf;

                if (dma_buf->ops != &amdgpu_dmabuf_ops)
                        /* No XGMI with non AMD GPUs */
                        return false;

                gobj = dma_buf->priv;
                bo = gem_to_amdgpu_bo(gobj);
        }

        if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
            (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
                return true;

        return false;
}