/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects, which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */
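
/*
 * Typical lifecycle of a kernel-internal BO, as a minimal illustrative
 * sketch (error handling shortened; the local variable names are
 * hypothetical, the calls are the helpers defined below):
 *
 *      struct amdgpu_bo *bo = NULL;
 *      u64 gpu_addr;
 *      void *cpu_addr;
 *      int r;
 *
 *      r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *                                  AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *                                  &gpu_addr, &cpu_addr);
 *      if (r)
 *              return r;
 *      memset(cpu_addr, 0, PAGE_SIZE);  // use the CPU mapping
 *      ...
 *      amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */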

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

        if (bo->tbo.pin_count > 0)
                amdgpu_bo_subtract_pin_size(bo);

        amdgpu_bo_kunmap(bo);

        if (bo->tbo.base.import_attach)
                drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
        drm_gem_object_release(&bo->tbo.base);
        /* remove from the shadow list in case amdgpu_device_recover_vram
         * saw a NULL bo->parent and skipped this BO
         */
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&adev->shadow_list_lock);
                list_del_init(&bo->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }
        amdgpu_bo_unref(&bo->parent);

        kfree(bo->metadata);
        kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &amdgpu_bo_destroy;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct ttm_placement *placement = &abo->placement;
        struct ttm_place *places = abo->placements;
        u64 flags = abo->flags;
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_VRAM;
                places[c].flags = 0;

                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;

                if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                        places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_TT;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_SYSTEM;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_GDS;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_GWS;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_OA;
                places[c].flags = 0;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_SYSTEM;
                places[c].flags = 0;
                c++;
        }

        BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

        placement->num_placement = c;
        placement->placement = places;

        placement->num_busy_placement = c;
        placement->busy_placement = places;
}
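
/*
 * Illustrative example: requesting AMDGPU_GEM_DOMAIN_VRAM |
 * AMDGPU_GEM_DOMAIN_GTT on a BO without special flags yields two
 * placements, tried in order:
 *
 *      amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *                                           AMDGPU_GEM_DOMAIN_GTT);
 *      // placements[0]: mem_type == TTM_PL_VRAM, flags |= TTM_PL_FLAG_TOPDOWN
 *      // placements[1]: mem_type == TTM_PL_TT
 */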

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL; an existing
 * BO is reused otherwise.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr)
{
        struct amdgpu_bo_param bp;
        bool free = false;
        int r;

        if (!size) {
                amdgpu_bo_unref(bo_ptr);
                return 0;
        }

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = align;
        bp.domain = domain;
        bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
                : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;

        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, &bp, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
                        return r;
                }
                free = true;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
        if (r) {
                dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
                goto error_unpin;
        }

        if (gpu_addr)
                *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unpin;
                }
        }

        return 0;

error_unpin:
        amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        if (free)
                amdgpu_bo_unref(bo_ptr);

        return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL; an existing
 * BO is reused otherwise.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
                                      gpu_addr, cpu_addr);

        if (r)
                return r;

        if (*bo_ptr)
                amdgpu_bo_unreserve(*bo_ptr);

        return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
                               uint64_t offset, uint64_t size, uint32_t domain,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
        struct ttm_operation_ctx ctx = { false, false };
        unsigned int i;
        int r;

        offset &= PAGE_MASK;
        size = ALIGN(size, PAGE_SIZE);

        r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
                                      NULL, cpu_addr);
        if (r)
                return r;

        if ((*bo_ptr) == NULL)
                return 0;

        /*
         * Remove the original mem node and create a new one at the requested
         * position.
         */
        if (cpu_addr)
                amdgpu_bo_kunmap(*bo_ptr);

        ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

        for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
                (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
                (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
        }
        r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
                             &(*bo_ptr)->tbo.mem, &ctx);
        if (r)
                goto error;

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r)
                        goto error;
        }

        amdgpu_bo_unreserve(*bo_ptr);
        return 0;

error:
        amdgpu_bo_unreserve(*bo_ptr);
        amdgpu_bo_unref(bo_ptr);
        return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr)
{
        if (*bo == NULL)
                return;

        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);

                amdgpu_bo_unpin(*bo);
                amdgpu_bo_unreserve(*bo);
        }
        amdgpu_bo_unref(bo);

        if (gpu_addr)
                *gpu_addr = 0;

        if (cpu_addr)
                *cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
                                    unsigned long size, u32 domain)
{
        struct ttm_resource_manager *man = NULL;

        /*
         * If GTT is part of the requested domains, the check must succeed
         * to allow falling back to GTT.
         */
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

                if (size < (man->size << PAGE_SHIFT))
                        return true;
                else
                        goto fail;
        }

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

                if (size < (man->size << PAGE_SHIFT))
                        return true;
                else
                        goto fail;
        }

        /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
        return true;

fail:
        DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
                  man->size << PAGE_SHIFT);
        return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{
#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        return false;
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                return false;

        return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                               struct amdgpu_bo_param *bp,
                               struct amdgpu_bo **bo_ptr)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = (bp->type != ttm_bo_type_kernel),
                .no_wait_gpu = bp->no_wait_gpu,
                /* We opt to avoid OOM on system page allocations */
                .gfp_retry_mayfail = true,
                .allow_res_evict = bp->type != ttm_bo_type_kernel,
                .resv = bp->resv
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
        size_t acc_size;
        int r;

        /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                /* GWS and OA don't need any alignment. */
                page_align = bp->byte_align;
                size <<= PAGE_SHIFT;
        } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
                /* Both size and alignment must be a multiple of 4. */
                page_align = ALIGN(bp->byte_align, 4);
                size = ALIGN(size, 4) << PAGE_SHIFT;
        } else {
                /* Memory should be aligned at least to a page size. */
                page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
                size = ALIGN(size, PAGE_SIZE);
        }

        if (!amdgpu_bo_validate_size(adev, size, bp->domain))
                return -ENOMEM;

        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
        INIT_LIST_HEAD(&bo->shadow_list);
        bo->vm_bo = NULL;
        bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
                bp->domain;
        bo->allowed_domains = bo->preferred_domains;
        if (bp->type != ttm_bo_type_kernel &&
            bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = bp->flags;

        if (!amdgpu_bo_support_uswc(bo->flags))
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        bo->tbo.bdev = &adev->mman.bdev;
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
                          AMDGPU_GEM_DOMAIN_GDS))
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
        else
                amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;

        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
                                 &bo->placement, page_align, &ctx, acc_size,
                                 NULL, bp->resv, &amdgpu_bo_destroy);
        if (unlikely(r != 0))
                return r;

        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                             ctx.bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

        if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                struct dma_fence *fence;

                r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;

                amdgpu_bo_fence(bo, fence, false);
                dma_fence_put(bo->tbo.moving);
                bo->tbo.moving = dma_fence_get(fence);
                dma_fence_put(fence);
        }
        if (!bp->resv)
                amdgpu_bo_unreserve(bo);
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
        if (bp->type == ttm_bo_type_device)
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        return 0;

fail_unreserve:
        if (!bp->resv)
                dma_resv_unlock(bo->tbo.base.resv);
        amdgpu_bo_unref(&bo);
        return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size,
                                   struct amdgpu_bo *bo)
{
        struct amdgpu_bo_param bp;
        int r;

        if (bo->shadow)
                return 0;

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                   AMDGPU_GEM_CREATE_SHADOW;
        bp.type = ttm_bo_type_kernel;
        bp.resv = bo->tbo.base.resv;

        r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
                list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }

        return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr)
{
        u64 flags = bp->flags;
        int r;

        bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
        r = amdgpu_bo_do_create(adev, bp, bo_ptr);
        if (r)
                return r;

        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
                if (!bp->resv)
                        WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
                                              NULL));

                r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

                if (!bp->resv)
                        dma_resv_unlock((*bo_ptr)->tbo.base.resv);

                if (r)
                        amdgpu_bo_unref(bo_ptr);
        }

        return r;
}
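
/*
 * Minimal illustrative sketch of creating a userspace-visible (device) BO;
 * the variable names are hypothetical, the fields and calls are the ones
 * used throughout this file:
 *
 *      struct amdgpu_bo_param bp;
 *      struct amdgpu_bo *bo;
 *      int r;
 *
 *      memset(&bp, 0, sizeof(bp));
 *      bp.size = size;
 *      bp.byte_align = PAGE_SIZE;
 *      bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *      bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *      bp.type = ttm_bo_type_device;
 *      bp.resv = NULL;
 *      r = amdgpu_bo_create(adev, &bp, &bo);
 */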

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow bos. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        uint32_t domain;
        int r;

        if (bo->tbo.pin_count)
                return 0;

        domain = bo->preferred_domains;

retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        uint64_t shadow_addr, parent_addr;

        shadow_addr = amdgpu_bo_gpu_offset(shadow);
        parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

        return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
                                  amdgpu_bo_size(shadow), NULL, fence,
                                  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        void *kptr;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        kptr = amdgpu_bo_kptr(bo);
        if (kptr) {
                if (ptr)
                        *ptr = kptr;
                return 0;
        }

        r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
                                      MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
        if (r)
                return r;

        if (ptr)
                *ptr = amdgpu_bo_kptr(bo);

        return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
        bool is_iomem;

        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
}
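
/*
 * Illustrative CPU-access pattern using the mapping helpers above
 * (error handling abbreviated; assumes the BO was not created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS):
 *
 *      void *ptr;
 *
 *      r = amdgpu_bo_kmap(bo, &ptr);
 *      if (!r) {
 *              memcpy(ptr, data, size);  // access via the kernel mapping
 *              amdgpu_bo_kunmap(bo);
 *      }
 */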

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_get(&bo->tbo);
        return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_put(tbo);
        *bo = NULL;
}
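
/*
 * Illustrative refcounting pattern: take a reference while handing the BO
 * to another holder, and drop it with amdgpu_bo_unref(), which also clears
 * the caller's pointer:
 *
 *      struct amdgpu_bo *ref = amdgpu_bo_ref(bo);
 *      ...
 *      amdgpu_bo_unref(&ref);  // ref is NULL afterwards
 */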

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound GART memory, binds the pages into the GART table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
        int r, i;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        /* A shared bo cannot be migrated to VRAM */
        if (bo->prime_shared_count || bo->tbo.base.import_attach) {
                if (domain & AMDGPU_GEM_DOMAIN_GTT)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
                else
                        return -EINVAL;
        }

        /* This assumes only APU display buffers are pinned with (VRAM|GTT).
         * See function amdgpu_display_supported_domains()
         */
        domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;
                uint32_t mem_flags = bo->tbo.mem.placement;

                if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                        return -EINVAL;

                if ((mem_type == TTM_PL_VRAM) &&
                    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
                    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
                        return -EINVAL;

                ttm_bo_pin(&bo->tbo);

                if (max_offset != 0) {
                        u64 domain_start = amdgpu_ttm_domain_start(adev,
                                                                   mem_type);
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }

        if (bo->tbo.base.import_attach)
                dma_buf_pin(bo->tbo.base.import_attach);

        /* force to pin into visible video ram */
        if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
                bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        amdgpu_bo_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                unsigned fpfn, lpfn;

                fpfn = min_offset >> PAGE_SHIFT;
                lpfn = max_offset >> PAGE_SHIFT;

                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }

        ttm_bo_pin(&bo->tbo);

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }

error:
        return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
        bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
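
/*
 * Illustrative pin/unpin pattern (pin state may only be changed while the
 * BO is reserved; error handling abbreviated), mirroring what
 * amdgpu_bo_create_reserved() does above:
 *
 *      r = amdgpu_bo_reserve(bo, false);
 *      if (r)
 *              return r;
 *      r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *      if (!r)
 *              gpu_addr = amdgpu_bo_gpu_offset(bo);
 *      amdgpu_bo_unreserve(bo);
 *      ...
 *      // later, again under reservation:
 *      amdgpu_bo_unpin(bo);
 */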

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        ttm_bo_unpin(&bo->tbo);
        if (bo->tbo.pin_count)
                return;

        amdgpu_bo_subtract_pin_size(bo);

        if (bo->tbo.base.import_attach)
                dma_buf_unpin(bo->tbo.base.import_attach);
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        struct ttm_resource_manager *man;

        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
        if (adev->flags & AMD_IS_APU) {
                /* Useless to evict on IGP chips */
                return 0;
        }
#endif

        man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}

static const char *amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3",
        "DDR4",
        "GDDR6",
        "DDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* reserve PAT memory space to WC for VRAM */
        arch_io_reserve_memtype_wc(adev->gmc.aper_base,
                                   adev->gmc.aper_size);

        /* Add an MTRR for the VRAM */
        adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
                                               adev->gmc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->gmc.mc_vram_size >> 20,
                 (unsigned long long)adev->gmc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
        return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->gmc.vram_mtrr);
        arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_bo_mmap_obj() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        return ttm_bo_mmap_obj(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by the GEM ioctl or the kernel
 * driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}
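
/*
 * Illustrative only: pack a tiling description with the AMDGPU_TILING_SET()
 * helper from amdgpu_drm.h and apply it while the BO is reserved ("swizzle"
 * is a hypothetical value):
 *
 *      u64 tiling = AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle);
 *
 *      amdgpu_bo_reserve(bo, false);
 *      amdgpu_bo_set_tiling_flags(bo, tiling);
 *      amdgpu_bo_unreserve(bo);
 */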

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets the buffer object's tiling flags. Used by the GEM ioctl or the kernel
 * driver to get the tiling flags from a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        dma_resv_assert_held(bo->tbo.base.resv);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}
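
/*
 * Illustrative metadata round trip (the buffer and its size are
 * hypothetical):
 *
 *      uint32_t size;
 *      uint64_t flags;
 *      char buf[64];
 *
 *      r = amdgpu_bo_set_metadata(bo, buf, sizeof(buf), 0);
 *      ...
 *      r = amdgpu_bo_get_metadata(bo, buf, sizeof(buf), &size, &flags);
 */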

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_resource *new_mem)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        struct ttm_resource *old_mem = &bo->mem;

        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;

        abo = ttm_to_amdgpu_bo(bo);
        amdgpu_vm_bo_invalidate(adev, abo, evict);

        amdgpu_bo_kunmap(abo);

        if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
            bo->mem.mem_type != TTM_PL_SYSTEM)
                dma_buf_move_notify(abo->tbo.base.dma_buf);

        /* remember the eviction */
        if (evict)
                atomic64_inc(&adev->num_evictions);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *abo;
        int r;

        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;

        abo = ttm_to_amdgpu_bo(bo);

        if (abo->kfd_bo)
                amdgpu_amdkfd_unreserve_memory_limit(abo);

        /* We only remove the fence if the resv has individualized. */
        WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
                     && bo->base.resv != &bo->base._resv);
        if (bo->base.resv == &bo->base._resv)
                amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

        if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
            !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
                return;

        dma_resv_lock(bo->base.resv, NULL);

        r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
        if (!WARN_ON(r)) {
                amdgpu_bo_fence(abo, fence, false);
                dma_fence_put(fence);
        }

        dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        unsigned long offset, size;
        int r;

        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= adev->gmc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->tbo.pin_count > 0)
                return VM_FAULT_SIGBUS;

        /* hurrah the memory is not visible! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                        AMDGPU_GEM_DOMAIN_GTT);

        /* Avoid costly evictions; only set GTT as a busy placement */
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];

        r = ttm_bo_validate(bo, &abo->placement, &ctx);
        if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
                return VM_FAULT_NOPAGE;
        else if (unlikely(r))
                return VM_FAULT_SIGBUS;

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            (offset + size) > adev->gmc.visible_vram_size)
                return VM_FAULT_SIGBUS;

        ttm_bo_move_to_lru_tail_unlocked(bo);
        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
{
        struct dma_resv *resv = bo->tbo.base.resv;

        if (shared)
                dma_resv_add_shared_fence(resv, fence);
        else
                dma_resv_add_excl_fence(resv, fence);
}
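
/*
 * Illustrative use: after scheduling a GPU operation touching the BO,
 * attach the resulting fence so later users wait for it (the copy below
 * follows the pattern of amdgpu_bo_restore_shadow() above; src_addr and
 * dst_addr are hypothetical):
 *
 *      struct dma_fence *fence;
 *
 *      r = amdgpu_copy_buffer(ring, src_addr, dst_addr, num_bytes,
 *                             NULL, &fence, true, false, false);
 *      if (!r) {
 *              amdgpu_bo_fence(bo, fence, true);
 *              dma_fence_put(fence);
 *      }
 */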

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
                             enum amdgpu_sync_mode sync_mode, void *owner,
                             bool intr)
{
        struct amdgpu_sync sync;
        int r;

        amdgpu_sync_create(&sync);
        amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
        r = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
        return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
                                        AMDGPU_SYNC_NE_OWNER, owner, intr);
}
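
/*
 * Illustrative wait before CPU access, sketched under the assumption that
 * the caller holds a reference to the BO:
 *
 *      r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 *      if (r)
 *              return r;
 */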

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should be either pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
                     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

        return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        uint64_t offset;

        offset = (bo->tbo.mem.start << PAGE_SHIFT) +
                 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);

        return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
                                            uint32_t domain)
{
        if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
        }
        return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)                              \
        do {                                                           \
                if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {        \
                        seq_printf((m), " " #flag);                    \
                }                                                      \
        } while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        unsigned int domain;
        const char *placement;
        unsigned int pin_count;
        u64 size;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }

        size = amdgpu_bo_size(bo);
        seq_printf(m, "\t\t0x%08x: %12lld byte %s",
                   id, size, placement);

        pin_count = READ_ONCE(bo->tbo.pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);

        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
        attachment = READ_ONCE(bo->tbo.base.import_attach);

        if (attachment)
                seq_printf(m, " imported from %p", dma_buf);
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);

        amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
        amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
        amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
        amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
        amdgpu_bo_print_flag(m, bo, SHADOW);
        amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
        amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
        amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

        seq_puts(m, "\n");

        return size;
}
#endif