/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. The DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects, which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc., for kernel-managed allocations used by the GPU.
 */

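/*
 * Illustrative sketch (editor's addition, not driver code): a typical
 * lifecycle for a kernel-internal BO using the helpers defined below. An
 * initialized struct amdgpu_device *adev is assumed.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	// Allocate, pin and map a page-sized BO in VRAM.
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *
 *	// ... use cpu_addr for CPU access, gpu_addr for GPU access ...
 *
 *	// Unmap, unpin and free the BO; the pointers are cleared for us.
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
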
/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	if (bo->tbo.pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	if (bo->tbo.type == ttm_bo_type_device) {
		ubo = to_amdgpu_bo_user(bo);
		kfree(ubo->metadata);
	}

	kvfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object is an &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
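
/*
 * Illustrative sketch (editor's addition): how callers typically combine
 * amdgpu_bo_placement_from_domain() with ttm_bo_validate(), mirroring the
 * retry pattern used by amdgpu_bo_validate() further down in this file.
 * The BO is assumed to be reserved by the caller.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	int r;
 *
 *	amdgpu_bo_placement_from_domain(bo, bo->preferred_domains);
 *	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	if (r == -ENOMEM) {
 *		// Fall back to the wider set of allowed domains.
 *		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
 *		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 */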

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if @bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if @bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size fits within the total memory of the requested
 * domain
 */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;

	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
					      NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
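
/*
 * Illustrative sketch (editor's addition): filling in an &amdgpu_bo_param
 * for amdgpu_bo_create(), following the pattern used by
 * amdgpu_bo_create_reserved() above. The flag and domain choices are
 * examples only.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */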

/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a GPU
 * reset where VRAM context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
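
/*
 * Illustrative sketch (editor's addition): CPU access to a BO through the
 * kmap helpers above. The BO is assumed to be reserved (and, for long-lived
 * mappings, pinned) by the caller.
 *
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	memset(cpu_ptr, 0, amdgpu_bo_size(bo));	// CPU writes to the BO
 *
 *	amdgpu_bo_kunmap(bo);
 */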

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound GART memory, binds the pages into the GART table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means locking pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count || bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;
		uint32_t mem_flags = bo->tbo.mem.placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
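
/*
 * Illustrative sketch (editor's addition): pinning and unpinning under
 * reservation, as done by amdgpu_bo_create_reserved() and
 * amdgpu_bo_free_kernel() in this file. The BO must be reserved while
 * (un)pinning.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 *	...
 *	if (likely(amdgpu_bo_reserve(bo, true) == 0)) {
 *		amdgpu_bo_unpin(bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */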

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	amdgpu_bo_subtract_pin_size(bo);

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the LRU list of the memory type.
 * Mainly used for evicting VRAM at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	struct ttm_resource_manager *man;

	if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
		/* No need to evict vram on APUs for suspend to ram */
		return 0;
	}

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize the amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu) {
		/* reserve PAT memory space to WC for VRAM */
		arch_io_reserve_memtype_wc(adev->gmc.aper_base,
					   adev->gmc.aper_size);

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
						       adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	if (!adev->gmc.xgmi.connected_to_cpu) {
		arch_phys_wc_del(adev->gmc.vram_mtrr);
		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags to the new ones. Used by the GEM ioctl
 * or by the kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by the GEM ioctl or by the kernel
 * driver to get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = ubo->metadata_size;
	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
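
/*
 * Illustrative sketch (editor's addition): the usual two-call pattern for
 * reading metadata of unknown size; a first call with @buffer == NULL only
 * reports the stored size.
 *
 *	uint32_t size;
 *	uint64_t flags;
 *	void *data;
 *	int r;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, &flags);
 *	if (r || !size)
 *		return r;
 *
 *	data = kmalloc(size, GFP_KERNEL);	// hypothetical caller buffer
 *	if (!data)
 *		return -ENOMEM;
 *	r = amdgpu_bo_get_metadata(bo, data, size, &size, &flags);
 */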

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->mem.mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	/* We only remove the fence if the resv has been individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
		     && bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver that we are taking a fault on this BO and have reserved
 * it; also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset, size;
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}
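
/*
 * Illustrative sketch (editor's addition): waiting for all fences on a BO
 * that were not emitted by the caller, e.g. before CPU access. The owner
 * token shown is only an example; callers pass whatever owner they used
 * when scheduling their own work.
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 *	if (r)
 *		return r;	// e.g. -ERESTARTSYS if interrupted
 */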

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned int domain;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, SHADOW);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif