1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32
33#include <linux/dma-mapping.h>
34#include <linux/iommu.h>
35#include <linux/pagemap.h>
36#include <linux/sched/task.h>
37#include <linux/sched/mm.h>
38#include <linux/seq_file.h>
39#include <linux/slab.h>
40#include <linux/swap.h>
41#include <linux/swiotlb.h>
42#include <linux/dma-buf.h>
43#include <linux/sizes.h>
44#include <linux/module.h>
45
46#include <drm/drm_drv.h>
47#include <drm/ttm/ttm_bo_api.h>
48#include <drm/ttm/ttm_bo_driver.h>
49#include <drm/ttm/ttm_placement.h>
50#include <drm/ttm/ttm_range_manager.h>
51
52#include <drm/amdgpu_drm.h>
54
55#include "amdgpu.h"
56#include "amdgpu_object.h"
57#include "amdgpu_trace.h"
58#include "amdgpu_amdkfd.h"
59#include "amdgpu_sdma.h"
60#include "amdgpu_ras.h"
61#include "amdgpu_atomfirmware.h"
62#include "amdgpu_res_cursor.h"
63#include "bif/bif_4_1_d.h"
64
65MODULE_IMPORT_NS(DMA_BUF);
66
67#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
68
69static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
70 struct ttm_tt *ttm,
71 struct ttm_resource *bo_mem);
72static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
73 struct ttm_tt *ttm);
74
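/**
 * amdgpu_ttm_init_on_chip - Initialize a range manager for an on-chip heap
 *
 * @adev: amdgpu device
 * @type: memory type (AMDGPU_PL_GDS, AMDGPU_PL_GWS or AMDGPU_PL_OA)
 * @size_in_page: size of the heap in pages
 *
 * Thin wrapper around ttm_range_man_init() used for the small on-chip
 * memory pools that are managed as simple page ranges.
 */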
75static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
76 unsigned int type,
77 uint64_t size_in_page)
78{
79 return ttm_range_man_init(&adev->mman.bdev, type,
80 false, size_in_page);
81}
82
83/**
84 * amdgpu_evict_flags - Compute placement flags
85 *
86 * @bo: The buffer object to evict
87 * @placement: Possible destination(s) for evicted BO
88 *
89 * Fill in placement data when ttm_bo_evict() is called
90 */
91static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
92 struct ttm_placement *placement)
93{
94 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
95 struct amdgpu_bo *abo;
96 static const struct ttm_place placements = {
97 .fpfn = 0,
98 .lpfn = 0,
99 .mem_type = TTM_PL_SYSTEM,
100 .flags = 0
101 };
102
103 /* Don't handle scatter gather BOs */
104 if (bo->type == ttm_bo_type_sg) {
105 placement->num_placement = 0;
106 placement->num_busy_placement = 0;
107 return;
108 }
109
110 /* Object isn't an AMDGPU object so ignore */
111 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
112 placement->placement = &placements;
113 placement->busy_placement = &placements;
114 placement->num_placement = 1;
115 placement->num_busy_placement = 1;
116 return;
117 }
118
119 abo = ttm_to_amdgpu_bo(bo);
120 if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
121 placement->num_placement = 0;
122 placement->num_busy_placement = 0;
123 return;
124 }
125
126 switch (bo->resource->mem_type) {
127 case AMDGPU_PL_GDS:
128 case AMDGPU_PL_GWS:
129 case AMDGPU_PL_OA:
130 placement->num_placement = 0;
131 placement->num_busy_placement = 0;
132 return;
133
134 case TTM_PL_VRAM:
135 if (!adev->mman.buffer_funcs_enabled) {
136 /* Move to system memory */
137 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
138 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
139 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
140 amdgpu_bo_in_cpu_visible_vram(abo)) {
141
142 /* Try evicting to the CPU inaccessible part of VRAM
143 * first, but only set GTT as busy placement, so this
144 * BO will be evicted to GTT rather than causing other
145 * BOs to be evicted from VRAM
146 */
147 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
148 AMDGPU_GEM_DOMAIN_GTT |
149 AMDGPU_GEM_DOMAIN_CPU);
150 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
151 abo->placements[0].lpfn = 0;
152 abo->placement.busy_placement = &abo->placements[1];
153 abo->placement.num_busy_placement = 1;
154 } else {
155 /* Move to GTT memory */
156 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
157 AMDGPU_GEM_DOMAIN_CPU);
158 }
159 break;
160 case TTM_PL_TT:
161 case AMDGPU_PL_PREEMPT:
162 default:
163 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
164 break;
165 }
166 *placement = abo->placement;
167}
168
169/**
170 * amdgpu_ttm_map_buffer - Map memory into the GART windows
171 * @bo: buffer object to map
172 * @mem: memory object to map
173 * @mm_cur: range to map
174 * @window: which GART window to use
175 * @ring: DMA ring to use for the copy
176 * @tmz: if we should setup a TMZ enabled mapping
177 * @size: in number of bytes to map, out number of bytes mapped
178 * @addr: resulting address inside the MC address space
179 *
180 * Setup one of the GART windows to access a specific piece of memory or return
181 * the physical address for local memory.
182 */
183static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
184 struct ttm_resource *mem,
185 struct amdgpu_res_cursor *mm_cur,
186 unsigned window, struct amdgpu_ring *ring,
187 bool tmz, uint64_t *size, uint64_t *addr)
188{
189 struct amdgpu_device *adev = ring->adev;
190 unsigned offset, num_pages, num_dw, num_bytes;
191 uint64_t src_addr, dst_addr;
192 struct dma_fence *fence;
193 struct amdgpu_job *job;
194 void *cpu_addr;
195 uint64_t flags;
196 unsigned int i;
197 int r;
198
199 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
200 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
201
202 if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
203 return -EINVAL;
204
205 /* Map only what can't be accessed directly */
206 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
207 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
208 mm_cur->start;
209 return 0;
210 }
211
213 /*
214 * If start begins at an offset inside the page, then adjust the size
215 * and addr accordingly
216 */
217 offset = mm_cur->start & ~PAGE_MASK;
218
219 num_pages = PFN_UP(*size + offset);
220 num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
221
222 *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
223
224 *addr = adev->gmc.gart_start;
225 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
226 AMDGPU_GPU_PAGE_SIZE;
227 *addr += offset;
228
229 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
230 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
231
232 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
233 AMDGPU_IB_POOL_DELAYED, &job);
234 if (r)
235 return r;
236
237 src_addr = num_dw * 4;
238 src_addr += job->ibs[0].gpu_addr;
239
240 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
241 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
242 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
243 dst_addr, num_bytes, false);
244
245 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
246 WARN_ON(job->ibs[0].length_dw > num_dw);
247
248 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
249 if (tmz)
250 flags |= AMDGPU_PTE_TMZ;
251
252 cpu_addr = &job->ibs[0].ptr[num_dw];
253
254 if (mem->mem_type == TTM_PL_TT) {
255 dma_addr_t *dma_addr;
256
257 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
258 amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
259 } else {
260 dma_addr_t dma_address;
261
262 dma_address = mm_cur->start;
263 dma_address += adev->vm_manager.vram_base_offset;
264
265 for (i = 0; i < num_pages; ++i) {
266 amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
267 flags, cpu_addr);
268 dma_address += PAGE_SIZE;
269 }
270 }
271
272 r = amdgpu_job_submit(job, &adev->mman.entity,
273 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
274 if (r)
275 goto error_free;
276
277 dma_fence_put(fence);
278
279 return r;
280
281error_free:
282 amdgpu_job_free(job);
283 return r;
284}
285
286/**
287 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
288 * @adev: amdgpu device
289 * @src: buffer/address where to read from
290 * @dst: buffer/address where to write to
291 * @size: number of bytes to copy
292 * @tmz: if a secure copy should be used
293 * @resv: resv object to sync to
294 * @f: Returns the last fence if multiple jobs are submitted.
295 *
296 * The function copies @size bytes from {src->mem + src->offset} to
297 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
298 * move and different for a BO to BO copy.
299 *
300 */
301int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
302 const struct amdgpu_copy_mem *src,
303 const struct amdgpu_copy_mem *dst,
304 uint64_t size, bool tmz,
305 struct dma_resv *resv,
306 struct dma_fence **f)
307{
308 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
309 struct amdgpu_res_cursor src_mm, dst_mm;
310 struct dma_fence *fence = NULL;
311 int r = 0;
312
313 if (!adev->mman.buffer_funcs_enabled) {
314 DRM_ERROR("Trying to move memory with ring turned off.\n");
315 return -EINVAL;
316 }
317
318 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
319 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
320
321 mutex_lock(&adev->mman.gtt_window_lock);
322 while (src_mm.remaining) {
323 uint64_t from, to, cur_size;
324 struct dma_fence *next;
325
326 /* Never copy more than 256MiB at once to avoid a timeout */
327 cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
328
329 /* Map src to window 0 and dst to window 1. */
330 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
331 0, ring, tmz, &cur_size, &from);
332 if (r)
333 goto error;
334
335 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
336 1, ring, tmz, &cur_size, &to);
337 if (r)
338 goto error;
339
340 r = amdgpu_copy_buffer(ring, from, to, cur_size,
341 resv, &next, false, true, tmz);
342 if (r)
343 goto error;
344
345 dma_fence_put(fence);
346 fence = next;
347
348 amdgpu_res_next(&src_mm, cur_size);
349 amdgpu_res_next(&dst_mm, cur_size);
350 }
351error:
352 mutex_unlock(&adev->mman.gtt_window_lock);
353 if (f)
354 *f = dma_fence_get(fence);
355 dma_fence_put(fence);
356 return r;
357}
358
359/*
360 * amdgpu_move_blit - Copy an entire buffer to another buffer
361 *
362 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
363 * help move buffers to and from VRAM.
364 */
365static int amdgpu_move_blit(struct ttm_buffer_object *bo,
366 bool evict,
367 struct ttm_resource *new_mem,
368 struct ttm_resource *old_mem)
369{
370 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
371 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
372 struct amdgpu_copy_mem src, dst;
373 struct dma_fence *fence = NULL;
374 int r;
375
376 src.bo = bo;
377 dst.bo = bo;
378 src.mem = old_mem;
379 dst.mem = new_mem;
380 src.offset = 0;
381 dst.offset = 0;
382
383 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
384 new_mem->num_pages << PAGE_SHIFT,
385 amdgpu_bo_encrypted(abo),
386 bo->base.resv, &fence);
387 if (r)
388 goto error;
389
390 /* clear the space being freed */
391 if (old_mem->mem_type == TTM_PL_VRAM &&
392 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
393 struct dma_fence *wipe_fence = NULL;
394
395 r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
396 if (r) {
397 goto error;
398 } else if (wipe_fence) {
399 dma_fence_put(fence);
400 fence = wipe_fence;
401 }
402 }
403
404 /* Always block for VM page tables before committing the new location */
405 if (bo->type == ttm_bo_type_kernel)
406 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
407 else
408 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
409 dma_fence_put(fence);
410 return r;
411
412error:
413 if (fence)
414 dma_fence_wait(fence, false);
415 dma_fence_put(fence);
416 return r;
417}
418
419/*
420 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
421 *
422 * Called by amdgpu_bo_move()
423 */
424static bool amdgpu_mem_visible(struct amdgpu_device *adev,
425 struct ttm_resource *mem)
426{
427 uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
428 struct amdgpu_res_cursor cursor;
429
430 if (mem->mem_type == TTM_PL_SYSTEM ||
431 mem->mem_type == TTM_PL_TT)
432 return true;
433 if (mem->mem_type != TTM_PL_VRAM)
434 return false;
435
436 amdgpu_res_first(mem, 0, mem_size, &cursor);
437
438 /* ttm_resource_ioremap only supports contiguous memory */
439 if (cursor.size != mem_size)
440 return false;
441
442 return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
443}
444
445/*
446 * amdgpu_bo_move - Move a buffer object to a new memory location
447 *
448 * Called by ttm_bo_handle_move_mem()
449 */
450static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
451 struct ttm_operation_ctx *ctx,
452 struct ttm_resource *new_mem,
453 struct ttm_place *hop)
454{
455 struct amdgpu_device *adev;
456 struct amdgpu_bo *abo;
457 struct ttm_resource *old_mem = bo->resource;
458 int r;
459
460 if (new_mem->mem_type == TTM_PL_TT ||
461 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
462 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
463 if (r)
464 return r;
465 }
466
467 /* Can't move a pinned BO */
468 abo = ttm_to_amdgpu_bo(bo);
469 if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
470 return -EINVAL;
471
472 adev = amdgpu_ttm_adev(bo->bdev);
473
474 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
475 ttm_bo_move_null(bo, new_mem);
476 goto out;
477 }
478 if (old_mem->mem_type == TTM_PL_SYSTEM &&
479 (new_mem->mem_type == TTM_PL_TT ||
480 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
481 ttm_bo_move_null(bo, new_mem);
482 goto out;
483 }
484 if ((old_mem->mem_type == TTM_PL_TT ||
485 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
486 new_mem->mem_type == TTM_PL_SYSTEM) {
487 r = ttm_bo_wait_ctx(bo, ctx);
488 if (r)
489 return r;
490
491 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
492 ttm_resource_free(bo, &bo->resource);
493 ttm_bo_assign_mem(bo, new_mem);
494 goto out;
495 }
496
497 if (old_mem->mem_type == AMDGPU_PL_GDS ||
498 old_mem->mem_type == AMDGPU_PL_GWS ||
499 old_mem->mem_type == AMDGPU_PL_OA ||
500 new_mem->mem_type == AMDGPU_PL_GDS ||
501 new_mem->mem_type == AMDGPU_PL_GWS ||
502 new_mem->mem_type == AMDGPU_PL_OA) {
503 /* Nothing to save here */
504 ttm_bo_move_null(bo, new_mem);
505 goto out;
506 }
507
508 if (bo->type == ttm_bo_type_device &&
509 new_mem->mem_type == TTM_PL_VRAM &&
510 old_mem->mem_type != TTM_PL_VRAM) {
511 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
512 * accesses the BO after it's moved.
513 */
514 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
515 }
516
517 if (adev->mman.buffer_funcs_enabled) {
518 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
519 new_mem->mem_type == TTM_PL_VRAM) ||
520 (old_mem->mem_type == TTM_PL_VRAM &&
521 new_mem->mem_type == TTM_PL_SYSTEM))) {
522 hop->fpfn = 0;
523 hop->lpfn = 0;
524 hop->mem_type = TTM_PL_TT;
525 hop->flags = TTM_PL_FLAG_TEMPORARY;
526 return -EMULTIHOP;
527 }
528
529 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
530 } else {
531 r = -ENODEV;
532 }
533
534 if (r) {
535 /* Check that all memory is CPU accessible */
536 if (!amdgpu_mem_visible(adev, old_mem) ||
537 !amdgpu_mem_visible(adev, new_mem)) {
538 pr_err("Move buffer fallback to memcpy unavailable\n");
539 return r;
540 }
541
542 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
543 if (r)
544 return r;
545 }
546
547out:
548 /* update statistics */
549 atomic64_add(bo->base.size, &adev->num_bytes_moved);
550 amdgpu_bo_move_notify(bo, evict, new_mem);
551 return 0;
552}
553
554/*
555 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
556 *
557 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
558 */
559static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
560 struct ttm_resource *mem)
561{
562 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
563 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
564
565 switch (mem->mem_type) {
566 case TTM_PL_SYSTEM:
567 /* system memory */
568 return 0;
569 case TTM_PL_TT:
570 case AMDGPU_PL_PREEMPT:
571 break;
572 case TTM_PL_VRAM:
573 mem->bus.offset = mem->start << PAGE_SHIFT;
574 /* check if it's visible */
575 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
576 return -EINVAL;
577
578 if (adev->mman.aper_base_kaddr &&
579 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
580 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
581 mem->bus.offset;
582
583 mem->bus.offset += adev->gmc.aper_base;
584 mem->bus.is_iomem = true;
585 break;
586 default:
587 return -EINVAL;
588 }
589 return 0;
590}
591
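/*
 * amdgpu_ttm_io_mem_pfn - Return the PFN backing a page of a BO
 *
 * TTM's io_mem_pfn callback: translates a page offset inside the BO into
 * the physical page frame number within the VRAM aperture.
 */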
592static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
593 unsigned long page_offset)
594{
595 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
596 struct amdgpu_res_cursor cursor;
597
598 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
599 &cursor);
600 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
601}
602
603/**
604 * amdgpu_ttm_domain_start - Returns GPU start address
605 * @adev: amdgpu device object
606 * @type: type of the memory
607 *
608 * Returns:
609 * GPU start address of a memory domain
610 */
611
612uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
613{
614 switch (type) {
615 case TTM_PL_TT:
616 return adev->gmc.gart_start;
617 case TTM_PL_VRAM:
618 return adev->gmc.vram_start;
619 }
620
621 return 0;
622}
623
624/*
625 * TTM backend functions.
626 */
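/**
 * struct amdgpu_ttm_tt - amdgpu GTT backend for a TTM object
 * @ttm: base ttm_tt structure
 * @gobj: GEM object this backend belongs to
 * @offset: GART offset of the bound pages, or AMDGPU_BO_INVALID_OFFSET
 * @userptr: CPU address of the user memory, if this is a userptr BO
 * @usertask: task that owns the userptr mapping
 * @userflags: AMDGPU_GEM_USERPTR_* flags
 * @bound: true while the pages are bound into the GART
 * @range: HMM range used to track the user pages
 */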
627struct amdgpu_ttm_tt {
628 struct ttm_tt ttm;
629 struct drm_gem_object *gobj;
630 u64 offset;
631 uint64_t userptr;
632 struct task_struct *usertask;
633 uint32_t userflags;
634 bool bound;
635#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
636 struct hmm_range *range;
637#endif
638};
639
640#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
641
642#ifdef CONFIG_DRM_AMDGPU_USERPTR
643/*
644 * amdgpu_ttm_tt_get_user_pages - get device-accessible pages that back user
645 * memory and start HMM tracking of CPU page table updates
646 *
647 * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once
648 * and only once afterwards to stop HMM tracking
649 */
650int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
651{
652 struct ttm_tt *ttm = bo->tbo.ttm;
653 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
654 unsigned long start = gtt->userptr;
655 struct vm_area_struct *vma;
656 struct mm_struct *mm;
657 bool readonly;
658 int r = 0;
659
660 mm = bo->notifier.mm;
661 if (unlikely(!mm)) {
662 DRM_DEBUG_DRIVER("BO is not registered?\n");
663 return -EFAULT;
664 }
665
666 /* Another get_user_pages is running at the same time?? */
667 if (WARN_ON(gtt->range))
668 return -EFAULT;
669
670 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
671 return -ESRCH;
672
673 mmap_read_lock(mm);
674 vma = vma_lookup(mm, start);
675 if (unlikely(!vma)) {
676 r = -EFAULT;
677 goto out_unlock;
678 }
679 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
680 vma->vm_file)) {
681 r = -EPERM;
682 goto out_unlock;
683 }
684
685 readonly = amdgpu_ttm_tt_is_readonly(ttm);
686 r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
687 ttm->num_pages, &gtt->range, readonly,
688 true, NULL);
689out_unlock:
690 mmap_read_unlock(mm);
691 if (r)
692 pr_debug("failed %d to get user pages 0x%lx\n", r, start);
693
694 mmput(mm);
695
696 return r;
697}
698
699/*
700 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
701 * Check if the pages backing this ttm range have been invalidated
702 *
703 * Returns: true if pages are still valid
704 */
705bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
706{
707 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
708 bool r = false;
709
710 if (!gtt || !gtt->userptr)
711 return false;
712
713 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
714 gtt->userptr, ttm->num_pages);
715
716 WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
717 "No user pages to check\n");
718
719 if (gtt->range) {
720 /*
721 * FIXME: Must always hold notifier_lock for this, and must
722 * not ignore the return code.
723 */
724 r = amdgpu_hmm_range_get_pages_done(gtt->range);
725 gtt->range = NULL;
726 }
727
728 return !r;
729}
730#endif
731
732/*
733 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
734 *
735 * Called by amdgpu_cs_list_validate(). This creates the page list
736 * that backs user memory and will ultimately be mapped into the device
737 * address space.
738 */
739void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
740{
741 unsigned long i;
742
743 for (i = 0; i < ttm->num_pages; ++i)
744 ttm->pages[i] = pages ? pages[i] : NULL;
745}
746
747/*
748 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
749 *
750 * Called by amdgpu_ttm_backend_bind()
751 **/
752static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
753 struct ttm_tt *ttm)
754{
755 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
756 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
757 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
758 enum dma_data_direction direction = write ?
759 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
760 int r;
761
762 /* Allocate an SG array and squash pages into it */
763 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
764 (u64)ttm->num_pages << PAGE_SHIFT,
765 GFP_KERNEL);
766 if (r)
767 goto release_sg;
768
769 /* Map SG to device */
770 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
771 if (r)
772 goto release_sg;
773
774 /* convert SG to linear array of pages and dma addresses */
775 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
776 ttm->num_pages);
777
778 return 0;
779
780release_sg:
781 kfree(ttm->sg);
782 ttm->sg = NULL;
783 return r;
784}
785
786/*
787 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
788 */
789static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
790 struct ttm_tt *ttm)
791{
792 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
793 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
794 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
795 enum dma_data_direction direction = write ?
796 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
797
798 /* double check that we don't free the table twice */
799 if (!ttm->sg || !ttm->sg->sgl)
800 return;
801
802 /* unmap the pages mapped to the device */
803 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
804 sg_free_table(ttm->sg);
805
806#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
807 if (gtt->range) {
808 unsigned long i;
809
810 for (i = 0; i < ttm->num_pages; i++) {
811 if (ttm->pages[i] !=
812 hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
813 break;
814 }
815
816 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
817 }
818#endif
819}
820
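/*
 * amdgpu_ttm_gart_bind - Bind the DMA addresses of a BO into the GART
 *
 * Adds the TMZ flag for encrypted BOs. For GFX9 CP MQD BOs the first page
 * keeps the default UC memory type while the remaining pages are mapped
 * as NC.
 */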
821static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
822 struct ttm_buffer_object *tbo,
823 uint64_t flags)
824{
825 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
826 struct ttm_tt *ttm = tbo->ttm;
827 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
828
829 if (amdgpu_bo_encrypted(abo))
830 flags |= AMDGPU_PTE_TMZ;
831
832 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
833 uint64_t page_idx = 1;
834
835 amdgpu_gart_bind(adev, gtt->offset, page_idx,
836 gtt->ttm.dma_address, flags);
837
838 /* The memory type of the first page defaults to UC. Now
839 * modify the memory type to NC from the second page of
840 * the BO onward.
841 */
842 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
843 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
844
845 amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
846 ttm->num_pages - page_idx,
847 &(gtt->ttm.dma_address[page_idx]), flags);
848 } else {
849 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
850 gtt->ttm.dma_address, flags);
851 }
852}
853
854/*
855 * amdgpu_ttm_backend_bind - Bind GTT memory
856 *
857 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
858 * This handles binding GTT memory to the device address space.
859 */
860static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
861 struct ttm_tt *ttm,
862 struct ttm_resource *bo_mem)
863{
864 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
865 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
866 uint64_t flags;
867 int r;
868
869 if (!bo_mem)
870 return -EINVAL;
871
872 if (gtt->bound)
873 return 0;
874
875 if (gtt->userptr) {
876 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
877 if (r) {
878 DRM_ERROR("failed to pin userptr\n");
879 return r;
880 }
881 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
882 if (!ttm->sg) {
883 struct dma_buf_attachment *attach;
884 struct sg_table *sgt;
885
886 attach = gtt->gobj->import_attach;
887 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
888 if (IS_ERR(sgt))
889 return PTR_ERR(sgt);
890
891 ttm->sg = sgt;
892 }
893
894 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
895 ttm->num_pages);
896 }
897
898 if (!ttm->num_pages) {
899 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
900 ttm->num_pages, bo_mem, ttm);
901 }
902
903 if (bo_mem->mem_type != TTM_PL_TT ||
904 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
905 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
906 return 0;
907 }
908
909 /* compute PTE flags relevant to this BO memory */
910 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
911
912 /* bind pages into GART page tables */
913 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
914 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
915 gtt->ttm.dma_address, flags);
916 gtt->bound = true;
917 return 0;
918}
919
920/*
921 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
922 * through AGP or GART aperture.
923 *
924 * If bo is accessible through AGP aperture, then use AGP aperture
925 * to access bo; otherwise allocate logical space in GART aperture
926 * and map bo to GART aperture.
927 */
928int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
929{
930 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
931 struct ttm_operation_ctx ctx = { false, false };
932 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
933 struct ttm_placement placement;
934 struct ttm_place placements;
935 struct ttm_resource *tmp;
936 uint64_t addr, flags;
937 int r;
938
939 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
940 return 0;
941
942 addr = amdgpu_gmc_agp_addr(bo);
943 if (addr != AMDGPU_BO_INVALID_OFFSET) {
944 bo->resource->start = addr >> PAGE_SHIFT;
945 return 0;
946 }
947
948 /* allocate GART space */
949 placement.num_placement = 1;
950 placement.placement = &placements;
951 placement.num_busy_placement = 1;
952 placement.busy_placement = &placements;
953 placements.fpfn = 0;
954 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
955 placements.mem_type = TTM_PL_TT;
956 placements.flags = bo->resource->placement;
957
958 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
959 if (unlikely(r))
960 return r;
961
962 /* compute PTE flags for this buffer object */
963 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
964
965 /* Bind pages */
966 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
967 amdgpu_ttm_gart_bind(adev, bo, flags);
968 amdgpu_gart_invalidate_tlb(adev);
969 ttm_resource_free(bo, &bo->resource);
970 ttm_bo_assign_mem(bo, tmp);
971
972 return 0;
973}
974
975/*
976 * amdgpu_ttm_recover_gart - Rebind GTT pages
977 *
978 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
979 * rebind GTT pages during a GPU reset.
980 */
981void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
982{
983 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
984 uint64_t flags;
985
986 if (!tbo->ttm)
987 return;
988
989 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
990 amdgpu_ttm_gart_bind(adev, tbo, flags);
991}
992
993/*
994 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
995 *
996 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
997 * ttm_tt_destroy().
998 */
999static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1000 struct ttm_tt *ttm)
1001{
1002 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1003 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1004
1005 /* if the pages have userptr pinning then clear that first */
1006 if (gtt->userptr) {
1007 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1008 } else if (ttm->sg && gtt->gobj->import_attach) {
1009 struct dma_buf_attachment *attach;
1010
1011 attach = gtt->gobj->import_attach;
1012 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1013 ttm->sg = NULL;
1014 }
1015
1016 if (!gtt->bound)
1017 return;
1018
1019 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1020 return;
1021
1022 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1023 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1024 gtt->bound = false;
1025}
1026
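/*
 * amdgpu_ttm_backend_destroy - Free the backing ttm_tt object
 *
 * Drops the usertask reference (if any), finalizes the ttm_tt and frees
 * the amdgpu_ttm_tt structure.
 */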
1027static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1028 struct ttm_tt *ttm)
1029{
1030 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1031
1032 if (gtt->usertask)
1033 put_task_struct(gtt->usertask);
1034
1035 ttm_tt_fini(&gtt->ttm);
1036 kfree(gtt);
1037}
1038
1039/**
1040 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1041 *
1042 * @bo: The buffer object to create a GTT ttm_tt object around
1043 * @page_flags: Page flags to be added to the ttm_tt object
1044 *
1045 * Called by ttm_tt_create().
1046 */
1047static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1048 uint32_t page_flags)
1049{
1050 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1051 struct amdgpu_ttm_tt *gtt;
1052 enum ttm_caching caching;
1053
1054 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1055 if (gtt == NULL) {
1056 return NULL;
1057 }
1058 gtt->gobj = &bo->base;
1059
1060 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1061 caching = ttm_write_combined;
1062 else
1063 caching = ttm_cached;
1064
1065 /* allocate space for the uninitialized page entries */
1066 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1067 kfree(gtt);
1068 return NULL;
1069 }
1070 return &gtt->ttm;
1071}
1072
1073/*
1074 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1075 *
1076 * Map the pages of a ttm_tt object to an address space visible
1077 * to the underlying device.
1078 */
1079static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1080 struct ttm_tt *ttm,
1081 struct ttm_operation_ctx *ctx)
1082{
1083 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1084 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1085 pgoff_t i;
1086 int ret;
1087
1088 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1089 if (gtt->userptr) {
1090 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1091 if (!ttm->sg)
1092 return -ENOMEM;
1093 return 0;
1094 }
1095
1096 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1097 return 0;
1098
1099 ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1100 if (ret)
1101 return ret;
1102
1103 for (i = 0; i < ttm->num_pages; ++i)
1104 ttm->pages[i]->mapping = bdev->dev_mapping;
1105
1106 return 0;
1107}
1108
1109/*
1110 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1111 *
1112 * Unmaps pages of a ttm_tt object from the device address space and
1113 * unpopulates the page array backing it.
1114 */
1115static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1116 struct ttm_tt *ttm)
1117{
1118 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1119 struct amdgpu_device *adev;
1120 pgoff_t i;
1121
1122 amdgpu_ttm_backend_unbind(bdev, ttm);
1123
1124 if (gtt->userptr) {
1125 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1126 kfree(ttm->sg);
1127 ttm->sg = NULL;
1128 return;
1129 }
1130
1131 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1132 return;
1133
1134 for (i = 0; i < ttm->num_pages; ++i)
1135 ttm->pages[i]->mapping = NULL;
1136
1137 adev = amdgpu_ttm_adev(bdev);
1138 return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1139}
1140
1141/**
1142 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1143 * task
1144 *
1145 * @tbo: The ttm_buffer_object that contains the userptr
1146 * @user_addr: The returned value
1147 */
1148int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1149 uint64_t *user_addr)
1150{
1151 struct amdgpu_ttm_tt *gtt;
1152
1153 if (!tbo->ttm)
1154 return -EINVAL;
1155
1156 gtt = (void *)tbo->ttm;
1157 *user_addr = gtt->userptr;
1158 return 0;
1159}
1160
1161/**
1162 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1163 * task
1164 *
1165 * @bo: The ttm_buffer_object to bind this userptr to
1166 * @addr: The address in the current tasks VM space to use
1167 * @flags: Requirements of userptr object.
1168 *
1169 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1170 * to current task
1171 */
1172int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1173 uint64_t addr, uint32_t flags)
1174{
1175 struct amdgpu_ttm_tt *gtt;
1176
1177 if (!bo->ttm) {
1178 /* TODO: We want a separate TTM object type for userptrs */
1179 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1180 if (bo->ttm == NULL)
1181 return -ENOMEM;
1182 }
1183
1184 /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1185 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1186
1187 gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1188 gtt->userptr = addr;
1189 gtt->userflags = flags;
1190
1191 if (gtt->usertask)
1192 put_task_struct(gtt->usertask);
1193 gtt->usertask = current->group_leader;
1194 get_task_struct(gtt->usertask);
1195
1196 return 0;
1197}
1198
1199/*
1200 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1201 */
1202struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1203{
1204 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1205
1206 if (gtt == NULL)
1207 return NULL;
1208
1209 if (gtt->usertask == NULL)
1210 return NULL;
1211
1212 return gtt->usertask->mm;
1213}
1214
1215/*
1216 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies within an
1217 * address range for the current task.
1218 *
1219 */
1220bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1221 unsigned long end, unsigned long *userptr)
1222{
1223 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1224 unsigned long size;
1225
1226 if (gtt == NULL || !gtt->userptr)
1227 return false;
1228
1229 /* Return false if no part of the ttm_tt object lies within
1230 * the range
1231 */
1232 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1233 if (gtt->userptr > end || gtt->userptr + size <= start)
1234 return false;
1235
1236 if (userptr)
1237 *userptr = gtt->userptr;
1238 return true;
1239}
1240
1241/*
1242 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1243 */
1244bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1245{
1246 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1247
1248 if (gtt == NULL || !gtt->userptr)
1249 return false;
1250
1251 return true;
1252}
1253
1254/*
1255 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1256 */
1257bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1258{
1259 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1260
1261 if (gtt == NULL)
1262 return false;
1263
1264 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1265}
1266
1267/**
1268 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1269 *
1270 * @ttm: The ttm_tt object to compute the flags for
1271 * @mem: The memory registry backing this ttm_tt object
1272 *
1273 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1274 */
1275uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1276{
1277 uint64_t flags = 0;
1278
1279 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1280 flags |= AMDGPU_PTE_VALID;
1281
1282 if (mem && (mem->mem_type == TTM_PL_TT ||
1283 mem->mem_type == AMDGPU_PL_PREEMPT)) {
1284 flags |= AMDGPU_PTE_SYSTEM;
1285
1286 if (ttm->caching == ttm_cached)
1287 flags |= AMDGPU_PTE_SNOOPED;
1288 }
1289
1290 if (mem && mem->mem_type == TTM_PL_VRAM &&
1291 mem->bus.caching == ttm_cached)
1292 flags |= AMDGPU_PTE_SNOOPED;
1293
1294 return flags;
1295}
1296
1297/**
1298 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1299 *
1300 * @adev: amdgpu_device pointer
1301 * @ttm: The ttm_tt object to compute the flags for
1302 * @mem: The memory registry backing this ttm_tt object
1303 *
1304 * Figure out the flags to use for a VM PTE (Page Table Entry).
1305 */
1306uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1307 struct ttm_resource *mem)
1308{
1309 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1310
1311 flags |= adev->gart.gart_pte_flags;
1312 flags |= AMDGPU_PTE_READABLE;
1313
1314 if (!amdgpu_ttm_tt_is_readonly(ttm))
1315 flags |= AMDGPU_PTE_WRITEABLE;
1316
1317 return flags;
1318}
1319
1320/*
1321 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1322 * object.
1323 *
1324 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1325 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1326 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1327 * used to clean out a memory space.
1328 */
1329static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1330 const struct ttm_place *place)
1331{
1332 unsigned long num_pages = bo->resource->num_pages;
1333 struct dma_resv_iter resv_cursor;
1334 struct amdgpu_res_cursor cursor;
1335 struct dma_fence *f;
1336
1337 /* Swapout? */
1338 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1339 return true;
1340
1341 if (bo->type == ttm_bo_type_kernel &&
1342 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1343 return false;
1344
1345 /* If bo is a KFD BO, check if the bo belongs to the current process.
1346 * If true, then return false as any KFD process needs all its BOs to
1347 * be resident to run successfully
1348 */
1349 dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1350 DMA_RESV_USAGE_BOOKKEEP, f) {
1351 if (amdkfd_fence_check_mm(f, current->mm))
1352 return false;
1353 }
1354
1355 switch (bo->resource->mem_type) {
1356 case AMDGPU_PL_PREEMPT:
1357 /* Preemptible BOs don't own system resources managed by the
1358 * driver (pages, VRAM, GART space). They point to resources
1359 * owned by someone else (e.g. pageable memory in user mode
1360 * or a DMABuf). They are used in a preemptible context so we
1361 * can guarantee no deadlocks and good QoS in case of MMU
1362 * notifiers or DMABuf move notifiers from the resource owner.
1363 */
1364 return false;
1365 case TTM_PL_TT:
1366 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1367 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1368 return false;
1369 return true;
1370
1371 case TTM_PL_VRAM:
1372 /* Check each drm MM node individually */
1373 amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
1374 &cursor);
1375 while (cursor.remaining) {
1376 if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
1377 && !(place->lpfn &&
1378 place->lpfn <= PFN_DOWN(cursor.start)))
1379 return true;
1380
1381 amdgpu_res_next(&cursor, cursor.size);
1382 }
1383 return false;
1384
1385 default:
1386 break;
1387 }
1388
1389 return ttm_bo_eviction_valuable(bo, place);
1390}
1391
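/*
 * amdgpu_ttm_vram_mm_access - Access VRAM through the MM register aperture
 *
 * Reads or writes @size bytes at @pos using dword-aligned register accesses.
 * Unaligned head and tail bytes are handled with a read-modify-write of the
 * containing dword.
 */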
1392static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1393 void *buf, size_t size, bool write)
1394{
1395 while (size) {
1396 uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1397 uint64_t bytes = 4 - (pos & 0x3);
1398 uint32_t shift = (pos & 0x3) * 8;
1399 uint32_t mask = 0xffffffff << shift;
1400 uint32_t value = 0;
1401
1402 if (size < bytes) {
1403 mask &= 0xffffffff >> (bytes - size) * 8;
1404 bytes = size;
1405 }
1406
1407 if (mask != 0xffffffff) {
1408 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1409 if (write) {
1410 value &= ~mask;
1411 value |= (*(uint32_t *)buf << shift) & mask;
1412 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1413 } else {
1414 value = (value & mask) >> shift;
1415 memcpy(buf, &value, bytes);
1416 }
1417 } else {
1418 amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1419 }
1420
1421 pos += bytes;
1422 buf += bytes;
1423 size -= bytes;
1424 }
1425}
1426
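/*
 * amdgpu_ttm_access_memory_sdma - Access a VRAM BO with an SDMA copy
 *
 * Copies a single page between the BO and the preallocated sdma_access_bo
 * bounce buffer and waits for the copy fence. Only PAGE_SIZE accesses are
 * supported; on failure the caller falls back to MMIO access.
 */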
1427static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1428 unsigned long offset, void *buf, int len, int write)
1429{
1430 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1431 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1432 struct amdgpu_res_cursor src_mm;
1433 struct amdgpu_job *job;
1434 struct dma_fence *fence;
1435 uint64_t src_addr, dst_addr;
1436 unsigned int num_dw;
1437 int r, idx;
1438
1439 if (len != PAGE_SIZE)
1440 return -EINVAL;
1441
1442 if (!adev->mman.sdma_access_ptr)
1443 return -EACCES;
1444
1445 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1446 return -ENODEV;
1447
1448 if (write)
1449 memcpy(adev->mman.sdma_access_ptr, buf, len);
1450
1451 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1452 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
1453 if (r)
1454 goto out;
1455
1456 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1457 src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
1458 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1459 if (write)
1460 swap(src_addr, dst_addr);
1461
1462 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
1463
1464 amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1465 WARN_ON(job->ibs[0].length_dw > num_dw);
1466
1467 r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1468 if (r) {
1469 amdgpu_job_free(job);
1470 goto out;
1471 }
1472
1473 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1474 r = -ETIMEDOUT;
1475 dma_fence_put(fence);
1476
1477 if (!(r || write))
1478 memcpy(buf, adev->mman.sdma_access_ptr, len);
1479out:
1480 drm_dev_exit(idx);
1481 return r;
1482}
1483
1484/**
1485 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1486 *
1487 * @bo: The buffer object to read/write
1488 * @offset: Offset into buffer object
1489 * @buf: Secondary buffer to write/read from
1490 * @len: Length in bytes of access
1491 * @write: true if writing
1492 *
1493 * This is used to access VRAM that backs a buffer object via MMIO
1494 * access for debugging purposes.
1495 */
1496static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1497 unsigned long offset, void *buf, int len,
1498 int write)
1499{
1500 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1501 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1502 struct amdgpu_res_cursor cursor;
1503 int ret = 0;
1504
1505 if (bo->resource->mem_type != TTM_PL_VRAM)
1506 return -EIO;
1507
1508 if (amdgpu_device_has_timeouts_enabled(adev) &&
1509 !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1510 return len;
1511
1512 amdgpu_res_first(bo->resource, offset, len, &cursor);
1513 while (cursor.remaining) {
1514 size_t count, size = cursor.size;
1515 loff_t pos = cursor.start;
1516
1517 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1518 size -= count;
1519 if (size) {
1520 /* use MM register access for the rest of VRAM and handle unaligned addresses */
1521 pos += count;
1522 buf += count;
1523 amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1524 }
1525
1526 ret += cursor.size;
1527 buf += cursor.size;
1528 amdgpu_res_next(&cursor, cursor.size);
1529 }
1530
1531 return ret;
1532}
1533
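/*
 * amdgpu_bo_delete_mem_notify - TTM notification that a BO is being released
 *
 * Forwards to amdgpu_bo_move_notify() with a NULL new resource to signal
 * that the backing memory is going away.
 */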
1534static void
1535amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1536{
1537 amdgpu_bo_move_notify(bo, false, NULL);
1538}
1539
1540static struct ttm_device_funcs amdgpu_bo_driver = {
1541 .ttm_tt_create = &amdgpu_ttm_tt_create,
1542 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1543 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1544 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1545 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1546 .evict_flags = &amdgpu_evict_flags,
1547 .move = &amdgpu_bo_move,
1548 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1549 .release_notify = &amdgpu_bo_release_notify,
1550 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1551 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1552 .access_memory = &amdgpu_ttm_access_memory,
1553};
1554
1555/*
1556 * Firmware Reservation functions
1557 */
1558/**
1559 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1560 *
1561 * @adev: amdgpu_device pointer
1562 *
1563 * free fw reserved vram if it has been reserved.
1564 */
1565static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1566{
1567 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1568 NULL, &adev->mman.fw_vram_usage_va);
1569}
1570
1571/**
1572 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1573 *
1574 * @adev: amdgpu_device pointer
1575 *
1576 * create bo vram reservation from fw.
1577 */
1578static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1579{
1580 uint64_t vram_size = adev->gmc.visible_vram_size;
1581
1582 adev->mman.fw_vram_usage_va = NULL;
1583 adev->mman.fw_vram_usage_reserved_bo = NULL;
1584
1585 if (adev->mman.fw_vram_usage_size == 0 ||
1586 adev->mman.fw_vram_usage_size > vram_size)
1587 return 0;
1588
1589 return amdgpu_bo_create_kernel_at(adev,
1590 adev->mman.fw_vram_usage_start_offset,
1591 adev->mman.fw_vram_usage_size,
1592 AMDGPU_GEM_DOMAIN_VRAM,
1593 &adev->mman.fw_vram_usage_reserved_bo,
1594 &adev->mman.fw_vram_usage_va);
1595}
1596
1597/*
1598 * Memory training reservation functions
1599 */
1600
1601/**
1602 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1603 *
1604 * @adev: amdgpu_device pointer
1605 *
1606 * free memory training reserved vram if it has been reserved.
1607 */
1608static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1609{
1610 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1611
1612 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1613 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1614 ctx->c2p_bo = NULL;
1615
1616 return 0;
1617}
1618
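/*
 * amdgpu_ttm_training_data_block_init - Compute the memory training layout
 *
 * Fills in the c2p and p2c training data offsets at the top of VRAM and
 * the training data size used by the PSP.
 */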
1619static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1620{
1621 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1622
1623 memset(ctx, 0, sizeof(*ctx));
1624
1625 ctx->c2p_train_data_offset =
1626 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1627 ctx->p2c_train_data_offset =
1628 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1629 ctx->train_data_size =
1630 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1631
1632 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1633 ctx->train_data_size,
1634 ctx->p2c_train_data_offset,
1635 ctx->c2p_train_data_offset);
1636}
1637
1638/*
1639 * reserve TMR memory at the top of VRAM which holds
1640 * IP Discovery data and is protected by PSP.
1641 */
1642static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1643{
1644 int ret;
1645 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1646 bool mem_train_support = false;
1647
1648 if (!amdgpu_sriov_vf(adev)) {
1649 if (amdgpu_atomfirmware_mem_training_supported(adev))
1650 mem_train_support = true;
1651 else
1652 DRM_DEBUG("memory training is not supported!\n");
1653 }
1654
1655 /*
1656 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1657 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1658 *
1659 * Otherwise, fall back to the legacy approach of checking and reserving a TMR
1660 * block for IP discovery data and G6 memory training data respectively
1661 */
1662 adev->mman.discovery_tmr_size =
1663 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1664 if (!adev->mman.discovery_tmr_size)
1665 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1666
1667 if (mem_train_support) {
1668 /* reserve vram for mem train according to TMR location */
1669 amdgpu_ttm_training_data_block_init(adev);
1670 ret = amdgpu_bo_create_kernel_at(adev,
1671 ctx->c2p_train_data_offset,
1672 ctx->train_data_size,
1673 AMDGPU_GEM_DOMAIN_VRAM,
1674 &ctx->c2p_bo,
1675 NULL);
1676 if (ret) {
1677 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1678 amdgpu_ttm_training_reserve_vram_fini(adev);
1679 return ret;
1680 }
1681 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1682 }
1683
1684 ret = amdgpu_bo_create_kernel_at(adev,
1685 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1686 adev->mman.discovery_tmr_size,
1687 AMDGPU_GEM_DOMAIN_VRAM,
1688 &adev->mman.discovery_memory,
1689 NULL);
1690 if (ret) {
1691 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1692 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1693 return ret;
1694 }
1695
1696 return 0;
1697}
1698
1699/*
1700 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1701 * gtt/vram related fields.
1702 *
1703 * This initializes all of the memory space pools that the TTM layer
1704 * will need such as the GTT space (system memory mapped to the device),
1705 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1706 * can be mapped per VMID.
1707 */
1708int amdgpu_ttm_init(struct amdgpu_device *adev)
1709{
1710 uint64_t gtt_size;
1711 int r;
1712 u64 vis_vram_limit;
1713
1714 mutex_init(&adev->mman.gtt_window_lock);
1715
1716 /* No other users of the address space, so set it to 0 */
1717 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1718 adev_to_drm(adev)->anon_inode->i_mapping,
1719 adev_to_drm(adev)->vma_offset_manager,
1720 adev->need_swiotlb,
1721 dma_addressing_limited(adev->dev));
1722 if (r) {
1723 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1724 return r;
1725 }
1726 adev->mman.initialized = true;
1727
1728 /* Initialize VRAM pool with all of VRAM divided into pages */
1729 r = amdgpu_vram_mgr_init(adev);
1730 if (r) {
1731 DRM_ERROR("Failed initializing VRAM heap.\n");
1732 return r;
1733 }
1734
1735 /* Reduce size of CPU-visible VRAM if requested */
1736 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1737 if (amdgpu_vis_vram_limit > 0 &&
1738 vis_vram_limit <= adev->gmc.visible_vram_size)
1739 adev->gmc.visible_vram_size = vis_vram_limit;
1740
1741 /* Change the size here instead of the init above so only lpfn is affected */
1742 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1743#ifdef CONFIG_64BIT
1744#ifdef CONFIG_X86
1745 if (adev->gmc.xgmi.connected_to_cpu)
1746 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1747 adev->gmc.visible_vram_size);
1748
1749 else
1750#endif
1751 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1752 adev->gmc.visible_vram_size);
1753#endif
1754
1755 /*
1756 * The reserved VRAM for firmware must be pinned to the specified
1757 * place in VRAM, so reserve it early.
1758 */
1759 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1760 if (r) {
1761 return r;
1762 }
1763
1764 /*
1765 * Only NAVI10 and onward ASICs support IP discovery.
1766 * If IP discovery is enabled, a block of memory needs to be
1767 * reserved for it.
1768 */
1769 if (adev->mman.discovery_bin) {
1770 r = amdgpu_ttm_reserve_tmr(adev);
1771 if (r)
1772 return r;
1773 }
1774
1775 /* allocate memory as required for VGA
1776 * This is used for VGA emulation and pre-OS scanout buffers to
1777 * avoid display artifacts while transitioning between pre-OS
1778 * and driver. */
1779 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1780 AMDGPU_GEM_DOMAIN_VRAM,
1781 &adev->mman.stolen_vga_memory,
1782 NULL);
1783 if (r)
1784 return r;
1785 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1786 adev->mman.stolen_extended_size,
1787 AMDGPU_GEM_DOMAIN_VRAM,
1788 &adev->mman.stolen_extended_memory,
1789 NULL);
1790 if (r)
1791 return r;
1792 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1793 adev->mman.stolen_reserved_size,
1794 AMDGPU_GEM_DOMAIN_VRAM,
1795 &adev->mman.stolen_reserved_memory,
1796 NULL);
1797 if (r)
1798 return r;
1799
1800 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1801 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1802
1803 /* Compute GTT size, either based on half of system RAM
1804 * or on whatever the user passed on module init */
1805 if (amdgpu_gtt_size == -1) {
1806 struct sysinfo si;
1807
1808 si_meminfo(&si);
1809 /* Certain GL unit tests for large textures can cause problems
1810 * with the OOM killer since there is no way to link this memory
1811 * to a process. This was originally mitigated (but not necessarily
1812 * eliminated) by limiting the GTT size. The problem is this limit
1813 * is often too low for many modern games so just make the limit 1/2
1814 * of system memory which aligns with TTM. The OOM accounting needs
1815 * to be addressed, but we shouldn't prevent common 3D applications
1816 * from being usable just to potentially mitigate that corner case.
1817 */
1818 gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1819 (u64)si.totalram * si.mem_unit / 2);
1820 } else {
1821 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1822 }
1823
1824 /* Initialize GTT memory pool */
1825 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1826 if (r) {
1827 DRM_ERROR("Failed initializing GTT heap.\n");
1828 return r;
1829 }
1830 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1831 (unsigned)(gtt_size / (1024 * 1024)));
1832
1833 /* Initialize preemptible memory pool */
1834 r = amdgpu_preempt_mgr_init(adev);
1835 if (r) {
1836 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1837 return r;
1838 }
1839
1840 /* Initialize various on-chip memory pools */
1841 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1842 if (r) {
1843 DRM_ERROR("Failed initializing GDS heap.\n");
1844 return r;
1845 }
1846
1847 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1848 if (r) {
1849 DRM_ERROR("Failed initializing gws heap.\n");
1850 return r;
1851 }
1852
1853 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1854 if (r) {
1855 DRM_ERROR("Failed initializing oa heap.\n");
1856 return r;
1857 }
1858
1859 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
1860 AMDGPU_GEM_DOMAIN_GTT,
1861 &adev->mman.sdma_access_bo, NULL,
1862 &adev->mman.sdma_access_ptr))
1863 DRM_WARN("Debug VRAM access will use slowpath MM access\n");
1864
1865 return 0;
1866}
1867
1868/*
1869 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1870 */
1871void amdgpu_ttm_fini(struct amdgpu_device *adev)
1872{
1873 int idx;
1874 if (!adev->mman.initialized)
1875 return;
1876
1877 amdgpu_ttm_training_reserve_vram_fini(adev);
1878 /* return the stolen vga memory back to VRAM */
1879 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1880 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1881 /* return the IP Discovery TMR memory back to VRAM */
1882 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1883 if (adev->mman.stolen_reserved_size)
1884 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1885 NULL, NULL);
1886 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
1887 &adev->mman.sdma_access_ptr);
1888 amdgpu_ttm_fw_reserve_vram_fini(adev);
1889
1890 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1891
1892 if (adev->mman.aper_base_kaddr)
1893 iounmap(adev->mman.aper_base_kaddr);
1894 adev->mman.aper_base_kaddr = NULL;
1895
1896 drm_dev_exit(idx);
1897 }
1898
1899 amdgpu_vram_mgr_fini(adev);
1900 amdgpu_gtt_mgr_fini(adev);
1901 amdgpu_preempt_mgr_fini(adev);
1902 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1903 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1904 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1905 ttm_device_fini(&adev->mman.bdev);
1906 adev->mman.initialized = false;
1907 DRM_INFO("amdgpu: ttm finalized\n");
1908}
1909
1910/**
1911 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1912 *
1913 * @adev: amdgpu_device pointer
1914 * @enable: true when we can use buffer functions.
1915 *
1916 * Enable/disable use of buffer functions during suspend/resume. This should
1917 * only be called at bootup or when userspace isn't running.
1918 */
1919void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1920{
1921 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1922 uint64_t size;
1923 int r;
1924
1925 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1926 adev->mman.buffer_funcs_enabled == enable)
1927 return;
1928
1929 if (enable) {
1930 struct amdgpu_ring *ring;
1931 struct drm_gpu_scheduler *sched;
1932
1933 ring = adev->mman.buffer_funcs_ring;
1934 sched = &ring->sched;
1935 r = drm_sched_entity_init(&adev->mman.entity,
1936 DRM_SCHED_PRIORITY_KERNEL, &sched,
1937 1, NULL);
1938 if (r) {
1939 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1940 r);
1941 return;
1942 }
1943 } else {
1944 drm_sched_entity_destroy(&adev->mman.entity);
1945 dma_fence_put(man->move);
1946 man->move = NULL;
1947 }
1948
 1949	/* Adjust TTM's idea of the VRAM size; this also sets lpfn to the correct value */
1950 if (enable)
1951 size = adev->gmc.real_vram_size;
1952 else
1953 size = adev->gmc.visible_vram_size;
1954 man->size = size;
1955 adev->mman.buffer_funcs_enabled = enable;
1956}
1957
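/*
 * amdgpu_ttm_prepare_job - allocate and pre-sync a buffer move/fill job
 *
 * Allocates an IB large enough for @num_dw dwords from the direct or the
 * delayed IB pool, optionally marks the job as needing a VM flush (using the
 * GART page directory, or pdb0 when present) and, when @resv is given, makes
 * the job wait for all fences attached to that reservation object.
 */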
1958static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
1959 bool direct_submit,
1960 unsigned int num_dw,
1961 struct dma_resv *resv,
1962 bool vm_needs_flush,
1963 struct amdgpu_job **job)
1964{
1965 enum amdgpu_ib_pool_type pool = direct_submit ?
1966 AMDGPU_IB_POOL_DIRECT :
1967 AMDGPU_IB_POOL_DELAYED;
1968 int r;
1969
1970 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
1971 if (r)
1972 return r;
1973
1974 if (vm_needs_flush) {
1975 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1976 adev->gmc.pdb0_bo :
1977 adev->gart.bo);
1978 (*job)->vm_needs_flush = true;
1979 }
1980 if (resv) {
1981 r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
1982 AMDGPU_SYNC_ALWAYS,
1983 AMDGPU_FENCE_OWNER_UNDEFINED);
1984 if (r) {
1985 DRM_ERROR("sync failed (%d).\n", r);
1986 amdgpu_job_free(*job);
1987 return r;
1988 }
1989 }
1990 return 0;
1991}
1992
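/**
 * amdgpu_copy_buffer - schedule a copy between two GPU addresses
 *
 * @ring: ring to submit the copy to, normally adev->mman.buffer_funcs_ring
 * @src_offset: GPU address to copy from
 * @dst_offset: GPU address to copy to
 * @byte_count: number of bytes to copy
 * @resv: reservation object to wait on before starting the copy, may be NULL
 * @fence: returned fence signalling completion of the copy
 * @direct_submit: submit the job directly to the ring instead of the entity
 * @vm_needs_flush: flush the VM before executing the copy
 * @tmz: treat the copy as TMZ (encrypted) protected
 *
 * The copy is split into chunks of at most copy_max_bytes as advertised by
 * the ASIC's buffer functions (typically the SDMA engine).
 */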
1993int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1994 uint64_t dst_offset, uint32_t byte_count,
1995 struct dma_resv *resv,
1996 struct dma_fence **fence, bool direct_submit,
1997 bool vm_needs_flush, bool tmz)
1998{
1999 struct amdgpu_device *adev = ring->adev;
 2000	unsigned int num_loops, num_dw;
2001 struct amdgpu_job *job;
2002 uint32_t max_bytes;
 2003	unsigned int i;
2004 int r;
2005
2006 if (!direct_submit && !ring->sched.ready) {
2007 DRM_ERROR("Trying to move memory with ring turned off.\n");
2008 return -EINVAL;
2009 }
2010
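	/*
	 * Split the copy into chunks the copy engine can handle and reserve
	 * enough dwords for one copy packet per chunk, aligned to 8 dwords
	 * so the IB can be padded later.
	 */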
2011 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2012 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2013 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2014 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2015 resv, vm_needs_flush, &job);
2016 if (r)
2017 return r;
2018
2019 for (i = 0; i < num_loops; i++) {
2020 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2021
2022 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2023 dst_offset, cur_size_in_bytes, tmz);
2024
2025 src_offset += cur_size_in_bytes;
2026 dst_offset += cur_size_in_bytes;
2027 byte_count -= cur_size_in_bytes;
2028 }
2029
2030 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2031 WARN_ON(job->ibs[0].length_dw > num_dw);
2032 if (direct_submit)
2033 r = amdgpu_job_submit_direct(job, ring, fence);
2034 else
2035 r = amdgpu_job_submit(job, &adev->mman.entity,
2036 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2037 if (r)
2038 goto error_free;
2039
2040 return r;
2041
2042error_free:
2043 amdgpu_job_free(job);
2044 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2045 return r;
2046}
2047
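/*
 * amdgpu_ttm_fill_mem - fill a GPU address range with a 32-bit pattern
 *
 * Splits the fill into chunks of at most fill_max_bytes and submits the
 * resulting job through the TTM buffer move entity. @dst_addr is expected to
 * already be GPU accessible (e.g. mapped through a GTT window by the caller).
 */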
2048static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2049 uint64_t dst_addr, uint32_t byte_count,
2050 struct dma_resv *resv,
2051 struct dma_fence **fence,
2052 bool vm_needs_flush)
2053{
2054 struct amdgpu_device *adev = ring->adev;
2055 unsigned int num_loops, num_dw;
2056 struct amdgpu_job *job;
2057 uint32_t max_bytes;
2058 unsigned int i;
2059 int r;
2060
2061 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2062 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2063 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2064 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2065 &job);
2066 if (r)
2067 return r;
2068
2069 for (i = 0; i < num_loops; i++) {
2070 uint32_t cur_size = min(byte_count, max_bytes);
2071
2072 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2073 cur_size);
2074
2075 dst_addr += cur_size;
2076 byte_count -= cur_size;
2077 }
2078
2079 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2080 WARN_ON(job->ibs[0].length_dw > num_dw);
2081 r = amdgpu_job_submit(job, &adev->mman.entity,
2082 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2083 if (r)
2084 goto error_free;
2085
2086 return 0;
2087
2088error_free:
2089 amdgpu_job_free(job);
2090 return r;
2091}
2092
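/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit value
 *
 * @bo: the buffer object to fill
 * @src_data: the 32-bit value to fill the buffer with
 * @resv: reservation object to wait on before starting the fill, may be NULL
 * @f: returned fence signalling completion of the last fill, may be NULL
 *
 * Walks the backing resource with a cursor, maps each piece through a GTT
 * window where needed and fills it in chunks of at most 256MiB.
 */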
2093int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2094 uint32_t src_data,
2095 struct dma_resv *resv,
2096 struct dma_fence **f)
2097{
2098 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2099 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2100 struct dma_fence *fence = NULL;
2101 struct amdgpu_res_cursor dst;
2102 int r;
2103
2104 if (!adev->mman.buffer_funcs_enabled) {
2105 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2106 return -EINVAL;
2107 }
2108
2109 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2110
2111 mutex_lock(&adev->mman.gtt_window_lock);
2112 while (dst.remaining) {
2113 struct dma_fence *next;
2114 uint64_t cur_size, to;
2115
2116 /* Never fill more than 256MiB at once to avoid timeouts */
2117 cur_size = min(dst.size, 256ULL << 20);
2118
2119 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2120 1, ring, false, &cur_size, &to);
2121 if (r)
2122 goto error;
2123
2124 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2125 &next, true);
2126 if (r)
2127 goto error;
2128
2129 dma_fence_put(fence);
2130 fence = next;
2131
2132 amdgpu_res_next(&dst, cur_size);
2133 }
2134error:
2135 mutex_unlock(&adev->mman.gtt_window_lock);
2136 if (f)
2137 *f = dma_fence_get(fence);
2138 dma_fence_put(fence);
2139 return r;
2140}
2141
2142/**
2143 * amdgpu_ttm_evict_resources - evict memory buffers
2144 * @adev: amdgpu device object
2145 * @mem_type: evicted BO's memory type
2146 *
2147 * Evicts all @mem_type buffers on the lru list of the memory type.
2148 *
2149 * Returns:
2150 * 0 for success or a negative error code on failure.
2151 */
2152int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2153{
2154 struct ttm_resource_manager *man;
2155
2156 switch (mem_type) {
2157 case TTM_PL_VRAM:
2158 case TTM_PL_TT:
2159 case AMDGPU_PL_GWS:
2160 case AMDGPU_PL_GDS:
2161 case AMDGPU_PL_OA:
2162 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2163 break;
2164 default:
2165 DRM_ERROR("Trying to evict invalid memory type\n");
2166 return -EINVAL;
2167 }
2168
2169 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2170}
2171
2172#if defined(CONFIG_DEBUG_FS)
2173
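/*
 * amdgpu_ttm_page_pool_show - dump the state of this device's TTM page pool
 */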
2174static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2175{
2176 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2177
2178 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2179}
2180
2181DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2182
2183/*
2184 * amdgpu_ttm_vram_read - Linear read access to VRAM
2185 *
2186 * Accesses VRAM via MMIO for debugging purposes.
2187 */
2188static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2189 size_t size, loff_t *pos)
2190{
2191 struct amdgpu_device *adev = file_inode(f)->i_private;
2192 ssize_t result = 0;
2193
2194 if (size & 0x3 || *pos & 0x3)
2195 return -EINVAL;
2196
2197 if (*pos >= adev->gmc.mc_vram_size)
2198 return -ENXIO;
2199
2200 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
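	/*
	 * Bounce the data through a small stack buffer, at most
	 * AMDGPU_TTM_VRAM_MAX_DW_READ dwords per iteration.
	 */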
2201 while (size) {
2202 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2203 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2204
2205 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2206 if (copy_to_user(buf, value, bytes))
2207 return -EFAULT;
2208
2209 result += bytes;
2210 buf += bytes;
2211 *pos += bytes;
2212 size -= bytes;
2213 }
2214
2215 return result;
2216}
2217
2218/*
2219 * amdgpu_ttm_vram_write - Linear write access to VRAM
2220 *
2221 * Accesses VRAM via MMIO for debugging purposes.
2222 */
2223static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2224 size_t size, loff_t *pos)
2225{
2226 struct amdgpu_device *adev = file_inode(f)->i_private;
2227 ssize_t result = 0;
2228 int r;
2229
2230 if (size & 0x3 || *pos & 0x3)
2231 return -EINVAL;
2232
2233 if (*pos >= adev->gmc.mc_vram_size)
2234 return -ENXIO;
2235
2236 while (size) {
2237 uint32_t value;
2238
2239 if (*pos >= adev->gmc.mc_vram_size)
2240 return result;
2241
2242 r = get_user(value, (uint32_t *)buf);
2243 if (r)
2244 return r;
2245
2246 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2247
2248 result += 4;
2249 buf += 4;
2250 *pos += 4;
2251 size -= 4;
2252 }
2253
2254 return result;
2255}
2256
2257static const struct file_operations amdgpu_ttm_vram_fops = {
2258 .owner = THIS_MODULE,
2259 .read = amdgpu_ttm_vram_read,
2260 .write = amdgpu_ttm_vram_write,
2261 .llseek = default_llseek,
2262};
2263
2264/*
2265 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2266 *
 2267 * This function is used to read memory that has been mapped to the GPU.
 2268 * The addresses handed in are bus addresses (e.g., what you'd put in an
 2269 * IB or ring buffer), not CPU physical addresses.
2270 */
2271static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2272 size_t size, loff_t *pos)
2273{
2274 struct amdgpu_device *adev = file_inode(f)->i_private;
2275 struct iommu_domain *dom;
2276 ssize_t result = 0;
2277 int r;
2278
2279 /* retrieve the IOMMU domain if any for this device */
2280 dom = iommu_get_domain_for_dev(adev->dev);
2281
2282 while (size) {
2283 phys_addr_t addr = *pos & PAGE_MASK;
2284 loff_t off = *pos & ~PAGE_MASK;
2285 size_t bytes = PAGE_SIZE - off;
2286 unsigned long pfn;
2287 struct page *p;
2288 void *ptr;
2289
2290 bytes = bytes < size ? bytes : size;
2291
2292 /* Translate the bus address to a physical address. If
2293 * the domain is NULL it means there is no IOMMU active
2294 * and the address translation is the identity
2295 */
2296 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2297
2298 pfn = addr >> PAGE_SHIFT;
2299 if (!pfn_valid(pfn))
2300 return -EPERM;
2301
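		/*
		 * Only pages that back a TTM buffer object of this device may
		 * be accessed; everything else is rejected.
		 */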
2302 p = pfn_to_page(pfn);
2303 if (p->mapping != adev->mman.bdev.dev_mapping)
2304 return -EPERM;
2305
2306 ptr = kmap(p);
2307 r = copy_to_user(buf, ptr + off, bytes);
2308 kunmap(p);
2309 if (r)
2310 return -EFAULT;
2311
2312 size -= bytes;
2313 *pos += bytes;
2314 result += bytes;
2315 }
2316
2317 return result;
2318}
2319
2320/*
2321 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2322 *
 2323 * This function is used to write memory that has been mapped to the GPU.
 2324 * The addresses handed in are bus addresses (e.g., what you'd put in an
 2325 * IB or ring buffer), not CPU physical addresses.
2326 */
2327static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2328 size_t size, loff_t *pos)
2329{
2330 struct amdgpu_device *adev = file_inode(f)->i_private;
2331 struct iommu_domain *dom;
2332 ssize_t result = 0;
2333 int r;
2334
2335 dom = iommu_get_domain_for_dev(adev->dev);
2336
2337 while (size) {
2338 phys_addr_t addr = *pos & PAGE_MASK;
2339 loff_t off = *pos & ~PAGE_MASK;
2340 size_t bytes = PAGE_SIZE - off;
2341 unsigned long pfn;
2342 struct page *p;
2343 void *ptr;
2344
2345 bytes = bytes < size ? bytes : size;
2346
2347 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2348
2349 pfn = addr >> PAGE_SHIFT;
2350 if (!pfn_valid(pfn))
2351 return -EPERM;
2352
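		/* Same page ownership check as in amdgpu_iomem_read() */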
2353 p = pfn_to_page(pfn);
2354 if (p->mapping != adev->mman.bdev.dev_mapping)
2355 return -EPERM;
2356
2357 ptr = kmap(p);
2358 r = copy_from_user(ptr + off, buf, bytes);
2359 kunmap(p);
2360 if (r)
2361 return -EFAULT;
2362
2363 size -= bytes;
2364 *pos += bytes;
2365 result += bytes;
2366 }
2367
2368 return result;
2369}
2370
2371static const struct file_operations amdgpu_ttm_iomem_fops = {
2372 .owner = THIS_MODULE,
2373 .read = amdgpu_iomem_read,
2374 .write = amdgpu_iomem_write,
 2375	.llseek = default_llseek,
2376};
2377
2378#endif
2379
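/*
 * amdgpu_ttm_debugfs_init - register the TTM related debugfs files
 *
 * Creates the amdgpu_vram and amdgpu_iomem files for raw memory access, the
 * ttm_page_pool dump and one resource manager dump per memory domain, all
 * under the primary DRM minor's debugfs directory. Does nothing when
 * CONFIG_DEBUG_FS is disabled.
 */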
2380void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2381{
2382#if defined(CONFIG_DEBUG_FS)
2383 struct drm_minor *minor = adev_to_drm(adev)->primary;
2384 struct dentry *root = minor->debugfs_root;
2385
2386 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2387 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2388 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2389 &amdgpu_ttm_iomem_fops);
2390 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2391 &amdgpu_ttm_page_pool_fops);
2392 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2393 TTM_PL_VRAM),
2394 root, "amdgpu_vram_mm");
2395 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2396 TTM_PL_TT),
2397 root, "amdgpu_gtt_mm");
2398 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2399 AMDGPU_PL_GDS),
2400 root, "amdgpu_gds_mm");
2401 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2402 AMDGPU_PL_GWS),
2403 root, "amdgpu_gws_mm");
2404 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2405 AMDGPU_PL_OA),
2406 root, "amdgpu_oa_mm");
2407
2408#endif
2409}