1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32
33#include <linux/dma-mapping.h>
34#include <linux/iommu.h>
35#include <linux/pagemap.h>
36#include <linux/sched/task.h>
37#include <linux/sched/mm.h>
38#include <linux/seq_file.h>
39#include <linux/slab.h>
40#include <linux/swap.h>
41#include <linux/swiotlb.h>
42#include <linux/dma-buf.h>
43#include <linux/sizes.h>
44#include <linux/module.h>
45
46#include <drm/drm_drv.h>
47#include <drm/ttm/ttm_bo_api.h>
48#include <drm/ttm/ttm_bo_driver.h>
49#include <drm/ttm/ttm_placement.h>
50#include <drm/ttm/ttm_range_manager.h>
51
52#include <drm/amdgpu_drm.h>
54
55#include "amdgpu.h"
56#include "amdgpu_object.h"
57#include "amdgpu_trace.h"
58#include "amdgpu_amdkfd.h"
59#include "amdgpu_sdma.h"
60#include "amdgpu_ras.h"
61#include "amdgpu_atomfirmware.h"
62#include "amdgpu_res_cursor.h"
63#include "bif/bif_4_1_d.h"
64
65MODULE_IMPORT_NS(DMA_BUF);
66
67#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
68
69static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
70 struct ttm_tt *ttm,
71 struct ttm_resource *bo_mem);
72static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
73 struct ttm_tt *ttm);
74
75static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
76 unsigned int type,
77 uint64_t size_in_page)
78{
79 return ttm_range_man_init(&adev->mman.bdev, type,
80 false, size_in_page);
81}
82
83/**
84 * amdgpu_evict_flags - Compute placement flags
85 *
86 * @bo: The buffer object to evict
87 * @placement: Possible destination(s) for evicted BO
88 *
89 * Fill in placement data when ttm_bo_evict() is called
90 */
91static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
92 struct ttm_placement *placement)
93{
94 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
95 struct amdgpu_bo *abo;
96 static const struct ttm_place placements = {
97 .fpfn = 0,
98 .lpfn = 0,
99 .mem_type = TTM_PL_SYSTEM,
100 .flags = 0
101 };
102
103 /* Don't handle scatter gather BOs */
104 if (bo->type == ttm_bo_type_sg) {
105 placement->num_placement = 0;
106 placement->num_busy_placement = 0;
107 return;
108 }
109
110 /* Object isn't an AMDGPU object so ignore */
111 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
112 placement->placement = &placements;
113 placement->busy_placement = &placements;
114 placement->num_placement = 1;
115 placement->num_busy_placement = 1;
116 return;
117 }
118
119 abo = ttm_to_amdgpu_bo(bo);
120 if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
121 placement->num_placement = 0;
122 placement->num_busy_placement = 0;
123 return;
124 }
125
126 switch (bo->resource->mem_type) {
127 case AMDGPU_PL_GDS:
128 case AMDGPU_PL_GWS:
129 case AMDGPU_PL_OA:
130 placement->num_placement = 0;
131 placement->num_busy_placement = 0;
132 return;
133
134 case TTM_PL_VRAM:
135 if (!adev->mman.buffer_funcs_enabled) {
136 /* Move to system memory */
137 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
138 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
139 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
140 amdgpu_bo_in_cpu_visible_vram(abo)) {
141
142 /* Try evicting to the CPU inaccessible part of VRAM
143 * first, but only set GTT as busy placement, so this
144 * BO will be evicted to GTT rather than causing other
145 * BOs to be evicted from VRAM
146 */
147 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
148 AMDGPU_GEM_DOMAIN_GTT |
149 AMDGPU_GEM_DOMAIN_CPU);
150 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
151 abo->placements[0].lpfn = 0;
152 abo->placement.busy_placement = &abo->placements[1];
153 abo->placement.num_busy_placement = 1;
154 } else {
155 /* Move to GTT memory */
156 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
157 AMDGPU_GEM_DOMAIN_CPU);
158 }
159 break;
160 case TTM_PL_TT:
161 case AMDGPU_PL_PREEMPT:
162 default:
163 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
164 break;
165 }
166 *placement = abo->placement;
167}
168
169/**
170 * amdgpu_ttm_map_buffer - Map memory into the GART windows
171 * @bo: buffer object to map
172 * @mem: memory object to map
173 * @mm_cur: range to map
174 * @window: which GART window to use
175 * @ring: DMA ring to use for the copy
176 * @tmz: if we should set up a TMZ enabled mapping
177 * @size: in: number of bytes to map, out: number of bytes mapped
178 * @addr: resulting address inside the MC address space
179 *
180 * Setup one of the GART windows to access a specific piece of memory or return
181 * the physical address for local memory.
182 */
183static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
184 struct ttm_resource *mem,
185 struct amdgpu_res_cursor *mm_cur,
186 unsigned window, struct amdgpu_ring *ring,
187 bool tmz, uint64_t *size, uint64_t *addr)
188{
189 struct amdgpu_device *adev = ring->adev;
190 unsigned offset, num_pages, num_dw, num_bytes;
191 uint64_t src_addr, dst_addr;
192 struct dma_fence *fence;
193 struct amdgpu_job *job;
194 void *cpu_addr;
195 uint64_t flags;
196 unsigned int i;
197 int r;
198
199 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
200 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
201
202 if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
203 return -EINVAL;
204
205 /* Map only what can't be accessed directly */
206 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
207 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
208 mm_cur->start;
209 return 0;
210 }
211
212
213 /*
214 * If start begins at an offset inside the page, then adjust the size
215 * and addr accordingly
216 */
217 offset = mm_cur->start & ~PAGE_MASK;
218
219 num_pages = PFN_UP(*size + offset);
220 num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
221
222 *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
223
224 *addr = adev->gmc.gart_start;
225 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
226 AMDGPU_GPU_PAGE_SIZE;
227 *addr += offset;
228
229 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
230 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
231
232 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
233 AMDGPU_IB_POOL_DELAYED, &job);
234 if (r)
235 return r;
236
237 src_addr = num_dw * 4;
238 src_addr += job->ibs[0].gpu_addr;
239
240 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
241 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
242 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
243 dst_addr, num_bytes, false);
244
245 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
246 WARN_ON(job->ibs[0].length_dw > num_dw);
247
248 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
249 if (tmz)
250 flags |= AMDGPU_PTE_TMZ;
251
252 cpu_addr = &job->ibs[0].ptr[num_dw];
253
254 if (mem->mem_type == TTM_PL_TT) {
255 dma_addr_t *dma_addr;
256
257 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
258 amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
259 } else {
260 dma_addr_t dma_address;
261
262 dma_address = mm_cur->start;
263 dma_address += adev->vm_manager.vram_base_offset;
264
265 for (i = 0; i < num_pages; ++i) {
266 amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
267 flags, cpu_addr);
268 dma_address += PAGE_SIZE;
269 }
270 }
271
272 r = amdgpu_job_submit(job, &adev->mman.entity,
273 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
274 if (r)
275 goto error_free;
276
277 dma_fence_put(fence);
278
279 return r;
280
281error_free:
282 amdgpu_job_free(job);
283 return r;
284}
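/*
 * Illustrative sketch (not part of the driver): how the GART window address
 * above is composed.  Assuming window 1 and an intra-page offset of 0x100,
 * the resulting MC address is roughly
 *
 *	addr = adev->gmc.gart_start
 *	     + 1 * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE
 *	     + 0x100;
 *
 * The exact values depend on the ASIC's GART configuration.
 */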
285
286/**
287 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
288 * @adev: amdgpu device
289 * @src: buffer/address where to read from
290 * @dst: buffer/address where to write to
291 * @size: number of bytes to copy
292 * @tmz: if a secure copy should be used
293 * @resv: resv object to sync to
294 * @f: Returns the last fence if multiple jobs are submitted.
295 *
296 * The function copies @size bytes from {src->mem + src->offset} to
297 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
298 * move and different for a BO to BO copy.
299 *
300 */
301int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
302 const struct amdgpu_copy_mem *src,
303 const struct amdgpu_copy_mem *dst,
304 uint64_t size, bool tmz,
305 struct dma_resv *resv,
306 struct dma_fence **f)
307{
308 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
309 struct amdgpu_res_cursor src_mm, dst_mm;
310 struct dma_fence *fence = NULL;
311 int r = 0;
312
313 if (!adev->mman.buffer_funcs_enabled) {
314 DRM_ERROR("Trying to move memory with ring turned off.\n");
315 return -EINVAL;
316 }
317
318 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
319 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
320
321 mutex_lock(&adev->mman.gtt_window_lock);
322 while (src_mm.remaining) {
323 uint64_t from, to, cur_size;
324 struct dma_fence *next;
325
326 /* Never copy more than 256MiB at once to avoid a timeout */
327 cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
328
329 /* Map src to window 0 and dst to window 1. */
330 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
331 0, ring, tmz, &cur_size, &from);
332 if (r)
333 goto error;
334
335 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
336 1, ring, tmz, &cur_size, &to);
337 if (r)
338 goto error;
339
340 r = amdgpu_copy_buffer(ring, from, to, cur_size,
341 resv, &next, false, true, tmz);
342 if (r)
343 goto error;
344
345 dma_fence_put(fence);
346 fence = next;
347
348 amdgpu_res_next(&src_mm, cur_size);
349 amdgpu_res_next(&dst_mm, cur_size);
350 }
351error:
352 mutex_unlock(&adev->mman.gtt_window_lock);
353 if (f)
354 *f = dma_fence_get(fence);
355 dma_fence_put(fence);
356 return r;
357}
358
359/*
360 * amdgpu_move_blit - Copy an entire buffer to another buffer
361 *
362 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
363 * help move buffers to and from VRAM.
364 */
365static int amdgpu_move_blit(struct ttm_buffer_object *bo,
366 bool evict,
367 struct ttm_resource *new_mem,
368 struct ttm_resource *old_mem)
369{
370 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
371 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
372 struct amdgpu_copy_mem src, dst;
373 struct dma_fence *fence = NULL;
374 int r;
375
376 src.bo = bo;
377 dst.bo = bo;
378 src.mem = old_mem;
379 dst.mem = new_mem;
380 src.offset = 0;
381 dst.offset = 0;
382
383 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
384 new_mem->num_pages << PAGE_SHIFT,
385 amdgpu_bo_encrypted(abo),
386 bo->base.resv, &fence);
387 if (r)
388 goto error;
389
390 /* clear the space being freed */
391 if (old_mem->mem_type == TTM_PL_VRAM &&
392 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
393 struct dma_fence *wipe_fence = NULL;
394
395 r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
396 if (r) {
397 goto error;
398 } else if (wipe_fence) {
399 dma_fence_put(fence);
400 fence = wipe_fence;
401 }
402 }
403
404 /* Always block for VM page tables before committing the new location */
405 if (bo->type == ttm_bo_type_kernel)
406 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
407 else
408 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
409 dma_fence_put(fence);
410 return r;
411
412error:
413 if (fence)
414 dma_fence_wait(fence, false);
415 dma_fence_put(fence);
416 return r;
417}
418
419/*
420 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
421 *
422 * Called by amdgpu_bo_move()
423 */
424static bool amdgpu_mem_visible(struct amdgpu_device *adev,
425 struct ttm_resource *mem)
426{
427 uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
428 struct amdgpu_res_cursor cursor;
429
430 if (mem->mem_type == TTM_PL_SYSTEM ||
431 mem->mem_type == TTM_PL_TT)
432 return true;
433 if (mem->mem_type != TTM_PL_VRAM)
434 return false;
435
436 amdgpu_res_first(mem, 0, mem_size, &cursor);
437
438 /* ttm_resource_ioremap only supports contiguous memory */
439 if (cursor.size != mem_size)
440 return false;
441
442 return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
443}
444
445/*
446 * amdgpu_bo_move - Move a buffer object to a new memory location
447 *
448 * Called by ttm_bo_handle_move_mem()
449 */
450static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
451 struct ttm_operation_ctx *ctx,
452 struct ttm_resource *new_mem,
453 struct ttm_place *hop)
454{
455 struct amdgpu_device *adev;
456 struct amdgpu_bo *abo;
457 struct ttm_resource *old_mem = bo->resource;
458 int r;
459
460 if (new_mem->mem_type == TTM_PL_TT ||
461 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
462 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
463 if (r)
464 return r;
465 }
466
467 /* Can't move a pinned BO */
468 abo = ttm_to_amdgpu_bo(bo);
469 if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
470 return -EINVAL;
471
472 adev = amdgpu_ttm_adev(bo->bdev);
473
474 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
475 ttm_bo_move_null(bo, new_mem);
476 goto out;
477 }
478 if (old_mem->mem_type == TTM_PL_SYSTEM &&
479 (new_mem->mem_type == TTM_PL_TT ||
480 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
481 ttm_bo_move_null(bo, new_mem);
482 goto out;
483 }
484 if ((old_mem->mem_type == TTM_PL_TT ||
485 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
486 new_mem->mem_type == TTM_PL_SYSTEM) {
487 r = ttm_bo_wait_ctx(bo, ctx);
488 if (r)
489 return r;
490
491 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
492 ttm_resource_free(bo, &bo->resource);
493 ttm_bo_assign_mem(bo, new_mem);
494 goto out;
495 }
496
497 if (old_mem->mem_type == AMDGPU_PL_GDS ||
498 old_mem->mem_type == AMDGPU_PL_GWS ||
499 old_mem->mem_type == AMDGPU_PL_OA ||
500 new_mem->mem_type == AMDGPU_PL_GDS ||
501 new_mem->mem_type == AMDGPU_PL_GWS ||
502 new_mem->mem_type == AMDGPU_PL_OA) {
503 /* Nothing to save here */
504 ttm_bo_move_null(bo, new_mem);
505 goto out;
506 }
507
508 if (bo->type == ttm_bo_type_device &&
509 new_mem->mem_type == TTM_PL_VRAM &&
510 old_mem->mem_type != TTM_PL_VRAM) {
511 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
512 * accesses the BO after it's moved.
513 */
514 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
515 }
516
517 if (adev->mman.buffer_funcs_enabled) {
518 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
519 new_mem->mem_type == TTM_PL_VRAM) ||
520 (old_mem->mem_type == TTM_PL_VRAM &&
521 new_mem->mem_type == TTM_PL_SYSTEM))) {
522 hop->fpfn = 0;
523 hop->lpfn = 0;
524 hop->mem_type = TTM_PL_TT;
525 hop->flags = TTM_PL_FLAG_TEMPORARY;
526 return -EMULTIHOP;
527 }
528
529 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
530 } else {
531 r = -ENODEV;
532 }
533
534 if (r) {
535 /* Check that all memory is CPU accessible */
536 if (!amdgpu_mem_visible(adev, old_mem) ||
537 !amdgpu_mem_visible(adev, new_mem)) {
538 pr_err("Move buffer fallback to memcpy unavailable\n");
539 return r;
540 }
541
542 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
543 if (r)
544 return r;
545 }
546
547out:
548 /* update statistics */
549 atomic64_add(bo->base.size, &adev->num_bytes_moved);
550 amdgpu_bo_move_notify(bo, evict, new_mem);
551 return 0;
552}
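/*
 * Note on the -EMULTIHOP path above (descriptive only): for a SYSTEM <-> VRAM
 * move with buffer_funcs enabled, TTM is asked to bounce through a temporary
 * GTT placement, so the effective sequence is roughly SYSTEM -> TT (bound in
 * amdgpu_ttm_backend_bind()) followed by TT -> VRAM via amdgpu_move_blit(),
 * or the reverse when evicting.
 */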
553
554/*
555 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
556 *
557 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
558 */
559static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
560 struct ttm_resource *mem)
561{
562 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
563 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
564
565 switch (mem->mem_type) {
566 case TTM_PL_SYSTEM:
567 /* system memory */
568 return 0;
569 case TTM_PL_TT:
570 case AMDGPU_PL_PREEMPT:
571 break;
572 case TTM_PL_VRAM:
573 mem->bus.offset = mem->start << PAGE_SHIFT;
574 /* check if it's visible */
575 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
576 return -EINVAL;
577
578 if (adev->mman.aper_base_kaddr &&
579 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
580 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
581 mem->bus.offset;
582
583 mem->bus.offset += adev->gmc.aper_base;
584 mem->bus.is_iomem = true;
585 break;
586 default:
587 return -EINVAL;
588 }
589 return 0;
590}
591
592static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
593 unsigned long page_offset)
594{
595 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
596 struct amdgpu_res_cursor cursor;
597
598 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
599 &cursor);
600 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
601}
602
603/**
604 * amdgpu_ttm_domain_start - Returns GPU start address
605 * @adev: amdgpu device object
606 * @type: type of the memory
607 *
608 * Returns:
609 * GPU start address of a memory domain
610 */
611
612uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
613{
614 switch (type) {
615 case TTM_PL_TT:
616 return adev->gmc.gart_start;
617 case TTM_PL_VRAM:
618 return adev->gmc.vram_start;
619 }
620
621 return 0;
622}
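/*
 * Example (illustrative only): callers such as amdgpu_ttm_map_buffer() above
 * turn a contiguous resource with a valid GART/VRAM offset into an MC address
 * roughly as
 *
 *	u64 mc_addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
 *		      ((u64)mem->start << PAGE_SHIFT);
 *
 * where "mem" stands for a hypothetical struct ttm_resource.
 */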
623
624/*
625 * TTM backend functions.
626 */
627struct amdgpu_ttm_tt {
628 struct ttm_tt ttm;
629 struct drm_gem_object *gobj;
630 u64 offset;
631 uint64_t userptr;
632 struct task_struct *usertask;
633 uint32_t userflags;
634 bool bound;
635#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
636 struct hmm_range *range;
637#endif
638};
639
640#ifdef CONFIG_DRM_AMDGPU_USERPTR
641/*
642 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
643 * memory and start HMM tracking CPU page table update
644 *
645 * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
646 * only once afterwards to stop HMM tracking
647 */
648int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
649{
650 struct ttm_tt *ttm = bo->tbo.ttm;
651 struct amdgpu_ttm_tt *gtt = (void *)ttm;
652 unsigned long start = gtt->userptr;
653 struct vm_area_struct *vma;
654 struct mm_struct *mm;
655 bool readonly;
656 int r = 0;
657
658 mm = bo->notifier.mm;
659 if (unlikely(!mm)) {
660 DRM_DEBUG_DRIVER("BO is not registered?\n");
661 return -EFAULT;
662 }
663
664 /* Another get_user_pages is running at the same time?? */
665 if (WARN_ON(gtt->range))
666 return -EFAULT;
667
668 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
669 return -ESRCH;
670
671 mmap_read_lock(mm);
672 vma = vma_lookup(mm, start);
673 if (unlikely(!vma)) {
674 r = -EFAULT;
675 goto out_unlock;
676 }
677 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
678 vma->vm_file)) {
679 r = -EPERM;
680 goto out_unlock;
681 }
682
683 readonly = amdgpu_ttm_tt_is_readonly(ttm);
684 r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
685 ttm->num_pages, &gtt->range, readonly,
686 true, NULL);
687out_unlock:
688 mmap_read_unlock(mm);
689 if (r)
690 pr_debug("failed %d to get user pages 0x%lx\n", r, start);
691
692 mmput(mm);
693
694 return r;
695}
696
697/*
698 * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table changes
699 * Check if the pages backing this ttm range have been invalidated
700 *
701 * Returns: true if pages are still valid
702 */
703bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
704{
705 struct amdgpu_ttm_tt *gtt = (void *)ttm;
706 bool r = false;
707
708 if (!gtt || !gtt->userptr)
709 return false;
710
711 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
712 gtt->userptr, ttm->num_pages);
713
714 WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
715 "No user pages to check\n");
716
717 if (gtt->range) {
718 /*
719 * FIXME: Must always hold notifier_lock for this, and must
720 * not ignore the return code.
721 */
722 r = amdgpu_hmm_range_get_pages_done(gtt->range);
723 gtt->range = NULL;
724 }
725
726 return !r;
727}
728#endif
729
730/*
731 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
732 *
733 * Called by amdgpu_cs_list_validate(). This creates the page list
734 * that backs user memory and will ultimately be mapped into the device
735 * address space.
736 */
737void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
738{
739 unsigned long i;
740
741 for (i = 0; i < ttm->num_pages; ++i)
742 ttm->pages[i] = pages ? pages[i] : NULL;
743}
744
745/*
746 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
747 *
748 * Called by amdgpu_ttm_backend_bind()
749 **/
750static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
751 struct ttm_tt *ttm)
752{
753 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
754 struct amdgpu_ttm_tt *gtt = (void *)ttm;
755 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
756 enum dma_data_direction direction = write ?
757 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
758 int r;
759
760 /* Allocate an SG array and squash pages into it */
761 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
762 (u64)ttm->num_pages << PAGE_SHIFT,
763 GFP_KERNEL);
764 if (r)
765 goto release_sg;
766
767 /* Map SG to device */
768 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
769 if (r)
770 goto release_sg;
771
772 /* convert SG to linear array of pages and dma addresses */
773 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
774 ttm->num_pages);
775
776 return 0;
777
778release_sg:
779 kfree(ttm->sg);
780 ttm->sg = NULL;
781 return r;
782}
783
784/*
785 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
786 */
787static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
788 struct ttm_tt *ttm)
789{
790 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
791 struct amdgpu_ttm_tt *gtt = (void *)ttm;
792 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
793 enum dma_data_direction direction = write ?
794 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
795
796 /* double check that we don't free the table twice */
797 if (!ttm->sg || !ttm->sg->sgl)
798 return;
799
800 /* unmap the pages mapped to the device */
801 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
802 sg_free_table(ttm->sg);
803
804#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
805 if (gtt->range) {
806 unsigned long i;
807
808 for (i = 0; i < ttm->num_pages; i++) {
809 if (ttm->pages[i] !=
810 hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
811 break;
812 }
813
814 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
815 }
816#endif
817}
818
819static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
820 struct ttm_buffer_object *tbo,
821 uint64_t flags)
822{
823 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
824 struct ttm_tt *ttm = tbo->ttm;
825 struct amdgpu_ttm_tt *gtt = (void *)ttm;
826
827 if (amdgpu_bo_encrypted(abo))
828 flags |= AMDGPU_PTE_TMZ;
829
830 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
831 uint64_t page_idx = 1;
832
833 amdgpu_gart_bind(adev, gtt->offset, page_idx,
834 gtt->ttm.dma_address, flags);
835
836 /* The memory type of the first page defaults to UC. Now
837 * modify the memory type to NC from the second page of
838 * the BO onward.
839 */
840 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
841 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
842
843 amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
844 ttm->num_pages - page_idx,
845 &(gtt->ttm.dma_address[page_idx]), flags);
846 } else {
847 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
848 gtt->ttm.dma_address, flags);
849 }
850}
851
852/*
853 * amdgpu_ttm_backend_bind - Bind GTT memory
854 *
855 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
856 * This handles binding GTT memory to the device address space.
857 */
858static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
859 struct ttm_tt *ttm,
860 struct ttm_resource *bo_mem)
861{
862 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
863 struct amdgpu_ttm_tt *gtt = (void*)ttm;
864 uint64_t flags;
865 int r;
866
867 if (!bo_mem)
868 return -EINVAL;
869
870 if (gtt->bound)
871 return 0;
872
873 if (gtt->userptr) {
874 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
875 if (r) {
876 DRM_ERROR("failed to pin userptr\n");
877 return r;
878 }
879 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
880 if (!ttm->sg) {
881 struct dma_buf_attachment *attach;
882 struct sg_table *sgt;
883
884 attach = gtt->gobj->import_attach;
885 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
886 if (IS_ERR(sgt))
887 return PTR_ERR(sgt);
888
889 ttm->sg = sgt;
890 }
891
892 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
893 ttm->num_pages);
894 }
895
896 if (!ttm->num_pages) {
897 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
898 ttm->num_pages, bo_mem, ttm);
899 }
900
901 if (bo_mem->mem_type != TTM_PL_TT ||
902 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
903 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
904 return 0;
905 }
906
907 /* compute PTE flags relevant to this BO memory */
908 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
909
910 /* bind pages into GART page tables */
911 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
912 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
913 gtt->ttm.dma_address, flags);
914 gtt->bound = true;
915 return 0;
916}
917
918/*
919 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
920 * through AGP or GART aperture.
921 *
922 * If bo is accessible through AGP aperture, then use AGP aperture
923 * to access bo; otherwise allocate logical space in GART aperture
924 * and map bo to GART aperture.
925 */
926int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
927{
928 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
929 struct ttm_operation_ctx ctx = { false, false };
930 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
931 struct ttm_placement placement;
932 struct ttm_place placements;
933 struct ttm_resource *tmp;
934 uint64_t addr, flags;
935 int r;
936
937 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
938 return 0;
939
940 addr = amdgpu_gmc_agp_addr(bo);
941 if (addr != AMDGPU_BO_INVALID_OFFSET) {
942 bo->resource->start = addr >> PAGE_SHIFT;
943 return 0;
944 }
945
946 /* allocate GART space */
947 placement.num_placement = 1;
948 placement.placement = &placements;
949 placement.num_busy_placement = 1;
950 placement.busy_placement = &placements;
951 placements.fpfn = 0;
952 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
953 placements.mem_type = TTM_PL_TT;
954 placements.flags = bo->resource->placement;
955
956 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
957 if (unlikely(r))
958 return r;
959
960 /* compute PTE flags for this buffer object */
961 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
962
963 /* Bind pages */
964 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
965 amdgpu_ttm_gart_bind(adev, bo, flags);
966 amdgpu_gart_invalidate_tlb(adev);
967 ttm_resource_free(bo, &bo->resource);
968 ttm_bo_assign_mem(bo, tmp);
969
970 return 0;
971}
972
973/*
974 * amdgpu_ttm_recover_gart - Rebind GTT pages
975 *
976 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
977 * rebind GTT pages during a GPU reset.
978 */
979void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
980{
981 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
982 uint64_t flags;
983
984 if (!tbo->ttm)
985 return;
986
987 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
988 amdgpu_ttm_gart_bind(adev, tbo, flags);
989}
990
991/*
992 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
993 *
994 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
995 * ttm_tt_destroy().
996 */
997static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
998 struct ttm_tt *ttm)
999{
1000 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1001 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1002
1003 /* if the pages have userptr pinning then clear that first */
1004 if (gtt->userptr) {
1005 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1006 } else if (ttm->sg && gtt->gobj->import_attach) {
1007 struct dma_buf_attachment *attach;
1008
1009 attach = gtt->gobj->import_attach;
1010 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1011 ttm->sg = NULL;
1012 }
1013
1014 if (!gtt->bound)
1015 return;
1016
1017 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1018 return;
1019
1020 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1021 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1022 gtt->bound = false;
1023}
1024
1025static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1026 struct ttm_tt *ttm)
1027{
1028 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1029
1030 if (gtt->usertask)
1031 put_task_struct(gtt->usertask);
1032
1033 ttm_tt_fini(&gtt->ttm);
1034 kfree(gtt);
1035}
1036
1037/**
1038 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1039 *
1040 * @bo: The buffer object to create a GTT ttm_tt object around
1041 * @page_flags: Page flags to be added to the ttm_tt object
1042 *
1043 * Called by ttm_tt_create().
1044 */
1045static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1046 uint32_t page_flags)
1047{
1048 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1049 struct amdgpu_ttm_tt *gtt;
1050 enum ttm_caching caching;
1051
1052 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1053 if (gtt == NULL) {
1054 return NULL;
1055 }
1056 gtt->gobj = &bo->base;
1057
1058 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1059 caching = ttm_write_combined;
1060 else
1061 caching = ttm_cached;
1062
1063 /* allocate space for the uninitialized page entries */
1064 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1065 kfree(gtt);
1066 return NULL;
1067 }
1068 return &gtt->ttm;
1069}
1070
1071/*
1072 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1073 *
1074 * Map the pages of a ttm_tt object to an address space visible
1075 * to the underlying device.
1076 */
1077static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1078 struct ttm_tt *ttm,
1079 struct ttm_operation_ctx *ctx)
1080{
1081 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1082 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1083 pgoff_t i;
1084 int ret;
1085
1086 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1087 if (gtt->userptr) {
1088 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1089 if (!ttm->sg)
1090 return -ENOMEM;
1091 return 0;
1092 }
1093
1094 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1095 return 0;
1096
1097 ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1098 if (ret)
1099 return ret;
1100
1101 for (i = 0; i < ttm->num_pages; ++i)
1102 ttm->pages[i]->mapping = bdev->dev_mapping;
1103
1104 return 0;
1105}
1106
1107/*
1108 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1109 *
1110 * Unmaps pages of a ttm_tt object from the device address space and
1111 * unpopulates the page array backing it.
1112 */
1113static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1114 struct ttm_tt *ttm)
1115{
1116 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1117 struct amdgpu_device *adev;
1118 pgoff_t i;
1119
1120 amdgpu_ttm_backend_unbind(bdev, ttm);
1121
1122 if (gtt->userptr) {
1123 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1124 kfree(ttm->sg);
1125 ttm->sg = NULL;
1126 return;
1127 }
1128
1129 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1130 return;
1131
1132 for (i = 0; i < ttm->num_pages; ++i)
1133 ttm->pages[i]->mapping = NULL;
1134
1135 adev = amdgpu_ttm_adev(bdev);
1136 return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1137}
1138
1139/**
1140 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1141 * task
1142 *
1143 * @tbo: The ttm_buffer_object that contains the userptr
1144 * @user_addr: The returned value
1145 */
1146int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1147 uint64_t *user_addr)
1148{
1149 struct amdgpu_ttm_tt *gtt;
1150
1151 if (!tbo->ttm)
1152 return -EINVAL;
1153
1154 gtt = (void *)tbo->ttm;
1155 *user_addr = gtt->userptr;
1156 return 0;
1157}
1158
1159/**
1160 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1161 * task
1162 *
1163 * @bo: The ttm_buffer_object to bind this userptr to
1164 * @addr: The address in the current tasks VM space to use
1165 * @flags: Requirements of userptr object.
1166 *
1167 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1168 * to current task
1169 */
1170int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1171 uint64_t addr, uint32_t flags)
1172{
1173 struct amdgpu_ttm_tt *gtt;
1174
1175 if (!bo->ttm) {
1176 /* TODO: We want a separate TTM object type for userptrs */
1177 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1178 if (bo->ttm == NULL)
1179 return -ENOMEM;
1180 }
1181
1182 /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1183 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1184
1185 gtt = (void *)bo->ttm;
1186 gtt->userptr = addr;
1187 gtt->userflags = flags;
1188
1189 if (gtt->usertask)
1190 put_task_struct(gtt->usertask);
1191 gtt->usertask = current->group_leader;
1192 get_task_struct(gtt->usertask);
1193
1194 return 0;
1195}
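/*
 * Rough usage sketch (hypothetical, condensed from the GEM userptr ioctl
 * path; locking and error handling omitted):
 *
 *	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 *	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 *	... validate / bind the BO ...
 *	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm))
 *		... CPU page tables changed under us, retry ...
 *
 * The authoritative sequence lives in amdgpu_gem_userptr_ioctl() and may
 * differ in detail.
 */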
1196
1197/*
1198 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1199 */
1200struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1201{
1202 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1203
1204 if (gtt == NULL)
1205 return NULL;
1206
1207 if (gtt->usertask == NULL)
1208 return NULL;
1209
1210 return gtt->usertask->mm;
1211}
1212
1213/*
1214 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1215 * address range for the current task.
1216 *
1217 */
1218bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1219 unsigned long end, unsigned long *userptr)
1220{
1221 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1222 unsigned long size;
1223
1224 if (gtt == NULL || !gtt->userptr)
1225 return false;
1226
1227 /* Return false if no part of the ttm_tt object lies within
1228 * the range
1229 */
1230 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1231 if (gtt->userptr > end || gtt->userptr + size <= start)
1232 return false;
1233
1234 if (userptr)
1235 *userptr = gtt->userptr;
1236 return true;
1237}
1238
1239/*
1240 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1241 */
1242bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1243{
1244 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1245
1246 if (gtt == NULL || !gtt->userptr)
1247 return false;
1248
1249 return true;
1250}
1251
1252/*
1253 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1254 */
1255bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1256{
1257 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1258
1259 if (gtt == NULL)
1260 return false;
1261
1262 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1263}
1264
1265/**
1266 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1267 *
1268 * @ttm: The ttm_tt object to compute the flags for
1269 * @mem: The memory registry backing this ttm_tt object
1270 *
1271 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1272 */
1273uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1274{
1275 uint64_t flags = 0;
1276
1277 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1278 flags |= AMDGPU_PTE_VALID;
1279
1280 if (mem && (mem->mem_type == TTM_PL_TT ||
1281 mem->mem_type == AMDGPU_PL_PREEMPT)) {
1282 flags |= AMDGPU_PTE_SYSTEM;
1283
1284 if (ttm->caching == ttm_cached)
1285 flags |= AMDGPU_PTE_SNOOPED;
1286 }
1287
1288 if (mem && mem->mem_type == TTM_PL_VRAM &&
1289 mem->bus.caching == ttm_cached)
1290 flags |= AMDGPU_PTE_SNOOPED;
1291
1292 return flags;
1293}
1294
1295/**
1296 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1297 *
1298 * @adev: amdgpu_device pointer
1299 * @ttm: The ttm_tt object to compute the flags for
1300 * @mem: The memory registry backing this ttm_tt object
1301 *
1302 * Figure out the flags to use for a VM PTE (Page Table Entry).
1303 */
1304uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1305 struct ttm_resource *mem)
1306{
1307 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1308
1309 flags |= adev->gart.gart_pte_flags;
1310 flags |= AMDGPU_PTE_READABLE;
1311
1312 if (!amdgpu_ttm_tt_is_readonly(ttm))
1313 flags |= AMDGPU_PTE_WRITEABLE;
1314
1315 return flags;
1316}
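/*
 * Worked example (illustrative): for a cached, writable, GTT-backed ttm_tt
 * the two helpers above typically compose something like
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | adev->gart.gart_pte_flags
 *
 * with the ASIC specific bits coming from gart_pte_flags.
 */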
1317
1318/*
1319 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1320 * object.
1321 *
1322 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1323 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1324 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1325 * used to clean out a memory space.
1326 */
1327static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1328 const struct ttm_place *place)
1329{
1330 unsigned long num_pages = bo->resource->num_pages;
1331 struct dma_resv_iter resv_cursor;
1332 struct amdgpu_res_cursor cursor;
1333 struct dma_fence *f;
1334
1335 /* Swapout? */
1336 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1337 return true;
1338
1339 if (bo->type == ttm_bo_type_kernel &&
1340 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1341 return false;
1342
1343 /* If bo is a KFD BO, check if the bo belongs to the current process.
1344 * If true, then return false as any KFD process needs all its BOs to
1345 * be resident to run successfully
1346 */
1347 dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
1348 if (amdkfd_fence_check_mm(f, current->mm))
1349 return false;
1350 }
1351
1352 switch (bo->resource->mem_type) {
1353 case AMDGPU_PL_PREEMPT:
1354 /* Preemptible BOs don't own system resources managed by the
1355 * driver (pages, VRAM, GART space). They point to resources
1356 * owned by someone else (e.g. pageable memory in user mode
1357 * or a DMABuf). They are used in a preemptible context so we
1358 * can guarantee no deadlocks and good QoS in case of MMU
1359 * notifiers or DMABuf move notifiers from the resource owner.
1360 */
1361 return false;
1362 case TTM_PL_TT:
1363 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1364 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1365 return false;
1366 return true;
1367
1368 case TTM_PL_VRAM:
1369 /* Check each drm MM node individually */
1370 amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
1371 &cursor);
1372 while (cursor.remaining) {
1373 if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
1374 && !(place->lpfn &&
1375 place->lpfn <= PFN_DOWN(cursor.start)))
1376 return true;
1377
1378 amdgpu_res_next(&cursor, cursor.size);
1379 }
1380 return false;
1381
1382 default:
1383 break;
1384 }
1385
1386 return ttm_bo_eviction_valuable(bo, place);
1387}
1388
1389static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1390 void *buf, size_t size, bool write)
1391{
1392 while (size) {
1393 uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1394 uint64_t bytes = 4 - (pos & 0x3);
1395 uint32_t shift = (pos & 0x3) * 8;
1396 uint32_t mask = 0xffffffff << shift;
1397 uint32_t value = 0;
1398
1399 if (size < bytes) {
1400 mask &= 0xffffffff >> (bytes - size) * 8;
1401 bytes = size;
1402 }
1403
1404 if (mask != 0xffffffff) {
1405 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1406 if (write) {
1407 value &= ~mask;
1408 value |= (*(uint32_t *)buf << shift) & mask;
1409 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1410 } else {
1411 value = (value & mask) >> shift;
1412 memcpy(buf, &value, bytes);
1413 }
1414 } else {
1415 amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1416 }
1417
1418 pos += bytes;
1419 buf += bytes;
1420 size -= bytes;
1421 }
1422}
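/*
 * Worked example for the read-modify-write logic above (illustrative): a two
 * byte read at pos = 0x1003 is split into two iterations,
 *
 *	1) aligned_pos = 0x1000, shift = 24, mask = 0xff000000, bytes = 1
 *	2) aligned_pos = 0x1004, shift = 0,  mask = 0x000000ff, bytes = 1
 *
 * each reading one dword through the MM registers and extracting one byte.
 */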
1423
1424static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1425 unsigned long offset, void *buf, int len, int write)
1426{
1427 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1428 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1429 struct amdgpu_res_cursor src_mm;
1430 struct amdgpu_job *job;
1431 struct dma_fence *fence;
1432 uint64_t src_addr, dst_addr;
1433 unsigned int num_dw;
1434 int r, idx;
1435
1436 if (len != PAGE_SIZE)
1437 return -EINVAL;
1438
1439 if (!adev->mman.sdma_access_ptr)
1440 return -EACCES;
1441
1442 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1443 return -ENODEV;
1444
1445 if (write)
1446 memcpy(adev->mman.sdma_access_ptr, buf, len);
1447
1448 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1449 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
1450 if (r)
1451 goto out;
1452
1453 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1454 src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
1455 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1456 if (write)
1457 swap(src_addr, dst_addr);
1458
1459 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
1460
1461 amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1462 WARN_ON(job->ibs[0].length_dw > num_dw);
1463
1464 r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1465 if (r) {
1466 amdgpu_job_free(job);
1467 goto out;
1468 }
1469
1470 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1471 r = -ETIMEDOUT;
1472 dma_fence_put(fence);
1473
1474 if (!(r || write))
1475 memcpy(buf, adev->mman.sdma_access_ptr, len);
1476out:
1477 drm_dev_exit(idx);
1478 return r;
1479}
1480
1481/**
1482 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1483 *
1484 * @bo: The buffer object to read/write
1485 * @offset: Offset into buffer object
1486 * @buf: Secondary buffer to write/read from
1487 * @len: Length in bytes of access
1488 * @write: true if writing
1489 *
1490 * This is used to access VRAM that backs a buffer object via MMIO
1491 * access for debugging purposes.
1492 */
1493static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1494 unsigned long offset, void *buf, int len,
1495 int write)
1496{
1497 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1498 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1499 struct amdgpu_res_cursor cursor;
1500 int ret = 0;
1501
1502 if (bo->resource->mem_type != TTM_PL_VRAM)
1503 return -EIO;
1504
1505 if (amdgpu_device_has_timeouts_enabled(adev) &&
1506 !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1507 return len;
1508
1509 amdgpu_res_first(bo->resource, offset, len, &cursor);
1510 while (cursor.remaining) {
1511 size_t count, size = cursor.size;
1512 loff_t pos = cursor.start;
1513
1514 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1515 size -= count;
1516 if (size) {
1517 /* use MM to access the rest of VRAM and handle unaligned addresses */
1518 pos += count;
1519 buf += count;
1520 amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1521 }
1522
1523 ret += cursor.size;
1524 buf += cursor.size;
1525 amdgpu_res_next(&cursor, cursor.size);
1526 }
1527
1528 return ret;
1529}
1530
1531static void
1532amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1533{
1534 amdgpu_bo_move_notify(bo, false, NULL);
1535}
1536
1537static struct ttm_device_funcs amdgpu_bo_driver = {
1538 .ttm_tt_create = &amdgpu_ttm_tt_create,
1539 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1540 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1541 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1542 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1543 .evict_flags = &amdgpu_evict_flags,
1544 .move = &amdgpu_bo_move,
1545 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1546 .release_notify = &amdgpu_bo_release_notify,
1547 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1548 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1549 .access_memory = &amdgpu_ttm_access_memory,
1550 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1551};
1552
1553/*
1554 * Firmware Reservation functions
1555 */
1556/**
1557 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1558 *
1559 * @adev: amdgpu_device pointer
1560 *
1561 * free fw reserved vram if it has been reserved.
1562 */
1563static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1564{
1565 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1566 NULL, &adev->mman.fw_vram_usage_va);
1567}
1568
1569/**
1570 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1571 *
1572 * @adev: amdgpu_device pointer
1573 *
1574 * create bo vram reservation from fw.
1575 */
1576static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1577{
1578 uint64_t vram_size = adev->gmc.visible_vram_size;
1579
1580 adev->mman.fw_vram_usage_va = NULL;
1581 adev->mman.fw_vram_usage_reserved_bo = NULL;
1582
1583 if (adev->mman.fw_vram_usage_size == 0 ||
1584 adev->mman.fw_vram_usage_size > vram_size)
1585 return 0;
1586
1587 return amdgpu_bo_create_kernel_at(adev,
1588 adev->mman.fw_vram_usage_start_offset,
1589 adev->mman.fw_vram_usage_size,
1590 AMDGPU_GEM_DOMAIN_VRAM,
1591 &adev->mman.fw_vram_usage_reserved_bo,
1592 &adev->mman.fw_vram_usage_va);
1593}
1594
1595/*
1596 * Memory training reservation functions
1597 */
1598
1599/**
1600 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1601 *
1602 * @adev: amdgpu_device pointer
1603 *
1604 * free memory training reserved vram if it has been reserved.
1605 */
1606static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1607{
1608 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1609
1610 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1611 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1612 ctx->c2p_bo = NULL;
1613
1614 return 0;
1615}
1616
1617static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1618{
1619 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1620
1621 memset(ctx, 0, sizeof(*ctx));
1622
1623 ctx->c2p_train_data_offset =
1624 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1625 ctx->p2c_train_data_offset =
1626 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1627 ctx->train_data_size =
1628 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1629
1630 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1631 ctx->train_data_size,
1632 ctx->p2c_train_data_offset,
1633 ctx->c2p_train_data_offset);
1634}
1635
1636/*
1637 * reserve TMR memory at the top of VRAM which holds
1638 * IP Discovery data and is protected by PSP.
1639 */
1640static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1641{
1642 int ret;
1643 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1644 bool mem_train_support = false;
1645
1646 if (!amdgpu_sriov_vf(adev)) {
1647 if (amdgpu_atomfirmware_mem_training_supported(adev))
1648 mem_train_support = true;
1649 else
1650 DRM_DEBUG("memory training does not support!\n");
1651 }
1652
1653 /*
1654 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1655 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1656 *
1657 * Otherwise, fall back to the legacy approach to check and reserve tmr blocks for IP
1658 * discovery data and G6 memory training data respectively.
1659 */
1660 adev->mman.discovery_tmr_size =
1661 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1662 if (!adev->mman.discovery_tmr_size)
1663 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1664
1665 if (mem_train_support) {
1666 /* reserve vram for mem train according to TMR location */
1667 amdgpu_ttm_training_data_block_init(adev);
1668 ret = amdgpu_bo_create_kernel_at(adev,
1669 ctx->c2p_train_data_offset,
1670 ctx->train_data_size,
1671 AMDGPU_GEM_DOMAIN_VRAM,
1672 &ctx->c2p_bo,
1673 NULL);
1674 if (ret) {
1675 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1676 amdgpu_ttm_training_reserve_vram_fini(adev);
1677 return ret;
1678 }
1679 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1680 }
1681
1682 ret = amdgpu_bo_create_kernel_at(adev,
1683 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1684 adev->mman.discovery_tmr_size,
1685 AMDGPU_GEM_DOMAIN_VRAM,
1686 &adev->mman.discovery_memory,
1687 NULL);
1688 if (ret) {
1689 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1690 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1691 return ret;
1692 }
1693
1694 return 0;
1695}
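/*
 * Rough sketch of what ends up reserved by the function above (derived from
 * the offsets used in the code; exact sizes are ASIC dependent):
 *
 *	[real_vram_size - discovery_tmr_size, real_vram_size)  IP discovery TMR
 *	[c2p_train_data_offset, + train_data_size)             c2p training BO
 *	                                                        (only when memory
 *	                                                        training is supported)
 */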
1696
1697/*
1698 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1699 * gtt/vram related fields.
1700 *
1701 * This initializes all of the memory space pools that the TTM layer
1702 * will need such as the GTT space (system memory mapped to the device),
1703 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1704 * can be mapped per VMID.
1705 */
1706int amdgpu_ttm_init(struct amdgpu_device *adev)
1707{
1708 uint64_t gtt_size;
1709 int r;
1710 u64 vis_vram_limit;
1711
1712 mutex_init(&adev->mman.gtt_window_lock);
1713
1714 /* No other users of the address space, so set it to 0 */
1715 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1716 adev_to_drm(adev)->anon_inode->i_mapping,
1717 adev_to_drm(adev)->vma_offset_manager,
1718 adev->need_swiotlb,
1719 dma_addressing_limited(adev->dev));
1720 if (r) {
1721 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1722 return r;
1723 }
1724 adev->mman.initialized = true;
1725
1726 /* Initialize VRAM pool with all of VRAM divided into pages */
1727 r = amdgpu_vram_mgr_init(adev);
1728 if (r) {
1729 DRM_ERROR("Failed initializing VRAM heap.\n");
1730 return r;
1731 }
1732
1733 /* Reduce size of CPU-visible VRAM if requested */
1734 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1735 if (amdgpu_vis_vram_limit > 0 &&
1736 vis_vram_limit <= adev->gmc.visible_vram_size)
1737 adev->gmc.visible_vram_size = vis_vram_limit;
1738
1739 /* Change the size here instead of the init above so only lpfn is affected */
1740 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1741#ifdef CONFIG_64BIT
1742#ifdef CONFIG_X86
1743 if (adev->gmc.xgmi.connected_to_cpu)
1744 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1745 adev->gmc.visible_vram_size);
1746
1747 else
1748#endif
1749 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1750 adev->gmc.visible_vram_size);
1751#endif
1752
1753 /*
1754 * The reserved vram for firmware must be pinned to the specified
1755 * place on the VRAM, so reserve it early.
1756 */
1757 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1758 if (r) {
1759 return r;
1760 }
1761
1762 /*
1763 * Only NAVI10 and onward ASICs support IP discovery.
1764 * If IP discovery is enabled, a block of memory should be
1765 * reserved for IP discovery.
1766 */
1767 if (adev->mman.discovery_bin) {
1768 r = amdgpu_ttm_reserve_tmr(adev);
1769 if (r)
1770 return r;
1771 }
1772
1773 /* allocate memory as required for VGA
1774 * This is used for VGA emulation and pre-OS scanout buffers to
1775 * avoid display artifacts while transitioning between pre-OS
1776 * and driver. */
1777 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1778 AMDGPU_GEM_DOMAIN_VRAM,
1779 &adev->mman.stolen_vga_memory,
1780 NULL);
1781 if (r)
1782 return r;
1783 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1784 adev->mman.stolen_extended_size,
1785 AMDGPU_GEM_DOMAIN_VRAM,
1786 &adev->mman.stolen_extended_memory,
1787 NULL);
1788 if (r)
1789 return r;
1790 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1791 adev->mman.stolen_reserved_size,
1792 AMDGPU_GEM_DOMAIN_VRAM,
1793 &adev->mman.stolen_reserved_memory,
1794 NULL);
1795 if (r)
1796 return r;
1797
1798 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1799 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1800
1801 /* Compute GTT size, either based on 3/4 of the RAM size
1802 * or whatever the user passed on module init */
1803 if (amdgpu_gtt_size == -1) {
1804 struct sysinfo si;
1805
1806 si_meminfo(&si);
1807 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1808 adev->gmc.mc_vram_size),
1809 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1810 }
1811 else
1812 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
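	/*
	 * Worked example for the amdgpu_gtt_size == -1 default above
	 * (illustrative, assuming AMDGPU_DEFAULT_GTT_SIZE_MB is 3072): on a
	 * machine with 32 GiB of RAM and 8 GiB of VRAM this yields
	 * gtt_size = min(max(3 GiB, 8 GiB), 24 GiB) = 8 GiB.
	 */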
1813
1814 /* Initialize GTT memory pool */
1815 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1816 if (r) {
1817 DRM_ERROR("Failed initializing GTT heap.\n");
1818 return r;
1819 }
1820 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1821 (unsigned)(gtt_size / (1024 * 1024)));
1822
1823 /* Initialize preemptible memory pool */
1824 r = amdgpu_preempt_mgr_init(adev);
1825 if (r) {
1826 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1827 return r;
1828 }
1829
1830 /* Initialize various on-chip memory pools */
1831 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1832 if (r) {
1833 DRM_ERROR("Failed initializing GDS heap.\n");
1834 return r;
1835 }
1836
1837 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1838 if (r) {
1839 DRM_ERROR("Failed initializing gws heap.\n");
1840 return r;
1841 }
1842
1843 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1844 if (r) {
1845 DRM_ERROR("Failed initializing oa heap.\n");
1846 return r;
1847 }
1848
1849 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
1850 AMDGPU_GEM_DOMAIN_GTT,
1851 &adev->mman.sdma_access_bo, NULL,
1852 &adev->mman.sdma_access_ptr))
1853 DRM_WARN("Debug VRAM access will use slowpath MM access\n");
1854
1855 return 0;
1856}
1857
1858/*
1859 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1860 */
1861void amdgpu_ttm_fini(struct amdgpu_device *adev)
1862{
1863 int idx;
1864 if (!adev->mman.initialized)
1865 return;
1866
1867 amdgpu_ttm_training_reserve_vram_fini(adev);
1868 /* return the stolen vga memory back to VRAM */
1869 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1870 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1871 /* return the IP Discovery TMR memory back to VRAM */
1872 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1873 if (adev->mman.stolen_reserved_size)
1874 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1875 NULL, NULL);
1876 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
1877 &adev->mman.sdma_access_ptr);
1878 amdgpu_ttm_fw_reserve_vram_fini(adev);
1879
1880 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1881
1882 if (adev->mman.aper_base_kaddr)
1883 iounmap(adev->mman.aper_base_kaddr);
1884 adev->mman.aper_base_kaddr = NULL;
1885
1886 drm_dev_exit(idx);
1887 }
1888
1889 amdgpu_vram_mgr_fini(adev);
1890 amdgpu_gtt_mgr_fini(adev);
1891 amdgpu_preempt_mgr_fini(adev);
1892 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1893 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1894 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1895 ttm_device_fini(&adev->mman.bdev);
1896 adev->mman.initialized = false;
1897 DRM_INFO("amdgpu: ttm finalized\n");
1898}
1899
1900/**
1901 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1902 *
1903 * @adev: amdgpu_device pointer
1904 * @enable: true when we can use buffer functions.
1905 *
1906 * Enable/disable use of buffer functions during suspend/resume. This should
1907 * only be called at bootup or when userspace isn't running.
1908 */
1909void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1910{
1911 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1912 uint64_t size;
1913 int r;
1914
1915 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1916 adev->mman.buffer_funcs_enabled == enable)
1917 return;
1918
1919 if (enable) {
1920 struct amdgpu_ring *ring;
1921 struct drm_gpu_scheduler *sched;
1922
1923 ring = adev->mman.buffer_funcs_ring;
1924 sched = &ring->sched;
1925 r = drm_sched_entity_init(&adev->mman.entity,
1926 DRM_SCHED_PRIORITY_KERNEL, &sched,
1927 1, NULL);
1928 if (r) {
1929 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1930 r);
1931 return;
1932 }
1933 } else {
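		/* Tear down the move entity and drop the manager's move fence */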
1934 drm_sched_entity_destroy(&adev->mman.entity);
1935 dma_fence_put(man->move);
1936 man->move = NULL;
1937 }
1938
1939	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1940 if (enable)
1941 size = adev->gmc.real_vram_size;
1942 else
1943 size = adev->gmc.visible_vram_size;
1944 man->size = size;
1945 adev->mman.buffer_funcs_enabled = enable;
1946}
1947
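/**
 * amdgpu_ttm_prepare_job - allocate and set up a job for a buffer copy/fill
 * @adev: amdgpu device pointer
 * @direct_submit: allocate the IB from the direct pool instead of the delayed one
 * @num_dw: number of dwords to reserve in the IB
 * @resv: reservation object to synchronize the job with, or NULL
 * @vm_needs_flush: true if the VM must be flushed before the job runs
 * @job: resulting job, valid on success
 *
 * Returns 0 on success or a negative error code on failure.
 */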
1948static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
1949 bool direct_submit,
1950 unsigned int num_dw,
1951 struct dma_resv *resv,
1952 bool vm_needs_flush,
1953 struct amdgpu_job **job)
1954{
1955 enum amdgpu_ib_pool_type pool = direct_submit ?
1956 AMDGPU_IB_POOL_DIRECT :
1957 AMDGPU_IB_POOL_DELAYED;
1958 int r;
1959
1960 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
1961 if (r)
1962 return r;
1963
1964 if (vm_needs_flush) {
1965 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1966 adev->gmc.pdb0_bo :
1967 adev->gart.bo);
1968 (*job)->vm_needs_flush = true;
1969 }
1970 if (resv) {
1971 r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
1972 AMDGPU_SYNC_ALWAYS,
1973 AMDGPU_FENCE_OWNER_UNDEFINED);
1974 if (r) {
1975 DRM_ERROR("sync failed (%d).\n", r);
1976 amdgpu_job_free(*job);
1977 return r;
1978 }
1979 }
1980 return 0;
1981}
1982
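/**
 * amdgpu_copy_buffer - schedule a GPU copy between two buffer addresses
 * @ring: ring with buffer functions to use for the copy
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: reservation object to synchronize the copy with, or NULL
 * @fence: resulting fence, valid on success
 * @direct_submit: submit the IB directly instead of through the scheduler entity
 * @vm_needs_flush: true if the VM must be flushed before the copy runs
 * @tmz: copy the data through the TMZ (secure) path
 *
 * Splits the copy into chunks of at most copy_max_bytes and submits the
 * resulting IB on @ring. Returns 0 on success or a negative error code.
 */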
1983int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1984 uint64_t dst_offset, uint32_t byte_count,
1985 struct dma_resv *resv,
1986 struct dma_fence **fence, bool direct_submit,
1987 bool vm_needs_flush, bool tmz)
1988{
1989 struct amdgpu_device *adev = ring->adev;
1990 unsigned num_loops, num_dw;
1991 struct amdgpu_job *job;
1992 uint32_t max_bytes;
1993 unsigned i;
1994 int r;
1995
1996 if (!direct_submit && !ring->sched.ready) {
1997 DRM_ERROR("Trying to move memory with ring turned off.\n");
1998 return -EINVAL;
1999 }
2000
2001 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2002 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2003 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2004 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2005 resv, vm_needs_flush, &job);
2006 if (r)
2007 return r;
2008
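	/* Emit one copy command per chunk of at most copy_max_bytes */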
2009 for (i = 0; i < num_loops; i++) {
2010 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2011
2012 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2013 dst_offset, cur_size_in_bytes, tmz);
2014
2015 src_offset += cur_size_in_bytes;
2016 dst_offset += cur_size_in_bytes;
2017 byte_count -= cur_size_in_bytes;
2018 }
2019
2020 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2021 WARN_ON(job->ibs[0].length_dw > num_dw);
2022 if (direct_submit)
2023 r = amdgpu_job_submit_direct(job, ring, fence);
2024 else
2025 r = amdgpu_job_submit(job, &adev->mman.entity,
2026 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2027 if (r)
2028 goto error_free;
2029
2030 return r;
2031
2032error_free:
2033 amdgpu_job_free(job);
2034 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2035 return r;
2036}
2037
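/**
 * amdgpu_ttm_fill_mem - fill a GPU address range with a 32-bit pattern
 * @ring: ring with buffer functions to use for the fill
 * @src_data: 32-bit value to fill the range with
 * @dst_addr: destination GPU address
 * @byte_count: number of bytes to fill
 * @resv: reservation object to synchronize the fill with, or NULL
 * @fence: resulting fence, valid on success
 * @vm_needs_flush: true if the VM must be flushed before the fill runs
 *
 * Splits the fill into chunks of at most fill_max_bytes and submits the
 * resulting IB through the TTM move entity. Returns 0 on success or a
 * negative error code.
 */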
2038static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2039 uint64_t dst_addr, uint32_t byte_count,
2040 struct dma_resv *resv,
2041 struct dma_fence **fence,
2042 bool vm_needs_flush)
2043{
2044 struct amdgpu_device *adev = ring->adev;
2045 unsigned int num_loops, num_dw;
2046 struct amdgpu_job *job;
2047 uint32_t max_bytes;
2048 unsigned int i;
2049 int r;
2050
2051 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2052 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2053 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2054 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2055 &job);
2056 if (r)
2057 return r;
2058
2059 for (i = 0; i < num_loops; i++) {
2060 uint32_t cur_size = min(byte_count, max_bytes);
2061
2062 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2063 cur_size);
2064
2065 dst_addr += cur_size;
2066 byte_count -= cur_size;
2067 }
2068
2069 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2070 WARN_ON(job->ibs[0].length_dw > num_dw);
2071 r = amdgpu_job_submit(job, &adev->mman.entity,
2072 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2073 if (r)
2074 goto error_free;
2075
2076 return 0;
2077
2078error_free:
2079 amdgpu_job_free(job);
2080 return r;
2081}
2082
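/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern
 * @bo: buffer object to fill
 * @src_data: 32-bit value to fill the BO with
 * @resv: reservation object to synchronize the fill with, or NULL
 * @f: resulting fence, valid on success
 *
 * Walks the BO's backing resource with a cursor, maps each piece for the
 * ring through a GTT window where necessary and fills it in chunks of at
 * most 256MiB. Returns 0 on success or a negative error code.
 */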
2083int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2084 uint32_t src_data,
2085 struct dma_resv *resv,
2086 struct dma_fence **f)
2087{
2088 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2089 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2090 struct dma_fence *fence = NULL;
2091 struct amdgpu_res_cursor dst;
2092 int r;
2093
2094 if (!adev->mman.buffer_funcs_enabled) {
2095 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2096 return -EINVAL;
2097 }
2098
2099 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2100
2101 mutex_lock(&adev->mman.gtt_window_lock);
2102 while (dst.remaining) {
2103 struct dma_fence *next;
2104 uint64_t cur_size, to;
2105
2106 /* Never fill more than 256MiB at once to avoid timeouts */
2107 cur_size = min(dst.size, 256ULL << 20);
2108
2109 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2110 1, ring, false, &cur_size, &to);
2111 if (r)
2112 goto error;
2113
2114 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2115 &next, true);
2116 if (r)
2117 goto error;
2118
2119 dma_fence_put(fence);
2120 fence = next;
2121
2122 amdgpu_res_next(&dst, cur_size);
2123 }
2124error:
2125 mutex_unlock(&adev->mman.gtt_window_lock);
2126 if (f)
2127 *f = dma_fence_get(fence);
2128 dma_fence_put(fence);
2129 return r;
2130}
2131
2132/**
2133 * amdgpu_ttm_evict_resources - evict memory buffers
2134 * @adev: amdgpu device object
2135 * @mem_type: evicted BO's memory type
2136 *
2137 * Evicts all @mem_type buffers on the lru list of the memory type.
2138 *
2139 * Returns:
2140 * 0 for success or a negative error code on failure.
2141 */
2142int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2143{
2144 struct ttm_resource_manager *man;
2145
2146 switch (mem_type) {
2147 case TTM_PL_VRAM:
2148 case TTM_PL_TT:
2149 case AMDGPU_PL_GWS:
2150 case AMDGPU_PL_GDS:
2151 case AMDGPU_PL_OA:
2152 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2153 break;
2154 default:
2155 DRM_ERROR("Trying to evict invalid memory type\n");
2156 return -EINVAL;
2157 }
2158
2159 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2160}
2161
2162#if defined(CONFIG_DEBUG_FS)
2163
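/*
 * debugfs show() callbacks dumping the state of the VRAM, GTT, GDS, GWS
 * and OA resource managers as well as the TTM page pool.
 */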
2164static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
2165{
2166 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2167 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2168 TTM_PL_VRAM);
2169 struct drm_printer p = drm_seq_file_printer(m);
2170
2171 ttm_resource_manager_debug(man, &p);
2172 return 0;
2173}
2174
2175static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2176{
2177 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2178
2179 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2180}
2181
2182static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
2183{
2184 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2185 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2186 TTM_PL_TT);
2187 struct drm_printer p = drm_seq_file_printer(m);
2188
2189 ttm_resource_manager_debug(man, &p);
2190 return 0;
2191}
2192
2193static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
2194{
2195 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2196 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2197 AMDGPU_PL_GDS);
2198 struct drm_printer p = drm_seq_file_printer(m);
2199
2200 ttm_resource_manager_debug(man, &p);
2201 return 0;
2202}
2203
2204static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
2205{
2206 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2207 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2208 AMDGPU_PL_GWS);
2209 struct drm_printer p = drm_seq_file_printer(m);
2210
2211 ttm_resource_manager_debug(man, &p);
2212 return 0;
2213}
2214
2215static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
2216{
2217 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2218 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2219 AMDGPU_PL_OA);
2220 struct drm_printer p = drm_seq_file_printer(m);
2221
2222 ttm_resource_manager_debug(man, &p);
2223 return 0;
2224}
2225
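/* Wrap the show() callbacks above into debugfs file operations */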
2226DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
2227DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
2228DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
2229DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
2230DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
2231DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2232
2233/*
2234 * amdgpu_ttm_vram_read - Linear read access to VRAM
2235 *
2236 * Accesses VRAM via MMIO for debugging purposes.
2237 */
2238static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2239 size_t size, loff_t *pos)
2240{
2241 struct amdgpu_device *adev = file_inode(f)->i_private;
2242 ssize_t result = 0;
2243
2244 if (size & 0x3 || *pos & 0x3)
2245 return -EINVAL;
2246
2247 if (*pos >= adev->gmc.mc_vram_size)
2248 return -ENXIO;
2249
2250 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
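	/* Read in chunks of at most AMDGPU_TTM_VRAM_MAX_DW_READ dwords */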
2251 while (size) {
2252 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2253 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2254
2255 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2256 if (copy_to_user(buf, value, bytes))
2257 return -EFAULT;
2258
2259 result += bytes;
2260 buf += bytes;
2261 *pos += bytes;
2262 size -= bytes;
2263 }
2264
2265 return result;
2266}
2267
2268/*
2269 * amdgpu_ttm_vram_write - Linear write access to VRAM
2270 *
2271 * Accesses VRAM via MMIO for debugging purposes.
2272 */
2273static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2274 size_t size, loff_t *pos)
2275{
2276 struct amdgpu_device *adev = file_inode(f)->i_private;
2277 ssize_t result = 0;
2278 int r;
2279
2280 if (size & 0x3 || *pos & 0x3)
2281 return -EINVAL;
2282
2283 if (*pos >= adev->gmc.mc_vram_size)
2284 return -ENXIO;
2285
2286 while (size) {
2287 uint32_t value;
2288
2289 if (*pos >= adev->gmc.mc_vram_size)
2290 return result;
2291
2292 r = get_user(value, (uint32_t *)buf);
2293 if (r)
2294 return r;
2295
2296 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2297
2298 result += 4;
2299 buf += 4;
2300 *pos += 4;
2301 size -= 4;
2302 }
2303
2304 return result;
2305}
2306
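/* File operations for the amdgpu_vram debugfs file */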
2307static const struct file_operations amdgpu_ttm_vram_fops = {
2308 .owner = THIS_MODULE,
2309 .read = amdgpu_ttm_vram_read,
2310 .write = amdgpu_ttm_vram_write,
2311 .llseek = default_llseek,
2312};
2313
2314/*
2315 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2316 *
2317 * This function is used to read memory that has been mapped to the
2318 * GPU. The addresses are not physical addresses but bus addresses
2319 * (e.g., what you'd put in an IB or ring buffer).
2320 */
2321static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2322 size_t size, loff_t *pos)
2323{
2324 struct amdgpu_device *adev = file_inode(f)->i_private;
2325 struct iommu_domain *dom;
2326 ssize_t result = 0;
2327 int r;
2328
2329 /* retrieve the IOMMU domain if any for this device */
2330 dom = iommu_get_domain_for_dev(adev->dev);
2331
2332 while (size) {
2333 phys_addr_t addr = *pos & PAGE_MASK;
2334 loff_t off = *pos & ~PAGE_MASK;
2335 size_t bytes = PAGE_SIZE - off;
2336 unsigned long pfn;
2337 struct page *p;
2338 void *ptr;
2339
2340 bytes = bytes < size ? bytes : size;
2341
2342 /* Translate the bus address to a physical address. If
2343 * the domain is NULL it means there is no IOMMU active
2344 * and the address translation is the identity
2345 */
2346 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2347
2348 pfn = addr >> PAGE_SHIFT;
2349 if (!pfn_valid(pfn))
2350 return -EPERM;
2351
2352 p = pfn_to_page(pfn);
2353 if (p->mapping != adev->mman.bdev.dev_mapping)
2354 return -EPERM;
2355
2356 ptr = kmap(p);
2357 r = copy_to_user(buf, ptr + off, bytes);
2358 kunmap(p);
2359 if (r)
2360 return -EFAULT;
2361
2362 size -= bytes;
2363 *pos += bytes;
2364 result += bytes;
2365 }
2366
2367 return result;
2368}
2369
2370/*
2371 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2372 *
2373 * This function is used to write memory that has been mapped to the
2374 * GPU. The addresses are not physical addresses but bus addresses
2375 * (e.g., what you'd put in an IB or ring buffer).
2376 */
2377static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2378 size_t size, loff_t *pos)
2379{
2380 struct amdgpu_device *adev = file_inode(f)->i_private;
2381 struct iommu_domain *dom;
2382 ssize_t result = 0;
2383 int r;
2384
2385 dom = iommu_get_domain_for_dev(adev->dev);
2386
2387 while (size) {
2388 phys_addr_t addr = *pos & PAGE_MASK;
2389 loff_t off = *pos & ~PAGE_MASK;
2390 size_t bytes = PAGE_SIZE - off;
2391 unsigned long pfn;
2392 struct page *p;
2393 void *ptr;
2394
2395 bytes = bytes < size ? bytes : size;
2396
2397 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2398
2399 pfn = addr >> PAGE_SHIFT;
2400 if (!pfn_valid(pfn))
2401 return -EPERM;
2402
2403 p = pfn_to_page(pfn);
2404 if (p->mapping != adev->mman.bdev.dev_mapping)
2405 return -EPERM;
2406
2407 ptr = kmap(p);
2408 r = copy_from_user(ptr + off, buf, bytes);
2409 kunmap(p);
2410 if (r)
2411 return -EFAULT;
2412
2413 size -= bytes;
2414 *pos += bytes;
2415 result += bytes;
2416 }
2417
2418 return result;
2419}
2420
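/* File operations for the amdgpu_iomem debugfs file */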
2421static const struct file_operations amdgpu_ttm_iomem_fops = {
2422 .owner = THIS_MODULE,
2423 .read = amdgpu_iomem_read,
2424 .write = amdgpu_iomem_write,
2425 .llseek = default_llseek
2426};
2427
2428#endif
2429
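/**
 * amdgpu_ttm_debugfs_init - register the TTM debugfs files
 * @adev: amdgpu device pointer
 *
 * Creates the debugfs entries for raw VRAM and IOMEM access, the
 * per-domain resource managers and the TTM page pool.
 */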
2430void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2431{
2432#if defined(CONFIG_DEBUG_FS)
2433 struct drm_minor *minor = adev_to_drm(adev)->primary;
2434 struct dentry *root = minor->debugfs_root;
2435
2436 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2437 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2438 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2439 &amdgpu_ttm_iomem_fops);
2440 debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
2441 &amdgpu_mm_vram_table_fops);
2442 debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
2443 &amdgpu_mm_tt_table_fops);
2444 debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
2445 &amdgpu_mm_gds_table_fops);
2446 debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
2447 &amdgpu_mm_gws_table_fops);
2448 debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
2449 &amdgpu_mm_oa_table_fops);
2450 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2451 &amdgpu_ttm_page_pool_fops);
2452#endif
2453}