/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs. However, rather
 * than there being a single global GART table for the entire GPU, there
 * are multiple VM page tables active at any given time. The VM page
 * tables can contain a mix of VRAM pages and system memory pages, and
 * system memory pages can be mapped as snooped (cached system pages) or
 * unsnooped (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer, the kernel
 * tells the ring what VMID to use for that command buffer. VMIDs are
 * allocated dynamically as commands are submitted. The userspace drivers
 * maintain their own address space and the kernel sets up their page
 * tables accordingly when they submit their command buffers and a VMID
 * is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
                     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

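/*
 * INTERVAL_TREE_DEFINE() above generates amdgpu_vm_it_insert(),
 * amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next() for struct amdgpu_bo_va_mapping. As an
 * illustrative sketch (not part of the driver), walking all mappings
 * overlapping an address range would look like this:
 *
 *      struct amdgpu_bo_va_mapping *m;
 *
 *      for (m = amdgpu_vm_it_iter_first(&vm->va, start, last); m;
 *           m = amdgpu_vm_it_iter_next(m, start, last))
 *              visit_mapping(m);       // hypothetical per-mapping handler
 *
 * where start and last are inclusive bounds, matching the START()/LAST()
 * accessors used in the definition above.
 */
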
/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

        /**
         * @adev: amdgpu device
         */
        struct amdgpu_device *adev;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                      unsigned level)
{
        unsigned shift = 0xff;

        switch (level) {
        case AMDGPU_VM_PDB2:
        case AMDGPU_VM_PDB1:
        case AMDGPU_VM_PDB0:
                shift = 9 * (AMDGPU_VM_PDB0 - level) +
                        adev->vm_manager.block_size;
                break;
        case AMDGPU_VM_PTB:
                shift = 0;
                break;
        default:
                dev_err(adev->dev, "the level%d isn't supported.\n", level);
        }

        return shift;
}

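/*
 * A worked example of the shift computation above (illustrative only,
 * assuming the common block_size of 9, i.e. 512 entries per level):
 *
 *      AMDGPU_VM_PTB  -> shift 0   (a pfn indexes PTEs directly)
 *      AMDGPU_VM_PDB0 -> shift 9   (block_size)
 *      AMDGPU_VM_PDB1 -> shift 18  (9 + block_size)
 *      AMDGPU_VM_PDB2 -> shift 27  (18 + block_size)
 *
 * So with a 4 KiB GPU page, one PDB0 entry spans 1 << (12 + 9) = 2 MiB
 * of address space, one PDB1 entry 1 GiB, and so on.
 */
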
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
{
        unsigned shift = amdgpu_vm_level_shift(adev,
                                               adev->vm_manager.root_level);

        if (level == adev->vm_manager.root_level)
                /* For the root directory */
                return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
                        >> shift;
        else if (level != AMDGPU_VM_PTB)
                /* Everything in between */
                return 512;
        else
                /* For the page tables on the leaves */
                return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * The number of entries in the root page directory which need the ATS setting.
 */
static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
{
        unsigned shift;

        shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
        return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}

/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
                                       unsigned int level)
{
        if (level <= adev->vm_manager.root_level)
                return 0xffffffff;
        else if (level != AMDGPU_VM_PTB)
                return 0x1ff;
        else
                return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BO for a PD/PT in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
        return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
        struct amdgpu_vm *vm = vm_bo->vm;
        struct amdgpu_bo *bo = vm_bo->bo;

        vm_bo->moved = true;
        if (bo->tbo.type == ttm_bo_type_kernel)
                list_move(&vm_bo->vm_status, &vm->evicted);
        else
                list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
        list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
        list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
        vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->invalidated_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
        spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and whose change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->invalidated_lock);
        list_del_init(&vm_bo->vm_status);
        spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists.
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo *bo)
{
        base->vm = vm;
        base->bo = bo;
        base->next = NULL;
        INIT_LIST_HEAD(&base->vm_status);

        if (!bo)
                return;
        base->next = bo->vm_bo;
        bo->vm_bo = base;

        if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
                return;

        vm->bulk_moveable = false;
        if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
        else
                amdgpu_vm_bo_idle(base);

        if (bo->preferred_domains &
            amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
                return;

        /*
         * We checked all the prerequisites, but it looks like this per VM BO
         * is currently evicted. Add the BO to the evicted list to make sure
         * it is validated on next VM use to avoid a fault.
         */
        amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
{
        struct amdgpu_bo *parent = pt->base.bo->parent;

        if (!parent)
                return NULL;

        return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
        uint64_t pfn;
        struct amdgpu_vm_pt *parent;
        struct amdgpu_vm_pt *entry;
        unsigned level;
};

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm, uint64_t start,
                               struct amdgpu_vm_pt_cursor *cursor)
{
        cursor->pfn = start;
        cursor->parent = NULL;
        cursor->entry = &vm->root;
        cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
                                    struct amdgpu_vm_pt_cursor *cursor)
{
        unsigned mask, shift, idx;

        if (!cursor->entry->entries)
                return false;

        BUG_ON(!cursor->entry->base.bo);
        mask = amdgpu_vm_entries_mask(adev, cursor->level);
        shift = amdgpu_vm_level_shift(adev, cursor->level);

        ++cursor->level;
        idx = (cursor->pfn >> shift) & mask;
        cursor->parent = cursor->entry;
        cursor->entry = &cursor->entry->entries[idx];
        return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
                                 struct amdgpu_vm_pt_cursor *cursor)
{
        unsigned shift, num_entries;

        /* Root doesn't have a sibling */
        if (!cursor->parent)
                return false;

        /* Go to our parent and see if we got a sibling */
        shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
        num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

        if (cursor->entry == &cursor->parent->entries[num_entries - 1])
                return false;

        cursor->pfn += 1ULL << shift;
        cursor->pfn &= ~((1ULL << shift) - 1);
        ++cursor->entry;
        return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
        if (!cursor->parent)
                return false;

        --cursor->level;
        cursor->entry = cursor->parent;
        cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
        return true;
}

455
456/**
457 * amdgpu_vm_pt_next - get next PD/PT in hieratchy
458 *
459 * @adev: amdgpu_device pointer
460 * @cursor: current state
461 *
462 * Walk the PD/PT tree to the next node.
463 */
464static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
465 struct amdgpu_vm_pt_cursor *cursor)
466{
467 /* First try a newborn child */
468 if (amdgpu_vm_pt_descendant(adev, cursor))
469 return;
470
471 /* If that didn't worked try to find a sibling */
472 while (!amdgpu_vm_pt_sibling(adev, cursor)) {
473 /* No sibling, go to our parents and grandparents */
474 if (!amdgpu_vm_pt_ancestor(cursor)) {
475 cursor->pfn = ~0ll;
476 return;
477 }
478 }
479}
480
481/**
482 * amdgpu_vm_pt_first_dfs - start a deep first search
483 *
484 * @adev: amdgpu_device structure
485 * @vm: amdgpu_vm structure
486 * @start: optional cursor to start with
487 * @cursor: state to initialize
488 *
489 * Starts a deep first traversal of the PD/PT tree.
490 */
491static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
492 struct amdgpu_vm *vm,
493 struct amdgpu_vm_pt_cursor *start,
494 struct amdgpu_vm_pt_cursor *cursor)
495{
496 if (start)
497 *cursor = *start;
498 else
499 amdgpu_vm_pt_start(adev, vm, 0, cursor);
500 while (amdgpu_vm_pt_descendant(adev, cursor));
501}
502
503/**
504 * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
505 *
506 * @start: starting point for the search
507 * @entry: current entry
508 *
509 * Returns:
510 * True when the search should continue, false otherwise.
511 */
512static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
513 struct amdgpu_vm_pt *entry)
514{
515 return entry && (!start || entry != start->entry);
516}
517
518/**
519 * amdgpu_vm_pt_next_dfs - get the next node for a deep first search
520 *
521 * @adev: amdgpu_device structure
522 * @cursor: current state
523 *
524 * Move the cursor to the next node in a deep first search.
525 */
526static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
527 struct amdgpu_vm_pt_cursor *cursor)
528{
529 if (!cursor->entry)
530 return;
531
532 if (!cursor->parent)
533 cursor->entry = NULL;
534 else if (amdgpu_vm_pt_sibling(adev, cursor))
535 while (amdgpu_vm_pt_descendant(adev, cursor));
536 else
537 amdgpu_vm_pt_ancestor(cursor);
538}
539
540/*
541 * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
542 */
543#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
544 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
545 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
546 amdgpu_vm_pt_continue_dfs((start), (entry)); \
547 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
548
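/*
 * The iterator is "safe" in the sense that @entry always trails the
 * cursor by one step, so the entry currently being visited may be freed
 * while the walk continues. A minimal usage sketch (illustrative only,
 * mirroring how amdgpu_vm_free_pts() below uses it):
 *
 *      struct amdgpu_vm_pt_cursor cursor;
 *      struct amdgpu_vm_pt *entry;
 *
 *      for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *              visit(entry);   // hypothetical per-node handler
 *
 * Passing NULL as @start walks the whole tree from the root; a non-NULL
 * cursor restricts the walk to that subtree.
 */
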
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
{
        entry->priority = 0;
        entry->tv.bo = &vm->root.base.bo->tbo;
        /* One for the VM updates, one for TTM and one for the CS job */
        entry->tv.num_shared = 3;
        entry->user_pages = NULL;
        list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 *
 * @bo: BO which was removed from the LRU
 *
 * Make sure the bulk_moveable flag is updated when a BO is removed from the
 * LRU.
 */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_bo *abo;
        struct amdgpu_vm_bo_base *bo_base;

        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;

        if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
                return;

        abo = ttm_to_amdgpu_bo(bo);
        if (!abo->parent)
                return;
        for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
                struct amdgpu_vm *vm = bo_base->vm;

                if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
                        vm->bulk_moveable = false;
        }
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
{
        struct amdgpu_vm_bo_base *bo_base;

        if (vm->bulk_moveable) {
                spin_lock(&ttm_bo_glob.lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&ttm_bo_glob.lru_lock);
                return;
        }

        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

        spin_lock(&ttm_bo_glob.lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                if (!bo->parent)
                        continue;

                ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
                if (bo->shadow)
                        ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
                                                &vm->lru_bulk_move);
        }
        spin_unlock(&ttm_bo_glob.lru_lock);

        vm->bulk_moveable = true;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
                              void *param)
{
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;

        vm->bulk_moveable &= list_empty(&vm->evicted);

        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                r = validate(param, bo);
                if (r)
                        break;

                if (bo->tbo.type != ttm_bo_type_kernel) {
                        amdgpu_vm_bo_moved(bo_base);
                } else {
                        vm->update_funcs->map_table(bo);
                        if (bo->parent)
                                amdgpu_vm_bo_relocated(bo_base);
                        else
                                amdgpu_vm_bo_idle(bo_base);
                }
        }

        return r;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
        return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 * @direct: use a direct update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo,
                              bool direct)
{
        struct ttm_operation_ctx ctx = { true, false };
        unsigned level = adev->vm_manager.root_level;
        struct amdgpu_vm_update_params params;
        struct amdgpu_bo *ancestor = bo;
        unsigned entries, ats_entries;
        uint64_t addr;
        int r;

        /* Figure out our place in the hierarchy */
        if (ancestor->parent) {
                ++level;
                while (ancestor->parent->parent) {
                        ++level;
                        ancestor = ancestor->parent;
                }
        }

        entries = amdgpu_bo_size(bo) / 8;
        if (!vm->pte_support_ats) {
                ats_entries = 0;

        } else if (!bo->parent) {
                ats_entries = amdgpu_vm_num_ats_entries(adev);
                ats_entries = min(ats_entries, entries);
                entries -= ats_entries;

        } else {
                struct amdgpu_vm_pt *pt;

                pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
                ats_entries = amdgpu_vm_num_ats_entries(adev);
                if ((pt - vm->root.entries) >= ats_entries) {
                        ats_entries = 0;
                } else {
                        ats_entries = entries;
                        entries = 0;
                }
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                return r;

        if (bo->shadow) {
                r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
                                    &ctx);
                if (r)
                        return r;
        }

        r = vm->update_funcs->map_table(bo);
        if (r)
                return r;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.direct = direct;

        r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
        if (r)
                return r;

        addr = 0;
        if (ats_entries) {
                uint64_t value = 0, flags;

                flags = AMDGPU_PTE_DEFAULT_ATC;
                if (level != AMDGPU_VM_PTB) {
                        /* Handle leaf PDEs as PTEs */
                        flags |= AMDGPU_PDE_PTE;
                        amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
                }

                r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
                                             value, flags);
                if (r)
                        return r;

                addr += ats_entries * 8;
        }

        if (entries) {
                uint64_t value = 0, flags = 0;

                if (adev->asic_type >= CHIP_VEGA10) {
                        if (level != AMDGPU_VM_PTB) {
                                /* Handle leaf PDEs as PTEs */
                                flags |= AMDGPU_PDE_PTE;
                                amdgpu_gmc_get_vm_pde(adev, level,
                                                      &value, &flags);
                        } else {
                                /* Workaround for fault priority problem on GMC9 */
                                flags = AMDGPU_PTE_EXECUTABLE;
                        }
                }

                r = vm->update_funcs->update(&params, bo, addr, 0, entries,
                                             value, flags);
                if (r)
                        return r;
        }

        return vm->update_funcs->commit(&params, NULL);
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @direct: use a direct update
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                               int level, bool direct,
                               struct amdgpu_bo_param *bp)
{
        memset(bp, 0, sizeof(*bp));

        bp->size = amdgpu_vm_bo_size(adev, level);
        bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
        bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
        bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
        bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        if (vm->use_cpu_for_update)
                bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
                bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
        bp->type = ttm_bo_type_kernel;
        bp->no_wait_gpu = direct;
        if (vm->root.base.bo)
                bp->resv = vm->root.base.bo->tbo.base.resv;
}

/**
 * amdgpu_vm_alloc_pts - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @direct: use a direct update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 1 if page table needed to be allocated, 0 if page table was already
 * allocated, negative errno if an error occurred.
 */
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_vm_pt_cursor *cursor,
                               bool direct)
{
        struct amdgpu_vm_pt *entry = cursor->entry;
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *pt;
        int r;

        if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
                unsigned num_entries;

                num_entries = amdgpu_vm_num_entries(adev, cursor->level);
                entry->entries = kvmalloc_array(num_entries,
                                                sizeof(*entry->entries),
                                                GFP_KERNEL | __GFP_ZERO);
                if (!entry->entries)
                        return -ENOMEM;
        }

        if (entry->base.bo)
                return 0;

        amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);

        r = amdgpu_bo_create(adev, &bp, &pt);
        if (r)
                return r;

        /* Keep a reference to the root directory to avoid
         * freeing them up in the wrong order.
         */
        pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
        amdgpu_vm_bo_base_init(&entry->base, vm, pt);

        r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
        if (r)
                goto error_free_pt;

        return 0;

error_free_pt:
        amdgpu_bo_unref(&pt->shadow);
        amdgpu_bo_unref(&pt);
        return r;
}

/**
 * amdgpu_vm_free_table - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
{
        if (entry->base.bo) {
                entry->base.bo->vm_bo = NULL;
                list_del(&entry->base.vm_status);
                amdgpu_bo_unref(&entry->base.bo->shadow);
                amdgpu_bo_unref(&entry->base.bo);
        }
        kvfree(entry->entries);
        entry->entries = NULL;
}

/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 * @start: optional cursor where to start freeing PDs/PTs
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_vm_pt_cursor *start)
{
        struct amdgpu_vm_pt_cursor cursor;
        struct amdgpu_vm_pt *entry;

        vm->bulk_moveable = false;

        for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
                amdgpu_vm_free_table(entry);

        if (start)
                amdgpu_vm_free_table(start->entry);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
        const struct amdgpu_ip_block *ip_block;
        bool has_compute_vm_bug;
        struct amdgpu_ring *ring;
        int i;

        has_compute_vm_bug = false;

        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
        if (ip_block) {
                /* Compute has a VM bug for GFX version < 7.
                 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
                 */
                if (ip_block->version->major <= 7)
                        has_compute_vm_bug = true;
                else if (ip_block->version->major == 8)
                        if (adev->gfx.mec_fw_version < 673)
                                has_compute_vm_bug = true;
        }

        for (i = 0; i < adev->num_rings; i++) {
                ring = adev->rings[i];
                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                        /* only compute rings */
                        ring->has_compute_vm_bug = has_compute_vm_bug;
                else
                        ring->has_compute_vm_bug = false;
        }
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id;
        bool gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

        if (job->vmid == 0)
                return false;
        id = &id_mgr->ids[job->vmid];
        gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);

        if (amdgpu_vmid_had_gpu_reset(adev, id))
                return true;

        return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
                    bool need_pipe_sync)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);
        bool vm_flush_needed = job->vm_needs_flush;
        struct dma_fence *fence = NULL;
        bool pasid_mapping_needed = false;
        unsigned patch_offset = 0;
        int r;

        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
        }

        mutex_lock(&id_mgr->lock);
        if (id->pasid != job->pasid || !id->pasid_mapping ||
            !dma_fence_is_signaled(id->pasid_mapping))
                pasid_mapping_needed = true;
        mutex_unlock(&id_mgr->lock);

        gds_switch_needed &= !!ring->funcs->emit_gds_switch;
        vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
                        job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
        pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
                ring->funcs->emit_wreg;

        if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
                return 0;

        if (ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        if (need_pipe_sync)
                amdgpu_ring_emit_pipeline_sync(ring);

        if (vm_flush_needed) {
                trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
                amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
        }

        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

        if (vm_flush_needed || pasid_mapping_needed) {
                r = amdgpu_fence_emit(ring, &fence, 0);
                if (r)
                        return r;
        }

        if (vm_flush_needed) {
                mutex_lock(&id_mgr->lock);
                dma_fence_put(id->last_flush);
                id->last_flush = dma_fence_get(fence);
                id->current_gpu_reset_count =
                        atomic_read(&adev->gpu_reset_counter);
                mutex_unlock(&id_mgr->lock);
        }

        if (pasid_mapping_needed) {
                mutex_lock(&id_mgr->lock);
                id->pasid = job->pasid;
                dma_fence_put(id->pasid_mapping);
                id->pasid_mapping = dma_fence_get(fence);
                mutex_unlock(&id_mgr->lock);
        }
        dma_fence_put(fence);

        if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                id->gds_base = job->gds_base;
                id->gds_size = job->gds_size;
                id->gws_base = job->gws_base;
                id->gws_size = job->gws_size;
                id->oa_base = job->oa_base;
                id->oa_size = job->oa_size;
                amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                            job->gds_size, job->gws_base,
                                            job->gws_size, job->oa_base,
                                            job->oa_size);
        }

        if (ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
        if (ring->funcs->emit_switch_buffer) {
                amdgpu_ring_emit_switch_buffer(ring);
                amdgpu_ring_emit_switch_buffer(ring);
        }
        return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_vm_bo_base *base;

        for (base = bo->vm_bo; base; base = base->next) {
                if (base->vm != vm)
                        continue;

                return container_of(base, struct amdgpu_bo_va, base);
        }
        return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}

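/*
 * A worked example of the lookup above (illustrative numbers only):
 * with a 64 KiB CPU page size and addr = 0x12345, the function reads
 * pages_addr[0x1] to get the DMA address of the backing CPU page,
 * ORs in the in-page offset 0x2345 so that the right 4 KiB GPU page
 * within the larger CPU page is selected, and finally masks the low
 * 12 bits off so the result is 4 KiB aligned as the PTE expects.
 * With 4 KiB CPU pages the OR and the final mask cancel each other out.
 */
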
/**
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @vm: requested vm
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
                                struct amdgpu_vm *vm,
                                struct amdgpu_vm_pt *entry)
{
        struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
        struct amdgpu_bo *bo = parent->base.bo, *pbo;
        uint64_t pde, pt, flags;
        unsigned level;

        for (level = 0, pbo = bo->parent; pbo; ++level)
                pbo = pbo->parent;

        level += params->adev->vm_manager.root_level;
        amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
        pde = (entry - parent->entries) * 8;
        return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}

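/*
 * Note how the PDE location is computed above: each directory entry is
 * 8 bytes, so the byte offset of @entry inside the parent BO is simply
 * its index times 8. As an illustrative example, the third entry of a
 * PD (index 2) lives at byte offset 16 of the parent BO, and the update
 * call writes exactly one 8-byte entry there (count 1, increment 0).
 */
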
/**
 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm)
{
        struct amdgpu_vm_pt_cursor cursor;
        struct amdgpu_vm_pt *entry;

        for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
                if (entry->base.bo && !entry->base.moved)
                        amdgpu_vm_bo_relocated(&entry->base);
}

/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @direct: submit directly to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool direct)
{
        struct amdgpu_vm_update_params params;
        int r;

        if (list_empty(&vm->relocated))
                return 0;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.direct = direct;

        r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
        if (r)
                return r;

        while (!list_empty(&vm->relocated)) {
                struct amdgpu_vm_pt *entry;

                entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
                                         base.vm_status);
                amdgpu_vm_bo_idle(&entry->base);

                r = amdgpu_vm_update_pde(&params, vm, entry);
                if (r)
                        goto error;
        }

        r = vm->update_funcs->commit(&params, &vm->last_update);
        if (r)
                goto error;
        return 0;

error:
        amdgpu_vm_invalidate_pds(adev, vm);
        return r;
}

/*
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
                                   struct amdgpu_bo *bo, unsigned level,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint64_t flags)
{
        if (level != AMDGPU_VM_PTB) {
                flags |= AMDGPU_PDE_PTE;
                amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

        } else if (params->adev->asic_type >= CHIP_VEGA10 &&
                   !(flags & AMDGPU_PTE_VALID) &&
                   !(flags & AMDGPU_PTE_PRT)) {

                /* Workaround for fault priority problem on GMC9 */
                flags |= AMDGPU_PTE_EXECUTABLE;
        }

        params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
                                         flags);
}

/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
                               uint64_t start, uint64_t end, uint64_t flags,
                               unsigned int *frag, uint64_t *frag_end)
{
        /*
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
         * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
         * flags are considered valid for all PTEs within the fragment range
         * and corresponding mappings are assumed to be physically contiguous.
         *
         * The L1 TLB can store a single PTE for the whole fragment,
         * significantly increasing the space available for translation
         * caching. This leads to large improvements in throughput when the
         * TLB is under pressure.
         *
         * The L2 TLB distributes small and large fragments into two
         * asymmetric partitions. The large fragment cache is significantly
         * larger. Thus, we try to use large fragments wherever possible.
         * Userspace can support this by aligning the virtual base address
         * and allocation size to the fragment size.
         *
         * Starting with Vega10 the fragment size only controls the L1. The
         * L2 is now directly fed with small/huge/giant pages from the walker.
         */
        unsigned max_frag;

        if (params->adev->asic_type < CHIP_VEGA10)
                max_frag = params->adev->vm_manager.fragment_size;
        else
                max_frag = 31;

        /* system pages are not physically contiguous */
        if (params->pages_addr) {
                *frag = 0;
                *frag_end = end;
                return;
        }

        /* This intentionally wraps around if no bit is set */
        *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
        if (*frag >= max_frag) {
                *frag = max_frag;
                *frag_end = end & ~((1ULL << max_frag) - 1);
        } else {
                *frag_end = start + (1 << *frag);
        }
}

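/*
 * A worked example of the fragment computation above (illustrative
 * numbers only): for start = 0x200 and end = 0x1000, ffs(start) - 1 is
 * 9 and fls64(end - start) - 1 is 11, so *frag = 9. That is, the
 * largest power-of-two block that is both aligned at @start and fits
 * before @end covers 512 PTEs, and *frag_end becomes 0x200 + 0x200 =
 * 0x400. The caller then loops, recomputing the fragment from the new
 * start until the whole range is handled.
 */
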
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                                 uint64_t start, uint64_t end,
                                 uint64_t dst, uint64_t flags)
{
        struct amdgpu_device *adev = params->adev;
        struct amdgpu_vm_pt_cursor cursor;
        uint64_t frag_start = start, frag_end;
        unsigned int frag;
        int r;

        /* figure out the initial fragment */
        amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);

        /* walk over the address space and update the PTs */
        amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
        while (cursor.pfn < end) {
                unsigned shift, parent_shift, mask;
                uint64_t incr, entry_end, pe_start;
                struct amdgpu_bo *pt;

                /* make sure that the page tables covering the address range
                 * are actually allocated
                 */
                r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
                                        params->direct);
                if (r)
                        return r;

                pt = cursor.entry->base.bo;

                /* The root level can't be a huge page */
                if (cursor.level == adev->vm_manager.root_level) {
                        if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                return -ENOENT;
                        continue;
                }

                shift = amdgpu_vm_level_shift(adev, cursor.level);
                parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
                if (adev->asic_type < CHIP_VEGA10 &&
                    (flags & AMDGPU_PTE_VALID)) {
                        /* No huge page support before GMC v9 */
                        if (cursor.level != AMDGPU_VM_PTB) {
                                if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                        return -ENOENT;
                                continue;
                        }
                } else if (frag < shift) {
                        /* We can't use this level when the fragment size is
                         * smaller than the address shift. Go to the next
                         * child entry and try again.
                         */
                        if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                return -ENOENT;
                        continue;
                } else if (frag >= parent_shift &&
                           cursor.level - 1 != adev->vm_manager.root_level) {
                        /* If the fragment size is even larger than the parent
                         * shift we should go up one level and check it again
                         * unless one level up is the root level.
                         */
                        if (!amdgpu_vm_pt_ancestor(&cursor))
                                return -ENOENT;
                        continue;
                }

                /* Looks good so far, calculate parameters for the update */
                incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
                mask = amdgpu_vm_entries_mask(adev, cursor.level);
                pe_start = ((cursor.pfn >> shift) & mask) * 8;
                entry_end = (uint64_t)(mask + 1) << shift;
                entry_end += cursor.pfn & ~(entry_end - 1);
                entry_end = min(entry_end, end);

                do {
                        uint64_t upd_end = min(entry_end, frag_end);
                        unsigned nptes = (upd_end - frag_start) >> shift;

                        amdgpu_vm_update_flags(params, pt, cursor.level,
                                               pe_start, dst, nptes, incr,
                                               flags | AMDGPU_PTE_FRAG(frag));

                        pe_start += nptes * 8;
                        dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;

                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
                                /* figure out the next fragment */
                                amdgpu_vm_fragment(params, frag_start, end,
                                                   flags, &frag, &frag_end);
                                if (frag < shift)
                                        break;
                        }
                } while (frag_start < entry_end);

                if (amdgpu_vm_pt_descendant(adev, &cursor)) {
                        /* Free all child entries.
                         * Update the tables with the flags and addresses and
                         * free up subsequent tables in the case of huge pages
                         * or freed up areas. This is the maximum you can
                         * free, because all other page tables are not
                         * completely covered by the range and so potentially
                         * still in use.
                         */
                        while (cursor.pfn < frag_start) {
                                amdgpu_vm_free_pts(adev, params->vm, &cursor);
                                amdgpu_vm_pt_next(adev, &cursor);
                        }

                } else if (frag >= shift) {
                        /* or just move on to the next on the same level. */
                        amdgpu_vm_pt_next(adev, &cursor);
                }
        }

        return 0;
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @direct: direct submission in a page fault
 * @exclusive: fence we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct amdgpu_vm *vm, bool direct,
                                       struct dma_fence *exclusive,
                                       uint64_t start, uint64_t last,
                                       uint64_t flags, uint64_t addr,
                                       dma_addr_t *pages_addr,
                                       struct dma_fence **fence)
{
        struct amdgpu_vm_update_params params;
        void *owner = AMDGPU_FENCE_OWNER_VM;
        int r;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.direct = direct;
        params.pages_addr = pages_addr;

        /* sync to everything except eviction fences on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
                owner = AMDGPU_FENCE_OWNER_KFD;

        r = vm->update_funcs->prepare(&params, owner, exclusive);
        if (r)
                return r;

        r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
        if (r)
                return r;

        return vm->update_funcs->commit(&params, fence);
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @bo_adev: amdgpu_device pointer of the device the BO was allocated on
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                                      uint64_t flags,
                                      struct amdgpu_device *bo_adev,
                                      struct drm_mm_node *nodes,
                                      struct dma_fence **fence)
{
        unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
        uint64_t pfn, start = mapping->start;
        int r;

        /* Normally bo_va->flags only contains the READABLE and WRITEABLE
         * bits, but just to be on the safe side we filter the flags here
         * first.
         */
        if (!(mapping->flags & AMDGPU_PTE_READABLE))
                flags &= ~AMDGPU_PTE_READABLE;
        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                flags &= ~AMDGPU_PTE_WRITEABLE;

        /* Apply ASIC specific mapping flags */
        amdgpu_gmc_get_vm_pte(adev, mapping, &flags);

        trace_amdgpu_vm_bo_update(mapping);

        pfn = mapping->offset >> PAGE_SHIFT;
        if (nodes) {
                while (pfn >= nodes->size) {
                        pfn -= nodes->size;
                        ++nodes;
                }
        }

        do {
                dma_addr_t *dma_addr = NULL;
                uint64_t max_entries;
                uint64_t addr, last;

                if (nodes) {
                        addr = nodes->start << PAGE_SHIFT;
                        max_entries = (nodes->size - pfn) *
                                AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                } else {
                        addr = 0;
                        max_entries = S64_MAX;
                }

                if (pages_addr) {
                        uint64_t count;

                        for (count = 1;
                             count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                             ++count) {
                                uint64_t idx = pfn + count;

                                if (pages_addr[idx] !=
                                    (pages_addr[idx - 1] + PAGE_SIZE))
                                        break;
                        }

                        if (count < min_linear_pages) {
                                addr = pfn << PAGE_SHIFT;
                                dma_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
                                max_entries = count *
                                        AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                        }

                } else if (flags & AMDGPU_PTE_VALID) {
                        addr += bo_adev->vm_manager.vram_base_offset;
                        addr += pfn << PAGE_SHIFT;
                }

                last = min((uint64_t)mapping->last, start + max_entries - 1);
                r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
                                                start, last, flags, addr,
                                                dma_addr, fence);
                if (r)
                        return r;

                pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
                }
                start = last + 1;

        } while (unlikely(start != mapping->last + 1));

        return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                        bool clear)
{
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        struct ttm_mem_reg *mem;
        struct drm_mm_node *nodes;
        struct dma_fence *exclusive, **last_update;
        uint64_t flags;
        struct amdgpu_device *bo_adev = adev;
        int r;

        if (clear || !bo) {
                mem = NULL;
                nodes = NULL;
                exclusive = NULL;
        } else {
                struct ttm_dma_tt *ttm;

                mem = &bo->tbo.mem;
                nodes = mem->mm_node;
                if (mem->mem_type == TTM_PL_TT) {
                        ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
                exclusive = bo->tbo.moving;
        }

        if (bo) {
                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
                bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
        } else {
                flags = 0x0;
        }

        if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
                last_update = &vm->last_update;
        else
                last_update = &bo_va->last_pt_update;

        if (!clear && bo_va->base.moved) {
                bo_va->base.moved = false;
                list_splice_init(&bo_va->valids, &bo_va->invalids);

        } else if (bo_va->cleared != clear) {
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        }

        list_for_each_entry(mapping, &bo_va->invalids, list) {
                r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
                                               mapping, flags, bo_adev, nodes,
                                               last_update);
                if (r)
                        return r;
        }

        /* If the BO is not in its preferred location add it back to
         * the evicted list so that it gets validated again on the
         * next command submission.
         */
        if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
                uint32_t mem_type = bo->tbo.mem.mem_type;

                if (!(bo->preferred_domains &
                      amdgpu_mem_type_to_domain(mem_type)))
                        amdgpu_vm_bo_evicted(&bo_va->base);
                else
                        amdgpu_vm_bo_idle(&bo_va->base);
        } else {
                amdgpu_vm_bo_done(&bo_va->base);
        }

        list_splice_init(&bo_va->invalids, &bo_va->valids);
        bo_va->cleared = clear;

        if (trace_amdgpu_vm_bo_mapping_enabled()) {
                list_for_each_entry(mapping, &bo_va->valids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);
        }

        return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
        unsigned long flags;
        bool enable;

        spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
        enable = !!atomic_read(&adev->vm_manager.num_prt_users);
        adev->gmc.gmc_funcs->set_prt(adev, enable);
        spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
        if (!adev->gmc.gmc_funcs->set_prt)
                return;

        if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
                amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
        if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
                amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
        struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

        amdgpu_vm_prt_put(cb->adev);
        kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
                                 struct dma_fence *fence)
{
        struct amdgpu_prt_cb *cb;

        if (!adev->gmc.gmc_funcs->set_prt)
                return;

        cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                if (fence)
                        dma_fence_wait(fence, false);

                amdgpu_vm_prt_put(adev);
        } else {
                cb->adev = adev;
                if (!fence || dma_fence_add_callback(fence, &cb->cb,
                                                     amdgpu_vm_prt_cb))
                        amdgpu_vm_prt_cb(fence, &cb->cb);
        }
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo_va_mapping *mapping,
                                   struct dma_fence *fence)
{
        if (mapping->flags & AMDGPU_PTE_PRT)
                amdgpu_vm_add_prt_cb(adev, fence);
        kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
        struct dma_fence *excl, **shared;
        unsigned i, shared_count;
        int r;

        r = dma_resv_get_fences_rcu(resv, &excl,
                                    &shared_count, &shared);
        if (r) {
                /* Not enough memory to grab the fence list, as last resort
                 * block for all the fences to complete.
                 */
                dma_resv_wait_timeout_rcu(resv, true, false,
                                          MAX_SCHEDULE_TIMEOUT);
                return;
        }

        /* Add a callback for each fence in the reservation object */
        amdgpu_vm_prt_get(adev);
        amdgpu_vm_add_prt_cb(adev, excl);

        for (i = 0; i < shared_count; ++i) {
                amdgpu_vm_prt_get(adev);
                amdgpu_vm_add_prt_cb(adev, shared[i]);
        }

        kfree(shared);
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence)
{
        struct amdgpu_bo_va_mapping *mapping;
        uint64_t init_pte_value = 0;
        struct dma_fence *f = NULL;
        int r;

        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                                           struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);

                if (vm->pte_support_ats &&
                    mapping->start < AMDGPU_GMC_HOLE_START)
                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

                r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
                                                mapping->start, mapping->last,
                                                init_pte_value, 0, NULL, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
                if (r) {
                        dma_fence_put(f);
                        return r;
                }
        }

        if (fence && f) {
                dma_fence_put(*fence);
                *fence = f;
        } else {
                dma_fence_put(f);
        }

        return 0;
}

1976/**
1977 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1978 *
1979 * @adev: amdgpu_device pointer
1980 * @vm: requested vm
1981 *
1982 * Make sure all BOs which are moved are updated in the PTs.
1983 *
1984 * Returns:
1985 * 0 for success.
1986 *
1987 * PTs have to be reserved!
1988 */
1989int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1990 struct amdgpu_vm *vm)
1991{
1992 struct amdgpu_bo_va *bo_va, *tmp;
1993 struct dma_resv *resv;
1994 bool clear;
1995 int r;
1996
1997 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
1998 /* Per VM BOs never need to be cleared in the page tables */
1999 r = amdgpu_vm_bo_update(adev, bo_va, false);
2000 if (r)
2001 return r;
2002 }
2003
2004 spin_lock(&vm->invalidated_lock);
2005 while (!list_empty(&vm->invalidated)) {
2006 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2007 base.vm_status);
2008 resv = bo_va->base.bo->tbo.base.resv;
2009 spin_unlock(&vm->invalidated_lock);
2010
2011 /* Try to reserve the BO to avoid clearing its ptes */
2012 if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2013 clear = false;
2014 /* Somebody else is using the BO right now */
2015 else
2016 clear = true;
2017
2018 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2019 if (!clear)
2020 dma_resv_unlock(resv);
2021
2022 if (r)
2023 return r;
2024 spin_lock(&vm->invalidated_lock);
2025 }
2026 spin_unlock(&vm->invalidated_lock);
2027
2028 return 0;
2029}
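
/*
 * Usage sketch (illustrative only, not driver code): a command
 * submission typically clears freed mappings, updates moved BOs and
 * then commits the page directory changes. The wrapper below is
 * hypothetical; the PTs are assumed to be reserved by the caller.
 */
#if 0
static int example_vm_flush_updates(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct dma_fence **fence)
{
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, fence);
	if (r)
		return r;

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	return amdgpu_vm_update_pdes(adev, vm, false);
}
#endif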
2030
2031/**
2032 * amdgpu_vm_bo_add - add a bo to a specific vm
2033 *
2034 * @adev: amdgpu_device pointer
2035 * @vm: requested vm
2036 * @bo: amdgpu buffer object
2037 *
2038 * Add @bo into the requested vm and add it to the list of
2039 * BOs associated with the vm.
2040 *
2041 * Returns:
2042 * Newly added bo_va or NULL for failure
2043 *
2044 * Object has to be reserved!
2045 */
2046struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2047 struct amdgpu_vm *vm,
2048 struct amdgpu_bo *bo)
2049{
2050 struct amdgpu_bo_va *bo_va;
2051
2052 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2053 if (bo_va == NULL) {
2054 return NULL;
2055 }
2056 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2057
2058 bo_va->ref_count = 1;
2059 INIT_LIST_HEAD(&bo_va->valids);
2060 INIT_LIST_HEAD(&bo_va->invalids);
2061
2062 if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2063 (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2064 bo_va->is_xgmi = true;
2065 mutex_lock(&adev->vm_manager.lock_pstate);
2066 /* Power up XGMI if it can be potentially used */
2067 if (++adev->vm_manager.xgmi_map_counter == 1)
2068 amdgpu_xgmi_set_pstate(adev, 1);
2069 mutex_unlock(&adev->vm_manager.lock_pstate);
2070 }
2071
2072 return bo_va;
2073}
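
/*
 * Usage sketch (illustrative only, not driver code): the bo_va returned
 * above is the per-VM handle that the mapping functions below operate
 * on. The helper name and the GPU VA are hypothetical; the BO has to be
 * reserved around these calls.
 */
#if 0
static int example_map_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va) {
		amdgpu_bo_unreserve(bo);
		return -ENOMEM;
	}

	/* map the whole BO read/write at GPU VA 0x100000 */
	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, amdgpu_bo_size(bo),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);

	amdgpu_bo_unreserve(bo);
	return r;
}
#endif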
2074
2075
2076/**
2077 * amdgpu_vm_bo_insert_map - insert a new mapping
2078 *
2079 * @adev: amdgpu_device pointer
2080 * @bo_va: bo_va to store the address
2081 * @mapping: the mapping to insert
2082 *
2083 * Insert a new mapping into all structures.
2084 */
2085static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2086 struct amdgpu_bo_va *bo_va,
2087 struct amdgpu_bo_va_mapping *mapping)
2088{
2089 struct amdgpu_vm *vm = bo_va->base.vm;
2090 struct amdgpu_bo *bo = bo_va->base.bo;
2091
2092 mapping->bo_va = bo_va;
2093 list_add(&mapping->list, &bo_va->invalids);
2094 amdgpu_vm_it_insert(mapping, &vm->va);
2095
2096 if (mapping->flags & AMDGPU_PTE_PRT)
2097 amdgpu_vm_prt_get(adev);
2098
2099 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2100 !bo_va->base.moved) {
2101 list_move(&bo_va->base.vm_status, &vm->moved);
2102 }
2103 trace_amdgpu_vm_bo_map(bo_va, mapping);
2104}
2105
2106/**
2107 * amdgpu_vm_bo_map - map bo inside a vm
2108 *
2109 * @adev: amdgpu_device pointer
2110 * @bo_va: bo_va to store the address
2111 * @saddr: where to map the BO
2112 * @offset: requested offset in the BO
2113 * @size: size of the mapping in bytes
2114 * @flags: attributes of pages (read/write/valid/etc.)
2115 *
2116 * Add a mapping of the BO at the specified addr into the VM.
2117 *
2118 * Returns:
2119 * 0 for success, error for failure.
2120 *
2121 * Object has to be reserved and unreserved outside!
2122 */
2123int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2124 struct amdgpu_bo_va *bo_va,
2125 uint64_t saddr, uint64_t offset,
2126 uint64_t size, uint64_t flags)
2127{
2128 struct amdgpu_bo_va_mapping *mapping, *tmp;
2129 struct amdgpu_bo *bo = bo_va->base.bo;
2130 struct amdgpu_vm *vm = bo_va->base.vm;
2131 uint64_t eaddr;
2132
2133 /* validate the parameters */
2134 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2135 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2136 return -EINVAL;
2137
2138 /* make sure object fit at this offset */
2139 eaddr = saddr + size - 1;
2140 if (saddr >= eaddr ||
2141 (bo && offset + size > amdgpu_bo_size(bo)))
2142 return -EINVAL;
2143
2144 saddr /= AMDGPU_GPU_PAGE_SIZE;
2145 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2146
2147 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2148 if (tmp) {
2149 /* bo and tmp overlap, invalid addr */
2150 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2151 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2152 tmp->start, tmp->last + 1);
2153 return -EINVAL;
2154 }
2155
2156 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2157 if (!mapping)
2158 return -ENOMEM;
2159
2160 mapping->start = saddr;
2161 mapping->last = eaddr;
2162 mapping->offset = offset;
2163 mapping->flags = flags;
2164
2165 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2166
2167 return 0;
2168}
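
/*
 * Worked example for the checks above (GPU pages are 4K, so
 * AMDGPU_GPU_PAGE_MASK covers the low 12 bits): saddr = 0x101000,
 * offset = 0x2000 and size = 0x4000 are all page aligned and accepted;
 * saddr = 0x101234 or size = 0x4100 would return -EINVAL. Internally
 * the mapping is stored in GPU page units, here pages 0x101..0x104.
 */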
2169
2170/**
2171 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2172 *
2173 * @adev: amdgpu_device pointer
2174 * @bo_va: bo_va to store the address
2175 * @saddr: where to map the BO
2176 * @offset: requested offset in the BO
2177 * @size: size of the mapping in bytes
2178 * @flags: attributes of pages (read/write/valid/etc.)
2179 *
2180 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2181 * mappings as we do so.
2182 *
2183 * Returns:
2184 * 0 for success, error for failure.
2185 *
2186 * Object has to be reserved and unreserved outside!
2187 */
2188int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2189 struct amdgpu_bo_va *bo_va,
2190 uint64_t saddr, uint64_t offset,
2191 uint64_t size, uint64_t flags)
2192{
2193 struct amdgpu_bo_va_mapping *mapping;
2194 struct amdgpu_bo *bo = bo_va->base.bo;
2195 uint64_t eaddr;
2196 int r;
2197
2198 /* validate the parameters */
2199 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2200 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2201 return -EINVAL;
2202
2203 /* make sure object fit at this offset */
2204 eaddr = saddr + size - 1;
2205 if (saddr >= eaddr ||
2206 (bo && offset + size > amdgpu_bo_size(bo)))
2207 return -EINVAL;
2208
2209 /* Allocate all the needed memory */
2210 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2211 if (!mapping)
2212 return -ENOMEM;
2213
2214 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2215 if (r) {
2216 kfree(mapping);
2217 return r;
2218 }
2219
2220 saddr /= AMDGPU_GPU_PAGE_SIZE;
2221 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2222
2223 mapping->start = saddr;
2224 mapping->last = eaddr;
2225 mapping->offset = offset;
2226 mapping->flags = flags;
2227
2228 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2229
2230 return 0;
2231}
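
/*
 * Note: unlike amdgpu_vm_bo_map() above, this variant does not fail on
 * overlap. amdgpu_vm_bo_clear_mappings() first removes, and where
 * necessary splits, everything in the target range before the new
 * mapping is inserted. The mapping is allocated up front so that the
 * clear can't be left half done when we run out of memory.
 */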
2232
2233/**
2234 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2235 *
2236 * @adev: amdgpu_device pointer
2237 * @bo_va: bo_va to remove the address from
2238 * @saddr: where the BO is mapped
2239 *
2240 * Remove a mapping of the BO at the specified addr from the VM.
2241 *
2242 * Returns:
2243 * 0 for success, error for failure.
2244 *
2245 * Object has to be reserved and unreserved outside!
2246 */
2247int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2248 struct amdgpu_bo_va *bo_va,
2249 uint64_t saddr)
2250{
2251 struct amdgpu_bo_va_mapping *mapping;
2252 struct amdgpu_vm *vm = bo_va->base.vm;
2253 bool valid = true;
2254
2255 saddr /= AMDGPU_GPU_PAGE_SIZE;
2256
2257 list_for_each_entry(mapping, &bo_va->valids, list) {
2258 if (mapping->start == saddr)
2259 break;
2260 }
2261
2262 if (&mapping->list == &bo_va->valids) {
2263 valid = false;
2264
2265 list_for_each_entry(mapping, &bo_va->invalids, list) {
2266 if (mapping->start == saddr)
2267 break;
2268 }
2269
2270 if (&mapping->list == &bo_va->invalids)
2271 return -ENOENT;
2272 }
2273
2274 list_del(&mapping->list);
2275 amdgpu_vm_it_remove(mapping, &vm->va);
2276 mapping->bo_va = NULL;
2277 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2278
2279 if (valid)
2280 list_add(&mapping->list, &vm->freed);
2281 else
2282 amdgpu_vm_free_mapping(adev, vm, mapping,
2283 bo_va->last_pt_update);
2284
2285 return 0;
2286}
2287
2288/**
2289 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2290 *
2291 * @adev: amdgpu_device pointer
2292 * @vm: VM structure to use
2293 * @saddr: start of the range
2294 * @size: size of the range
2295 *
2296 * Remove all mappings in a range, splitting them as appropriate.
2297 *
2298 * Returns:
2299 * 0 for success, error for failure.
2300 */
2301int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2302 struct amdgpu_vm *vm,
2303 uint64_t saddr, uint64_t size)
2304{
2305 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2306 LIST_HEAD(removed);
2307 uint64_t eaddr;
2308
2309 eaddr = saddr + size - 1;
2310 saddr /= AMDGPU_GPU_PAGE_SIZE;
2311 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2312
2313 /* Allocate all the needed memory */
2314 before = kzalloc(sizeof(*before), GFP_KERNEL);
2315 if (!before)
2316 return -ENOMEM;
2317 INIT_LIST_HEAD(&before->list);
2318
2319 after = kzalloc(sizeof(*after), GFP_KERNEL);
2320 if (!after) {
2321 kfree(before);
2322 return -ENOMEM;
2323 }
2324 INIT_LIST_HEAD(&after->list);
2325
2326 /* Now gather all removed mappings */
2327 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2328 while (tmp) {
2329 /* Remember mapping split at the start */
2330 if (tmp->start < saddr) {
2331 before->start = tmp->start;
2332 before->last = saddr - 1;
2333 before->offset = tmp->offset;
2334 before->flags = tmp->flags;
2335 before->bo_va = tmp->bo_va;
2336 list_add(&before->list, &tmp->bo_va->invalids);
2337 }
2338
2339 /* Remember mapping split at the end */
2340 if (tmp->last > eaddr) {
2341 after->start = eaddr + 1;
2342 after->last = tmp->last;
2343 after->offset = tmp->offset;
2344 after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2345 after->flags = tmp->flags;
2346 after->bo_va = tmp->bo_va;
2347 list_add(&after->list, &tmp->bo_va->invalids);
2348 }
2349
2350 list_del(&tmp->list);
2351 list_add(&tmp->list, &removed);
2352
2353 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2354 }
2355
2356 /* And free them up */
2357 list_for_each_entry_safe(tmp, next, &removed, list) {
2358 amdgpu_vm_it_remove(tmp, &vm->va);
2359 list_del(&tmp->list);
2360
2361 if (tmp->start < saddr)
2362 tmp->start = saddr;
2363 if (tmp->last > eaddr)
2364 tmp->last = eaddr;
2365
2366 tmp->bo_va = NULL;
2367 list_add(&tmp->list, &vm->freed);
2368 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2369 }
2370
2371 /* Insert partial mapping before the range */
2372 if (!list_empty(&before->list)) {
2373 amdgpu_vm_it_insert(before, &vm->va);
2374 if (before->flags & AMDGPU_PTE_PRT)
2375 amdgpu_vm_prt_get(adev);
2376 } else {
2377 kfree(before);
2378 }
2379
2380 /* Insert partial mapping after the range */
2381 if (!list_empty(&after->list)) {
2382 amdgpu_vm_it_insert(after, &vm->va);
2383 if (after->flags & AMDGPU_PTE_PRT)
2384 amdgpu_vm_prt_get(adev);
2385 } else {
2386 kfree(after);
2387 }
2388
2389 return 0;
2390}
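
/*
 * Split semantics, worked through in page numbers: assume one mapping
 * covering pages [0x100..0x1ff] and a clear of [0x140..0x17f]. The loop
 * above removes the original mapping and remembers two partial ones:
 * "before" = [0x100..0x13f] keeping the original offset and "after" =
 * [0x180..0x1ff] with the offset advanced by the byte distance from the
 * original start. Both survivors go back into the interval tree, while
 * the clipped middle piece ends up on &vm->freed.
 */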
2391
2392/**
2393 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2394 *
2395 * @vm: the requested VM
2396 * @addr: the address
2397 *
2398 * Find a mapping by its address.
2399 *
2400 * Returns:
2401 * The amdgpu_bo_va_mapping matching addr, or NULL if none is found
2402 *
2403 */
2404struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2405 uint64_t addr)
2406{
2407 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2408}
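
/*
 * Lookup sketch (illustrative only, not driver code): translating a GPU
 * VA in bytes back to the backing BO and the byte offset inside it. The
 * helper name is hypothetical; callers must ensure the mapping can't go
 * away while the result is used.
 */
#if 0
static struct amdgpu_bo *example_va_to_bo(struct amdgpu_vm *vm,
					  uint64_t addr, uint64_t *offset)
{
	uint64_t page = addr / AMDGPU_GPU_PAGE_SIZE;
	struct amdgpu_bo_va_mapping *mapping;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, page);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return NULL;

	*offset = mapping->offset +
		  (page - mapping->start) * AMDGPU_GPU_PAGE_SIZE +
		  (addr & (AMDGPU_GPU_PAGE_SIZE - 1));
	return mapping->bo_va->base.bo;
}
#endif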
2409
2410/**
2411 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2412 *
2413 * @vm: the requested vm
2414 * @ticket: CS ticket
2415 *
2416 * Trace all mappings of BOs reserved during a command submission.
2417 */
2418void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2419{
2420 struct amdgpu_bo_va_mapping *mapping;
2421
2422 if (!trace_amdgpu_vm_bo_cs_enabled())
2423 return;
2424
2425 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2426 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2427 if (mapping->bo_va && mapping->bo_va->base.bo) {
2428 struct amdgpu_bo *bo;
2429
2430 bo = mapping->bo_va->base.bo;
2431 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2432 ticket)
2433 continue;
2434 }
2435
2436 trace_amdgpu_vm_bo_cs(mapping);
2437 }
2438}
2439
2440/**
2441 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2442 *
2443 * @adev: amdgpu_device pointer
2444 * @bo_va: requested bo_va
2445 *
2446 * Remove @bo_va->bo from the requested vm.
2447 *
2448 * Object has to be reserved!
2449 */
2450void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2451 struct amdgpu_bo_va *bo_va)
2452{
2453 struct amdgpu_bo_va_mapping *mapping, *next;
2454 struct amdgpu_bo *bo = bo_va->base.bo;
2455 struct amdgpu_vm *vm = bo_va->base.vm;
2456 struct amdgpu_vm_bo_base **base;
2457
2458 if (bo) {
2459 if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2460 vm->bulk_moveable = false;
2461
2462 for (base = &bo_va->base.bo->vm_bo; *base;
2463 base = &(*base)->next) {
2464 if (*base != &bo_va->base)
2465 continue;
2466
2467 *base = bo_va->base.next;
2468 break;
2469 }
2470 }
2471
2472 spin_lock(&vm->invalidated_lock);
2473 list_del(&bo_va->base.vm_status);
2474 spin_unlock(&vm->invalidated_lock);
2475
2476 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2477 list_del(&mapping->list);
2478 amdgpu_vm_it_remove(mapping, &vm->va);
2479 mapping->bo_va = NULL;
2480 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2481 list_add(&mapping->list, &vm->freed);
2482 }
2483 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2484 list_del(&mapping->list);
2485 amdgpu_vm_it_remove(mapping, &vm->va);
2486 amdgpu_vm_free_mapping(adev, vm, mapping,
2487 bo_va->last_pt_update);
2488 }
2489
2490 dma_fence_put(bo_va->last_pt_update);
2491
2492 if (bo && bo_va->is_xgmi) {
2493 mutex_lock(&adev->vm_manager.lock_pstate);
2494 if (--adev->vm_manager.xgmi_map_counter == 0)
2495 amdgpu_xgmi_set_pstate(adev, 0);
2496 mutex_unlock(&adev->vm_manager.lock_pstate);
2497 }
2498
2499 kfree(bo_va);
2500}
2501
2502/**
2503 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2504 *
2505 * @adev: amdgpu_device pointer
2506 * @bo: amdgpu buffer object
2507 * @evicted: is the BO evicted
2508 *
2509 * Mark @bo as invalid.
2510 */
2511void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2512 struct amdgpu_bo *bo, bool evicted)
2513{
2514 struct amdgpu_vm_bo_base *bo_base;
2515
2516 /* the shadow bo doesn't have a bo base; its validation needs its parent */
2517 if (bo->parent && bo->parent->shadow == bo)
2518 bo = bo->parent;
2519
2520 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2521 struct amdgpu_vm *vm = bo_base->vm;
2522
2523 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
2524 amdgpu_vm_bo_evicted(bo_base);
2525 continue;
2526 }
2527
2528 if (bo_base->moved)
2529 continue;
2530 bo_base->moved = true;
2531
2532 if (bo->tbo.type == ttm_bo_type_kernel)
2533 amdgpu_vm_bo_relocated(bo_base);
2534 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2535 amdgpu_vm_bo_moved(bo_base);
2536 else
2537 amdgpu_vm_bo_invalidated(bo_base);
2538 }
2539}
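
/*
 * Summary of the state transitions above: if a BO sharing the root
 * reservation is evicted, it goes to the evicted list; page table BOs
 * (ttm_bo_type_kernel) go to relocated so their PD entries get fixed
 * up; other per-VM BOs go to moved; everything else lands on
 * invalidated and may get its PTEs cleared in amdgpu_vm_handle_moved()
 * if the BO can't be reserved there.
 */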
2540
2541/**
2542 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2543 *
2544 * @vm_size: VM size
2545 *
2546 * Returns:
2547 * VM page table size as a power of two
2548 */
2549static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2550{
2551 /* Total bits covered by PD + PTs */
2552 unsigned bits = ilog2(vm_size) + 18;
2553
2554 /* Make sure the PD is 4K in size up to 8GB address space.
2555 * Above that, split equally between PD and PTs */
2556 if (vm_size <= 8)
2557 return (bits - 9);
2558 else
2559 return ((bits + 3) / 2);
2560}
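
/*
 * Worked example: for vm_size = 256 GB, bits = ilog2(256) + 18 = 26.
 * Since vm_size > 8 GB the block size is (26 + 3) / 2 = 14, splitting
 * the covered bits roughly evenly between PD and PTs. For vm_size =
 * 8 GB, bits = 21 and the result is 21 - 9 = 12, i.e. a 4K PD with
 * 512 entries.
 */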
2561
2562/**
2563 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2564 *
2565 * @adev: amdgpu_device pointer
2566 * @min_vm_size: the minimum vm size in GB if it is set to auto
2567 * @fragment_size_default: Default PTE fragment size
2568 * @max_level: max VMPT level
2569 * @max_bits: max address space size in bits
2570 *
2571 */
2572void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2573 uint32_t fragment_size_default, unsigned max_level,
2574 unsigned max_bits)
2575{
2576 unsigned int max_size = 1 << (max_bits - 30);
2577 unsigned int vm_size;
2578 uint64_t tmp;
2579
2580 /* adjust vm size first */
2581 if (amdgpu_vm_size != -1) {
2582 vm_size = amdgpu_vm_size;
2583 if (vm_size > max_size) {
2584 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2585 amdgpu_vm_size, max_size);
2586 vm_size = max_size;
2587 }
2588 } else {
2589 struct sysinfo si;
2590 unsigned int phys_ram_gb;
2591
2592 /* Optimal VM size depends on the amount of physical
2593 * RAM available. Underlying requirements and
2594 * assumptions:
2595 *
2596 * - Need to map system memory and VRAM from all GPUs
2597 * - VRAM from other GPUs not known here
2598 * - Assume VRAM <= system memory
2599 * - On GFX8 and older, VM space can be segmented for
2600 * different MTYPEs
2601 * - Need to allow room for fragmentation, guard pages etc.
2602 *
2603 * This adds up to a rough guess of system memory x3.
2604 * Round up to power of two to maximize the available
2605 * VM size with the given page table size.
2606 */
2607 si_meminfo(&si);
2608 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2609 (1 << 30) - 1) >> 30;
2610 vm_size = roundup_pow_of_two(
2611 min(max(phys_ram_gb * 3, min_vm_size), max_size));
2612 }
2613
2614 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2615
2616 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2617 if (amdgpu_vm_block_size != -1)
2618 tmp >>= amdgpu_vm_block_size - 9;
2619 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2620 adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2621 switch (adev->vm_manager.num_level) {
2622 case 3:
2623 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2624 break;
2625 case 2:
2626 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2627 break;
2628 case 1:
2629 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2630 break;
2631 default:
2632 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2633 }
2634 /* block size depends on vm size and hw setup */
2635 if (amdgpu_vm_block_size != -1)
2636 adev->vm_manager.block_size =
2637 min((unsigned)amdgpu_vm_block_size, max_bits
2638 - AMDGPU_GPU_PAGE_SHIFT
2639 - 9 * adev->vm_manager.num_level);
2640 else if (adev->vm_manager.num_level > 1)
2641 adev->vm_manager.block_size = 9;
2642 else
2643 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2644
2645 if (amdgpu_vm_fragment_size == -1)
2646 adev->vm_manager.fragment_size = fragment_size_default;
2647 else
2648 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2649
2650 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2651 vm_size, adev->vm_manager.num_level + 1,
2652 adev->vm_manager.block_size,
2653 adev->vm_manager.fragment_size);
2654}
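
/*
 * Worked example, assuming no module parameter overrides and a large
 * enough address space: with 32 GB of system RAM, vm_size =
 * roundup_pow_of_two(3 * 32) = 128 GB, so max_pfn = 128 << 18 = 2^25.
 * Then fls64(2^25) - 1 = 25 and DIV_ROUND_UP(25, 9) - 1 = 2, giving a
 * two level page table rooted at PDB1 with the default 9-bit block
 * size.
 */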
2655
2656/**
2657 * amdgpu_vm_wait_idle - wait for the VM to become idle
2658 *
2659 * @vm: VM object to wait for
2660 * @timeout: timeout to wait for VM to become idle
2661 */
2662long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2663{
2664 return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
2665 true, true, timeout);
2666}
2667
2668/**
2669 * amdgpu_vm_init - initialize a vm instance
2670 *
2671 * @adev: amdgpu_device pointer
2672 * @vm: requested vm
2673 * @vm_context: Indicates if it is a GFX or Compute context
2674 * @pasid: Process address space identifier
2675 *
2676 * Init @vm fields.
2677 *
2678 * Returns:
2679 * 0 for success, error for failure.
2680 */
2681int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2682 int vm_context, unsigned int pasid)
2683{
2684 struct amdgpu_bo_param bp;
2685 struct amdgpu_bo *root;
2686 int r, i;
2687
2688 vm->va = RB_ROOT_CACHED;
2689 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2690 vm->reserved_vmid[i] = NULL;
2691 INIT_LIST_HEAD(&vm->evicted);
2692 INIT_LIST_HEAD(&vm->relocated);
2693 INIT_LIST_HEAD(&vm->moved);
2694 INIT_LIST_HEAD(&vm->idle);
2695 INIT_LIST_HEAD(&vm->invalidated);
2696 spin_lock_init(&vm->invalidated_lock);
2697 INIT_LIST_HEAD(&vm->freed);
2698
2699 /* create scheduler entities for page table updates */
2700 r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
2701 adev->vm_manager.vm_pte_num_rqs, NULL);
2702 if (r)
2703 return r;
2704
2705 r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
2706 adev->vm_manager.vm_pte_num_rqs, NULL);
2707 if (r)
2708 goto error_free_direct;
2709
2710 vm->pte_support_ats = false;
2711
2712 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2713 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2714 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2715
2716 if (adev->asic_type == CHIP_RAVEN)
2717 vm->pte_support_ats = true;
2718 } else {
2719 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2720 AMDGPU_VM_USE_CPU_FOR_GFX);
2721 }
2722 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2723 vm->use_cpu_for_update ? "CPU" : "SDMA");
2724 WARN_ONCE((vm->use_cpu_for_update &&
2725 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2726 "CPU update of VM recommended only for large BAR system\n");
2727
2728 if (vm->use_cpu_for_update)
2729 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2730 else
2731 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2732 vm->last_update = NULL;
2733
2734 amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
2735 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2736 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2737 r = amdgpu_bo_create(adev, &bp, &root);
2738 if (r)
2739 goto error_free_delayed;
2740
2741 r = amdgpu_bo_reserve(root, true);
2742 if (r)
2743 goto error_free_root;
2744
2745 r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
2746 if (r)
2747 goto error_unreserve;
2748
2749 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2750
2751 r = amdgpu_vm_clear_bo(adev, vm, root, false);
2752 if (r)
2753 goto error_unreserve;
2754
2755 amdgpu_bo_unreserve(vm->root.base.bo);
2756
2757 if (pasid) {
2758 unsigned long flags;
2759
2760 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2761 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2762 GFP_ATOMIC);
2763 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2764 if (r < 0)
2765 goto error_free_root;
2766
2767 vm->pasid = pasid;
2768 }
2769
2770 INIT_KFIFO(vm->faults);
2771
2772 return 0;
2773
2774error_unreserve:
2775 amdgpu_bo_unreserve(vm->root.base.bo);
2776
2777error_free_root:
2778 amdgpu_bo_unref(&vm->root.base.bo->shadow);
2779 amdgpu_bo_unref(&vm->root.base.bo);
2780 vm->root.base.bo = NULL;
2781
2782error_free_delayed:
2783 drm_sched_entity_destroy(&vm->delayed);
2784
2785error_free_direct:
2786 drm_sched_entity_destroy(&vm->direct);
2787
2788 return r;
2789}
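
/*
 * Init sketch (illustrative only, not driver code): creating a GFX VM
 * for a new file descriptor. The wrapper and the pasid value are
 * hypothetical; AMDGPU_VM_CONTEXT_GFX selects the graphics flavour of
 * the context.
 */
#if 0
static int example_open_vm(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv, unsigned int pasid)
{
	int r;

	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		return r;

	/* ... use the VM, tear it down again with amdgpu_vm_fini() ... */
	return 0;
}
#endif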
2790
2791/**
2792 * amdgpu_vm_check_clean_reserved - check if a VM is clean
2793 *
2794 * @adev: amdgpu_device pointer
2795 * @vm: the VM to check
2796 *
2797 * Check all entries of the root PD. If any subsequent PDs are allocated,
2798 * page tables are being created and filled, so this is not a clean
2799 * VM.
2800 *
2801 * Returns:
2802 * 0 if this VM is clean
2803 */
2804static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2805 struct amdgpu_vm *vm)
2806{
2807 enum amdgpu_vm_level root = adev->vm_manager.root_level;
2808 unsigned int entries = amdgpu_vm_num_entries(adev, root);
2809 unsigned int i = 0;
2810
2811 if (!vm->root.entries)
2812 return 0;
2813
2814 for (i = 0; i < entries; i++) {
2815 if (vm->root.entries[i].base.bo)
2816 return -EINVAL;
2817 }
2818
2819 return 0;
2820}
2821
2822/**
2823 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2824 *
2825 * @adev: amdgpu_device pointer
2826 * @vm: requested vm
2827 * @pasid: pasid to use
2828 *
2829 * This only works on GFX VMs that don't have any BOs added and no
2830 * page tables allocated yet.
2831 *
2832 * Changes the following VM parameters:
2833 * - use_cpu_for_update
2834 * - pte_support_ats
2835 * - pasid (old PASID is released, because compute manages its own PASIDs)
2836 *
2837 * Reinitializes the page directory to reflect the changed ATS
2838 * setting.
2839 *
2840 * Returns:
2841 * 0 for success, -errno for errors.
2842 */
2843int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2844 unsigned int pasid)
2845{
2846 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2847 int r;
2848
2849 r = amdgpu_bo_reserve(vm->root.base.bo, true);
2850 if (r)
2851 return r;
2852
2853 /* Sanity checks */
2854 r = amdgpu_vm_check_clean_reserved(adev, vm);
2855 if (r)
2856 goto unreserve_bo;
2857
2858 if (pasid) {
2859 unsigned long flags;
2860
2861 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2862 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2863 GFP_ATOMIC);
2864 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2865
2866 if (r == -ENOSPC)
2867 goto unreserve_bo;
2868 r = 0;
2869 }
2870
2871 /* Check if PD needs to be reinitialized and do it before
2872 * changing any other state, in case it fails.
2873 */
2874 if (pte_support_ats != vm->pte_support_ats) {
2875 vm->pte_support_ats = pte_support_ats;
2876 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
2877 if (r)
2878 goto free_idr;
2879 }
2880
2881 /* Update VM state */
2882 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2883 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2884 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2885 vm->use_cpu_for_update ? "CPU" : "SDMA");
2886 WARN_ONCE((vm->use_cpu_for_update &&
2887 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2888 "CPU update of VM recommended only for large BAR system\n");
2889
2890 if (vm->use_cpu_for_update)
2891 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2892 else
2893 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2894 dma_fence_put(vm->last_update);
2895 vm->last_update = NULL;
2896
2897 if (vm->pasid) {
2898 unsigned long flags;
2899
2900 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2901 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2902 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2903
2904 /* Free the original amdgpu allocated pasid;
2905 * it will be replaced with a kfd allocated pasid
2906 */
2907 amdgpu_pasid_free(vm->pasid);
2908 vm->pasid = 0;
2909 }
2910
2911 /* Free the shadow bo for compute VM */
2912 amdgpu_bo_unref(&vm->root.base.bo->shadow);
2913
2914 if (pasid)
2915 vm->pasid = pasid;
2916
2917 goto unreserve_bo;
2918
2919free_idr:
2920 if (pasid) {
2921 unsigned long flags;
2922
2923 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2924 idr_remove(&adev->vm_manager.pasid_idr, pasid);
2925 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2926 }
2927unreserve_bo:
2928 amdgpu_bo_unreserve(vm->root.base.bo);
2929 return r;
2930}
2931
2932/**
2933 * amdgpu_vm_release_compute - release a compute vm
2934 * @adev: amdgpu_device pointer
2935 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2936 *
2937 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
2938 * pasid from the vm. Compute should stop using the vm after this call.
2939 */
2940void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2941{
2942 if (vm->pasid) {
2943 unsigned long flags;
2944
2945 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2946 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2947 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2948 }
2949 vm->pasid = 0;
2950}
2951
2952/**
2953 * amdgpu_vm_fini - tear down a vm instance
2954 *
2955 * @adev: amdgpu_device pointer
2956 * @vm: requested vm
2957 *
2958 * Tear down @vm.
2959 * Unbind the VM and remove all BOs from the vm bo list.
2960 */
2961void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2962{
2963 struct amdgpu_bo_va_mapping *mapping, *tmp;
2964 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2965 struct amdgpu_bo *root;
2966 int i;
2967
2968 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2969
2970 root = amdgpu_bo_ref(vm->root.base.bo);
2971 amdgpu_bo_reserve(root, true);
2972 if (vm->pasid) {
2973 unsigned long flags;
2974
2975 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2976 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2977 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2978 vm->pasid = 0;
2979 }
2980
2981 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2982 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2983 amdgpu_vm_prt_fini(adev, vm);
2984 prt_fini_needed = false;
2985 }
2986
2987 list_del(&mapping->list);
2988 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2989 }
2990
2991 amdgpu_vm_free_pts(adev, vm, NULL);
2992 amdgpu_bo_unreserve(root);
2993 amdgpu_bo_unref(&root);
2994 WARN_ON(vm->root.base.bo);
2995
2996 drm_sched_entity_destroy(&vm->direct);
2997 drm_sched_entity_destroy(&vm->delayed);
2998
2999 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
3000 dev_err(adev->dev, "still active bo inside vm\n");
3001 }
3002 rbtree_postorder_for_each_entry_safe(mapping, tmp,
3003 &vm->va.rb_root, rb) {
3004 /* Don't remove the mapping here, we don't want to trigger a
3005 * rebalance and the tree is about to be destroyed anyway.
3006 */
3007 list_del(&mapping->list);
3008 kfree(mapping);
3009 }
3010
3011 dma_fence_put(vm->last_update);
3012 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3013 amdgpu_vmid_free_reserved(adev, vm, i);
3014}
3015
3016/**
3017 * amdgpu_vm_manager_init - init the VM manager
3018 *
3019 * @adev: amdgpu_device pointer
3020 *
3021 * Initialize the VM manager structures
3022 */
3023void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3024{
3025 unsigned i;
3026
3027 amdgpu_vmid_mgr_init(adev);
3028
3029 adev->vm_manager.fence_context =
3030 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3031 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3032 adev->vm_manager.seqno[i] = 0;
3033
3034 spin_lock_init(&adev->vm_manager.prt_lock);
3035 atomic_set(&adev->vm_manager.num_prt_users, 0);
3036
3037 /* If not overridden by the user, compute VM tables will by default be
3038 * updated by the CPU only on large BAR systems
3039 */
3040#ifdef CONFIG_X86_64
3041 if (amdgpu_vm_update_mode == -1) {
3042 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3043 adev->vm_manager.vm_update_mode =
3044 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3045 else
3046 adev->vm_manager.vm_update_mode = 0;
3047 } else
3048 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3049#else
3050 adev->vm_manager.vm_update_mode = 0;
3051#endif
3052
3053 idr_init(&adev->vm_manager.pasid_idr);
3054 spin_lock_init(&adev->vm_manager.pasid_lock);
3055
3056 adev->vm_manager.xgmi_map_counter = 0;
3057 mutex_init(&adev->vm_manager.lock_pstate);
3058}
3059
3060/**
3061 * amdgpu_vm_manager_fini - cleanup VM manager
3062 *
3063 * @adev: amdgpu_device pointer
3064 *
3065 * Cleanup the VM manager and free resources.
3066 */
3067void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3068{
3069 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3070 idr_destroy(&adev->vm_manager.pasid_idr);
3071
3072 amdgpu_vmid_mgr_fini(adev);
3073}
3074
3075/**
3076 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3077 *
3078 * @dev: drm device pointer
3079 * @data: drm_amdgpu_vm
3080 * @filp: drm file pointer
3081 *
3082 * Returns:
3083 * 0 for success, -errno for errors.
3084 */
3085int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3086{
3087 union drm_amdgpu_vm *args = data;
3088 struct amdgpu_device *adev = dev->dev_private;
3089 struct amdgpu_fpriv *fpriv = filp->driver_priv;
3090 int r;
3091
3092 switch (args->in.op) {
3093 case AMDGPU_VM_OP_RESERVE_VMID:
3094 /* We only have the requirement to reserve a vmid from the gfxhub */
3095 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
3096 AMDGPU_GFXHUB_0);
3097 if (r)
3098 return r;
3099 break;
3100 case AMDGPU_VM_OP_UNRESERVE_VMID:
3101 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
3102 break;
3103 default:
3104 return -EINVAL;
3105 }
3106
3107 return 0;
3108}
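
/*
 * Userspace sketch (illustrative only, assuming the uapi header from
 * libdrm): reserving a VMID for the calling process through the ioctl
 * above. fd is an open render node; error handling is minimal.
 */
#if 0
#include <sys/ioctl.h>
#include <amdgpu_drm.h>

static int example_reserve_vmid(int fd)
{
	union drm_amdgpu_vm args = {};

	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
	return ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args);
}
#endif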
3109
3110/**
3111 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3112 *
3113 * @adev: amdgpu device pointer
3114 * @pasid: PASID identifier for VM
3115 * @task_info: task_info to fill.
3116 */
3117void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3118 struct amdgpu_task_info *task_info)
3119{
3120 struct amdgpu_vm *vm;
3121 unsigned long flags;
3122
3123 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3124
3125 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3126 if (vm)
3127 *task_info = vm->task_info;
3128
3129 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3130}
3131
3132/**
3133 * amdgpu_vm_set_task_info - Sets VMs task info.
3134 *
3135 * @vm: vm for which to set the info
3136 */
3137void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3138{
3139 if (vm->task_info.pid)
3140 return;
3141
3142 vm->task_info.pid = current->pid;
3143 get_task_comm(vm->task_info.task_name, current);
3144
3145 if (current->group_leader->mm != current->mm)
3146 return;
3147
3148 vm->task_info.tgid = current->group_leader->pid;
3149 get_task_comm(vm->task_info.process_name, current->group_leader);
3150}
3151
3152/**
3153 * amdgpu_vm_handle_fault - graceful handling of VM faults.
3154 * @adev: amdgpu device pointer
3155 * @pasid: PASID of the VM
3156 * @addr: Address of the fault
3157 *
3158 * Try to gracefully handle a VM fault. Return true if the fault was handled and
3159 * shouldn't be reported any more.
3160 */
3161bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
3162 uint64_t addr)
3163{
3164 struct amdgpu_bo *root;
3165 uint64_t value, flags;
3166 struct amdgpu_vm *vm;
3167 long r;
3168
3169 spin_lock(&adev->vm_manager.pasid_lock);
3170 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3171 if (vm)
3172 root = amdgpu_bo_ref(vm->root.base.bo);
3173 else
3174 root = NULL;
3175 spin_unlock(&adev->vm_manager.pasid_lock);
3176
3177 if (!root)
3178 return false;
3179
3180 r = amdgpu_bo_reserve(root, true);
3181 if (r)
3182 goto error_unref;
3183
3184 /* Double check that the VM still exists */
3185 spin_lock(&adev->vm_manager.pasid_lock);
3186 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3187 if (vm && vm->root.base.bo != root)
3188 vm = NULL;
3189 spin_unlock(&adev->vm_manager.pasid_lock);
3190 if (!vm)
3191 goto error_unlock;
3192
3193 addr /= AMDGPU_GPU_PAGE_SIZE;
3194 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3195 AMDGPU_PTE_SYSTEM;
3196
3197 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3198 /* Redirect the access to the dummy page */
3199 value = adev->dummy_page_addr;
3200 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3201 AMDGPU_PTE_WRITEABLE;
3202 } else {
3203 /* Let the hw retry silently on the PTE */
3204 value = 0;
3205 }
3206
3207 r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
3208 flags, value, NULL, NULL);
3209 if (r)
3210 goto error_unlock;
3211
3212 r = amdgpu_vm_update_pdes(adev, vm, true);
3213
3214error_unlock:
3215 amdgpu_bo_unreserve(root);
3216 if (r < 0)
3217 DRM_ERROR("Can't handle page fault (%ld)\n", r);
3218
3219error_unref:
3220 amdgpu_bo_unref(&root);
3221
3222 return false;
3223}
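
/*
 * Caller sketch (illustrative only, not driver code): an interrupt
 * handler for a GPU page fault would extract the PASID and faulting
 * address from the IV entry (fields hypothetical here) and only report
 * the fault if it couldn't be handled gracefully:
 */
#if 0
	if (!amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		dev_err(adev->dev, "unhandled page fault (pasid %d)\n",
			entry->pasid);
#endif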