1/*
2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include <linux/dma-buf.h>
23#include <linux/list.h>
24#include <linux/pagemap.h>
25#include <linux/sched/mm.h>
26#include <linux/sched/task.h>
27
28#include "amdgpu_object.h"
29#include "amdgpu_gem.h"
30#include "amdgpu_vm.h"
31#include "amdgpu_amdkfd.h"
32#include "amdgpu_dma_buf.h"
33#include <uapi/linux/kfd_ioctl.h>
34#include "amdgpu_xgmi.h"
35
36/* Userptr restore delay, just long enough to allow consecutive VM
37 * changes to accumulate
38 */
39#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40
41/* Impose limit on how much memory KFD can use */
42static struct {
43 uint64_t max_system_mem_limit;
44 uint64_t max_ttm_mem_limit;
45 int64_t system_mem_used;
46 int64_t ttm_mem_used;
47 spinlock_t mem_limit_lock;
48} kfd_mem_limit;
49
50static const char * const domain_bit_to_string[] = {
51 "CPU",
52 "GTT",
53 "VRAM",
54 "GDS",
55 "GWS",
56 "OA"
57};
58
59#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
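
/* For example, with the usual AMDGPU_GEM_DOMAIN_* bit layout (CPU = 0x1,
 * GTT = 0x2, VRAM = 0x4, ...), ffs(AMDGPU_GEM_DOMAIN_VRAM) - 1 == 2, so
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) resolves to "VRAM".
 */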
60
61static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
62
63static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
64 struct kgd_mem *mem)
65{
66 struct kfd_mem_attachment *entry;
67
68 list_for_each_entry(entry, &mem->attachments, list)
69 if (entry->bo_va->base.vm == avm)
70 return true;
71
72 return false;
73}
74
75/* Set memory usage limits. Currently, the limits are
76 * System (TTM + userptr) memory - 15/16th System RAM
77 * TTM memory - 3/8th System RAM
78 */
79void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
80{
81 struct sysinfo si;
82 uint64_t mem;
83
84 si_meminfo(&si);
85 mem = si.freeram - si.freehigh;
86 mem *= si.mem_unit;
87
88 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
89 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
90 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
91 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
92 (kfd_mem_limit.max_system_mem_limit >> 20),
93 (kfd_mem_limit.max_ttm_mem_limit >> 20));
94}
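
/* Worked example (illustrative): with roughly 64 GiB of usable memory
 * reported by si_meminfo(), max_system_mem_limit = 64 GiB - 64 GiB/16
 * = 60 GiB (15/16th) and max_ttm_mem_limit = 64 GiB/2 - 64 GiB/8
 * = 24 GiB (3/8th).
 */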
95
96void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
97{
98 kfd_mem_limit.system_mem_used += size;
99}
100
101/* Estimate page table size needed to represent a given memory size
102 *
103 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
104 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
105 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
106 * for 2MB pages for TLB efficiency. However, small allocations and
107 * fragmented system memory still need some 4KB pages. We choose a
108 * compromise that should work in most cases without reserving too
109 * much memory for page tables unnecessarily (factor 16K, >> 14).
110 */
111#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
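
/* Worked example: for 64 GiB of managed memory, ESTIMATE_PT_SIZE() reserves
 * 64 GiB >> 14 = 4 MiB for page tables, in between the 128 MiB that pure
 * 4 KiB mappings would need (>> 9) and the 256 KiB that pure 2 MiB mappings
 * would need (>> 18).
 */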
112
113static size_t amdgpu_amdkfd_acc_size(uint64_t size)
114{
115 size >>= PAGE_SHIFT;
116 size *= sizeof(dma_addr_t) + sizeof(void *);
117
118 return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
119 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
120 PAGE_ALIGN(size);
121}
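
/* Rough example: for a 2 MiB buffer backed by 4 KiB pages, the per-page
 * bookkeeping above is 512 * (sizeof(dma_addr_t) + sizeof(void *)), i.e.
 * 8 KiB on a 64-bit kernel, plus the rounded-up sizes of struct amdgpu_bo
 * and struct ttm_tt.
 */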
122
123/**
124 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by the size
125 * of the buffer, including any memory reserved for control structures
126 *
127 * @adev: Device to which the allocated BO belongs
128 * @size: Size of the buffer, in bytes, encapsulated by the BO. This should be
129 * equivalent to amdgpu_bo_size(BO)
130 * @alloc_flag: KFD_IOC_ALLOC_MEM_FLAGS_* flag used when allocating the BO
131 *
132 * Return: 0 on success, -ENOMEM otherwise
133 */
134static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
135 uint64_t size, u32 alloc_flag)
136{
137 uint64_t reserved_for_pt =
138 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
139 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
140 int ret = 0;
141
142 acc_size = amdgpu_amdkfd_acc_size(size);
143
144 vram_needed = 0;
145 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
146 system_mem_needed = acc_size + size;
147 ttm_mem_needed = acc_size + size;
148 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
149 system_mem_needed = acc_size;
150 ttm_mem_needed = acc_size;
151 vram_needed = size;
152 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
153 system_mem_needed = acc_size + size;
154 ttm_mem_needed = acc_size;
155 } else if (alloc_flag &
156 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
157 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
158 system_mem_needed = acc_size;
159 ttm_mem_needed = acc_size;
160 } else {
161 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
162 return -ENOMEM;
163 }
164
165 spin_lock(&kfd_mem_limit.mem_limit_lock);
166
167 if (kfd_mem_limit.system_mem_used + system_mem_needed >
168 kfd_mem_limit.max_system_mem_limit)
169 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
170
171 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
172 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
173 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
174 kfd_mem_limit.max_ttm_mem_limit) ||
175 (adev->kfd.vram_used + vram_needed >
176 adev->gmc.real_vram_size - reserved_for_pt)) {
177 ret = -ENOMEM;
178 goto release;
179 }
180
181 /* Update memory accounting by decreasing available system
182 * memory, TTM memory and GPU memory as computed above
183 */
184 adev->kfd.vram_used += vram_needed;
185 kfd_mem_limit.system_mem_used += system_mem_needed;
186 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
187
188release:
189 spin_unlock(&kfd_mem_limit.mem_limit_lock);
190 return ret;
191}
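
/* Summary of the accounting above: GTT BOs are charged size + acc_size
 * against both the system and TTM limits; VRAM BOs are charged size against
 * the per-device VRAM budget and only acc_size against system/TTM; userptr
 * BOs are charged size + acc_size against system but only acc_size against
 * TTM; doorbell/MMIO-remap BOs are charged acc_size only.
 */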
192
193static void unreserve_mem_limit(struct amdgpu_device *adev,
194 uint64_t size, u32 alloc_flag)
195{
196 size_t acc_size;
197
198 acc_size = amdgpu_amdkfd_acc_size(size);
199
200 spin_lock(&kfd_mem_limit.mem_limit_lock);
201
202 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
203 kfd_mem_limit.system_mem_used -= (acc_size + size);
204 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
205 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
206 kfd_mem_limit.system_mem_used -= acc_size;
207 kfd_mem_limit.ttm_mem_used -= acc_size;
208 adev->kfd.vram_used -= size;
209 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
210 kfd_mem_limit.system_mem_used -= (acc_size + size);
211 kfd_mem_limit.ttm_mem_used -= acc_size;
212 } else if (alloc_flag &
213 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
214 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
215 kfd_mem_limit.system_mem_used -= acc_size;
216 kfd_mem_limit.ttm_mem_used -= acc_size;
217 } else {
218 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
219 goto release;
220 }
221
222 WARN_ONCE(adev->kfd.vram_used < 0,
223 "KFD VRAM memory accounting unbalanced");
224 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
225 "KFD TTM memory accounting unbalanced");
226 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
227 "KFD system memory accounting unbalanced");
228
229release:
230 spin_unlock(&kfd_mem_limit.mem_limit_lock);
231}
232
233void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
234{
235 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
236 u32 alloc_flags = bo->kfd_bo->alloc_flags;
237 u64 size = amdgpu_bo_size(bo);
238
239 unreserve_mem_limit(adev, size, alloc_flags);
240
241 kfree(bo->kfd_bo);
242}
243
244/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
245 * reservation object.
246 *
247 * @bo: [IN] Remove eviction fence(s) from this BO
248 * @ef: [IN] This eviction fence is removed if it
249 * is present in the shared list.
250 *
251 * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv locked.
252 */
253static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
254 struct amdgpu_amdkfd_fence *ef)
255{
256 struct dma_resv *resv = bo->tbo.base.resv;
257 struct dma_resv_list *old, *new;
258 unsigned int i, j, k;
259
260 if (!ef)
261 return -EINVAL;
262
263 old = dma_resv_shared_list(resv);
264 if (!old)
265 return 0;
266
267 new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
268 if (!new)
269 return -ENOMEM;
270
271	/* Go through all the shared fences in the reservation object and sort
272 * the interesting ones to the end of the list.
273 */
274 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
275 struct dma_fence *f;
276
277 f = rcu_dereference_protected(old->shared[i],
278 dma_resv_held(resv));
279
280 if (f->context == ef->base.context)
281 RCU_INIT_POINTER(new->shared[--j], f);
282 else
283 RCU_INIT_POINTER(new->shared[k++], f);
284 }
285 new->shared_max = old->shared_max;
286 new->shared_count = k;
287
288 /* Install the new fence list, seqcount provides the barriers */
289 write_seqcount_begin(&resv->seq);
290 RCU_INIT_POINTER(resv->fence, new);
291 write_seqcount_end(&resv->seq);
292
293	/* Drop the references to the removed fences */
294 for (i = j; i < old->shared_count; ++i) {
295 struct dma_fence *f;
296
297 f = rcu_dereference_protected(new->shared[i],
298 dma_resv_held(resv));
299 dma_fence_put(f);
300 }
301 kfree_rcu(old, rcu);
302
303 return 0;
304}
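
/* Example of the partitioning above: given shared fences [A, E1, B, E2]
 * where E1/E2 belong to the eviction fence context, the loop produces
 * [A, B, E2, E1] with shared_count = 2, and the trailing two references
 * are then dropped.
 */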
305
306int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
307{
308 struct amdgpu_bo *root = bo;
309 struct amdgpu_vm_bo_base *vm_bo;
310 struct amdgpu_vm *vm;
311 struct amdkfd_process_info *info;
312 struct amdgpu_amdkfd_fence *ef;
313 int ret;
314
315	/* We can always get vm_bo from the root PD BO. */
316 while (root->parent)
317 root = root->parent;
318
319 vm_bo = root->vm_bo;
320 if (!vm_bo)
321 return 0;
322
323 vm = vm_bo->vm;
324 if (!vm)
325 return 0;
326
327 info = vm->process_info;
328 if (!info || !info->eviction_fence)
329 return 0;
330
331 ef = container_of(dma_fence_get(&info->eviction_fence->base),
332 struct amdgpu_amdkfd_fence, base);
333
334 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
335 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
336 dma_resv_unlock(bo->tbo.base.resv);
337
338 dma_fence_put(&ef->base);
339 return ret;
340}
341
342static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
343 bool wait)
344{
345 struct ttm_operation_ctx ctx = { false, false };
346 int ret;
347
348 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
349 "Called with userptr BO"))
350 return -EINVAL;
351
352 amdgpu_bo_placement_from_domain(bo, domain);
353
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
355 if (ret)
356 goto validate_fail;
357 if (wait)
358 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
359
360validate_fail:
361 return ret;
362}
363
364static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
365{
366 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
367}
368
369/* vm_validate_pt_pd_bos - Validate page table and directory BOs
370 *
371 * Page directories are not updated here because huge page handling
372 * during page table updates can invalidate page directory entries
373 * again. Page directories are only updated after updating page
374 * tables.
375 */
376static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
377{
378 struct amdgpu_bo *pd = vm->root.bo;
379 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
380 int ret;
381
382 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
383 if (ret) {
384 pr_err("failed to validate PT BOs\n");
385 return ret;
386 }
387
388 ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
389 if (ret) {
390 pr_err("failed to validate PD\n");
391 return ret;
392 }
393
394 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
395
396 if (vm->use_cpu_for_update) {
397 ret = amdgpu_bo_kmap(pd, NULL);
398 if (ret) {
399 pr_err("failed to kmap PD, ret=%d\n", ret);
400 return ret;
401 }
402 }
403
404 return 0;
405}
406
407static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
408{
409 struct amdgpu_bo *pd = vm->root.bo;
410 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
411 int ret;
412
413 ret = amdgpu_vm_update_pdes(adev, vm, false);
414 if (ret)
415 return ret;
416
417 return amdgpu_sync_fence(sync, vm->last_update);
418}
419
420static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
421{
422 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
423 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
424 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
425 uint32_t mapping_flags;
426 uint64_t pte_flags;
427 bool snoop = false;
428
429 mapping_flags = AMDGPU_VM_PAGE_READABLE;
430 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
431 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
432 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
433 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
434
435 switch (adev->asic_type) {
436 case CHIP_ARCTURUS:
437 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
438 if (bo_adev == adev)
439 mapping_flags |= coherent ?
440 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
441 else
442 mapping_flags |= coherent ?
443 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
444 } else {
445 mapping_flags |= coherent ?
446 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
447 }
448 break;
449 case CHIP_ALDEBARAN:
450 if (coherent && uncached) {
451 if (adev->gmc.xgmi.connected_to_cpu ||
452 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
453 snoop = true;
454 mapping_flags |= AMDGPU_VM_MTYPE_UC;
455 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
456 if (bo_adev == adev) {
457 mapping_flags |= coherent ?
458 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
459 if (adev->gmc.xgmi.connected_to_cpu)
460 snoop = true;
461 } else {
462 mapping_flags |= coherent ?
463 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
464 if (amdgpu_xgmi_same_hive(adev, bo_adev))
465 snoop = true;
466 }
467 } else {
468 snoop = true;
469 mapping_flags |= coherent ?
470 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
471 }
472 break;
473 default:
474 mapping_flags |= coherent ?
475 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
476 }
477
478 pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
479 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
480
481 return pte_flags;
482}
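
/* For example, per the switch above, a VRAM allocation on Arcturus mapped on
 * the GPU that owns it resolves to AMDGPU_VM_MTYPE_CC (coherent) or
 * AMDGPU_VM_MTYPE_RW (non-coherent), while the same allocation mapped from a
 * peer GPU resolves to AMDGPU_VM_MTYPE_UC or AMDGPU_VM_MTYPE_NC respectively.
 */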
483
484static int
485kfd_mem_dmamap_userptr(struct kgd_mem *mem,
486 struct kfd_mem_attachment *attachment)
487{
488 enum dma_data_direction direction =
489 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
490 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
491 struct ttm_operation_ctx ctx = {.interruptible = true};
492 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
493 struct amdgpu_device *adev = attachment->adev;
494 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
495 struct ttm_tt *ttm = bo->tbo.ttm;
496 int ret;
497
498	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
499		return -EINVAL;
500
501	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
502	if (unlikely(!ttm->sg))
503		return -ENOMEM;
504
505 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
506 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
507 ttm->num_pages, 0,
508 (u64)ttm->num_pages << PAGE_SHIFT,
509 GFP_KERNEL);
510 if (unlikely(ret))
511 goto free_sg;
512
513 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
514 if (unlikely(ret))
515 goto release_sg;
516
517 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
518 ttm->num_pages);
519
520 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
521 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
522 if (ret)
523 goto unmap_sg;
524
525 return 0;
526
527unmap_sg:
528 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
529release_sg:
530 pr_err("DMA map userptr failed: %d\n", ret);
531 sg_free_table(ttm->sg);
532free_sg:
533 kfree(ttm->sg);
534 ttm->sg = NULL;
535 return ret;
536}
537
538static int
539kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
540{
541 struct ttm_operation_ctx ctx = {.interruptible = true};
542 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
543
544 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
545 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
546}
547
548static int
549kfd_mem_dmamap_attachment(struct kgd_mem *mem,
550 struct kfd_mem_attachment *attachment)
551{
552 switch (attachment->type) {
553 case KFD_MEM_ATT_SHARED:
554 return 0;
555 case KFD_MEM_ATT_USERPTR:
556 return kfd_mem_dmamap_userptr(mem, attachment);
557 case KFD_MEM_ATT_DMABUF:
558 return kfd_mem_dmamap_dmabuf(attachment);
559 default:
560 WARN_ON_ONCE(1);
561 }
562 return -EINVAL;
563}
564
565static void
566kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
567 struct kfd_mem_attachment *attachment)
568{
569 enum dma_data_direction direction =
570 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
571 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
572 struct ttm_operation_ctx ctx = {.interruptible = false};
573 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
574 struct amdgpu_device *adev = attachment->adev;
575 struct ttm_tt *ttm = bo->tbo.ttm;
576
577 if (unlikely(!ttm->sg))
578 return;
579
580 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
581 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
582
583 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
584 sg_free_table(ttm->sg);
585 kfree(ttm->sg);
586 ttm->sg = NULL;
587}
588
589static void
590kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
591{
592 struct ttm_operation_ctx ctx = {.interruptible = true};
593 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
594
595 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
596 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
597}
598
599static void
600kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
601 struct kfd_mem_attachment *attachment)
602{
603 switch (attachment->type) {
604 case KFD_MEM_ATT_SHARED:
605 break;
606 case KFD_MEM_ATT_USERPTR:
607 kfd_mem_dmaunmap_userptr(mem, attachment);
608 break;
609 case KFD_MEM_ATT_DMABUF:
610 kfd_mem_dmaunmap_dmabuf(attachment);
611 break;
612 default:
613 WARN_ON_ONCE(1);
614 }
615}
616
617static int
618kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
619 struct amdgpu_bo **bo)
620{
621 unsigned long bo_size = mem->bo->tbo.base.size;
622 struct drm_gem_object *gobj;
623 int ret;
624
625 ret = amdgpu_bo_reserve(mem->bo, false);
626 if (ret)
627 return ret;
628
629 ret = amdgpu_gem_object_create(adev, bo_size, 1,
630 AMDGPU_GEM_DOMAIN_CPU,
631 AMDGPU_GEM_CREATE_PREEMPTIBLE,
632 ttm_bo_type_sg, mem->bo->tbo.base.resv,
633 &gobj);
634 amdgpu_bo_unreserve(mem->bo);
635 if (ret)
636 return ret;
637
638 *bo = gem_to_amdgpu_bo(gobj);
639 (*bo)->parent = amdgpu_bo_ref(mem->bo);
640
641 return 0;
642}
643
644static int
645kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
646 struct amdgpu_bo **bo)
647{
648 struct drm_gem_object *gobj;
649 int ret;
650
651 if (!mem->dmabuf) {
652 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
653 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
654 DRM_RDWR : 0);
655 if (IS_ERR(mem->dmabuf)) {
656 ret = PTR_ERR(mem->dmabuf);
657 mem->dmabuf = NULL;
658 return ret;
659 }
660 }
661
662 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
663 if (IS_ERR(gobj))
664 return PTR_ERR(gobj);
665
666 *bo = gem_to_amdgpu_bo(gobj);
667 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
668 (*bo)->parent = amdgpu_bo_ref(mem->bo);
669
670 return 0;
671}
672
673/* kfd_mem_attach - Add a BO to a VM
674 *
675 * Everything that needs to be done only once when a BO is first added
676 * to a VM. It can later be mapped and unmapped many times without
677 * repeating these steps.
678 *
679 * 0. Create BO for DMA mapping, if needed
680 * 1. Allocate and initialize BO VA entry data structure
681 * 2. Add BO to the VM
682 * 3. Determine ASIC-specific PTE flags
683 * 4. Alloc page tables and directories if needed
684 * 4a. Validate new page tables and directories
685 */
686static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
687 struct amdgpu_vm *vm, bool is_aql)
688{
689 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
690 unsigned long bo_size = mem->bo->tbo.base.size;
691 uint64_t va = mem->va;
692 struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
693 struct amdgpu_bo *bo[2] = {NULL, NULL};
694 int i, ret;
695
696 if (!va) {
697 pr_err("Invalid VA when adding BO to VM\n");
698 return -EINVAL;
699 }
700
701 for (i = 0; i <= is_aql; i++) {
702 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
703 if (unlikely(!attachment[i])) {
704 ret = -ENOMEM;
705 goto unwind;
706 }
707
708 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
709 va + bo_size, vm);
710
711 if (adev == bo_adev ||
712 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
713 (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) {
714 /* Mappings on the local GPU, or VRAM mappings in the
715 * local hive, or userptr mapping IOMMU direct map mode
716 * share the original BO
717 */
718 attachment[i]->type = KFD_MEM_ATT_SHARED;
719 bo[i] = mem->bo;
720 drm_gem_object_get(&bo[i]->tbo.base);
721 } else if (i > 0) {
722 /* Multiple mappings on the same GPU share the BO */
723 attachment[i]->type = KFD_MEM_ATT_SHARED;
724 bo[i] = bo[0];
725 drm_gem_object_get(&bo[i]->tbo.base);
726 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
727 /* Create an SG BO to DMA-map userptrs on other GPUs */
728 attachment[i]->type = KFD_MEM_ATT_USERPTR;
729 ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
730 if (ret)
731 goto unwind;
732 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
733 mem->bo->tbo.type != ttm_bo_type_sg) {
734 /* GTT BOs use DMA-mapping ability of dynamic-attach
735 * DMA bufs. TODO: The same should work for VRAM on
736 * large-BAR GPUs.
737 */
738 attachment[i]->type = KFD_MEM_ATT_DMABUF;
739 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
740 if (ret)
741 goto unwind;
742 } else {
743 /* FIXME: Need to DMA-map other BO types:
744 * large-BAR VRAM, doorbells, MMIO remap
745 */
746 attachment[i]->type = KFD_MEM_ATT_SHARED;
747 bo[i] = mem->bo;
748 drm_gem_object_get(&bo[i]->tbo.base);
749 }
750
751 /* Add BO to VM internal data structures */
752 ret = amdgpu_bo_reserve(bo[i], false);
753 if (ret) {
754			pr_debug("Unable to reserve BO during memory attach\n");
755 goto unwind;
756 }
757 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
758 amdgpu_bo_unreserve(bo[i]);
759 if (unlikely(!attachment[i]->bo_va)) {
760 ret = -ENOMEM;
761 pr_err("Failed to add BO object to VM. ret == %d\n",
762 ret);
763 goto unwind;
764 }
765 attachment[i]->va = va;
766 attachment[i]->pte_flags = get_pte_flags(adev, mem);
767 attachment[i]->adev = adev;
768 list_add(&attachment[i]->list, &mem->attachments);
769
770 va += bo_size;
771 }
772
773 return 0;
774
775unwind:
776 for (; i >= 0; i--) {
777 if (!attachment[i])
778 continue;
779 if (attachment[i]->bo_va) {
780 amdgpu_bo_reserve(bo[i], true);
781 amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
782 amdgpu_bo_unreserve(bo[i]);
783 list_del(&attachment[i]->list);
784 }
785 if (bo[i])
786 drm_gem_object_put(&bo[i]->tbo.base);
787 kfree(attachment[i]);
788 }
789 return ret;
790}
791
792static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
793{
794 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
795
796 pr_debug("\t remove VA 0x%llx in entry %p\n",
797 attachment->va, attachment);
798 amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
799 drm_gem_object_put(&bo->tbo.base);
800 list_del(&attachment->list);
801 kfree(attachment);
802}
803
804static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
805 struct amdkfd_process_info *process_info,
806 bool userptr)
807{
808 struct ttm_validate_buffer *entry = &mem->validate_list;
809 struct amdgpu_bo *bo = mem->bo;
810
811 INIT_LIST_HEAD(&entry->head);
812 entry->num_shared = 1;
813 entry->bo = &bo->tbo;
814 mutex_lock(&process_info->lock);
815 if (userptr)
816 list_add_tail(&entry->head, &process_info->userptr_valid_list);
817 else
818 list_add_tail(&entry->head, &process_info->kfd_bo_list);
819 mutex_unlock(&process_info->lock);
820}
821
822static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
823 struct amdkfd_process_info *process_info)
824{
825 struct ttm_validate_buffer *bo_list_entry;
826
827 bo_list_entry = &mem->validate_list;
828 mutex_lock(&process_info->lock);
829 list_del(&bo_list_entry->head);
830 mutex_unlock(&process_info->lock);
831}
832
833/* Initializes user pages. It registers the MMU notifier and validates
834 * the userptr BO in the GTT domain.
835 *
836 * The BO must already be on the userptr_valid_list. Otherwise an
837 * eviction and restore may happen that leaves the new BO unmapped
838 * with the user mode queues running.
839 *
840 * Takes the process_info->lock to protect against concurrent restore
841 * workers.
842 *
843 * Returns 0 for success, negative errno for errors.
844 */
845static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
846{
847 struct amdkfd_process_info *process_info = mem->process_info;
848 struct amdgpu_bo *bo = mem->bo;
849 struct ttm_operation_ctx ctx = { true, false };
850 int ret = 0;
851
852 mutex_lock(&process_info->lock);
853
854 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
855 if (ret) {
856 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
857 goto out;
858 }
859
860 ret = amdgpu_mn_register(bo, user_addr);
861 if (ret) {
862 pr_err("%s: Failed to register MMU notifier: %d\n",
863 __func__, ret);
864 goto out;
865 }
866
867 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
868 if (ret) {
869 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
870 goto unregister_out;
871 }
872
873 ret = amdgpu_bo_reserve(bo, true);
874 if (ret) {
875 pr_err("%s: Failed to reserve BO\n", __func__);
876 goto release_out;
877 }
878 amdgpu_bo_placement_from_domain(bo, mem->domain);
879 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
880 if (ret)
881 pr_err("%s: failed to validate BO\n", __func__);
882 amdgpu_bo_unreserve(bo);
883
884release_out:
885 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
886unregister_out:
887 if (ret)
888 amdgpu_mn_unregister(bo);
889out:
890 mutex_unlock(&process_info->lock);
891 return ret;
892}
893
894/* Reserving a BO and its page table BOs must happen atomically to
895 * avoid deadlocks. Some operations update multiple VMs at once. Track
896 * all the reservation info in a context structure. Optionally a sync
897 * object can track VM updates.
898 */
899struct bo_vm_reservation_context {
900 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
901 unsigned int n_vms; /* Number of VMs reserved */
902 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
903 struct ww_acquire_ctx ticket; /* Reservation ticket */
904 struct list_head list, duplicates; /* BO lists */
905 struct amdgpu_sync *sync; /* Pointer to sync object */
906 bool reserved; /* Whether BOs are reserved */
907};
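
/* Typical flow: reserve_bo_and_vm() or reserve_bo_and_cond_vms() fills in the
 * context and reserves the BO plus the relevant page-directory BOs; the
 * caller then updates mappings and page tables; unreserve_bo_and_vms() backs
 * off the reservation and can optionally wait on ctx->sync for pending VM
 * updates before doing so.
 */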
908
909enum bo_vm_match {
910 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
911 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
912 BO_VM_ALL, /* Match all VMs a BO was added to */
913};
914
915/**
916 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
917 * @mem: KFD BO structure.
918 * @vm: the VM to reserve.
919 * @ctx: the struct that will be used in unreserve_bo_and_vms().
920 */
921static int reserve_bo_and_vm(struct kgd_mem *mem,
922 struct amdgpu_vm *vm,
923 struct bo_vm_reservation_context *ctx)
924{
925 struct amdgpu_bo *bo = mem->bo;
926 int ret;
927
928 WARN_ON(!vm);
929
930 ctx->reserved = false;
931 ctx->n_vms = 1;
932 ctx->sync = &mem->sync;
933
934 INIT_LIST_HEAD(&ctx->list);
935 INIT_LIST_HEAD(&ctx->duplicates);
936
937 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
938 if (!ctx->vm_pd)
939 return -ENOMEM;
940
941 ctx->kfd_bo.priority = 0;
942 ctx->kfd_bo.tv.bo = &bo->tbo;
943 ctx->kfd_bo.tv.num_shared = 1;
944 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
945
946 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
947
948 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
949 false, &ctx->duplicates);
950 if (ret) {
951 pr_err("Failed to reserve buffers in ttm.\n");
952 kfree(ctx->vm_pd);
953 ctx->vm_pd = NULL;
954 return ret;
955 }
956
957 ctx->reserved = true;
958 return 0;
959}
960
961/**
962 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
963 * @mem: KFD BO structure.
964 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
965 * are used. Otherwise, only the given VM is reserved.
966 * @map_type: the mapping status that will be used to filter the VMs.
967 * @ctx: the struct that will be used in unreserve_bo_and_vms().
968 *
969 * Returns 0 for success, negative for failure.
970 */
971static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
972 struct amdgpu_vm *vm, enum bo_vm_match map_type,
973 struct bo_vm_reservation_context *ctx)
974{
975 struct amdgpu_bo *bo = mem->bo;
976 struct kfd_mem_attachment *entry;
977 unsigned int i;
978 int ret;
979
980 ctx->reserved = false;
981 ctx->n_vms = 0;
982 ctx->vm_pd = NULL;
983 ctx->sync = &mem->sync;
984
985 INIT_LIST_HEAD(&ctx->list);
986 INIT_LIST_HEAD(&ctx->duplicates);
987
988 list_for_each_entry(entry, &mem->attachments, list) {
989 if ((vm && vm != entry->bo_va->base.vm) ||
990 (entry->is_mapped != map_type
991 && map_type != BO_VM_ALL))
992 continue;
993
994 ctx->n_vms++;
995 }
996
997 if (ctx->n_vms != 0) {
998 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
999 GFP_KERNEL);
1000 if (!ctx->vm_pd)
1001 return -ENOMEM;
1002 }
1003
1004 ctx->kfd_bo.priority = 0;
1005 ctx->kfd_bo.tv.bo = &bo->tbo;
1006 ctx->kfd_bo.tv.num_shared = 1;
1007 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
1008
1009 i = 0;
1010 list_for_each_entry(entry, &mem->attachments, list) {
1011 if ((vm && vm != entry->bo_va->base.vm) ||
1012 (entry->is_mapped != map_type
1013 && map_type != BO_VM_ALL))
1014 continue;
1015
1016 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1017 &ctx->vm_pd[i]);
1018 i++;
1019 }
1020
1021 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1022 false, &ctx->duplicates);
1023 if (ret) {
1024 pr_err("Failed to reserve buffers in ttm.\n");
1025 kfree(ctx->vm_pd);
1026 ctx->vm_pd = NULL;
1027 return ret;
1028 }
1029
1030 ctx->reserved = true;
1031 return 0;
1032}
1033
1034/**
1035 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1036 * @ctx: Reservation context to unreserve
1037 * @wait: Optionally wait for a sync object representing pending VM updates
1038 * @intr: Whether the wait is interruptible
1039 *
1040 * Also frees any resources allocated in
1041 * reserve_bo_and_(cond_)vm(s). Returns the status from
1042 * amdgpu_sync_wait.
1043 */
1044static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1045 bool wait, bool intr)
1046{
1047 int ret = 0;
1048
1049 if (wait)
1050 ret = amdgpu_sync_wait(ctx->sync, intr);
1051
1052 if (ctx->reserved)
1053 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1054 kfree(ctx->vm_pd);
1055
1056 ctx->sync = NULL;
1057
1058 ctx->reserved = false;
1059 ctx->vm_pd = NULL;
1060
1061 return ret;
1062}
1063
1064static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1065 struct kfd_mem_attachment *entry,
1066 struct amdgpu_sync *sync)
1067{
1068 struct amdgpu_bo_va *bo_va = entry->bo_va;
1069 struct amdgpu_device *adev = entry->adev;
1070 struct amdgpu_vm *vm = bo_va->base.vm;
1071
1072 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1073
1074 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1075
1076 amdgpu_sync_fence(sync, bo_va->last_pt_update);
1077
1078 kfd_mem_dmaunmap_attachment(mem, entry);
1079}
1080
1081static int update_gpuvm_pte(struct kgd_mem *mem,
1082 struct kfd_mem_attachment *entry,
1083 struct amdgpu_sync *sync,
1084 bool *table_freed)
1085{
1086 struct amdgpu_bo_va *bo_va = entry->bo_va;
1087 struct amdgpu_device *adev = entry->adev;
1088 int ret;
1089
1090 ret = kfd_mem_dmamap_attachment(mem, entry);
1091 if (ret)
1092 return ret;
1093
1094 /* Update the page tables */
1095 ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1096 if (ret) {
1097 pr_err("amdgpu_vm_bo_update failed\n");
1098 return ret;
1099 }
1100
1101 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1102}
1103
1104static int map_bo_to_gpuvm(struct kgd_mem *mem,
1105 struct kfd_mem_attachment *entry,
1106 struct amdgpu_sync *sync,
1107 bool no_update_pte,
1108 bool *table_freed)
1109{
1110 int ret;
1111
1112 /* Set virtual address for the allocation */
1113 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1114 amdgpu_bo_size(entry->bo_va->base.bo),
1115 entry->pte_flags);
1116 if (ret) {
1117 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1118 entry->va, ret);
1119 return ret;
1120 }
1121
1122 if (no_update_pte)
1123 return 0;
1124
1125 ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1126 if (ret) {
1127 pr_err("update_gpuvm_pte() failed\n");
1128 goto update_gpuvm_pte_failed;
1129 }
1130
1131 return 0;
1132
1133update_gpuvm_pte_failed:
1134 unmap_bo_from_gpuvm(mem, entry, sync);
1135 return ret;
1136}
1137
1138static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1139{
1140 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1141
1142 if (!sg)
1143 return NULL;
1144 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1145 kfree(sg);
1146 return NULL;
1147 }
1148 sg->sgl->dma_address = addr;
1149 sg->sgl->length = size;
1150#ifdef CONFIG_NEED_SG_DMA_LENGTH
1151 sg->sgl->dma_length = size;
1152#endif
1153 return sg;
1154}
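
/* The resulting single-entry sg table has no struct page backing; it only
 * carries the doorbell/MMIO bus address and length, which is enough for the
 * ttm_bo_type_sg BOs created in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu().
 */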
1155
1156static int process_validate_vms(struct amdkfd_process_info *process_info)
1157{
1158 struct amdgpu_vm *peer_vm;
1159 int ret;
1160
1161 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1162 vm_list_node) {
1163 ret = vm_validate_pt_pd_bos(peer_vm);
1164 if (ret)
1165 return ret;
1166 }
1167
1168 return 0;
1169}
1170
1171static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1172 struct amdgpu_sync *sync)
1173{
1174 struct amdgpu_vm *peer_vm;
1175 int ret;
1176
1177 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1178 vm_list_node) {
1179 struct amdgpu_bo *pd = peer_vm->root.bo;
1180
1181 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1182 AMDGPU_SYNC_NE_OWNER,
1183 AMDGPU_FENCE_OWNER_KFD);
1184 if (ret)
1185 return ret;
1186 }
1187
1188 return 0;
1189}
1190
1191static int process_update_pds(struct amdkfd_process_info *process_info,
1192 struct amdgpu_sync *sync)
1193{
1194 struct amdgpu_vm *peer_vm;
1195 int ret;
1196
1197 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1198 vm_list_node) {
1199 ret = vm_update_pds(peer_vm, sync);
1200 if (ret)
1201 return ret;
1202 }
1203
1204 return 0;
1205}
1206
1207static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1208 struct dma_fence **ef)
1209{
1210 struct amdkfd_process_info *info = NULL;
1211 int ret;
1212
1213 if (!*process_info) {
1214 info = kzalloc(sizeof(*info), GFP_KERNEL);
1215 if (!info)
1216 return -ENOMEM;
1217
1218 mutex_init(&info->lock);
1219 INIT_LIST_HEAD(&info->vm_list_head);
1220 INIT_LIST_HEAD(&info->kfd_bo_list);
1221 INIT_LIST_HEAD(&info->userptr_valid_list);
1222 INIT_LIST_HEAD(&info->userptr_inval_list);
1223
1224 info->eviction_fence =
1225 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1226 current->mm,
1227 NULL);
1228 if (!info->eviction_fence) {
1229 pr_err("Failed to create eviction fence\n");
1230 ret = -ENOMEM;
1231 goto create_evict_fence_fail;
1232 }
1233
1234 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1235 atomic_set(&info->evicted_bos, 0);
1236 INIT_DELAYED_WORK(&info->restore_userptr_work,
1237 amdgpu_amdkfd_restore_userptr_worker);
1238
1239 *process_info = info;
1240 *ef = dma_fence_get(&info->eviction_fence->base);
1241 }
1242
1243 vm->process_info = *process_info;
1244
1245 /* Validate page directory and attach eviction fence */
1246 ret = amdgpu_bo_reserve(vm->root.bo, true);
1247 if (ret)
1248 goto reserve_pd_fail;
1249 ret = vm_validate_pt_pd_bos(vm);
1250 if (ret) {
1251 pr_err("validate_pt_pd_bos() failed\n");
1252 goto validate_pd_fail;
1253 }
1254 ret = amdgpu_bo_sync_wait(vm->root.bo,
1255 AMDGPU_FENCE_OWNER_KFD, false);
1256 if (ret)
1257 goto wait_pd_fail;
1258 ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1259 if (ret)
1260 goto reserve_shared_fail;
1261 amdgpu_bo_fence(vm->root.bo,
1262 &vm->process_info->eviction_fence->base, true);
1263 amdgpu_bo_unreserve(vm->root.bo);
1264
1265 /* Update process info */
1266 mutex_lock(&vm->process_info->lock);
1267 list_add_tail(&vm->vm_list_node,
1268 &(vm->process_info->vm_list_head));
1269 vm->process_info->n_vms++;
1270 mutex_unlock(&vm->process_info->lock);
1271
1272 return 0;
1273
1274reserve_shared_fail:
1275wait_pd_fail:
1276validate_pd_fail:
1277 amdgpu_bo_unreserve(vm->root.bo);
1278reserve_pd_fail:
1279 vm->process_info = NULL;
1280 if (info) {
1281 /* Two fence references: one in info and one in *ef */
1282 dma_fence_put(&info->eviction_fence->base);
1283 dma_fence_put(*ef);
1284 *ef = NULL;
1285 *process_info = NULL;
1286 put_pid(info->pid);
1287create_evict_fence_fail:
1288 mutex_destroy(&info->lock);
1289 kfree(info);
1290 }
1291 return ret;
1292}
1293
1294/**
1295 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1296 * @bo: Handle of the buffer object being pinned
1297 * @domain: Domain into which the BO should be pinned
1298 *
1299 * - USERPTR BOs are UNPINNABLE and will return an error
1300 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1301 * PIN count incremented. It is valid to PIN a BO multiple times
1302 *
1303 * Return: 0 if the BO was pinned successfully, non-zero in case of error.
1304 */
1305static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1306{
1307 int ret = 0;
1308
1309 ret = amdgpu_bo_reserve(bo, false);
1310 if (unlikely(ret))
1311 return ret;
1312
1313 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1314 if (ret)
1315 pr_err("Error in Pinning BO to domain: %d\n", domain);
1316
1317 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1318 amdgpu_bo_unreserve(bo);
1319
1320 return ret;
1321}
1322
1323/**
1324 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1325 * @bo: Handle of the buffer object being unpinned
1326 *
1327 * - Is an illegal request for USERPTR BOs and is ignored
1328 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1329 * PIN count decremented. Calls to UNPIN must balance calls to PIN
1330 */
1331static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1332{
1333 int ret = 0;
1334
1335 ret = amdgpu_bo_reserve(bo, false);
1336 if (unlikely(ret))
1337 return;
1338
1339 amdgpu_bo_unpin(bo);
1340 amdgpu_bo_unreserve(bo);
1341}
1342
1343int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
1344 struct file *filp, u32 pasid,
1345 void **process_info,
1346 struct dma_fence **ef)
1347{
1348 struct amdgpu_fpriv *drv_priv;
1349 struct amdgpu_vm *avm;
1350 int ret;
1351
1352 ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1353 if (ret)
1354 return ret;
1355 avm = &drv_priv->vm;
1356
1357 /* Already a compute VM? */
1358 if (avm->process_info)
1359 return -EINVAL;
1360
1361	/* Free the original amdgpu-allocated pasid; it will be
1362	 * replaced with a kfd-allocated pasid.
1363 */
1364 if (avm->pasid) {
1365 amdgpu_pasid_free(avm->pasid);
1366 amdgpu_vm_set_pasid(adev, avm, 0);
1367 }
1368
1369 /* Convert VM into a compute VM */
1370 ret = amdgpu_vm_make_compute(adev, avm);
1371 if (ret)
1372 return ret;
1373
1374 ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1375 if (ret)
1376 return ret;
1377 /* Initialize KFD part of the VM and process info */
1378 ret = init_kfd_vm(avm, process_info, ef);
1379 if (ret)
1380 return ret;
1381
1382 amdgpu_vm_set_task_info(avm);
1383
1384 return 0;
1385}
1386
1387void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1388 struct amdgpu_vm *vm)
1389{
1390 struct amdkfd_process_info *process_info = vm->process_info;
1391 struct amdgpu_bo *pd = vm->root.bo;
1392
1393 if (!process_info)
1394 return;
1395
1396 /* Release eviction fence from PD */
1397 amdgpu_bo_reserve(pd, false);
1398 amdgpu_bo_fence(pd, NULL, false);
1399 amdgpu_bo_unreserve(pd);
1400
1401 /* Update process info */
1402 mutex_lock(&process_info->lock);
1403 process_info->n_vms--;
1404 list_del(&vm->vm_list_node);
1405 mutex_unlock(&process_info->lock);
1406
1407 vm->process_info = NULL;
1408
1409 /* Release per-process resources when last compute VM is destroyed */
1410 if (!process_info->n_vms) {
1411 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1412 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1413 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1414
1415 dma_fence_put(&process_info->eviction_fence->base);
1416 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1417 put_pid(process_info->pid);
1418 mutex_destroy(&process_info->lock);
1419 kfree(process_info);
1420 }
1421}
1422
1423void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
1424 void *drm_priv)
1425{
1426 struct amdgpu_vm *avm;
1427
1428 if (WARN_ON(!adev || !drm_priv))
1429 return;
1430
1431 avm = drm_priv_to_vm(drm_priv);
1432
1433 pr_debug("Releasing process vm %p\n", avm);
1434
1435	/* The original pasid of the amdgpu vm has already been
1436	 * released when the amdgpu vm was converted to a compute vm.
1437	 * The current pasid is managed by kfd and will be
1438	 * released on kfd process destroy. Set the amdgpu pasid
1439	 * to 0 to avoid a duplicate release.
1440 */
1441 amdgpu_vm_release_compute(adev, avm);
1442}
1443
1444uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1445{
1446 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1447 struct amdgpu_bo *pd = avm->root.bo;
1448 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1449
1450 if (adev->asic_type < CHIP_VEGA10)
1451 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1452 return avm->pd_phys_addr;
1453}
1454
1455int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1456 struct amdgpu_device *adev, uint64_t va, uint64_t size,
1457 void *drm_priv, struct kgd_mem **mem,
1458 uint64_t *offset, uint32_t flags)
1459{
1460 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1461 enum ttm_bo_type bo_type = ttm_bo_type_device;
1462 struct sg_table *sg = NULL;
1463 uint64_t user_addr = 0;
1464 struct amdgpu_bo *bo;
1465 struct drm_gem_object *gobj = NULL;
1466 u32 domain, alloc_domain;
1467 u64 alloc_flags;
1468 int ret;
1469
1470 /*
1471 * Check on which domain to allocate BO
1472 */
1473 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1474 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1475 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1476 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1477 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1478 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1479 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1480 alloc_flags = 0;
1481 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1482 domain = AMDGPU_GEM_DOMAIN_GTT;
1483 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1484 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1485 if (!offset || !*offset)
1486 return -EINVAL;
1487 user_addr = untagged_addr(*offset);
1488 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1489 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1490 domain = AMDGPU_GEM_DOMAIN_GTT;
1491 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1492 bo_type = ttm_bo_type_sg;
1493 alloc_flags = 0;
1494 if (size > UINT_MAX)
1495 return -EINVAL;
1496 sg = create_doorbell_sg(*offset, size);
1497 if (!sg)
1498 return -ENOMEM;
1499 } else {
1500 return -EINVAL;
1501 }
1502
1503 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1504 if (!*mem) {
1505 ret = -ENOMEM;
1506 goto err;
1507 }
1508 INIT_LIST_HEAD(&(*mem)->attachments);
1509 mutex_init(&(*mem)->lock);
1510 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1511
1512 /* Workaround for AQL queue wraparound bug. Map the same
1513 * memory twice. That means we only actually allocate half
1514 * the memory.
1515 */
1516 if ((*mem)->aql_queue)
1517 size = size >> 1;
1518
1519 (*mem)->alloc_flags = flags;
1520
1521 amdgpu_sync_create(&(*mem)->sync);
1522
1523 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
1524 if (ret) {
1525 pr_debug("Insufficient memory\n");
1526 goto err_reserve_limit;
1527 }
1528
1529 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1530 va, size, domain_string(alloc_domain));
1531
1532 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1533 bo_type, NULL, &gobj);
1534 if (ret) {
1535 pr_debug("Failed to create BO on domain %s. ret %d\n",
1536 domain_string(alloc_domain), ret);
1537 goto err_bo_create;
1538 }
1539 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1540 if (ret) {
1541 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1542 goto err_node_allow;
1543 }
1544 bo = gem_to_amdgpu_bo(gobj);
1545 if (bo_type == ttm_bo_type_sg) {
1546 bo->tbo.sg = sg;
1547 bo->tbo.ttm->sg = sg;
1548 }
1549 bo->kfd_bo = *mem;
1550 (*mem)->bo = bo;
1551 if (user_addr)
1552 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1553
1554 (*mem)->va = va;
1555 (*mem)->domain = domain;
1556 (*mem)->mapped_to_gpu_memory = 0;
1557 (*mem)->process_info = avm->process_info;
1558 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1559
1560 if (user_addr) {
1561 ret = init_user_pages(*mem, user_addr);
1562 if (ret)
1563 goto allocate_init_user_pages_failed;
1564 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1565 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1566 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1567 if (ret) {
1568 pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1569 goto err_pin_bo;
1570 }
1571 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1572 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1573 }
1574
1575 if (offset)
1576 *offset = amdgpu_bo_mmap_offset(bo);
1577
1578 return 0;
1579
1580allocate_init_user_pages_failed:
1581err_pin_bo:
1582 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1583 drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1584err_node_allow:
1585 /* Don't unreserve system mem limit twice */
1586 goto err_reserve_limit;
1587err_bo_create:
1588 unreserve_mem_limit(adev, size, flags);
1589err_reserve_limit:
1590 mutex_destroy(&(*mem)->lock);
1591 if (gobj)
1592 drm_gem_object_put(gobj);
1593 else
1594 kfree(*mem);
1595err:
1596 if (sg) {
1597 sg_free_table(sg);
1598 kfree(sg);
1599 }
1600 return ret;
1601}
1602
1603int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1604 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1605 uint64_t *size)
1606{
1607 struct amdkfd_process_info *process_info = mem->process_info;
1608 unsigned long bo_size = mem->bo->tbo.base.size;
1609 struct kfd_mem_attachment *entry, *tmp;
1610 struct bo_vm_reservation_context ctx;
1611 struct ttm_validate_buffer *bo_list_entry;
1612 unsigned int mapped_to_gpu_memory;
1613 int ret;
1614 bool is_imported = false;
1615
1616 mutex_lock(&mem->lock);
1617
1618	/* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
1619 if (mem->alloc_flags &
1620 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1621 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1622 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1623 }
1624
1625 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1626 is_imported = mem->is_imported;
1627 mutex_unlock(&mem->lock);
1628 /* lock is not needed after this, since mem is unused and will
1629 * be freed anyway
1630 */
1631
1632 if (mapped_to_gpu_memory > 0) {
1633 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1634 mem->va, bo_size);
1635 return -EBUSY;
1636 }
1637
1638 /* Make sure restore workers don't access the BO any more */
1639 bo_list_entry = &mem->validate_list;
1640 mutex_lock(&process_info->lock);
1641 list_del(&bo_list_entry->head);
1642 mutex_unlock(&process_info->lock);
1643
1644 /* No more MMU notifiers */
1645 amdgpu_mn_unregister(mem->bo);
1646
1647 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1648 if (unlikely(ret))
1649 return ret;
1650
1651 /* The eviction fence should be removed by the last unmap.
1652 * TODO: Log an error condition if the bo still has the eviction fence
1653 * attached
1654 */
1655 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1656 process_info->eviction_fence);
1657 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1658 mem->va + bo_size * (1 + mem->aql_queue));
1659
1660 /* Remove from VM internal data structures */
1661 list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1662 kfd_mem_detach(entry);
1663
1664 ret = unreserve_bo_and_vms(&ctx, false, false);
1665
1666 /* Free the sync object */
1667 amdgpu_sync_free(&mem->sync);
1668
1669 /* If the SG is not NULL, it's one we created for a doorbell or mmio
1670 * remap BO. We need to free it.
1671 */
1672 if (mem->bo->tbo.sg) {
1673 sg_free_table(mem->bo->tbo.sg);
1674 kfree(mem->bo->tbo.sg);
1675 }
1676
1677 /* Update the size of the BO being freed if it was allocated from
1678 * VRAM and is not imported.
1679 */
1680 if (size) {
1681 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1682 (!is_imported))
1683 *size = bo_size;
1684 else
1685 *size = 0;
1686 }
1687
1688	/* Free the BO */
1689 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1690 if (mem->dmabuf)
1691 dma_buf_put(mem->dmabuf);
1692 mutex_destroy(&mem->lock);
1693
1694 /* If this releases the last reference, it will end up calling
1695 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
1696 * this needs to be the last call here.
1697 */
1698 drm_gem_object_put(&mem->bo->tbo.base);
1699
1700 return ret;
1701}
1702
1703int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1704 struct amdgpu_device *adev, struct kgd_mem *mem,
1705 void *drm_priv, bool *table_freed)
1706{
1707 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1708 int ret;
1709 struct amdgpu_bo *bo;
1710 uint32_t domain;
1711 struct kfd_mem_attachment *entry;
1712 struct bo_vm_reservation_context ctx;
1713 unsigned long bo_size;
1714 bool is_invalid_userptr = false;
1715
1716 bo = mem->bo;
1717 if (!bo) {
1718 pr_err("Invalid BO when mapping memory to GPU\n");
1719 return -EINVAL;
1720 }
1721
1722 /* Make sure restore is not running concurrently. Since we
1723 * don't map invalid userptr BOs, we rely on the next restore
1724 * worker to do the mapping
1725 */
1726 mutex_lock(&mem->process_info->lock);
1727
1728	/* Lock the mmap lock. If we find an invalid userptr BO, we can be
1729 * sure that the MMU notifier is no longer running
1730 * concurrently and the queues are actually stopped
1731 */
1732 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1733 mmap_write_lock(current->mm);
1734 is_invalid_userptr = atomic_read(&mem->invalid);
1735 mmap_write_unlock(current->mm);
1736 }
1737
1738 mutex_lock(&mem->lock);
1739
1740 domain = mem->domain;
1741 bo_size = bo->tbo.base.size;
1742
1743 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1744 mem->va,
1745 mem->va + bo_size * (1 + mem->aql_queue),
1746 avm, domain_string(domain));
1747
1748 if (!kfd_mem_is_attached(avm, mem)) {
1749 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1750 if (ret)
1751 goto out;
1752 }
1753
1754 ret = reserve_bo_and_vm(mem, avm, &ctx);
1755 if (unlikely(ret))
1756 goto out;
1757
1758 /* Userptr can be marked as "not invalid", but not actually be
1759 * validated yet (still in the system domain). In that case
1760 * the queues are still stopped and we can leave mapping for
1761 * the next restore worker
1762 */
1763 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1764 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1765 is_invalid_userptr = true;
1766
1767 ret = vm_validate_pt_pd_bos(avm);
1768 if (unlikely(ret))
1769 goto out_unreserve;
1770
1771 if (mem->mapped_to_gpu_memory == 0 &&
1772 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1773 /* Validate BO only once. The eviction fence gets added to BO
1774 * the first time it is mapped. Validate will wait for all
1775 * background evictions to complete.
1776 */
1777 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1778 if (ret) {
1779 pr_debug("Validate failed\n");
1780 goto out_unreserve;
1781 }
1782 }
1783
1784 list_for_each_entry(entry, &mem->attachments, list) {
1785 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1786 continue;
1787
1788 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1789 entry->va, entry->va + bo_size, entry);
1790
1791 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1792 is_invalid_userptr, table_freed);
1793 if (ret) {
1794 pr_err("Failed to map bo to gpuvm\n");
1795 goto out_unreserve;
1796 }
1797
1798 ret = vm_update_pds(avm, ctx.sync);
1799 if (ret) {
1800 pr_err("Failed to update page directories\n");
1801 goto out_unreserve;
1802 }
1803
1804 entry->is_mapped = true;
1805 mem->mapped_to_gpu_memory++;
1806 pr_debug("\t INC mapping count %d\n",
1807 mem->mapped_to_gpu_memory);
1808 }
1809
1810 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1811 amdgpu_bo_fence(bo,
1812 &avm->process_info->eviction_fence->base,
1813 true);
1814 ret = unreserve_bo_and_vms(&ctx, false, false);
1815
1816	/* Only skip the TLB flush on Aldebaran, to work around
1817	 * regressions on other ASICs.
1818	 */
1819 if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
1820 *table_freed = true;
1821
1822 goto out;
1823
1824out_unreserve:
1825 unreserve_bo_and_vms(&ctx, false, false);
1826out:
1827 mutex_unlock(&mem->process_info->lock);
1828 mutex_unlock(&mem->lock);
1829 return ret;
1830}
1831
1832int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1833 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
1834{
1835 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1836 struct amdkfd_process_info *process_info = avm->process_info;
1837 unsigned long bo_size = mem->bo->tbo.base.size;
1838 struct kfd_mem_attachment *entry;
1839 struct bo_vm_reservation_context ctx;
1840 int ret;
1841
1842 mutex_lock(&mem->lock);
1843
1844 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1845 if (unlikely(ret))
1846 goto out;
1847 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1848 if (ctx.n_vms == 0) {
1849 ret = -EINVAL;
1850 goto unreserve_out;
1851 }
1852
1853 ret = vm_validate_pt_pd_bos(avm);
1854 if (unlikely(ret))
1855 goto unreserve_out;
1856
1857 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1858 mem->va,
1859 mem->va + bo_size * (1 + mem->aql_queue),
1860 avm);
1861
1862 list_for_each_entry(entry, &mem->attachments, list) {
1863 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1864 continue;
1865
1866 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1867 entry->va, entry->va + bo_size, entry);
1868
1869 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1870 entry->is_mapped = false;
1871
1872 mem->mapped_to_gpu_memory--;
1873 pr_debug("\t DEC mapping count %d\n",
1874 mem->mapped_to_gpu_memory);
1875 }
1876
1877 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1878 * required.
1879 */
1880 if (mem->mapped_to_gpu_memory == 0 &&
1881 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1882 !mem->bo->tbo.pin_count)
1883 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1884 process_info->eviction_fence);
1885
1886unreserve_out:
1887 unreserve_bo_and_vms(&ctx, false, false);
1888out:
1889 mutex_unlock(&mem->lock);
1890 return ret;
1891}
1892
1893int amdgpu_amdkfd_gpuvm_sync_memory(
1894 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
1895{
1896 struct amdgpu_sync sync;
1897 int ret;
1898
1899 amdgpu_sync_create(&sync);
1900
1901 mutex_lock(&mem->lock);
1902 amdgpu_sync_clone(&mem->sync, &sync);
1903 mutex_unlock(&mem->lock);
1904
1905 ret = amdgpu_sync_wait(&sync, intr);
1906 amdgpu_sync_free(&sync);
1907 return ret;
1908}
1909
1910int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
1911 struct kgd_mem *mem, void **kptr, uint64_t *size)
1912{
1913 int ret;
1914 struct amdgpu_bo *bo = mem->bo;
1915
1916 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1917 pr_err("userptr can't be mapped to kernel\n");
1918 return -EINVAL;
1919 }
1920
1921 /* Remove kgd_mem from kfd_bo_list so this BO is not re-validated
1922 * when BOs are restored after an eviction.
1923 */
1924 mutex_lock(&mem->process_info->lock);
1925
1926 ret = amdgpu_bo_reserve(bo, true);
1927 if (ret) {
1928 pr_err("Failed to reserve bo. ret %d\n", ret);
1929 goto bo_reserve_failed;
1930 }
1931
1932 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1933 if (ret) {
1934 pr_err("Failed to pin bo. ret %d\n", ret);
1935 goto pin_failed;
1936 }
1937
1938 ret = amdgpu_bo_kmap(bo, kptr);
1939 if (ret) {
1940 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1941 goto kmap_failed;
1942 }
1943
1944 amdgpu_amdkfd_remove_eviction_fence(
1945 bo, mem->process_info->eviction_fence);
1946 list_del_init(&mem->validate_list.head);
1947
1948 if (size)
1949 *size = amdgpu_bo_size(bo);
1950
1951 amdgpu_bo_unreserve(bo);
1952
1953 mutex_unlock(&mem->process_info->lock);
1954 return 0;
1955
1956kmap_failed:
1957 amdgpu_bo_unpin(bo);
1958pin_failed:
1959 amdgpu_bo_unreserve(bo);
1960bo_reserve_failed:
1961 mutex_unlock(&mem->process_info->lock);
1962
1963 return ret;
1964}
1965
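/* Undo amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel: drop the kernel mapping
 * and unpin the BO.
 */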
1966void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
1967 struct kgd_mem *mem)
1968{
1969 struct amdgpu_bo *bo = mem->bo;
1970
1971 amdgpu_bo_reserve(bo, true);
1972 amdgpu_bo_kunmap(bo);
1973 amdgpu_bo_unpin(bo);
1974 amdgpu_bo_unreserve(bo);
1975}
1976
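/* Copy the most recent GPU VM fault information for this device. The
 * memory barrier keeps the copy ordered before the updated flag is
 * cleared, so a new fault is not lost.
 */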
1977int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
1978 struct kfd_vm_fault_info *mem)
1979{
1980 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1981 *mem = *adev->gmc.vm_fault_info;
1982 mb();
1983 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1984 }
1985 return 0;
1986}
1987
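/* Import a dma-buf exported by amdgpu as a kgd_mem object so KFD can map
 * it like any other BO. Only VRAM and GTT BOs from the same device are
 * accepted.
 */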
1988int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
1989 struct dma_buf *dma_buf,
1990 uint64_t va, void *drm_priv,
1991 struct kgd_mem **mem, uint64_t *size,
1992 uint64_t *mmap_offset)
1993{
1994 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1995 struct drm_gem_object *obj;
1996 struct amdgpu_bo *bo;
1997 int ret;
1998
1999 if (dma_buf->ops != &amdgpu_dmabuf_ops)
2000 /* Can't handle non-graphics buffers */
2001 return -EINVAL;
2002
2003 obj = dma_buf->priv;
2004 if (drm_to_adev(obj->dev) != adev)
2005 /* Can't handle buffers from other devices */
2006 return -EINVAL;
2007
2008 bo = gem_to_amdgpu_bo(obj);
2009 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2010 AMDGPU_GEM_DOMAIN_GTT)))
2011 /* Only VRAM and GTT BOs are supported */
2012 return -EINVAL;
2013
2014 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2015 if (!*mem)
2016 return -ENOMEM;
2017
2018 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2019 if (ret) {
2020 kfree(*mem);
2021 return ret;
2022 }
2023
2024 if (size)
2025 *size = amdgpu_bo_size(bo);
2026
2027 if (mmap_offset)
2028 *mmap_offset = amdgpu_bo_mmap_offset(bo);
2029
2030 INIT_LIST_HEAD(&(*mem)->attachments);
2031 mutex_init(&(*mem)->lock);
2032
2033 (*mem)->alloc_flags =
2034 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2035 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2036 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2037 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2038
2039 drm_gem_object_get(&bo->tbo.base);
2040 (*mem)->bo = bo;
2041 (*mem)->va = va;
2042 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2043 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2044 (*mem)->mapped_to_gpu_memory = 0;
2045 (*mem)->process_info = avm->process_info;
2046 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2047 amdgpu_sync_create(&(*mem)->sync);
2048 (*mem)->is_imported = true;
2049
2050 return 0;
2051}
2052
2053/* Evict a userptr BO by stopping the queues if necessary
2054 *
2055 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2056 * cannot do any memory allocations, and cannot take any locks that
2057 * are held elsewhere while allocating memory. Therefore this is as
2058 * simple as possible, using atomic counters.
2059 *
2060 * It doesn't do anything to the BO itself. The real work happens in
2061 * restore, where we get updated page addresses. This function only
2062 * ensures that GPU access to the BO is stopped.
2063 */
2064int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
2065 struct mm_struct *mm)
2066{
2067 struct amdkfd_process_info *process_info = mem->process_info;
2068 int evicted_bos;
2069 int r = 0;
2070
2071 atomic_inc(&mem->invalid);
2072 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
2073 if (evicted_bos == 1) {
2074 /* First eviction, stop the queues */
2075 r = kgd2kfd_quiesce_mm(mm);
2076 if (r)
2077 pr_err("Failed to quiesce KFD\n");
2078 schedule_delayed_work(&process_info->restore_userptr_work,
2079 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2080 }
2081
2082 return r;
2083}
2084
2085/* Update invalid userptr BOs
2086 *
2087 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2088 * userptr_inval_list and updates user pages for all BOs that have
2089 * been invalidated since their last update.
2090 */
2091static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2092 struct mm_struct *mm)
2093{
2094 struct kgd_mem *mem, *tmp_mem;
2095 struct amdgpu_bo *bo;
2096 struct ttm_operation_ctx ctx = { false, false };
2097 int invalid, ret;
2098
2099 /* Move all invalidated BOs to the userptr_inval_list and
2100 * release their user pages by migrating them to the CPU domain
2101 */
2102 list_for_each_entry_safe(mem, tmp_mem,
2103 &process_info->userptr_valid_list,
2104 validate_list.head) {
2105 if (!atomic_read(&mem->invalid))
2106 continue; /* BO is still valid */
2107
2108 bo = mem->bo;
2109
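		/* If reservation or validation fails, return -EAGAIN so the
		 * restore worker reschedules and tries again later.
		 */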
2110 if (amdgpu_bo_reserve(bo, true))
2111 return -EAGAIN;
2112 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2113 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2114 amdgpu_bo_unreserve(bo);
2115 if (ret) {
2116 pr_err("%s: Failed to invalidate userptr BO\n",
2117 __func__);
2118 return -EAGAIN;
2119 }
2120
2121 list_move_tail(&mem->validate_list.head,
2122 &process_info->userptr_inval_list);
2123 }
2124
2125 if (list_empty(&process_info->userptr_inval_list))
2126 return 0; /* All evicted userptr BOs were freed */
2127
2128 /* Go through userptr_inval_list and update any invalid user_pages */
2129 list_for_each_entry(mem, &process_info->userptr_inval_list,
2130 validate_list.head) {
2131 invalid = atomic_read(&mem->invalid);
2132 if (!invalid)
2133 /* BO hasn't been invalidated since the last
2134 * revalidation attempt. Keep its BO list.
2135 */
2136 continue;
2137
2138 bo = mem->bo;
2139
2140 /* Get updated user pages */
2141 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2142 if (ret) {
2143 pr_debug("Failed %d to get user pages\n", ret);
2144
2145 /* Treat -EFAULT (bad address) as success. The access will
2146 * fail later with a VM fault if the GPU tries to touch the
2147 * memory, which is better than hanging indefinitely with
2148 * stalled user mode queues.
2149 *
2150 * Other errors (-EBUSY or -ENOMEM) are returned to retry the restore
2151 */
2152 if (ret != -EFAULT)
2153 return ret;
2154 } else {
2155
2156 /*
2157 * FIXME: Cannot ignore the return code, must hold
2158 * notifier_lock
2159 */
2160 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2161 }
2162
2163 /* Mark the BO as valid unless it was invalidated
2164 * again concurrently.
2165 */
2166 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2167 return -EAGAIN;
2168 }
2169
2170 return 0;
2171}
2172
2173/* Validate invalid userptr BOs
2174 *
2175 * Validates BOs on the userptr_inval_list, and moves them back to the
2176 * userptr_valid_list. Also updates GPUVM page tables with new page
2177 * addresses and waits for the page table updates to complete.
2178 */
2179static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2180{
2181 struct amdgpu_bo_list_entry *pd_bo_list_entries;
2182 struct list_head resv_list, duplicates;
2183 struct ww_acquire_ctx ticket;
2184 struct amdgpu_sync sync;
2185
2186 struct amdgpu_vm *peer_vm;
2187 struct kgd_mem *mem, *tmp_mem;
2188 struct amdgpu_bo *bo;
2189 struct ttm_operation_ctx ctx = { false, false };
2190 int i, ret;
2191
2192 pd_bo_list_entries = kcalloc(process_info->n_vms,
2193 sizeof(struct amdgpu_bo_list_entry),
2194 GFP_KERNEL);
2195 if (!pd_bo_list_entries) {
2196 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2197 ret = -ENOMEM;
2198 goto out_no_mem;
2199 }
2200
2201 INIT_LIST_HEAD(&resv_list);
2202 INIT_LIST_HEAD(&duplicates);
2203
2204 /* Get all the page directory BOs that need to be reserved */
2205 i = 0;
2206 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2207 vm_list_node)
2208 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2209 &pd_bo_list_entries[i++]);
2210 /* Add the userptr_inval_list entries to resv_list */
2211 list_for_each_entry(mem, &process_info->userptr_inval_list,
2212 validate_list.head) {
2213 list_add_tail(&mem->resv_list.head, &resv_list);
2214 mem->resv_list.bo = mem->validate_list.bo;
2215 mem->resv_list.num_shared = mem->validate_list.num_shared;
2216 }
2217
2218 /* Reserve all BOs and page tables for validation */
2219 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2220 WARN(!list_empty(&duplicates), "Duplicates should be empty");
2221 if (ret)
2222 goto out_free;
2223
2224 amdgpu_sync_create(&sync);
2225
2226 ret = process_validate_vms(process_info);
2227 if (ret)
2228 goto unreserve_out;
2229
2230 /* Validate BOs and update GPUVM page tables */
2231 list_for_each_entry_safe(mem, tmp_mem,
2232 &process_info->userptr_inval_list,
2233 validate_list.head) {
2234 struct kfd_mem_attachment *attachment;
2235
2236 bo = mem->bo;
2237
2238 /* Validate the BO if we got user pages */
2239 if (bo->tbo.ttm->pages[0]) {
2240 amdgpu_bo_placement_from_domain(bo, mem->domain);
2241 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2242 if (ret) {
2243 pr_err("%s: failed to validate BO\n", __func__);
2244 goto unreserve_out;
2245 }
2246 }
2247
2248 list_move_tail(&mem->validate_list.head,
2249 &process_info->userptr_valid_list);
2250
2251 /* Update mapping. If the BO was not validated
2252 * (because we couldn't get user pages), this will
2253 * clear the page table entries, which will result in
2254 * VM faults if the GPU tries to access the invalid
2255 * memory.
2256 */
2257 list_for_each_entry(attachment, &mem->attachments, list) {
2258 if (!attachment->is_mapped)
2259 continue;
2260
2261 kfd_mem_dmaunmap_attachment(mem, attachment);
2262 ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2263 if (ret) {
2264 pr_err("%s: update PTE failed\n", __func__);
2265 /* make sure this gets validated again */
2266 atomic_inc(&mem->invalid);
2267 goto unreserve_out;
2268 }
2269 }
2270 }
2271
2272 /* Update page directories */
2273 ret = process_update_pds(process_info, &sync);
2274
2275unreserve_out:
2276 ttm_eu_backoff_reservation(&ticket, &resv_list);
2277 amdgpu_sync_wait(&sync, false);
2278 amdgpu_sync_free(&sync);
2279out_free:
2280 kfree(pd_bo_list_entries);
2281out_no_mem:
2282
2283 return ret;
2284}
2285
2286/* Worker callback to restore evicted userptr BOs
2287 *
2288 * Tries to update and validate all userptr BOs. If successful and no
2289 * concurrent evictions happened, the queues are restarted. Otherwise,
2290 * another attempt is rescheduled for later.
2291 */
2292static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2293{
2294 struct delayed_work *dwork = to_delayed_work(work);
2295 struct amdkfd_process_info *process_info =
2296 container_of(dwork, struct amdkfd_process_info,
2297 restore_userptr_work);
2298 struct task_struct *usertask;
2299 struct mm_struct *mm;
2300 int evicted_bos;
2301
2302 evicted_bos = atomic_read(&process_info->evicted_bos);
2303 if (!evicted_bos)
2304 return;
2305
2306 /* Reference task and mm in case of concurrent process termination */
2307 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2308 if (!usertask)
2309 return;
2310 mm = get_task_mm(usertask);
2311 if (!mm) {
2312 put_task_struct(usertask);
2313 return;
2314 }
2315
2316 mutex_lock(&process_info->lock);
2317
2318 if (update_invalid_user_pages(process_info, mm))
2319 goto unlock_out;
2320 /* userptr_inval_list can be empty if all evicted userptr BOs
2321 * have been freed. In that case there is nothing to validate
2322 * and we can just restart the queues.
2323 */
2324 if (!list_empty(&process_info->userptr_inval_list)) {
2325 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2326 goto unlock_out; /* Concurrent eviction, try again */
2327
2328 if (validate_invalid_user_pages(process_info))
2329 goto unlock_out;
2330 }
2331 /* Final check for concurrent eviction and atomic update. If
2332 * another eviction happens after the successful update, it will
2333 * be a first eviction that calls quiesce_mm. The eviction
2334 * reference counting inside KFD will handle this case.
2335 */
2336 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2337 evicted_bos)
2338 goto unlock_out;
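	/* Restore succeeded: clear the local count so the reschedule
	 * check at the end of this function is skipped.
	 */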
2339 evicted_bos = 0;
2340 if (kgd2kfd_resume_mm(mm)) {
2341 pr_err("%s: Failed to resume KFD\n", __func__);
2342 /* No recovery from this failure. Probably the CP is
2343 * hanging. No point trying again.
2344 */
2345 }
2346
2347unlock_out:
2348 mutex_unlock(&process_info->lock);
2349 mmput(mm);
2350 put_task_struct(usertask);
2351
2352 /* If validation failed, reschedule another attempt */
2353 if (evicted_bos)
2354 schedule_delayed_work(&process_info->restore_userptr_work,
2355 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2356}
2357
2358/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2359 * KFD process identified by process_info
2360 *
2361 * @process_info: amdkfd_process_info of the KFD process
2362 *
2363 * After memory eviction, the restore thread calls this function. The function
2364 * should be called while the process is still valid. BO restore involves:
2365 *
2366 * 1. Release the old eviction fence and create a new one
2367 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2368 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2369 *    BOs that need to be reserved.
2370 * 4. Reserve all the BOs
2371 * 5. Validate PD and PT BOs.
2372 * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2373 * 7. Add the fence to all PD and PT BOs.
2374 * 8. Unreserve all BOs
2375 */
2376int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2377{
2378 struct amdgpu_bo_list_entry *pd_bo_list;
2379 struct amdkfd_process_info *process_info = info;
2380 struct amdgpu_vm *peer_vm;
2381 struct kgd_mem *mem;
2382 struct bo_vm_reservation_context ctx;
2383 struct amdgpu_amdkfd_fence *new_fence;
2384 int ret = 0, i;
2385 struct list_head duplicate_save;
2386 struct amdgpu_sync sync_obj;
2387 unsigned long failed_size = 0;
2388 unsigned long total_size = 0;
2389
2390 INIT_LIST_HEAD(&duplicate_save);
2391 INIT_LIST_HEAD(&ctx.list);
2392 INIT_LIST_HEAD(&ctx.duplicates);
2393
2394 pd_bo_list = kcalloc(process_info->n_vms,
2395 sizeof(struct amdgpu_bo_list_entry),
2396 GFP_KERNEL);
2397 if (!pd_bo_list)
2398 return -ENOMEM;
2399
2400 i = 0;
2401 mutex_lock(&process_info->lock);
2402 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2403 vm_list_node)
2404 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2405
2406 /* Reserve all BOs and page tables/directory. Add all BOs from
2407 * kfd_bo_list to ctx.list
2408 */
2409 list_for_each_entry(mem, &process_info->kfd_bo_list,
2410 validate_list.head) {
2411
2412 list_add_tail(&mem->resv_list.head, &ctx.list);
2413 mem->resv_list.bo = mem->validate_list.bo;
2414 mem->resv_list.num_shared = mem->validate_list.num_shared;
2415 }
2416
2417 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2418 false, &duplicate_save);
2419 if (ret) {
2420 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2421 goto ttm_reserve_fail;
2422 }
2423
2424 amdgpu_sync_create(&sync_obj);
2425
2426 /* Validate PDs and PTs */
2427 ret = process_validate_vms(process_info);
2428 if (ret)
2429 goto validate_map_fail;
2430
2431 ret = process_sync_pds_resv(process_info, &sync_obj);
2432 if (ret) {
2433 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2434 goto validate_map_fail;
2435 }
2436
2437 /* Validate BOs and map them to GPUVM (update VM page tables). */
2438 list_for_each_entry(mem, &process_info->kfd_bo_list,
2439 validate_list.head) {
2440
2441 struct amdgpu_bo *bo = mem->bo;
2442 uint32_t domain = mem->domain;
2443 struct kfd_mem_attachment *attachment;
2444
2445 total_size += amdgpu_bo_size(bo);
2446
2447 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2448 if (ret) {
2449 pr_debug("Memory eviction: Validate BOs failed\n");
2450 failed_size += amdgpu_bo_size(bo);
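			/* Fall back to GTT, presumably because VRAM is
			 * oversubscribed, so the restore can still make
			 * progress.
			 */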
2451 ret = amdgpu_amdkfd_bo_validate(bo,
2452 AMDGPU_GEM_DOMAIN_GTT, false);
2453 if (ret) {
2454 pr_debug("Memory eviction: Try again\n");
2455 goto validate_map_fail;
2456 }
2457 }
2458 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2459 if (ret) {
2460 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2461 goto validate_map_fail;
2462 }
2463 list_for_each_entry(attachment, &mem->attachments, list) {
2464 if (!attachment->is_mapped)
2465 continue;
2466
2467 kfd_mem_dmaunmap_attachment(mem, attachment);
2468 ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2469 if (ret) {
2470 pr_debug("Memory eviction: update PTE failed. Try again\n");
2471 goto validate_map_fail;
2472 }
2473 }
2474 }
2475
2476 if (failed_size)
2477 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2478
2479 /* Update page directories */
2480 ret = process_update_pds(process_info, &sync_obj);
2481 if (ret) {
2482 pr_debug("Memory eviction: update PDs failed. Try again\n");
2483 goto validate_map_fail;
2484 }
2485
2486 /* Wait for validate and PT updates to finish */
2487 amdgpu_sync_wait(&sync_obj, false);
2488
2489 /* Release the old eviction fence and create a new one: a fence only
2490 * goes from unsignaled to signaled, so it cannot be reused.
2491 * Use the context and mm from the old fence.
2492 */
2493 new_fence = amdgpu_amdkfd_fence_create(
2494 process_info->eviction_fence->base.context,
2495 process_info->eviction_fence->mm,
2496 NULL);
2497 if (!new_fence) {
2498 pr_err("Failed to create eviction fence\n");
2499 ret = -ENOMEM;
2500 goto validate_map_fail;
2501 }
2502 dma_fence_put(&process_info->eviction_fence->base);
2503 process_info->eviction_fence = new_fence;
2504 *ef = dma_fence_get(&new_fence->base);
2505
2506 /* Attach new eviction fence to all BOs */
2507 list_for_each_entry(mem, &process_info->kfd_bo_list,
2508 validate_list.head)
2509 amdgpu_bo_fence(mem->bo,
2510 &process_info->eviction_fence->base, true);
2511
2512 /* Attach eviction fence to PD / PT BOs */
2513 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2514 vm_list_node) {
2515 struct amdgpu_bo *bo = peer_vm->root.bo;
2516
2517 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2518 }
2519
2520validate_map_fail:
2521 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2522 amdgpu_sync_free(&sync_obj);
2523ttm_reserve_fail:
2524 mutex_unlock(&process_info->lock);
2525 kfree(pd_bo_list);
2526 return ret;
2527}
2528
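/* Wrap the amdgpu GWS BO in a kgd_mem so KFD can track it like other
 * process BOs and attach the process eviction fence to it.
 */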
2529int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2530{
2531 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2532 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2533 int ret;
2534
2535 if (!info || !gws)
2536 return -EINVAL;
2537
2538 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2539 if (!*mem)
2540 return -ENOMEM;
2541
2542 mutex_init(&(*mem)->lock);
2543 INIT_LIST_HEAD(&(*mem)->attachments);
2544 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2545 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2546 (*mem)->process_info = process_info;
2547 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2548 amdgpu_sync_create(&(*mem)->sync);
2549
2550
2551 /* Validate the GWS BO the first time it is added to the process */
2552 mutex_lock(&(*mem)->process_info->lock);
2553 ret = amdgpu_bo_reserve(gws_bo, false);
2554 if (unlikely(ret)) {
2555 pr_err("Reserve gws bo failed %d\n", ret);
2556 goto bo_reservation_failure;
2557 }
2558
2559 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2560 if (ret) {
2561 pr_err("GWS BO validate failed %d\n", ret);
2562 goto bo_validation_failure;
2563 }
2564 /* The GWS resource is shared between amdgpu and amdkfd.
2565 * Add the process eviction fence to the BO so they can
2566 * evict each other.
2567 */
2568 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2569 if (ret)
2570 goto reserve_shared_fail;
2571 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2572 amdgpu_bo_unreserve(gws_bo);
2573 mutex_unlock(&(*mem)->process_info->lock);
2574
2575 return ret;
2576
2577reserve_shared_fail:
2578bo_validation_failure:
2579 amdgpu_bo_unreserve(gws_bo);
2580bo_reservation_failure:
2581 mutex_unlock(&(*mem)->process_info->lock);
2582 amdgpu_sync_free(&(*mem)->sync);
2583 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2584 amdgpu_bo_unref(&gws_bo);
2585 mutex_destroy(&(*mem)->lock);
2586 kfree(*mem);
2587 *mem = NULL;
2588 return ret;
2589}
2590
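/* Undo amdgpu_amdkfd_add_gws_to_process: remove the eviction fence and
 * free the kgd_mem wrapper.
 */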
2591int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2592{
2593 int ret;
2594 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2595 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2596 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2597
2598 /* Remove BO from process's validate list so restore worker won't touch
2599 * it anymore
2600 */
2601 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2602
2603 ret = amdgpu_bo_reserve(gws_bo, false);
2604 if (unlikely(ret)) {
2605 pr_err("Reserve gws bo failed %d\n", ret);
2606 /* TODO: add BO back to validate_list? */
2607 return ret;
2608 }
2609 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2610 process_info->eviction_fence);
2611 amdgpu_bo_unreserve(gws_bo);
2612 amdgpu_sync_free(&kgd_mem->sync);
2613 amdgpu_bo_unref(&gws_bo);
2614 mutex_destroy(&kgd_mem->lock);
2615 kfree(mem);
2616 return 0;
2617}
2618
2619/* Returns GPU-specific tiling mode information */
2620int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
2621 struct tile_config *config)
2622{
2623 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2624 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2625 config->num_tile_configs =
2626 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2627 config->macro_tile_config_ptr =
2628 adev->gfx.config.macrotile_mode_array;
2629 config->num_macro_tile_configs =
2630 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2631
2632 /* Those values are not set from GFX9 onwards */
2633 config->num_banks = adev->gfx.config.num_banks;
2634 config->num_ranks = adev->gfx.config.num_ranks;
2635
2636 return 0;
2637}