1/*
2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include <linux/dma-buf.h>
23#include <linux/list.h>
24#include <linux/pagemap.h>
25#include <linux/sched/mm.h>
26#include <linux/sched/task.h>
27
28#include "amdgpu_object.h"
29#include "amdgpu_gem.h"
30#include "amdgpu_vm.h"
31#include "amdgpu_amdkfd.h"
32#include "amdgpu_dma_buf.h"
33#include <uapi/linux/kfd_ioctl.h>
34#include "amdgpu_xgmi.h"
35
36/* Userptr restore delay, just long enough to allow consecutive VM
37 * changes to accumulate
38 */
39#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40
41/* Impose limit on how much memory KFD can use */
42static struct {
43 uint64_t max_system_mem_limit;
44 uint64_t max_ttm_mem_limit;
45 int64_t system_mem_used;
46 int64_t ttm_mem_used;
47 spinlock_t mem_limit_lock;
48} kfd_mem_limit;
49
50static const char * const domain_bit_to_string[] = {
51 "CPU",
52 "GTT",
53 "VRAM",
54 "GDS",
55 "GWS",
56 "OA"
57};
58
59#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
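/*
 * Illustrative note (not from the original source): with the usual
 * AMDGPU_GEM_DOMAIN_* single-bit values (CPU=0x1, GTT=0x2, VRAM=0x4, ...),
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) evaluates to
 * domain_bit_to_string[ffs(0x4) - 1] == domain_bit_to_string[2] == "VRAM".
 * The macro assumes a single domain bit is set; with multiple bits it
 * reports only the lowest one.
 */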
60
61static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
62
63
64static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
65{
66 return (struct amdgpu_device *)kgd;
67}
68
69static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
70 struct kgd_mem *mem)
71{
72 struct kfd_mem_attachment *entry;
73
74 list_for_each_entry(entry, &mem->attachments, list)
75 if (entry->bo_va->base.vm == avm)
76 return true;
77
78 return false;
79}
80
/* Set memory usage limits. Currently, the limits are:
 *  System (TTM + userptr) memory - 15/16th of system RAM
 *  TTM memory - 3/8th of system RAM
 */
85void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
86{
87 struct sysinfo si;
88 uint64_t mem;
89
90 si_meminfo(&si);
91 mem = si.freeram - si.freehigh;
92 mem *= si.mem_unit;
93
94 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
95 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
96 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
97 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
98 (kfd_mem_limit.max_system_mem_limit >> 20),
99 (kfd_mem_limit.max_ttm_mem_limit >> 20));
100}
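/*
 * Worked example (illustrative only): with roughly 64 GiB of low memory
 * available at init time, mem >> 4 is 4 GiB, so
 *   max_system_mem_limit = 64 GiB - 4 GiB = 60 GiB  (15/16)
 *   max_ttm_mem_limit    = 32 GiB - 8 GiB = 24 GiB  (3/8)
 */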
101
102void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
103{
104 kfd_mem_limit.system_mem_used += size;
105}
106
107/* Estimate page table size needed to represent a given memory size
108 *
109 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
110 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
111 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
112 * for 2MB pages for TLB efficiency. However, small allocations and
113 * fragmented system memory still need some 4KB pages. We choose a
114 * compromise that should work in most cases without reserving too
115 * much memory for page tables unnecessarily (factor 16K, >> 14).
116 */
117#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
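/*
 * Worked example (illustrative only): mapping 1 GiB of memory reserves
 * 1 GiB >> 14 = 64 KiB for page tables. For comparison, an all-4KB-page
 * mapping would need 1 GiB >> 9 = 2 MiB of PTEs, and an all-2MB-page
 * mapping only 1 GiB >> 18 = 4 KiB; the >> 14 factor sits between the two.
 */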
118
119static size_t amdgpu_amdkfd_acc_size(uint64_t size)
120{
121 size >>= PAGE_SHIFT;
122 size *= sizeof(dma_addr_t) + sizeof(void *);
123
124 return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
125 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
126 PAGE_ALIGN(size);
127}
128
129static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
130 uint64_t size, u32 domain, bool sg)
131{
132 uint64_t reserved_for_pt =
133 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
134 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
135 int ret = 0;
136
137 acc_size = amdgpu_amdkfd_acc_size(size);
138
139 vram_needed = 0;
140 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
141 /* TTM GTT memory */
142 system_mem_needed = acc_size + size;
143 ttm_mem_needed = acc_size + size;
144 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
145 /* Userptr */
146 system_mem_needed = acc_size + size;
147 ttm_mem_needed = acc_size;
148 } else {
149 /* VRAM and SG */
150 system_mem_needed = acc_size;
151 ttm_mem_needed = acc_size;
152 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
153 vram_needed = size;
154 }
155
156 spin_lock(&kfd_mem_limit.mem_limit_lock);
157
158 if (kfd_mem_limit.system_mem_used + system_mem_needed >
159 kfd_mem_limit.max_system_mem_limit)
160 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
161
162 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
163 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
164 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
165 kfd_mem_limit.max_ttm_mem_limit) ||
166 (adev->kfd.vram_used + vram_needed >
167 adev->gmc.real_vram_size - reserved_for_pt)) {
168 ret = -ENOMEM;
169 } else {
170 kfd_mem_limit.system_mem_used += system_mem_needed;
171 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
172 adev->kfd.vram_used += vram_needed;
173 }
174
175 spin_unlock(&kfd_mem_limit.mem_limit_lock);
176 return ret;
177}
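/*
 * Summary of the accounting above (descriptive comment, not from the
 * original source). For an allocation of size with overhead acc_size:
 *   GTT:                system += size + acc, ttm += size + acc
 *   userptr (CPU, !sg): system += size + acc, ttm += acc
 *   VRAM / SG:          system += acc, ttm += acc, plus vram += size for VRAM
 * unreserve_mem_limit() below must mirror these rules exactly, otherwise
 * the WARN_ONCE underflow checks there will eventually trigger.
 */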
178
179static void unreserve_mem_limit(struct amdgpu_device *adev,
180 uint64_t size, u32 domain, bool sg)
181{
182 size_t acc_size;
183
184 acc_size = amdgpu_amdkfd_acc_size(size);
185
186 spin_lock(&kfd_mem_limit.mem_limit_lock);
187 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
188 kfd_mem_limit.system_mem_used -= (acc_size + size);
189 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
190 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
191 kfd_mem_limit.system_mem_used -= (acc_size + size);
192 kfd_mem_limit.ttm_mem_used -= acc_size;
193 } else {
194 kfd_mem_limit.system_mem_used -= acc_size;
195 kfd_mem_limit.ttm_mem_used -= acc_size;
196 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
197 adev->kfd.vram_used -= size;
198 WARN_ONCE(adev->kfd.vram_used < 0,
199 "kfd VRAM memory accounting unbalanced");
200 }
201 }
202 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
203 "kfd system memory accounting unbalanced");
204 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
205 "kfd TTM memory accounting unbalanced");
206
207 spin_unlock(&kfd_mem_limit.mem_limit_lock);
208}
209
210void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
211{
212 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
213 u32 domain = bo->preferred_domains;
214 bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
215
216 if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
217 domain = AMDGPU_GEM_DOMAIN_CPU;
218 sg = false;
219 }
220
221 unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
222}
223
224
225/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
226 * reservation object.
227 *
228 * @bo: [IN] Remove eviction fence(s) from this BO
229 * @ef: [IN] This eviction fence is removed if it
230 * is present in the shared list.
231 *
 * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv locked.
233 */
234static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
235 struct amdgpu_amdkfd_fence *ef)
236{
237 struct dma_resv *resv = bo->tbo.base.resv;
238 struct dma_resv_list *old, *new;
239 unsigned int i, j, k;
240
241 if (!ef)
242 return -EINVAL;
243
244 old = dma_resv_shared_list(resv);
245 if (!old)
246 return 0;
247
248 new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
249 if (!new)
250 return -ENOMEM;
251
	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
255 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
256 struct dma_fence *f;
257
258 f = rcu_dereference_protected(old->shared[i],
259 dma_resv_held(resv));
260
261 if (f->context == ef->base.context)
262 RCU_INIT_POINTER(new->shared[--j], f);
263 else
264 RCU_INIT_POINTER(new->shared[k++], f);
265 }
266 new->shared_max = old->shared_max;
267 new->shared_count = k;
268
269 /* Install the new fence list, seqcount provides the barriers */
270 write_seqcount_begin(&resv->seq);
271 RCU_INIT_POINTER(resv->fence, new);
272 write_seqcount_end(&resv->seq);
273
274 /* Drop the references to the removed fences or move them to ef_list */
275 for (i = j; i < old->shared_count; ++i) {
276 struct dma_fence *f;
277
278 f = rcu_dereference_protected(new->shared[i],
279 dma_resv_held(resv));
280 dma_fence_put(f);
281 }
282 kfree_rcu(old, rcu);
283
284 return 0;
285}
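/*
 * Illustrative example of the partition above: with 5 shared fences of
 * which 2 belong to the eviction fence context ef, the loop compacts the
 * 3 unrelated fences to indices 0..2 (k ends at 3) and parks the 2
 * matching ones at the tail (j ends at 3). The new list is published with
 * shared_count = 3, and the references to the 2 tail entries are dropped.
 */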
286
287int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
288{
289 struct amdgpu_bo *root = bo;
290 struct amdgpu_vm_bo_base *vm_bo;
291 struct amdgpu_vm *vm;
292 struct amdkfd_process_info *info;
293 struct amdgpu_amdkfd_fence *ef;
294 int ret;
295
	/* We can always get vm_bo from the root PD BO. */
297 while (root->parent)
298 root = root->parent;
299
300 vm_bo = root->vm_bo;
301 if (!vm_bo)
302 return 0;
303
304 vm = vm_bo->vm;
305 if (!vm)
306 return 0;
307
308 info = vm->process_info;
309 if (!info || !info->eviction_fence)
310 return 0;
311
312 ef = container_of(dma_fence_get(&info->eviction_fence->base),
313 struct amdgpu_amdkfd_fence, base);
314
315 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
316 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
317 dma_resv_unlock(bo->tbo.base.resv);
318
319 dma_fence_put(&ef->base);
320 return ret;
321}
322
323static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
324 bool wait)
325{
326 struct ttm_operation_ctx ctx = { false, false };
327 int ret;
328
329 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
330 "Called with userptr BO"))
331 return -EINVAL;
332
333 amdgpu_bo_placement_from_domain(bo, domain);
334
335 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
336 if (ret)
337 goto validate_fail;
338 if (wait)
339 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
340
341validate_fail:
342 return ret;
343}
344
345static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
346{
347 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
348}
349
350/* vm_validate_pt_pd_bos - Validate page table and directory BOs
351 *
352 * Page directories are not updated here because huge page handling
353 * during page table updates can invalidate page directory entries
354 * again. Page directories are only updated after updating page
355 * tables.
356 */
357static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
358{
359 struct amdgpu_bo *pd = vm->root.bo;
360 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
361 int ret;
362
363 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
364 if (ret) {
365 pr_err("failed to validate PT BOs\n");
366 return ret;
367 }
368
369 ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
370 if (ret) {
371 pr_err("failed to validate PD\n");
372 return ret;
373 }
374
375 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
376
377 if (vm->use_cpu_for_update) {
378 ret = amdgpu_bo_kmap(pd, NULL);
379 if (ret) {
380 pr_err("failed to kmap PD, ret=%d\n", ret);
381 return ret;
382 }
383 }
384
385 return 0;
386}
387
388static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
389{
390 struct amdgpu_bo *pd = vm->root.bo;
391 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
392 int ret;
393
394 ret = amdgpu_vm_update_pdes(adev, vm, false);
395 if (ret)
396 return ret;
397
398 return amdgpu_sync_fence(sync, vm->last_update);
399}
400
401static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
402{
403 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
404 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
405 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
406 uint32_t mapping_flags;
407 uint64_t pte_flags;
408 bool snoop = false;
409
410 mapping_flags = AMDGPU_VM_PAGE_READABLE;
411 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
412 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
413 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
414 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
415
416 switch (adev->asic_type) {
417 case CHIP_ARCTURUS:
418 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
419 if (bo_adev == adev)
420 mapping_flags |= coherent ?
421 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
422 else
423 mapping_flags |= coherent ?
424 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
425 } else {
426 mapping_flags |= coherent ?
427 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
428 }
429 break;
430 case CHIP_ALDEBARAN:
431 if (coherent && uncached) {
432 if (adev->gmc.xgmi.connected_to_cpu ||
433 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
434 snoop = true;
435 mapping_flags |= AMDGPU_VM_MTYPE_UC;
436 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
437 if (bo_adev == adev) {
438 mapping_flags |= coherent ?
439 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
440 if (adev->gmc.xgmi.connected_to_cpu)
441 snoop = true;
442 } else {
443 mapping_flags |= coherent ?
444 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
445 if (amdgpu_xgmi_same_hive(adev, bo_adev))
446 snoop = true;
447 }
448 } else {
449 snoop = true;
450 mapping_flags |= coherent ?
451 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
452 }
453 break;
454 default:
455 mapping_flags |= coherent ?
456 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
457 }
458
459 pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
460 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
461
462 return pte_flags;
463}
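/*
 * Illustrative examples (not exhaustive): on Arcturus, a coherent VRAM
 * allocation mapped on the GPU that owns it gets AMDGPU_VM_MTYPE_CC, a
 * non-coherent one gets MTYPE_RW, and the same BO mapped from a peer GPU
 * gets MTYPE_UC/NC. On ASICs handled by the default case only UC vs. NC
 * is chosen and snoop stays false, so AMDGPU_PTE_SNOOPED is not set.
 */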
464
465static int
466kfd_mem_dmamap_userptr(struct kgd_mem *mem,
467 struct kfd_mem_attachment *attachment)
468{
469 enum dma_data_direction direction =
470 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
471 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
472 struct ttm_operation_ctx ctx = {.interruptible = true};
473 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
474 struct amdgpu_device *adev = attachment->adev;
475 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
476 struct ttm_tt *ttm = bo->tbo.ttm;
477 int ret;
478
	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Bail out through free_sg so the just-allocated ttm->sg is not leaked */
	if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) {
		ret = -EINVAL;
		goto free_sg;
	}
485
486 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
487 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
488 ttm->num_pages, 0,
489 (u64)ttm->num_pages << PAGE_SHIFT,
490 GFP_KERNEL);
491 if (unlikely(ret))
492 goto free_sg;
493
494 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
495 if (unlikely(ret))
496 goto release_sg;
497
498 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
499 ttm->num_pages);
500
501 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
502 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
503 if (ret)
504 goto unmap_sg;
505
506 return 0;
507
508unmap_sg:
509 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
510release_sg:
511 pr_err("DMA map userptr failed: %d\n", ret);
512 sg_free_table(ttm->sg);
513free_sg:
514 kfree(ttm->sg);
515 ttm->sg = NULL;
516 return ret;
517}
518
519static int
520kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
521{
522 struct ttm_operation_ctx ctx = {.interruptible = true};
523 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
524
525 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
526 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
527}
528
529static int
530kfd_mem_dmamap_attachment(struct kgd_mem *mem,
531 struct kfd_mem_attachment *attachment)
532{
533 switch (attachment->type) {
534 case KFD_MEM_ATT_SHARED:
535 return 0;
536 case KFD_MEM_ATT_USERPTR:
537 return kfd_mem_dmamap_userptr(mem, attachment);
538 case KFD_MEM_ATT_DMABUF:
539 return kfd_mem_dmamap_dmabuf(attachment);
540 default:
541 WARN_ON_ONCE(1);
542 }
543 return -EINVAL;
544}
545
546static void
547kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
548 struct kfd_mem_attachment *attachment)
549{
550 enum dma_data_direction direction =
551 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
552 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
553 struct ttm_operation_ctx ctx = {.interruptible = false};
554 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
555 struct amdgpu_device *adev = attachment->adev;
556 struct ttm_tt *ttm = bo->tbo.ttm;
557
558 if (unlikely(!ttm->sg))
559 return;
560
561 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
562 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
563
564 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
565 sg_free_table(ttm->sg);
566 ttm->sg = NULL;
567}
568
569static void
570kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
571{
572 struct ttm_operation_ctx ctx = {.interruptible = true};
573 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
574
575 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
576 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
577}
578
579static void
580kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
581 struct kfd_mem_attachment *attachment)
582{
583 switch (attachment->type) {
584 case KFD_MEM_ATT_SHARED:
585 break;
586 case KFD_MEM_ATT_USERPTR:
587 kfd_mem_dmaunmap_userptr(mem, attachment);
588 break;
589 case KFD_MEM_ATT_DMABUF:
590 kfd_mem_dmaunmap_dmabuf(attachment);
591 break;
592 default:
593 WARN_ON_ONCE(1);
594 }
595}
596
597static int
598kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
599 struct amdgpu_bo **bo)
600{
601 unsigned long bo_size = mem->bo->tbo.base.size;
602 struct drm_gem_object *gobj;
603 int ret;
604
605 ret = amdgpu_bo_reserve(mem->bo, false);
606 if (ret)
607 return ret;
608
609 ret = amdgpu_gem_object_create(adev, bo_size, 1,
610 AMDGPU_GEM_DOMAIN_CPU,
611 AMDGPU_GEM_CREATE_PREEMPTIBLE,
612 ttm_bo_type_sg, mem->bo->tbo.base.resv,
613 &gobj);
614 amdgpu_bo_unreserve(mem->bo);
615 if (ret)
616 return ret;
617
618 *bo = gem_to_amdgpu_bo(gobj);
619 (*bo)->parent = amdgpu_bo_ref(mem->bo);
620
621 return 0;
622}
623
624static int
625kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
626 struct amdgpu_bo **bo)
627{
628 struct drm_gem_object *gobj;
629 int ret;
630
631 if (!mem->dmabuf) {
632 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
633 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
634 DRM_RDWR : 0);
635 if (IS_ERR(mem->dmabuf)) {
636 ret = PTR_ERR(mem->dmabuf);
637 mem->dmabuf = NULL;
638 return ret;
639 }
640 }
641
642 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
643 if (IS_ERR(gobj))
644 return PTR_ERR(gobj);
645
646 /* Import takes an extra reference on the dmabuf. Drop it now to
647 * avoid leaking it. We only need the one reference in
648 * kgd_mem->dmabuf.
649 */
650 dma_buf_put(mem->dmabuf);
651
652 *bo = gem_to_amdgpu_bo(gobj);
653 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
654 (*bo)->parent = amdgpu_bo_ref(mem->bo);
655
656 return 0;
657}
658
659/* kfd_mem_attach - Add a BO to a VM
660 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM is done here. It can later be mapped and unmapped many times
 * without repeating these steps.
664 *
665 * 0. Create BO for DMA mapping, if needed
666 * 1. Allocate and initialize BO VA entry data structure
667 * 2. Add BO to the VM
668 * 3. Determine ASIC-specific PTE flags
669 * 4. Alloc page tables and directories if needed
670 * 4a. Validate new page tables and directories
671 */
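/*
 * Examples of the attachment type chosen below (illustrative): a BO mapped
 * on the GPU that allocated it, or VRAM mapped within the same XGMI hive,
 * reuses the original BO (KFD_MEM_ATT_SHARED); a userptr BO mapped on
 * another GPU gets a new SG BO (KFD_MEM_ATT_USERPTR); a GTT BO mapped on
 * another GPU is imported through a dma-buf (KFD_MEM_ATT_DMABUF). For AQL
 * queues the loop runs twice, so the second attachment at va + bo_size
 * shares the BO created for the first one.
 */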
672static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
673 struct amdgpu_vm *vm, bool is_aql)
674{
675 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
676 unsigned long bo_size = mem->bo->tbo.base.size;
677 uint64_t va = mem->va;
678 struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
679 struct amdgpu_bo *bo[2] = {NULL, NULL};
680 int i, ret;
681
682 if (!va) {
683 pr_err("Invalid VA when adding BO to VM\n");
684 return -EINVAL;
685 }
686
687 for (i = 0; i <= is_aql; i++) {
688 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
689 if (unlikely(!attachment[i])) {
690 ret = -ENOMEM;
691 goto unwind;
692 }
693
694 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
695 va + bo_size, vm);
696
697 if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
698 amdgpu_xgmi_same_hive(adev, bo_adev))) {
699 /* Mappings on the local GPU and VRAM mappings in the
700 * local hive share the original BO
701 */
702 attachment[i]->type = KFD_MEM_ATT_SHARED;
703 bo[i] = mem->bo;
704 drm_gem_object_get(&bo[i]->tbo.base);
705 } else if (i > 0) {
706 /* Multiple mappings on the same GPU share the BO */
707 attachment[i]->type = KFD_MEM_ATT_SHARED;
708 bo[i] = bo[0];
709 drm_gem_object_get(&bo[i]->tbo.base);
710 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
711 /* Create an SG BO to DMA-map userptrs on other GPUs */
712 attachment[i]->type = KFD_MEM_ATT_USERPTR;
713 ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
714 if (ret)
715 goto unwind;
716 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
717 mem->bo->tbo.type != ttm_bo_type_sg) {
718 /* GTT BOs use DMA-mapping ability of dynamic-attach
719 * DMA bufs. TODO: The same should work for VRAM on
720 * large-BAR GPUs.
721 */
722 attachment[i]->type = KFD_MEM_ATT_DMABUF;
723 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
724 if (ret)
725 goto unwind;
726 } else {
727 /* FIXME: Need to DMA-map other BO types:
728 * large-BAR VRAM, doorbells, MMIO remap
729 */
730 attachment[i]->type = KFD_MEM_ATT_SHARED;
731 bo[i] = mem->bo;
732 drm_gem_object_get(&bo[i]->tbo.base);
733 }
734
735 /* Add BO to VM internal data structures */
736 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
737 if (unlikely(!attachment[i]->bo_va)) {
738 ret = -ENOMEM;
739 pr_err("Failed to add BO object to VM. ret == %d\n",
740 ret);
741 goto unwind;
742 }
743
744 attachment[i]->va = va;
745 attachment[i]->pte_flags = get_pte_flags(adev, mem);
746 attachment[i]->adev = adev;
747 list_add(&attachment[i]->list, &mem->attachments);
748
749 va += bo_size;
750 }
751
752 return 0;
753
754unwind:
755 for (; i >= 0; i--) {
756 if (!attachment[i])
757 continue;
758 if (attachment[i]->bo_va) {
759 amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
760 list_del(&attachment[i]->list);
761 }
762 if (bo[i])
763 drm_gem_object_put(&bo[i]->tbo.base);
764 kfree(attachment[i]);
765 }
766 return ret;
767}
768
769static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
770{
771 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
772
773 pr_debug("\t remove VA 0x%llx in entry %p\n",
774 attachment->va, attachment);
775 amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
776 drm_gem_object_put(&bo->tbo.base);
777 list_del(&attachment->list);
778 kfree(attachment);
779}
780
781static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
782 struct amdkfd_process_info *process_info,
783 bool userptr)
784{
785 struct ttm_validate_buffer *entry = &mem->validate_list;
786 struct amdgpu_bo *bo = mem->bo;
787
788 INIT_LIST_HEAD(&entry->head);
789 entry->num_shared = 1;
790 entry->bo = &bo->tbo;
791 mutex_lock(&process_info->lock);
792 if (userptr)
793 list_add_tail(&entry->head, &process_info->userptr_valid_list);
794 else
795 list_add_tail(&entry->head, &process_info->kfd_bo_list);
796 mutex_unlock(&process_info->lock);
797}
798
799static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
800 struct amdkfd_process_info *process_info)
801{
802 struct ttm_validate_buffer *bo_list_entry;
803
804 bo_list_entry = &mem->validate_list;
805 mutex_lock(&process_info->lock);
806 list_del(&bo_list_entry->head);
807 mutex_unlock(&process_info->lock);
808}
809
810/* Initializes user pages. It registers the MMU notifier and validates
811 * the userptr BO in the GTT domain.
812 *
813 * The BO must already be on the userptr_valid_list. Otherwise an
814 * eviction and restore may happen that leaves the new BO unmapped
815 * with the user mode queues running.
816 *
817 * Takes the process_info->lock to protect against concurrent restore
818 * workers.
819 *
820 * Returns 0 for success, negative errno for errors.
821 */
822static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
823{
824 struct amdkfd_process_info *process_info = mem->process_info;
825 struct amdgpu_bo *bo = mem->bo;
826 struct ttm_operation_ctx ctx = { true, false };
827 int ret = 0;
828
829 mutex_lock(&process_info->lock);
830
831 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
832 if (ret) {
833 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
834 goto out;
835 }
836
837 ret = amdgpu_mn_register(bo, user_addr);
838 if (ret) {
839 pr_err("%s: Failed to register MMU notifier: %d\n",
840 __func__, ret);
841 goto out;
842 }
843
844 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
845 if (ret) {
846 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
847 goto unregister_out;
848 }
849
850 ret = amdgpu_bo_reserve(bo, true);
851 if (ret) {
852 pr_err("%s: Failed to reserve BO\n", __func__);
853 goto release_out;
854 }
855 amdgpu_bo_placement_from_domain(bo, mem->domain);
856 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
857 if (ret)
858 pr_err("%s: failed to validate BO\n", __func__);
859 amdgpu_bo_unreserve(bo);
860
861release_out:
862 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
863unregister_out:
864 if (ret)
865 amdgpu_mn_unregister(bo);
866out:
867 mutex_unlock(&process_info->lock);
868 return ret;
869}
870
871/* Reserving a BO and its page table BOs must happen atomically to
872 * avoid deadlocks. Some operations update multiple VMs at once. Track
873 * all the reservation info in a context structure. Optionally a sync
874 * object can track VM updates.
875 */
876struct bo_vm_reservation_context {
877 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
878 unsigned int n_vms; /* Number of VMs reserved */
879 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
880 struct ww_acquire_ctx ticket; /* Reservation ticket */
881 struct list_head list, duplicates; /* BO lists */
882 struct amdgpu_sync *sync; /* Pointer to sync object */
883 bool reserved; /* Whether BOs are reserved */
884};
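/*
 * Typical usage of the reservation context (illustrative sketch only):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, avm, &ctx);
 *	if (!ret) {
 *		(update mappings; fences get added to ctx.sync)
 *		ret = unreserve_bo_and_vms(&ctx, true, false);
 *	}
 *
 * unreserve_bo_and_vms() must be called even if the intermediate steps
 * fail, as long as the reservation itself succeeded.
 */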
885
886enum bo_vm_match {
887 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
888 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
889 BO_VM_ALL, /* Match all VMs a BO was added to */
890};
891
892/**
893 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
894 * @mem: KFD BO structure.
895 * @vm: the VM to reserve.
896 * @ctx: the struct that will be used in unreserve_bo_and_vms().
897 */
898static int reserve_bo_and_vm(struct kgd_mem *mem,
899 struct amdgpu_vm *vm,
900 struct bo_vm_reservation_context *ctx)
901{
902 struct amdgpu_bo *bo = mem->bo;
903 int ret;
904
905 WARN_ON(!vm);
906
907 ctx->reserved = false;
908 ctx->n_vms = 1;
909 ctx->sync = &mem->sync;
910
911 INIT_LIST_HEAD(&ctx->list);
912 INIT_LIST_HEAD(&ctx->duplicates);
913
914 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
915 if (!ctx->vm_pd)
916 return -ENOMEM;
917
918 ctx->kfd_bo.priority = 0;
919 ctx->kfd_bo.tv.bo = &bo->tbo;
920 ctx->kfd_bo.tv.num_shared = 1;
921 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
922
923 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
924
925 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
926 false, &ctx->duplicates);
927 if (ret) {
928 pr_err("Failed to reserve buffers in ttm.\n");
929 kfree(ctx->vm_pd);
930 ctx->vm_pd = NULL;
931 return ret;
932 }
933
934 ctx->reserved = true;
935 return 0;
936}
937
938/**
939 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
940 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are used. Otherwise, only the single given VM is reserved.
943 * @map_type: the mapping status that will be used to filter the VMs.
944 * @ctx: the struct that will be used in unreserve_bo_and_vms().
945 *
946 * Returns 0 for success, negative for failure.
947 */
948static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
949 struct amdgpu_vm *vm, enum bo_vm_match map_type,
950 struct bo_vm_reservation_context *ctx)
951{
952 struct amdgpu_bo *bo = mem->bo;
953 struct kfd_mem_attachment *entry;
954 unsigned int i;
955 int ret;
956
957 ctx->reserved = false;
958 ctx->n_vms = 0;
959 ctx->vm_pd = NULL;
960 ctx->sync = &mem->sync;
961
962 INIT_LIST_HEAD(&ctx->list);
963 INIT_LIST_HEAD(&ctx->duplicates);
964
965 list_for_each_entry(entry, &mem->attachments, list) {
966 if ((vm && vm != entry->bo_va->base.vm) ||
967 (entry->is_mapped != map_type
968 && map_type != BO_VM_ALL))
969 continue;
970
971 ctx->n_vms++;
972 }
973
974 if (ctx->n_vms != 0) {
975 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
976 GFP_KERNEL);
977 if (!ctx->vm_pd)
978 return -ENOMEM;
979 }
980
981 ctx->kfd_bo.priority = 0;
982 ctx->kfd_bo.tv.bo = &bo->tbo;
983 ctx->kfd_bo.tv.num_shared = 1;
984 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
985
986 i = 0;
987 list_for_each_entry(entry, &mem->attachments, list) {
988 if ((vm && vm != entry->bo_va->base.vm) ||
989 (entry->is_mapped != map_type
990 && map_type != BO_VM_ALL))
991 continue;
992
993 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
994 &ctx->vm_pd[i]);
995 i++;
996 }
997
998 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
999 false, &ctx->duplicates);
1000 if (ret) {
1001 pr_err("Failed to reserve buffers in ttm.\n");
1002 kfree(ctx->vm_pd);
1003 ctx->vm_pd = NULL;
1004 return ret;
1005 }
1006
1007 ctx->reserved = true;
1008 return 0;
1009}
1010
1011/**
1012 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1013 * @ctx: Reservation context to unreserve
1014 * @wait: Optionally wait for a sync object representing pending VM updates
1015 * @intr: Whether the wait is interruptible
1016 *
1017 * Also frees any resources allocated in
1018 * reserve_bo_and_(cond_)vm(s). Returns the status from
1019 * amdgpu_sync_wait.
1020 */
1021static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1022 bool wait, bool intr)
1023{
1024 int ret = 0;
1025
1026 if (wait)
1027 ret = amdgpu_sync_wait(ctx->sync, intr);
1028
1029 if (ctx->reserved)
1030 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1031 kfree(ctx->vm_pd);
1032
1033 ctx->sync = NULL;
1034
1035 ctx->reserved = false;
1036 ctx->vm_pd = NULL;
1037
1038 return ret;
1039}
1040
1041static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1042 struct kfd_mem_attachment *entry,
1043 struct amdgpu_sync *sync)
1044{
1045 struct amdgpu_bo_va *bo_va = entry->bo_va;
1046 struct amdgpu_device *adev = entry->adev;
1047 struct amdgpu_vm *vm = bo_va->base.vm;
1048
1049 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1050
1051 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1052
1053 amdgpu_sync_fence(sync, bo_va->last_pt_update);
1054
1055 kfd_mem_dmaunmap_attachment(mem, entry);
1056}
1057
1058static int update_gpuvm_pte(struct kgd_mem *mem,
1059 struct kfd_mem_attachment *entry,
1060 struct amdgpu_sync *sync,
1061 bool *table_freed)
1062{
1063 struct amdgpu_bo_va *bo_va = entry->bo_va;
1064 struct amdgpu_device *adev = entry->adev;
1065 int ret;
1066
1067 ret = kfd_mem_dmamap_attachment(mem, entry);
1068 if (ret)
1069 return ret;
1070
1071 /* Update the page tables */
1072 ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1073 if (ret) {
1074 pr_err("amdgpu_vm_bo_update failed\n");
1075 return ret;
1076 }
1077
1078 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1079}
1080
1081static int map_bo_to_gpuvm(struct kgd_mem *mem,
1082 struct kfd_mem_attachment *entry,
1083 struct amdgpu_sync *sync,
1084 bool no_update_pte,
1085 bool *table_freed)
1086{
1087 int ret;
1088
1089 /* Set virtual address for the allocation */
1090 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1091 amdgpu_bo_size(entry->bo_va->base.bo),
1092 entry->pte_flags);
1093 if (ret) {
1094 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1095 entry->va, ret);
1096 return ret;
1097 }
1098
1099 if (no_update_pte)
1100 return 0;
1101
1102 ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1103 if (ret) {
1104 pr_err("update_gpuvm_pte() failed\n");
1105 goto update_gpuvm_pte_failed;
1106 }
1107
1108 return 0;
1109
1110update_gpuvm_pte_failed:
1111 unmap_bo_from_gpuvm(mem, entry, sync);
1112 return ret;
1113}
1114
1115static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1116{
1117 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1118
1119 if (!sg)
1120 return NULL;
1121 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1122 kfree(sg);
1123 return NULL;
1124 }
1125 sg->sgl->dma_address = addr;
1126 sg->sgl->length = size;
1127#ifdef CONFIG_NEED_SG_DMA_LENGTH
1128 sg->sgl->dma_length = size;
1129#endif
1130 return sg;
1131}
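/*
 * Descriptive note (not from the original source): create_doorbell_sg()
 * builds a single-entry sg_table whose dma_address is the doorbell or MMIO
 * bus address, so TTM can treat the page as already DMA-mapped. It is used
 * below by amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() for the
 * DOORBELL/MMIO_REMAP flags (bo_type == ttm_bo_type_sg) and freed again in
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu().
 */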
1132
1133static int process_validate_vms(struct amdkfd_process_info *process_info)
1134{
1135 struct amdgpu_vm *peer_vm;
1136 int ret;
1137
1138 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1139 vm_list_node) {
1140 ret = vm_validate_pt_pd_bos(peer_vm);
1141 if (ret)
1142 return ret;
1143 }
1144
1145 return 0;
1146}
1147
1148static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1149 struct amdgpu_sync *sync)
1150{
1151 struct amdgpu_vm *peer_vm;
1152 int ret;
1153
1154 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1155 vm_list_node) {
1156 struct amdgpu_bo *pd = peer_vm->root.bo;
1157
1158 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1159 AMDGPU_SYNC_NE_OWNER,
1160 AMDGPU_FENCE_OWNER_KFD);
1161 if (ret)
1162 return ret;
1163 }
1164
1165 return 0;
1166}
1167
1168static int process_update_pds(struct amdkfd_process_info *process_info,
1169 struct amdgpu_sync *sync)
1170{
1171 struct amdgpu_vm *peer_vm;
1172 int ret;
1173
1174 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1175 vm_list_node) {
1176 ret = vm_update_pds(peer_vm, sync);
1177 if (ret)
1178 return ret;
1179 }
1180
1181 return 0;
1182}
1183
1184static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1185 struct dma_fence **ef)
1186{
1187 struct amdkfd_process_info *info = NULL;
1188 int ret;
1189
1190 if (!*process_info) {
1191 info = kzalloc(sizeof(*info), GFP_KERNEL);
1192 if (!info)
1193 return -ENOMEM;
1194
1195 mutex_init(&info->lock);
1196 INIT_LIST_HEAD(&info->vm_list_head);
1197 INIT_LIST_HEAD(&info->kfd_bo_list);
1198 INIT_LIST_HEAD(&info->userptr_valid_list);
1199 INIT_LIST_HEAD(&info->userptr_inval_list);
1200
1201 info->eviction_fence =
1202 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1203 current->mm,
1204 NULL);
1205 if (!info->eviction_fence) {
1206 pr_err("Failed to create eviction fence\n");
1207 ret = -ENOMEM;
1208 goto create_evict_fence_fail;
1209 }
1210
1211 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1212 atomic_set(&info->evicted_bos, 0);
1213 INIT_DELAYED_WORK(&info->restore_userptr_work,
1214 amdgpu_amdkfd_restore_userptr_worker);
1215
1216 *process_info = info;
1217 *ef = dma_fence_get(&info->eviction_fence->base);
1218 }
1219
1220 vm->process_info = *process_info;
1221
1222 /* Validate page directory and attach eviction fence */
1223 ret = amdgpu_bo_reserve(vm->root.bo, true);
1224 if (ret)
1225 goto reserve_pd_fail;
1226 ret = vm_validate_pt_pd_bos(vm);
1227 if (ret) {
1228 pr_err("validate_pt_pd_bos() failed\n");
1229 goto validate_pd_fail;
1230 }
1231 ret = amdgpu_bo_sync_wait(vm->root.bo,
1232 AMDGPU_FENCE_OWNER_KFD, false);
1233 if (ret)
1234 goto wait_pd_fail;
1235 ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1236 if (ret)
1237 goto reserve_shared_fail;
1238 amdgpu_bo_fence(vm->root.bo,
1239 &vm->process_info->eviction_fence->base, true);
1240 amdgpu_bo_unreserve(vm->root.bo);
1241
1242 /* Update process info */
1243 mutex_lock(&vm->process_info->lock);
1244 list_add_tail(&vm->vm_list_node,
1245 &(vm->process_info->vm_list_head));
1246 vm->process_info->n_vms++;
1247 mutex_unlock(&vm->process_info->lock);
1248
1249 return 0;
1250
1251reserve_shared_fail:
1252wait_pd_fail:
1253validate_pd_fail:
1254 amdgpu_bo_unreserve(vm->root.bo);
1255reserve_pd_fail:
1256 vm->process_info = NULL;
1257 if (info) {
1258 /* Two fence references: one in info and one in *ef */
1259 dma_fence_put(&info->eviction_fence->base);
1260 dma_fence_put(*ef);
1261 *ef = NULL;
1262 *process_info = NULL;
1263 put_pid(info->pid);
1264create_evict_fence_fail:
1265 mutex_destroy(&info->lock);
1266 kfree(info);
1267 }
1268 return ret;
1269}
1270
1271int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1272 struct file *filp, u32 pasid,
1273 void **process_info,
1274 struct dma_fence **ef)
1275{
1276 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1277 struct amdgpu_fpriv *drv_priv;
1278 struct amdgpu_vm *avm;
1279 int ret;
1280
1281 ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1282 if (ret)
1283 return ret;
1284 avm = &drv_priv->vm;
1285
1286 /* Already a compute VM? */
1287 if (avm->process_info)
1288 return -EINVAL;
1289
	/* Free the pasid originally allocated by amdgpu; it will be
	 * replaced with a pasid allocated by KFD.
	 */
1293 if (avm->pasid) {
1294 amdgpu_pasid_free(avm->pasid);
1295 amdgpu_vm_set_pasid(adev, avm, 0);
1296 }
1297
1298 /* Convert VM into a compute VM */
1299 ret = amdgpu_vm_make_compute(adev, avm);
1300 if (ret)
1301 return ret;
1302
1303 ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1304 if (ret)
1305 return ret;
1306 /* Initialize KFD part of the VM and process info */
1307 ret = init_kfd_vm(avm, process_info, ef);
1308 if (ret)
1309 return ret;
1310
1311 amdgpu_vm_set_task_info(avm);
1312
1313 return 0;
1314}
1315
1316void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1317 struct amdgpu_vm *vm)
1318{
1319 struct amdkfd_process_info *process_info = vm->process_info;
1320 struct amdgpu_bo *pd = vm->root.bo;
1321
1322 if (!process_info)
1323 return;
1324
1325 /* Release eviction fence from PD */
1326 amdgpu_bo_reserve(pd, false);
1327 amdgpu_bo_fence(pd, NULL, false);
1328 amdgpu_bo_unreserve(pd);
1329
1330 /* Update process info */
1331 mutex_lock(&process_info->lock);
1332 process_info->n_vms--;
1333 list_del(&vm->vm_list_node);
1334 mutex_unlock(&process_info->lock);
1335
1336 vm->process_info = NULL;
1337
1338 /* Release per-process resources when last compute VM is destroyed */
1339 if (!process_info->n_vms) {
1340 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1341 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1342 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1343
1344 dma_fence_put(&process_info->eviction_fence->base);
1345 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1346 put_pid(process_info->pid);
1347 mutex_destroy(&process_info->lock);
1348 kfree(process_info);
1349 }
1350}
1351
1352void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1353{
1354 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1355 struct amdgpu_vm *avm;
1356
1357 if (WARN_ON(!kgd || !drm_priv))
1358 return;
1359
1360 avm = drm_priv_to_vm(drm_priv);
1361
1362 pr_debug("Releasing process vm %p\n", avm);
1363
	/* The original pasid of the amdgpu vm has already been
	 * released when the vm was converted to a compute vm.
	 * The current pasid is managed by KFD and will be
	 * released on KFD process destroy. Set the amdgpu pasid
	 * to 0 to avoid a duplicate release.
	 */
1370 amdgpu_vm_release_compute(adev, avm);
1371}
1372
1373uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1374{
1375 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1376 struct amdgpu_bo *pd = avm->root.bo;
1377 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1378
1379 if (adev->asic_type < CHIP_VEGA10)
1380 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1381 return avm->pd_phys_addr;
1382}
1383
1384int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1385 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1386 void *drm_priv, struct kgd_mem **mem,
1387 uint64_t *offset, uint32_t flags)
1388{
1389 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1390 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1391 enum ttm_bo_type bo_type = ttm_bo_type_device;
1392 struct sg_table *sg = NULL;
1393 uint64_t user_addr = 0;
1394 struct amdgpu_bo *bo;
1395 struct drm_gem_object *gobj;
1396 u32 domain, alloc_domain;
1397 u64 alloc_flags;
1398 int ret;
1399
1400 /*
1401 * Check on which domain to allocate BO
1402 */
1403 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1404 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1405 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1406 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1407 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1408 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1409 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1410 alloc_flags = 0;
1411 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1412 domain = AMDGPU_GEM_DOMAIN_GTT;
1413 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1414 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1415 if (!offset || !*offset)
1416 return -EINVAL;
1417 user_addr = untagged_addr(*offset);
1418 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1419 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1420 domain = AMDGPU_GEM_DOMAIN_GTT;
1421 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1422 bo_type = ttm_bo_type_sg;
1423 alloc_flags = 0;
1424 if (size > UINT_MAX)
1425 return -EINVAL;
1426 sg = create_doorbell_sg(*offset, size);
1427 if (!sg)
1428 return -ENOMEM;
1429 } else {
1430 return -EINVAL;
1431 }
1432
1433 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1434 if (!*mem) {
1435 ret = -ENOMEM;
1436 goto err;
1437 }
1438 INIT_LIST_HEAD(&(*mem)->attachments);
1439 mutex_init(&(*mem)->lock);
1440 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1441
1442 /* Workaround for AQL queue wraparound bug. Map the same
1443 * memory twice. That means we only actually allocate half
1444 * the memory.
1445 */
1446 if ((*mem)->aql_queue)
1447 size = size >> 1;
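	/* Example (illustrative): a 4 MiB AQL request ends up as a 2 MiB BO
	 * that kfd_mem_attach() maps twice, at va and at va + 2 MiB.
	 */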
1448
1449 (*mem)->alloc_flags = flags;
1450
1451 amdgpu_sync_create(&(*mem)->sync);
1452
1453 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1454 if (ret) {
1455 pr_debug("Insufficient memory\n");
1456 goto err_reserve_limit;
1457 }
1458
1459 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1460 va, size, domain_string(alloc_domain));
1461
1462 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1463 bo_type, NULL, &gobj);
1464 if (ret) {
1465 pr_debug("Failed to create BO on domain %s. ret %d\n",
1466 domain_string(alloc_domain), ret);
1467 goto err_bo_create;
1468 }
1469 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1470 if (ret) {
1471 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1472 goto err_node_allow;
1473 }
1474 bo = gem_to_amdgpu_bo(gobj);
1475 if (bo_type == ttm_bo_type_sg) {
1476 bo->tbo.sg = sg;
1477 bo->tbo.ttm->sg = sg;
1478 }
1479 bo->kfd_bo = *mem;
1480 (*mem)->bo = bo;
1481 if (user_addr)
1482 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1483
1484 (*mem)->va = va;
1485 (*mem)->domain = domain;
1486 (*mem)->mapped_to_gpu_memory = 0;
1487 (*mem)->process_info = avm->process_info;
1488 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1489
1490 if (user_addr) {
1491 ret = init_user_pages(*mem, user_addr);
1492 if (ret)
1493 goto allocate_init_user_pages_failed;
1494 }
1495
1496 if (offset)
1497 *offset = amdgpu_bo_mmap_offset(bo);
1498
1499 return 0;
1500
1501allocate_init_user_pages_failed:
1502 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1503 drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1504err_node_allow:
1505 amdgpu_bo_unref(&bo);
1506 /* Don't unreserve system mem limit twice */
1507 goto err_reserve_limit;
1508err_bo_create:
1509 unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1510err_reserve_limit:
1511 mutex_destroy(&(*mem)->lock);
1512 kfree(*mem);
1513err:
1514 if (sg) {
1515 sg_free_table(sg);
1516 kfree(sg);
1517 }
1518 return ret;
1519}
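/*
 * Hypothetical call sequence (illustrative only; the local variable names
 * are made up): allocate VRAM and map it on one GPU with
 *
 *	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, drm_priv,
 *			&mem, NULL,
 *			KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv,
 *				NULL);
 *
 * For USERPTR allocations *offset must carry the user VA on input; for
 * VRAM/GTT it returns the mmap offset on output.
 */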
1520
1521int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1522 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1523 uint64_t *size)
1524{
1525 struct amdkfd_process_info *process_info = mem->process_info;
1526 unsigned long bo_size = mem->bo->tbo.base.size;
1527 struct kfd_mem_attachment *entry, *tmp;
1528 struct bo_vm_reservation_context ctx;
1529 struct ttm_validate_buffer *bo_list_entry;
1530 unsigned int mapped_to_gpu_memory;
1531 int ret;
1532 bool is_imported = false;
1533
1534 mutex_lock(&mem->lock);
1535 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1536 is_imported = mem->is_imported;
1537 mutex_unlock(&mem->lock);
1538 /* lock is not needed after this, since mem is unused and will
1539 * be freed anyway
1540 */
1541
1542 if (mapped_to_gpu_memory > 0) {
1543 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1544 mem->va, bo_size);
1545 return -EBUSY;
1546 }
1547
1548 /* Make sure restore workers don't access the BO any more */
1549 bo_list_entry = &mem->validate_list;
1550 mutex_lock(&process_info->lock);
1551 list_del(&bo_list_entry->head);
1552 mutex_unlock(&process_info->lock);
1553
1554 /* No more MMU notifiers */
1555 amdgpu_mn_unregister(mem->bo);
1556
1557 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1558 if (unlikely(ret))
1559 return ret;
1560
1561 /* The eviction fence should be removed by the last unmap.
1562 * TODO: Log an error condition if the bo still has the eviction fence
1563 * attached
1564 */
1565 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1566 process_info->eviction_fence);
1567 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1568 mem->va + bo_size * (1 + mem->aql_queue));
1569
1570 ret = unreserve_bo_and_vms(&ctx, false, false);
1571
1572 /* Remove from VM internal data structures */
1573 list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1574 kfd_mem_detach(entry);
1575
1576 /* Free the sync object */
1577 amdgpu_sync_free(&mem->sync);
1578
1579 /* If the SG is not NULL, it's one we created for a doorbell or mmio
1580 * remap BO. We need to free it.
1581 */
1582 if (mem->bo->tbo.sg) {
1583 sg_free_table(mem->bo->tbo.sg);
1584 kfree(mem->bo->tbo.sg);
1585 }
1586
1587 /* Update the size of the BO being freed if it was allocated from
1588 * VRAM and is not imported.
1589 */
1590 if (size) {
1591 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1592 (!is_imported))
1593 *size = bo_size;
1594 else
1595 *size = 0;
1596 }
1597
	/* Free the BO */
1599 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1600 if (mem->dmabuf)
1601 dma_buf_put(mem->dmabuf);
1602 drm_gem_object_put(&mem->bo->tbo.base);
1603 mutex_destroy(&mem->lock);
1604 kfree(mem);
1605
1606 return ret;
1607}
1608
1609int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1610 struct kgd_dev *kgd, struct kgd_mem *mem,
1611 void *drm_priv, bool *table_freed)
1612{
1613 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1614 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1615 int ret;
1616 struct amdgpu_bo *bo;
1617 uint32_t domain;
1618 struct kfd_mem_attachment *entry;
1619 struct bo_vm_reservation_context ctx;
1620 unsigned long bo_size;
1621 bool is_invalid_userptr = false;
1622
1623 bo = mem->bo;
1624 if (!bo) {
1625 pr_err("Invalid BO when mapping memory to GPU\n");
1626 return -EINVAL;
1627 }
1628
	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping.
	 */
1633 mutex_lock(&mem->process_info->lock);
1634
1635 /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1636 * sure that the MMU notifier is no longer running
1637 * concurrently and the queues are actually stopped
1638 */
1639 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1640 mmap_write_lock(current->mm);
1641 is_invalid_userptr = atomic_read(&mem->invalid);
1642 mmap_write_unlock(current->mm);
1643 }
1644
1645 mutex_lock(&mem->lock);
1646
1647 domain = mem->domain;
1648 bo_size = bo->tbo.base.size;
1649
1650 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1651 mem->va,
1652 mem->va + bo_size * (1 + mem->aql_queue),
1653 avm, domain_string(domain));
1654
1655 if (!kfd_mem_is_attached(avm, mem)) {
1656 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1657 if (ret)
1658 goto out;
1659 }
1660
1661 ret = reserve_bo_and_vm(mem, avm, &ctx);
1662 if (unlikely(ret))
1663 goto out;
1664
1665 /* Userptr can be marked as "not invalid", but not actually be
1666 * validated yet (still in the system domain). In that case
1667 * the queues are still stopped and we can leave mapping for
1668 * the next restore worker
1669 */
1670 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1671 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1672 is_invalid_userptr = true;
1673
1674 ret = vm_validate_pt_pd_bos(avm);
1675 if (unlikely(ret))
1676 goto out_unreserve;
1677
1678 if (mem->mapped_to_gpu_memory == 0 &&
1679 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1680 /* Validate BO only once. The eviction fence gets added to BO
1681 * the first time it is mapped. Validate will wait for all
1682 * background evictions to complete.
1683 */
1684 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1685 if (ret) {
1686 pr_debug("Validate failed\n");
1687 goto out_unreserve;
1688 }
1689 }
1690
1691 list_for_each_entry(entry, &mem->attachments, list) {
1692 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1693 continue;
1694
1695 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1696 entry->va, entry->va + bo_size, entry);
1697
1698 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1699 is_invalid_userptr, table_freed);
1700 if (ret) {
1701 pr_err("Failed to map bo to gpuvm\n");
1702 goto out_unreserve;
1703 }
1704
1705 ret = vm_update_pds(avm, ctx.sync);
1706 if (ret) {
1707 pr_err("Failed to update page directories\n");
1708 goto out_unreserve;
1709 }
1710
1711 entry->is_mapped = true;
1712 mem->mapped_to_gpu_memory++;
1713 pr_debug("\t INC mapping count %d\n",
1714 mem->mapped_to_gpu_memory);
1715 }
1716
1717 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1718 amdgpu_bo_fence(bo,
1719 &avm->process_info->eviction_fence->base,
1720 true);
1721 ret = unreserve_bo_and_vms(&ctx, false, false);
1722
	/* Skip the TLB flush only on Aldebaran; force it on other
	 * ASICs to work around regressions.
	 */
1726 if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
1727 *table_freed = true;
1728
1729 goto out;
1730
1731out_unreserve:
1732 unreserve_bo_and_vms(&ctx, false, false);
1733out:
1734 mutex_unlock(&mem->process_info->lock);
1735 mutex_unlock(&mem->lock);
1736 return ret;
1737}
1738
1739int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1740 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1741{
1742 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1743 struct amdkfd_process_info *process_info = avm->process_info;
1744 unsigned long bo_size = mem->bo->tbo.base.size;
1745 struct kfd_mem_attachment *entry;
1746 struct bo_vm_reservation_context ctx;
1747 int ret;
1748
1749 mutex_lock(&mem->lock);
1750
1751 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1752 if (unlikely(ret))
1753 goto out;
1754 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1755 if (ctx.n_vms == 0) {
1756 ret = -EINVAL;
1757 goto unreserve_out;
1758 }
1759
1760 ret = vm_validate_pt_pd_bos(avm);
1761 if (unlikely(ret))
1762 goto unreserve_out;
1763
1764 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1765 mem->va,
1766 mem->va + bo_size * (1 + mem->aql_queue),
1767 avm);
1768
1769 list_for_each_entry(entry, &mem->attachments, list) {
1770 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1771 continue;
1772
1773 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1774 entry->va, entry->va + bo_size, entry);
1775
1776 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1777 entry->is_mapped = false;
1778
1779 mem->mapped_to_gpu_memory--;
1780 pr_debug("\t DEC mapping count %d\n",
1781 mem->mapped_to_gpu_memory);
1782 }
1783
1784 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1785 * required.
1786 */
1787 if (mem->mapped_to_gpu_memory == 0 &&
1788 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1789 !mem->bo->tbo.pin_count)
1790 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1791 process_info->eviction_fence);
1792
1793unreserve_out:
1794 unreserve_bo_and_vms(&ctx, false, false);
1795out:
1796 mutex_unlock(&mem->lock);
1797 return ret;
1798}
1799
1800int amdgpu_amdkfd_gpuvm_sync_memory(
1801 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1802{
1803 struct amdgpu_sync sync;
1804 int ret;
1805
1806 amdgpu_sync_create(&sync);
1807
1808 mutex_lock(&mem->lock);
1809 amdgpu_sync_clone(&mem->sync, &sync);
1810 mutex_unlock(&mem->lock);
1811
1812 ret = amdgpu_sync_wait(&sync, intr);
1813 amdgpu_sync_free(&sync);
1814 return ret;
1815}
1816
1817int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1818 struct kgd_mem *mem, void **kptr, uint64_t *size)
1819{
1820 int ret;
1821 struct amdgpu_bo *bo = mem->bo;
1822
1823 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1824 pr_err("userptr can't be mapped to kernel\n");
1825 return -EINVAL;
1826 }
1827
	/* Remove kgd_mem from the kfd_bo_list so this BO is not
	 * re-validated when BOs are restored after an eviction.
	 */
1831 mutex_lock(&mem->process_info->lock);
1832
1833 ret = amdgpu_bo_reserve(bo, true);
1834 if (ret) {
1835 pr_err("Failed to reserve bo. ret %d\n", ret);
1836 goto bo_reserve_failed;
1837 }
1838
1839 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1840 if (ret) {
1841 pr_err("Failed to pin bo. ret %d\n", ret);
1842 goto pin_failed;
1843 }
1844
1845 ret = amdgpu_bo_kmap(bo, kptr);
1846 if (ret) {
1847 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1848 goto kmap_failed;
1849 }
1850
1851 amdgpu_amdkfd_remove_eviction_fence(
1852 bo, mem->process_info->eviction_fence);
1853 list_del_init(&mem->validate_list.head);
1854
1855 if (size)
1856 *size = amdgpu_bo_size(bo);
1857
1858 amdgpu_bo_unreserve(bo);
1859
1860 mutex_unlock(&mem->process_info->lock);
1861 return 0;
1862
1863kmap_failed:
1864 amdgpu_bo_unpin(bo);
1865pin_failed:
1866 amdgpu_bo_unreserve(bo);
1867bo_reserve_failed:
1868 mutex_unlock(&mem->process_info->lock);
1869
1870 return ret;
1871}
1872
1873int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1874 struct kfd_vm_fault_info *mem)
1875{
1876 struct amdgpu_device *adev;
1877
1878 adev = (struct amdgpu_device *)kgd;
1879 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1880 *mem = *adev->gmc.vm_fault_info;
1881 mb();
1882 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1883 }
1884 return 0;
1885}
1886
1887int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1888 struct dma_buf *dma_buf,
1889 uint64_t va, void *drm_priv,
1890 struct kgd_mem **mem, uint64_t *size,
1891 uint64_t *mmap_offset)
1892{
1893 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1894 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1895 struct drm_gem_object *obj;
1896 struct amdgpu_bo *bo;
1897 int ret;
1898
1899 if (dma_buf->ops != &amdgpu_dmabuf_ops)
1900 /* Can't handle non-graphics buffers */
1901 return -EINVAL;
1902
1903 obj = dma_buf->priv;
1904 if (drm_to_adev(obj->dev) != adev)
1905 /* Can't handle buffers from other devices */
1906 return -EINVAL;
1907
1908 bo = gem_to_amdgpu_bo(obj);
1909 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1910 AMDGPU_GEM_DOMAIN_GTT)))
1911 /* Only VRAM and GTT BOs are supported */
1912 return -EINVAL;
1913
1914 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1915 if (!*mem)
1916 return -ENOMEM;
1917
1918 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1919 if (ret) {
		kfree(*mem);	/* free the kgd_mem allocated above, not the caller's pointer */
1921 return ret;
1922 }
1923
1924 if (size)
1925 *size = amdgpu_bo_size(bo);
1926
1927 if (mmap_offset)
1928 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1929
1930 INIT_LIST_HEAD(&(*mem)->attachments);
1931 mutex_init(&(*mem)->lock);
1932
1933 (*mem)->alloc_flags =
1934 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1935 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1936 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1937 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1938
1939 drm_gem_object_get(&bo->tbo.base);
1940 (*mem)->bo = bo;
1941 (*mem)->va = va;
1942 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1943 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1944 (*mem)->mapped_to_gpu_memory = 0;
1945 (*mem)->process_info = avm->process_info;
1946 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1947 amdgpu_sync_create(&(*mem)->sync);
1948 (*mem)->is_imported = true;
1949
1950 return 0;
1951}
1952
1953/* Evict a userptr BO by stopping the queues if necessary
1954 *
1955 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1956 * cannot do any memory allocations, and cannot take any locks that
1957 * are held elsewhere while allocating memory. Therefore this is as
1958 * simple as possible, using atomic counters.
1959 *
1960 * It doesn't do anything to the BO itself. The real work happens in
1961 * restore, where we get updated page addresses. This function only
1962 * ensures that GPU access to the BO is stopped.
1963 */
1964int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1965 struct mm_struct *mm)
1966{
1967 struct amdkfd_process_info *process_info = mem->process_info;
1968 int evicted_bos;
1969 int r = 0;
1970
1971 atomic_inc(&mem->invalid);
1972 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1973 if (evicted_bos == 1) {
1974 /* First eviction, stop the queues */
1975 r = kgd2kfd_quiesce_mm(mm);
1976 if (r)
1977 pr_err("Failed to quiesce KFD\n");
1978 schedule_delayed_work(&process_info->restore_userptr_work,
1979 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1980 }
1981
1982 return r;
1983}
1984
1985/* Update invalid userptr BOs
1986 *
1987 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1988 * userptr_inval_list and updates user pages for all BOs that have
1989 * been invalidated since their last update.
1990 */
1991static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1992 struct mm_struct *mm)
1993{
1994 struct kgd_mem *mem, *tmp_mem;
1995 struct amdgpu_bo *bo;
1996 struct ttm_operation_ctx ctx = { false, false };
1997 int invalid, ret;
1998
1999 /* Move all invalidated BOs to the userptr_inval_list and
2000 * release their user pages by migrating them to the CPU domain.
2001 */
2002 list_for_each_entry_safe(mem, tmp_mem,
2003 &process_info->userptr_valid_list,
2004 validate_list.head) {
2005 if (!atomic_read(&mem->invalid))
2006 continue; /* BO is still valid */
2007
2008 bo = mem->bo;
2009
2010 if (amdgpu_bo_reserve(bo, true))
2011 return -EAGAIN;
2012 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2013 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2014 amdgpu_bo_unreserve(bo);
2015 if (ret) {
2016 pr_err("%s: Failed to invalidate userptr BO\n",
2017 __func__);
2018 return -EAGAIN;
2019 }
2020
2021 list_move_tail(&mem->validate_list.head,
2022 &process_info->userptr_inval_list);
2023 }
2024
2025 if (list_empty(&process_info->userptr_inval_list))
2026 return 0; /* All evicted userptr BOs were freed */
2027
2028 /* Go through userptr_inval_list and update any invalid user_pages */
2029 list_for_each_entry(mem, &process_info->userptr_inval_list,
2030 validate_list.head) {
2031 invalid = atomic_read(&mem->invalid);
2032 if (!invalid)
2033 /* BO hasn't been invalidated since the last
2034 * revalidation attempt. Keep it on its current list.
2035 */
2036 continue;
2037
2038 bo = mem->bo;
2039
2040 /* Get updated user pages */
2041 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2042 if (ret) {
2043 pr_debug("%s: Failed to get user pages: %d\n",
2044 __func__, ret);
2045
2046 /* Return error -EBUSY or -ENOMEM, retry restore */
2047 return ret;
2048 }
2049
2050 /*
2051 * FIXME: Cannot ignore the return code, must hold
2052 * notifier_lock
2053 */
2054 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2055
2056 /* Mark the BO as valid unless it was invalidated
2057 * again concurrently.
2058 */
2059 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2060 return -EAGAIN;
2061 }
2062
2063 return 0;
2064}
2065
2066/* Validate invalid userptr BOs
2067 *
2068 * Validates BOs on the userptr_inval_list, and moves them back to the
2069 * userptr_valid_list. Also updates GPUVM page tables with new page
2070 * addresses and waits for the page table updates to complete.
2071 */
2072static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2073{
2074 struct amdgpu_bo_list_entry *pd_bo_list_entries;
2075 struct list_head resv_list, duplicates;
2076 struct ww_acquire_ctx ticket;
2077 struct amdgpu_sync sync;
2078
2079 struct amdgpu_vm *peer_vm;
2080 struct kgd_mem *mem, *tmp_mem;
2081 struct amdgpu_bo *bo;
2082 struct ttm_operation_ctx ctx = { false, false };
2083 int i, ret;
2084
2085 pd_bo_list_entries = kcalloc(process_info->n_vms,
2086 sizeof(struct amdgpu_bo_list_entry),
2087 GFP_KERNEL);
2088 if (!pd_bo_list_entries) {
2089 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2090 ret = -ENOMEM;
2091 goto out_no_mem;
2092 }
2093
2094 INIT_LIST_HEAD(&resv_list);
2095 INIT_LIST_HEAD(&duplicates);
2096
2097 /* Get all the page directory BOs that need to be reserved */
2098 i = 0;
2099 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2100 vm_list_node)
2101 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2102 &pd_bo_list_entries[i++]);
2103 /* Add the userptr_inval_list entries to resv_list */
2104 list_for_each_entry(mem, &process_info->userptr_inval_list,
2105 validate_list.head) {
2106 list_add_tail(&mem->resv_list.head, &resv_list);
2107 mem->resv_list.bo = mem->validate_list.bo;
2108 mem->resv_list.num_shared = mem->validate_list.num_shared;
2109 }
2110
2111 /* Reserve all BOs and page tables for validation */
2112 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2113 WARN(!list_empty(&duplicates), "Duplicates should be empty");
2114 if (ret)
2115 goto out_free;
2116
2117 amdgpu_sync_create(&sync);
2118
2119 ret = process_validate_vms(process_info);
2120 if (ret)
2121 goto unreserve_out;
2122
2123 /* Validate BOs and update GPUVM page tables */
2124 list_for_each_entry_safe(mem, tmp_mem,
2125 &process_info->userptr_inval_list,
2126 validate_list.head) {
2127 struct kfd_mem_attachment *attachment;
2128
2129 bo = mem->bo;
2130
2131 /* Validate the BO if we got user pages */
2132 if (bo->tbo.ttm->pages[0]) {
2133 amdgpu_bo_placement_from_domain(bo, mem->domain);
2134 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2135 if (ret) {
2136 pr_err("%s: failed to validate BO\n", __func__);
2137 goto unreserve_out;
2138 }
2139 }
2140
2141 list_move_tail(&mem->validate_list.head,
2142 &process_info->userptr_valid_list);
2143
2144 /* Update mapping. If the BO was not validated
2145 * (because we couldn't get user pages), this will
2146 * clear the page table entries, which will result in
2147 * VM faults if the GPU tries to access the invalid
2148 * memory.
2149 */
2150 list_for_each_entry(attachment, &mem->attachments, list) {
2151 if (!attachment->is_mapped)
2152 continue;
2153
2154 kfd_mem_dmaunmap_attachment(mem, attachment);
2155 ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2156 if (ret) {
2157 pr_err("%s: update PTE failed\n", __func__);
2158 /* make sure this gets validated again */
2159 atomic_inc(&mem->invalid);
2160 goto unreserve_out;
2161 }
2162 }
2163 }
2164
2165 /* Update page directories */
2166 ret = process_update_pds(process_info, &sync);
2167
2168unreserve_out:
2169 ttm_eu_backoff_reservation(&ticket, &resv_list);
2170 amdgpu_sync_wait(&sync, false);
2171 amdgpu_sync_free(&sync);
2172out_free:
2173 kfree(pd_bo_list_entries);
2174out_no_mem:
2175
2176 return ret;
2177}
2178
2179/* Worker callback to restore evicted userptr BOs
2180 *
2181 * Tries to update and validate all userptr BOs. If successful and no
2182 * concurrent evictions happened, the queues are restarted. Otherwise,
2183 * reschedule for another attempt later.
2184 */
2185static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2186{
2187 struct delayed_work *dwork = to_delayed_work(work);
2188 struct amdkfd_process_info *process_info =
2189 container_of(dwork, struct amdkfd_process_info,
2190 restore_userptr_work);
2191 struct task_struct *usertask;
2192 struct mm_struct *mm;
2193 int evicted_bos;
2194
2195 evicted_bos = atomic_read(&process_info->evicted_bos);
2196 if (!evicted_bos)
2197 return;
2198
2199 /* Reference task and mm in case of concurrent process termination */
2200 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2201 if (!usertask)
2202 return;
2203 mm = get_task_mm(usertask);
2204 if (!mm) {
2205 put_task_struct(usertask);
2206 return;
2207 }
2208
2209 mutex_lock(&process_info->lock);
2210
2211 if (update_invalid_user_pages(process_info, mm))
2212 goto unlock_out;
2213 /* userptr_inval_list can be empty if all evicted userptr BOs
2214 * have been freed. In that case there is nothing to validate
2215 * and we can just restart the queues.
2216 */
2217 if (!list_empty(&process_info->userptr_inval_list)) {
2218 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2219 goto unlock_out; /* Concurrent eviction, try again */
2220
2221 if (validate_invalid_user_pages(process_info))
2222 goto unlock_out;
2223 }
2224 /* Final check for concurrent eviction and atomic update. If
2225 * another eviction happens after the successful update, it will
2226 * count as a first eviction that calls quiesce_mm. The eviction
2227 * reference counting inside KFD will handle this case.
2228 */
2229 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2230 evicted_bos)
2231 goto unlock_out;
2232 evicted_bos = 0;
2233 if (kgd2kfd_resume_mm(mm)) {
2234 pr_err("%s: Failed to resume KFD\n", __func__);
2235 /* No recovery from this failure. Probably the CP is
2236 * hanging. No point trying again.
2237 */
2238 }
2239
2240unlock_out:
2241 mutex_unlock(&process_info->lock);
2242 mmput(mm);
2243 put_task_struct(usertask);
2244
2245 /* If validation failed, reschedule another attempt */
2246 if (evicted_bos)
2247 schedule_delayed_work(&process_info->restore_userptr_work,
2248 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2249}
2250
2251 /**
 * amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
2253 *
2254 * @process_info: amdkfd_process_info of the KFD process
 * @ef: returns a reference to the new eviction fence attached to the restored BOs
2255 *
2256 * After memory eviction, the restore thread calls this function. The function
2257 * should be called while the process is still valid. BO restore involves:
2258 *
2259 * 1. Release the old eviction fence and create a new one
2260 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2261 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2262 * BOs that need to be reserved.
2263 * 4. Reserve all the BOs
2264 * 5. Validate PD and PT BOs.
2265 * 6. Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2266 * 7. Add the fence to all PD and PT BOs.
2267 * 8. Unreserve all BOs
2268 */
2269int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2270{
2271 struct amdgpu_bo_list_entry *pd_bo_list;
2272 struct amdkfd_process_info *process_info = info;
2273 struct amdgpu_vm *peer_vm;
2274 struct kgd_mem *mem;
2275 struct bo_vm_reservation_context ctx;
2276 struct amdgpu_amdkfd_fence *new_fence;
2277 int ret = 0, i;
2278 struct list_head duplicate_save;
2279 struct amdgpu_sync sync_obj;
2280 unsigned long failed_size = 0;
2281 unsigned long total_size = 0;
2282
2283 INIT_LIST_HEAD(&duplicate_save);
2284 INIT_LIST_HEAD(&ctx.list);
2285 INIT_LIST_HEAD(&ctx.duplicates);
2286
2287 pd_bo_list = kcalloc(process_info->n_vms,
2288 sizeof(struct amdgpu_bo_list_entry),
2289 GFP_KERNEL);
2290 if (!pd_bo_list)
2291 return -ENOMEM;
2292
2293 i = 0;
2294 mutex_lock(&process_info->lock);
2295 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2296 vm_list_node)
2297 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2298
2299 /* Reserve all BOs and page tables/directory. Add all BOs from
2300 * kfd_bo_list to ctx.list
2301 */
2302 list_for_each_entry(mem, &process_info->kfd_bo_list,
2303 validate_list.head) {
2304
2305 list_add_tail(&mem->resv_list.head, &ctx.list);
2306 mem->resv_list.bo = mem->validate_list.bo;
2307 mem->resv_list.num_shared = mem->validate_list.num_shared;
2308 }
2309
2310 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2311 false, &duplicate_save);
2312 if (ret) {
2313 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2314 goto ttm_reserve_fail;
2315 }
2316
2317 amdgpu_sync_create(&sync_obj);
2318
2319 /* Validate PDs and PTs */
2320 ret = process_validate_vms(process_info);
2321 if (ret)
2322 goto validate_map_fail;
2323
2324 ret = process_sync_pds_resv(process_info, &sync_obj);
2325 if (ret) {
2326 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2327 goto validate_map_fail;
2328 }
2329
2330 /* Validate BOs and map them to GPUVM (update VM page tables). */
2331 list_for_each_entry(mem, &process_info->kfd_bo_list,
2332 validate_list.head) {
2333
2334 struct amdgpu_bo *bo = mem->bo;
2335 uint32_t domain = mem->domain;
2336 struct kfd_mem_attachment *attachment;
2337
2338 total_size += amdgpu_bo_size(bo);
2339
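 /* Try the BO's preferred domain first; if that fails
 * (e.g. VRAM is exhausted), fall back to GTT so the
 * restore can still make progress.
 */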
2340 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2341 if (ret) {
2342 pr_debug("Memory eviction: Validate BOs failed\n");
2343 failed_size += amdgpu_bo_size(bo);
2344 ret = amdgpu_amdkfd_bo_validate(bo,
2345 AMDGPU_GEM_DOMAIN_GTT, false);
2346 if (ret) {
2347 pr_debug("Memory eviction: Try again\n");
2348 goto validate_map_fail;
2349 }
2350 }
2351 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2352 if (ret) {
2353 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2354 goto validate_map_fail;
2355 }
2356 list_for_each_entry(attachment, &mem->attachments, list) {
2357 if (!attachment->is_mapped)
2358 continue;
2359
2360 kfd_mem_dmaunmap_attachment(mem, attachment);
2361 ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2362 if (ret) {
2363 pr_debug("Memory eviction: update PTE failed. Try again\n");
2364 goto validate_map_fail;
2365 }
2366 }
2367 }
2368
2369 if (failed_size)
2370 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2371
2372 /* Update page directories */
2373 ret = process_update_pds(process_info, &sync_obj);
2374 if (ret) {
2375 pr_debug("Memory eviction: update PDs failed. Try again\n");
2376 goto validate_map_fail;
2377 }
2378
2379 /* Wait for validate and PT updates to finish */
2380 amdgpu_sync_wait(&sync_obj, false);
2381
2382 /* Release the old eviction fence and create a new one. Because a fence
2383 * only goes from unsignaled to signaled, it cannot be reused.
2384 * Use the context and mm from the old fence.
2385 */
2386 new_fence = amdgpu_amdkfd_fence_create(
2387 process_info->eviction_fence->base.context,
2388 process_info->eviction_fence->mm,
2389 NULL);
2390 if (!new_fence) {
2391 pr_err("Failed to create eviction fence\n");
2392 ret = -ENOMEM;
2393 goto validate_map_fail;
2394 }
2395 dma_fence_put(&process_info->eviction_fence->base);
2396 process_info->eviction_fence = new_fence;
2397 *ef = dma_fence_get(&new_fence->base);
2398
2399 /* Attach new eviction fence to all BOs */
2400 list_for_each_entry(mem, &process_info->kfd_bo_list,
2401 validate_list.head)
2402 amdgpu_bo_fence(mem->bo,
2403 &process_info->eviction_fence->base, true);
2404
2405 /* Attach eviction fence to PD / PT BOs */
2406 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2407 vm_list_node) {
2408 struct amdgpu_bo *bo = peer_vm->root.bo;
2409
2410 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2411 }
2412
2413validate_map_fail:
2414 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2415 amdgpu_sync_free(&sync_obj);
2416ttm_reserve_fail:
2417 mutex_unlock(&process_info->lock);
2418 kfree(pd_bo_list);
2419 return ret;
2420}
2421
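/* Create a kgd_mem wrapper for the GWS BO and add it to the process.
 *
 * The GWS BO is validated the first time it is added and fenced with
 * the process eviction fence; on failure the partially initialized
 * kgd_mem is torn down again.
 */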
2422int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2423{
2424 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2425 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2426 int ret;
2427
2428 if (!info || !gws)
2429 return -EINVAL;
2430
2431 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2432 if (!*mem)
2433 return -ENOMEM;
2434
2435 mutex_init(&(*mem)->lock);
2436 INIT_LIST_HEAD(&(*mem)->attachments);
2437 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2438 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2439 (*mem)->process_info = process_info;
2440 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2441 amdgpu_sync_create(&(*mem)->sync);
2442
2444 /* Validate the GWS BO the first time it is added to the process */
2445 mutex_lock(&(*mem)->process_info->lock);
2446 ret = amdgpu_bo_reserve(gws_bo, false);
2447 if (unlikely(ret)) {
2448 pr_err("Reserve gws bo failed %d\n", ret);
2449 goto bo_reservation_failure;
2450 }
2451
2452 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2453 if (ret) {
2454 pr_err("GWS BO validate failed %d\n", ret);
2455 goto bo_validation_failure;
2456 }
2457 /* The GWS resource is shared between amdgpu and amdkfd.
2458 * Add the process eviction fence to the BO so they can
2459 * evict each other.
2460 */
2461 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2462 if (ret)
2463 goto reserve_shared_fail;
2464 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2465 amdgpu_bo_unreserve(gws_bo);
2466 mutex_unlock(&(*mem)->process_info->lock);
2467
2468 return ret;
2469
2470reserve_shared_fail:
2471bo_validation_failure:
2472 amdgpu_bo_unreserve(gws_bo);
2473bo_reservation_failure:
2474 mutex_unlock(&(*mem)->process_info->lock);
2475 amdgpu_sync_free(&(*mem)->sync);
2476 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2477 amdgpu_bo_unref(&gws_bo);
2478 mutex_destroy(&(*mem)->lock);
2479 kfree(*mem);
2480 *mem = NULL;
2481 return ret;
2482}
2483
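/* Remove the GWS BO from the process and free its kgd_mem wrapper.
 *
 * The BO is taken off the KFD validate list first so the restore
 * worker cannot race with the teardown, then the eviction fence and
 * the BO reference are dropped.
 */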
2484int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2485{
2486 int ret;
2487 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2488 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2489 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2490
2491 /* Remove the BO from the process's validate list so the restore worker
2492 * won't touch it anymore.
2493 */
2494 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2495
2496 ret = amdgpu_bo_reserve(gws_bo, false);
2497 if (unlikely(ret)) {
2498 pr_err("Reserve gws bo failed %d\n", ret);
2499 //TODO add BO back to validate_list?
2500 return ret;
2501 }
2502 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2503 process_info->eviction_fence);
2504 amdgpu_bo_unreserve(gws_bo);
2505 amdgpu_sync_free(&kgd_mem->sync);
2506 amdgpu_bo_unref(&gws_bo);
2507 mutex_destroy(&kgd_mem->lock);
2508 kfree(mem);
2509 return 0;
2510}
2511
2512/* Returns GPU-specific tiling mode information */
2513int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2514 struct tile_config *config)
2515{
2516 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2517
2518 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2519 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2520 config->num_tile_configs =
2521 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2522 config->macro_tile_config_ptr =
2523 adev->gfx.config.macrotile_mode_array;
2524 config->num_macro_tile_configs =
2525 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2526
2527 /* Those values are not set from GFX9 onwards */
2528 config->num_banks = adev->gfx.config.num_banks;
2529 config->num_ranks = adev->gfx.config.num_ranks;
2530
2531 return 0;
2532}