1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/*
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include <linux/types.h>
25#include <linux/sched/task.h>
26#include <linux/dynamic_debug.h>
27#include <drm/ttm/ttm_tt.h>
28#include <drm/drm_exec.h>
29
30#include "amdgpu_sync.h"
31#include "amdgpu_object.h"
32#include "amdgpu_vm.h"
33#include "amdgpu_hmm.h"
34#include "amdgpu.h"
35#include "amdgpu_xgmi.h"
36#include "kfd_priv.h"
37#include "kfd_svm.h"
38#include "kfd_migrate.h"
39#include "kfd_smi_events.h"
40
41#ifdef dev_fmt
42#undef dev_fmt
43#endif
44#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
45
46#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
47
48/* Long enough to ensure no retry fault comes after svm range is restored and
49 * page table is updated.
50 */
51#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
52#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
53#define dynamic_svm_range_dump(svms) \
54 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
55#else
56#define dynamic_svm_range_dump(svms) \
57 do { if (0) svm_range_debug_dump(svms); } while (0)
58#endif
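/*
 * Usage note (illustrative, not from the original source): with
 * CONFIG_DYNAMIC_DEBUG the dump above is a dynamic-debug callsite registered
 * under the name "svm_range_dump", so it can typically be enabled at runtime
 * through the dynamic_debug control file, e.g.
 *   echo 'format "svm_range_dump" +p' > /sys/kernel/debug/dynamic_debug/control
 * (the exact match expression is an assumption and may vary by configuration).
 */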
59
60/* Giant svm ranges are split into smaller ranges based on this limit. It is
61 * the minimum across all dGPUs/APUs of 1/32 of the VRAM size, clamped to the
62 * range 2MB to 1GB and rounded down to a power of two.
63 */
64static uint64_t max_svm_range_pages;
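/*
 * Worked example (assumed hardware, for illustration only): a single dGPU with
 * 16 GB of VRAM gives 16 GB / 32 = 512 MB, which already lies in the 2MB-1GB
 * window and is a power of two, so max_svm_range_pages would be 0x20000 pages
 * (512 MB) and larger ranges would be split into 512 MB pieces.
 */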
65
66struct criu_svm_metadata {
67 struct list_head list;
68 struct kfd_criu_svm_range_priv_data data;
69};
70
71static void svm_range_evict_svm_bo_worker(struct work_struct *work);
72static bool
73svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
74 const struct mmu_notifier_range *range,
75 unsigned long cur_seq);
76static int
77svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
78 uint64_t *bo_s, uint64_t *bo_l);
79static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
80 .invalidate = svm_range_cpu_invalidate_pagetables,
81};
82
83/**
84 * svm_range_unlink - unlink svm_range from lists and interval tree
85 * @prange: svm range structure to be removed
86 *
87 * Remove the svm_range from the svms and svm_bo lists and the svms
88 * interval tree.
89 *
90 * Context: The caller must hold svms->lock
91 */
92static void svm_range_unlink(struct svm_range *prange)
93{
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 prange, prange->start, prange->last);
96
97 if (prange->svm_bo) {
98 spin_lock(&prange->svm_bo->list_lock);
99 list_del(&prange->svm_bo_list);
100 spin_unlock(&prange->svm_bo->list_lock);
101 }
102
103 list_del(&prange->list);
104 if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
106}
107
108static void
109svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
110{
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 prange, prange->start, prange->last);
113
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 prange->start << PAGE_SHIFT,
116 prange->npages << PAGE_SHIFT,
117 &svm_range_mn_ops);
118}
119
120/**
121 * svm_range_add_to_svms - add svm range to svms
122 * @prange: svm range structure to be added
123 *
124 * Add the svm range to the svms interval tree and linked list
125 *
126 * Context: The caller must hold svms->lock
127 */
128static void svm_range_add_to_svms(struct svm_range *prange)
129{
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 prange, prange->start, prange->last);
132
133 list_move_tail(&prange->list, &prange->svms->list);
134 prange->it_node.start = prange->start;
135 prange->it_node.last = prange->last;
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
137}
138
139static void svm_range_remove_notifier(struct svm_range *prange)
140{
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
143 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 prange->notifier.interval_tree.last >> PAGE_SHIFT);
145
146 if (prange->notifier.interval_tree.start != 0 &&
147 prange->notifier.interval_tree.last != 0)
148 mmu_interval_notifier_remove(&prange->notifier);
149}
150
151static bool
152svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
153{
154 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
155 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
156}
157
158static int
159svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
160 unsigned long offset, unsigned long npages,
161 unsigned long *hmm_pfns, uint32_t gpuidx)
162{
163 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
164 dma_addr_t *addr = prange->dma_addr[gpuidx];
165 struct device *dev = adev->dev;
166 struct page *page;
167 int i, r;
168
169 if (!addr) {
170 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
171 if (!addr)
172 return -ENOMEM;
173 prange->dma_addr[gpuidx] = addr;
174 }
175
176 addr += offset;
177 for (i = 0; i < npages; i++) {
178 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
179 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
180
181 page = hmm_pfn_to_page(hmm_pfns[i]);
182 if (is_zone_device_page(page)) {
183 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
184
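			/* Device-private (VRAM) page: translate the CPU-side pfn into a
			 * GPU physical address by subtracting the device pgmap range start
			 * and adding vm_manager.vram_base_offset, then tag the entry as VRAM.
			 */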
185 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
186 bo_adev->vm_manager.vram_base_offset -
187 bo_adev->kfd.pgmap.range.start;
188 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
189 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
190 continue;
191 }
192 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
193 r = dma_mapping_error(dev, addr[i]);
194 if (r) {
195 dev_err(dev, "failed %d dma_map_page\n", r);
196 return r;
197 }
198 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
199 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
200 }
201
202 return 0;
203}
204
205static int
206svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
207 unsigned long offset, unsigned long npages,
208 unsigned long *hmm_pfns)
209{
210 struct kfd_process *p;
211 uint32_t gpuidx;
212 int r = 0;
213
214 p = container_of(prange->svms, struct kfd_process, svms);
215
216 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
217 struct kfd_process_device *pdd;
218
219 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
220 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
221 if (!pdd) {
222 pr_debug("failed to find device idx %d\n", gpuidx);
223 return -EINVAL;
224 }
225
226 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
227 hmm_pfns, gpuidx);
228 if (r)
229 break;
230 }
231
232 return r;
233}
234
235void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
236 unsigned long offset, unsigned long npages)
237{
238 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
239 int i;
240
241 if (!dma_addr)
242 return;
243
244 for (i = offset; i < offset + npages; i++) {
245 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
246 continue;
247 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
248 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
249 dma_addr[i] = 0;
250 }
251}
252
253void svm_range_dma_unmap(struct svm_range *prange)
254{
255 struct kfd_process_device *pdd;
256 dma_addr_t *dma_addr;
257 struct device *dev;
258 struct kfd_process *p;
259 uint32_t gpuidx;
260
261 p = container_of(prange->svms, struct kfd_process, svms);
262
263 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
264 dma_addr = prange->dma_addr[gpuidx];
265 if (!dma_addr)
266 continue;
267
268 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
269 if (!pdd) {
270 pr_debug("failed to find device idx %d\n", gpuidx);
271 continue;
272 }
273 dev = &pdd->dev->adev->pdev->dev;
274
275 svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
276 }
277}
278
279static void svm_range_free(struct svm_range *prange, bool do_unmap)
280{
281 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
282 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
283 uint32_t gpuidx;
284
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 prange->start, prange->last);
287
288 svm_range_vram_node_free(prange);
289 if (do_unmap)
290 svm_range_dma_unmap(prange);
291
292 if (do_unmap && !p->xnack_enabled) {
293 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
294 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
295 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
296 }
297
298 /* free dma_addr array for each gpu */
299 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
300 if (prange->dma_addr[gpuidx]) {
301 kvfree(prange->dma_addr[gpuidx]);
302 prange->dma_addr[gpuidx] = NULL;
303 }
304 }
305
306 mutex_destroy(&prange->lock);
307 mutex_destroy(&prange->migrate_mutex);
308 kfree(prange);
309}
310
311static void
312svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
313 uint8_t *granularity, uint32_t *flags)
314{
315 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
316 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
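	/* granularity is log2(pages): 9 means 2^9 = 512 pages, i.e. 2MB with 4KB pages */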
317 *granularity = 9;
318 *flags =
319 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
320}
321
322static struct
323svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
324 uint64_t last, bool update_mem_usage)
325{
326 uint64_t size = last - start + 1;
327 struct svm_range *prange;
328 struct kfd_process *p;
329
330 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
331 if (!prange)
332 return NULL;
333
334 p = container_of(svms, struct kfd_process, svms);
335 if (!p->xnack_enabled && update_mem_usage &&
336 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
337 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
338 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
339 kfree(prange);
340 return NULL;
341 }
342 prange->npages = size;
343 prange->svms = svms;
344 prange->start = start;
345 prange->last = last;
346 INIT_LIST_HEAD(&prange->list);
347 INIT_LIST_HEAD(&prange->update_list);
348 INIT_LIST_HEAD(&prange->svm_bo_list);
349 INIT_LIST_HEAD(&prange->deferred_list);
350 INIT_LIST_HEAD(&prange->child_list);
351 atomic_set(&prange->invalid, 0);
352 prange->validate_timestamp = 0;
353 prange->vram_pages = 0;
354 mutex_init(&prange->migrate_mutex);
355 mutex_init(&prange->lock);
356
357 if (p->xnack_enabled)
358 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
359 MAX_GPU_INSTANCE);
360
361 svm_range_set_default_attributes(&prange->preferred_loc,
362 &prange->prefetch_loc,
363 &prange->granularity, &prange->flags);
364
365 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
366
367 return prange;
368}
369
370static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
371{
372 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
373 return false;
374
375 return true;
376}
377
378static void svm_range_bo_release(struct kref *kref)
379{
380 struct svm_range_bo *svm_bo;
381
382 svm_bo = container_of(kref, struct svm_range_bo, kref);
383 pr_debug("svm_bo 0x%p\n", svm_bo);
384
385 spin_lock(&svm_bo->list_lock);
386 while (!list_empty(&svm_bo->range_list)) {
387 struct svm_range *prange =
388 list_first_entry(&svm_bo->range_list,
389 struct svm_range, svm_bo_list);
390 /* list_del_init tells a concurrent svm_range_vram_node_new when
391 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
392 */
393 list_del_init(&prange->svm_bo_list);
394 spin_unlock(&svm_bo->list_lock);
395
396 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
397 prange->start, prange->last);
398 mutex_lock(&prange->lock);
399 prange->svm_bo = NULL;
400 /* prange should not hold vram page now */
401 WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
402 mutex_unlock(&prange->lock);
403
404 spin_lock(&svm_bo->list_lock);
405 }
406 spin_unlock(&svm_bo->list_lock);
407 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
408 /* We're not in the eviction worker. Signal the fence. */
409 dma_fence_signal(&svm_bo->eviction_fence->base);
410 dma_fence_put(&svm_bo->eviction_fence->base);
411 amdgpu_bo_unref(&svm_bo->bo);
412 kfree(svm_bo);
413}
414
415static void svm_range_bo_wq_release(struct work_struct *work)
416{
417 struct svm_range_bo *svm_bo;
418
419 svm_bo = container_of(work, struct svm_range_bo, release_work);
420 svm_range_bo_release(&svm_bo->kref);
421}
422
423static void svm_range_bo_release_async(struct kref *kref)
424{
425 struct svm_range_bo *svm_bo;
426
427 svm_bo = container_of(kref, struct svm_range_bo, kref);
428 pr_debug("svm_bo 0x%p\n", svm_bo);
429 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
430 schedule_work(&svm_bo->release_work);
431}
432
433void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
434{
435 kref_put(&svm_bo->kref, svm_range_bo_release_async);
436}
437
438static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
439{
440 if (svm_bo)
441 kref_put(&svm_bo->kref, svm_range_bo_release);
442}
443
444static bool
445svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
446{
447 mutex_lock(&prange->lock);
448 if (!prange->svm_bo) {
449 mutex_unlock(&prange->lock);
450 return false;
451 }
452 if (prange->ttm_res) {
453 /* We still have a reference, all is well */
454 mutex_unlock(&prange->lock);
455 return true;
456 }
457 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
458 /*
459 * Migrate from GPU to GPU, remove range from source svm_bo->node
460 * range list, and return false to allocate svm_bo from destination
461 * node.
462 */
463 if (prange->svm_bo->node != node) {
464 mutex_unlock(&prange->lock);
465
466 spin_lock(&prange->svm_bo->list_lock);
467 list_del_init(&prange->svm_bo_list);
468 spin_unlock(&prange->svm_bo->list_lock);
469
470 svm_range_bo_unref(prange->svm_bo);
471 return false;
472 }
473 if (READ_ONCE(prange->svm_bo->evicting)) {
474 struct dma_fence *f;
475 struct svm_range_bo *svm_bo;
476 /* The BO is getting evicted,
477 * we need to get a new one
478 */
479 mutex_unlock(&prange->lock);
480 svm_bo = prange->svm_bo;
481 f = dma_fence_get(&svm_bo->eviction_fence->base);
482 svm_range_bo_unref(prange->svm_bo);
483 /* wait for the fence to avoid long spin-loop
484 * at list_empty_careful
485 */
486 dma_fence_wait(f, false);
487 dma_fence_put(f);
488 } else {
489 /* The BO was still around and we got
490 * a new reference to it
491 */
492 mutex_unlock(&prange->lock);
493 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
494 prange->svms, prange->start, prange->last);
495
496 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
497 return true;
498 }
499
500 } else {
501 mutex_unlock(&prange->lock);
502 }
503
504 /* We need a new svm_bo. Spin-loop to wait for concurrent
505 * svm_range_bo_release to finish removing this range from
506 * its range list and set prange->svm_bo to null. After this,
507 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
508 */
509 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
510 cond_resched();
511
512 return false;
513}
514
515static struct svm_range_bo *svm_range_bo_new(void)
516{
517 struct svm_range_bo *svm_bo;
518
519 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
520 if (!svm_bo)
521 return NULL;
522
523 kref_init(&svm_bo->kref);
524 INIT_LIST_HEAD(&svm_bo->range_list);
525 spin_lock_init(&svm_bo->list_lock);
526
527 return svm_bo;
528}
529
530int
531svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
532 bool clear)
533{
534 struct amdgpu_bo_param bp;
535 struct svm_range_bo *svm_bo;
536 struct amdgpu_bo_user *ubo;
537 struct amdgpu_bo *bo;
538 struct kfd_process *p;
539 struct mm_struct *mm;
540 int r;
541
542 p = container_of(prange->svms, struct kfd_process, svms);
543 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
544 prange->start, prange->last);
545
546 if (svm_range_validate_svm_bo(node, prange))
547 return 0;
548
549 svm_bo = svm_range_bo_new();
550 if (!svm_bo) {
551 pr_debug("failed to alloc svm bo\n");
552 return -ENOMEM;
553 }
554 mm = get_task_mm(p->lead_thread);
555 if (!mm) {
556 pr_debug("failed to get mm\n");
557 kfree(svm_bo);
558 return -ESRCH;
559 }
560 svm_bo->node = node;
561 svm_bo->eviction_fence =
562 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
563 mm,
564 svm_bo);
565 mmput(mm);
566 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
567 svm_bo->evicting = 0;
568 memset(&bp, 0, sizeof(bp));
569 bp.size = prange->npages * PAGE_SIZE;
570 bp.byte_align = PAGE_SIZE;
571 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
572 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
573 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
574 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
575 bp.type = ttm_bo_type_device;
576 bp.resv = NULL;
577 if (node->xcp)
578 bp.xcp_id_plus1 = node->xcp->id + 1;
579
580 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
581 if (r) {
582 pr_debug("failed %d to create bo\n", r);
583 goto create_bo_failed;
584 }
585 bo = &ubo->bo;
586
587 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
588 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
589 bp.xcp_id_plus1 - 1);
590
591 r = amdgpu_bo_reserve(bo, true);
592 if (r) {
593 pr_debug("failed %d to reserve bo\n", r);
594 goto reserve_bo_failed;
595 }
596
597 if (clear) {
598 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
599 if (r) {
600 pr_debug("failed %d to sync bo\n", r);
601 amdgpu_bo_unreserve(bo);
602 goto reserve_bo_failed;
603 }
604 }
605
606 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
607 if (r) {
608 pr_debug("failed %d to reserve bo\n", r);
609 amdgpu_bo_unreserve(bo);
610 goto reserve_bo_failed;
611 }
612 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
613
614 amdgpu_bo_unreserve(bo);
615
616 svm_bo->bo = bo;
617 prange->svm_bo = svm_bo;
618 prange->ttm_res = bo->tbo.resource;
619 prange->offset = 0;
620
621 spin_lock(&svm_bo->list_lock);
622 list_add(&prange->svm_bo_list, &svm_bo->range_list);
623 spin_unlock(&svm_bo->list_lock);
624
625 return 0;
626
627reserve_bo_failed:
628 amdgpu_bo_unref(&bo);
629create_bo_failed:
630 dma_fence_put(&svm_bo->eviction_fence->base);
631 kfree(svm_bo);
632 prange->ttm_res = NULL;
633
634 return r;
635}
636
637void svm_range_vram_node_free(struct svm_range *prange)
638{
639 /* serialize prange->svm_bo unref */
640 mutex_lock(&prange->lock);
641 /* prange->svm_bo has not been unreffed yet */
642 if (prange->ttm_res) {
643 prange->ttm_res = NULL;
644 mutex_unlock(&prange->lock);
645 svm_range_bo_unref(prange->svm_bo);
646 } else
647 mutex_unlock(&prange->lock);
648}
649
650struct kfd_node *
651svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
652{
653 struct kfd_process *p;
654 struct kfd_process_device *pdd;
655
656 p = container_of(prange->svms, struct kfd_process, svms);
657 pdd = kfd_process_device_data_by_id(p, gpu_id);
658 if (!pdd) {
659 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
660 return NULL;
661 }
662
663 return pdd->dev;
664}
665
666struct kfd_process_device *
667svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
668{
669 struct kfd_process *p;
670
671 p = container_of(prange->svms, struct kfd_process, svms);
672
673 return kfd_get_process_device_data(node, p);
674}
675
676static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
677{
678 struct ttm_operation_ctx ctx = { false, false };
679
680 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
681
682 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
683}
684
685static int
686svm_range_check_attr(struct kfd_process *p,
687 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
688{
689 uint32_t i;
690
691 for (i = 0; i < nattr; i++) {
692 uint32_t val = attrs[i].value;
693 int gpuidx = MAX_GPU_INSTANCE;
694
695 switch (attrs[i].type) {
696 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
697 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
698 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
699 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
700 break;
701 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
702 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
703 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
704 break;
705 case KFD_IOCTL_SVM_ATTR_ACCESS:
706 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
707 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
708 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
709 break;
710 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
711 break;
712 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
713 break;
714 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
715 break;
716 default:
717 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
718 return -EINVAL;
719 }
720
721 if (gpuidx < 0) {
722 pr_debug("no GPU 0x%x found\n", val);
723 return -EINVAL;
724 } else if (gpuidx < MAX_GPU_INSTANCE &&
725 !test_bit(gpuidx, p->svms.bitmap_supported)) {
726 pr_debug("GPU 0x%x not supported\n", val);
727 return -EINVAL;
728 }
729 }
730
731 return 0;
732}
733
734static void
735svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
736 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
737 bool *update_mapping)
738{
739 uint32_t i;
740 int gpuidx;
741
742 for (i = 0; i < nattr; i++) {
743 switch (attrs[i].type) {
744 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
745 prange->preferred_loc = attrs[i].value;
746 break;
747 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
748 prange->prefetch_loc = attrs[i].value;
749 break;
750 case KFD_IOCTL_SVM_ATTR_ACCESS:
751 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
752 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
753 if (!p->xnack_enabled)
754 *update_mapping = true;
755
756 gpuidx = kfd_process_gpuidx_from_gpuid(p,
757 attrs[i].value);
758 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
759 bitmap_clear(prange->bitmap_access, gpuidx, 1);
760 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
761 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
762 bitmap_set(prange->bitmap_access, gpuidx, 1);
763 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
764 } else {
765 bitmap_clear(prange->bitmap_access, gpuidx, 1);
766 bitmap_set(prange->bitmap_aip, gpuidx, 1);
767 }
768 break;
769 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
770 *update_mapping = true;
771 prange->flags |= attrs[i].value;
772 break;
773 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
774 *update_mapping = true;
775 prange->flags &= ~attrs[i].value;
776 break;
777 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
778 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
779 break;
780 default:
781 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
782 }
783 }
784}
785
786static bool
787svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
788 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
789{
790 uint32_t i;
791 int gpuidx;
792
793 for (i = 0; i < nattr; i++) {
794 switch (attrs[i].type) {
795 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
796 if (prange->preferred_loc != attrs[i].value)
797 return false;
798 break;
799 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
800 /* Prefetch should always trigger a migration even
801 * if the value of the attribute didn't change.
802 */
803 return false;
804 case KFD_IOCTL_SVM_ATTR_ACCESS:
805 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
806 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
807 gpuidx = kfd_process_gpuidx_from_gpuid(p,
808 attrs[i].value);
809 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
810 if (test_bit(gpuidx, prange->bitmap_access) ||
811 test_bit(gpuidx, prange->bitmap_aip))
812 return false;
813 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
814 if (!test_bit(gpuidx, prange->bitmap_access))
815 return false;
816 } else {
817 if (!test_bit(gpuidx, prange->bitmap_aip))
818 return false;
819 }
820 break;
821 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
822 if ((prange->flags & attrs[i].value) != attrs[i].value)
823 return false;
824 break;
825 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
826 if ((prange->flags & attrs[i].value) != 0)
827 return false;
828 break;
829 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
830 if (prange->granularity != attrs[i].value)
831 return false;
832 break;
833 default:
834 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
835 }
836 }
837
838 return true;
839}
840
841/**
842 * svm_range_debug_dump - print all range information from svms
843 * @svms: svm range list header
844 *
845 * Print each svm range's start, end and actual location from the svms
846 * linked list and interval tree, for debugging.
847 *
848 * Context: The caller must hold svms->lock
849 */
850static void svm_range_debug_dump(struct svm_range_list *svms)
851{
852 struct interval_tree_node *node;
853 struct svm_range *prange;
854
855 pr_debug("dump svms 0x%p list\n", svms);
856 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
857
858 list_for_each_entry(prange, &svms->list, list) {
859 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
860 prange, prange->start, prange->npages,
861 prange->start + prange->npages - 1,
862 prange->actual_loc);
863 }
864
865 pr_debug("dump svms 0x%p interval tree\n", svms);
866 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
867 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
868 while (node) {
869 prange = container_of(node, struct svm_range, it_node);
870 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
871 prange, prange->start, prange->npages,
872 prange->start + prange->npages - 1,
873 prange->actual_loc);
874 node = interval_tree_iter_next(node, 0, ~0ULL);
875 }
876}
877
878static void *
879svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
880 uint64_t offset, uint64_t *vram_pages)
881{
882 unsigned char *src = (unsigned char *)psrc + offset;
883 unsigned char *dst;
884 uint64_t i;
885
886 dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
887 if (!dst)
888 return NULL;
889
890 if (!vram_pages) {
891 memcpy(dst, src, num_elements * size);
892 return (void *)dst;
893 }
894
895 *vram_pages = 0;
896 for (i = 0; i < num_elements; i++) {
897 dma_addr_t *temp;
898 temp = (dma_addr_t *)dst + i;
899 *temp = *((dma_addr_t *)src + i);
900 if (*temp & SVM_RANGE_VRAM_DOMAIN)
901 (*vram_pages)++;
902 }
903
904 return (void *)dst;
905}
906
907static int
908svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
909{
910 int i;
911
912 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
913 if (!src->dma_addr[i])
914 continue;
915 dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
916 sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
917 if (!dst->dma_addr[i])
918 return -ENOMEM;
919 }
920
921 return 0;
922}
923
924static int
925svm_range_split_array(void *ppnew, void *ppold, size_t size,
926 uint64_t old_start, uint64_t old_n,
927 uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
928{
929 unsigned char *new, *old, *pold;
930 uint64_t d;
931
932 if (!ppold)
933 return 0;
934 pold = *(unsigned char **)ppold;
935 if (!pold)
936 return 0;
937
938 d = (new_start - old_start) * size;
939 /* get dma addr array for new range and calculate its vram page number */
940 new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
941 if (!new)
942 return -ENOMEM;
943 d = (new_start == old_start) ? new_n * size : 0;
944 old = svm_range_copy_array(pold, size, old_n, d, NULL);
945 if (!old) {
946 kvfree(new);
947 return -ENOMEM;
948 }
949 kvfree(pold);
950 *(void **)ppold = old;
951 *(void **)ppnew = new;
952
953 return 0;
954}
955
956static int
957svm_range_split_pages(struct svm_range *new, struct svm_range *old,
958 uint64_t start, uint64_t last)
959{
960 uint64_t npages = last - start + 1;
961 int i, r;
962
963 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
964 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
965 sizeof(*old->dma_addr[i]), old->start,
966 npages, new->start, new->npages,
967 old->actual_loc ? &new->vram_pages : NULL);
968 if (r)
969 return r;
970 }
971 if (old->actual_loc)
972 old->vram_pages -= new->vram_pages;
973
974 return 0;
975}
976
977static int
978svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
979 uint64_t start, uint64_t last)
980{
981 uint64_t npages = last - start + 1;
982
983 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
984 new->svms, new, new->start, start, last);
985
986 if (new->start == old->start) {
987 new->offset = old->offset;
988 old->offset += new->npages;
989 } else {
990 new->offset = old->offset + npages;
991 }
992
993 new->svm_bo = svm_range_bo_ref(old->svm_bo);
994 new->ttm_res = old->ttm_res;
995
996 spin_lock(&new->svm_bo->list_lock);
997 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
998 spin_unlock(&new->svm_bo->list_lock);
999
1000 return 0;
1001}
1002
1003/**
1004 * svm_range_split_adjust - split range and adjust
1005 *
1006 * @new: new range
1007 * @old: the old range
1008 * @start: the start address, in pages, that the old range is adjusted to
1009 * @last: the last address, in pages, that the old range is adjusted to
1010 *
1011 * Copy the system memory dma_addr or vram ttm_res of the old range to the new
1012 * range, from new->start for new->npages pages; the remaining old range then
1013 * covers start to last.
1014 *
1015 * Return:
1016 * 0 - OK, -ENOMEM - out of memory
1017 */
1018static int
1019svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1020 uint64_t start, uint64_t last)
1021{
1022 int r;
1023
1024 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1025 new->svms, new->start, old->start, old->last, start, last);
1026
1027 if (new->start < old->start ||
1028 new->last > old->last) {
1029 WARN_ONCE(1, "invalid new range start or last\n");
1030 return -EINVAL;
1031 }
1032
1033 r = svm_range_split_pages(new, old, start, last);
1034 if (r)
1035 return r;
1036
1037 if (old->actual_loc && old->ttm_res) {
1038 r = svm_range_split_nodes(new, old, start, last);
1039 if (r)
1040 return r;
1041 }
1042
1043 old->npages = last - start + 1;
1044 old->start = start;
1045 old->last = last;
1046 new->flags = old->flags;
1047 new->preferred_loc = old->preferred_loc;
1048 new->prefetch_loc = old->prefetch_loc;
1049 new->actual_loc = old->actual_loc;
1050 new->granularity = old->granularity;
1051 new->mapped_to_gpu = old->mapped_to_gpu;
1052 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1053 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1054
1055 return 0;
1056}
1057
1058/**
1059 * svm_range_split - split a range into 2 ranges
1060 *
1061 * @prange: the svm range to split
1062 * @start: the remaining range start address in pages
1063 * @last: the remaining range last address in pages
1064 * @new: the result new range generated
1065 *
1066 * Two cases only:
1067 * case 1: if start == prange->start
1068 * prange ==> prange[start, last]
1069 * new range [last + 1, prange->last]
1070 *
1071 * case 2: if last == prange->last
1072 * prange ==> prange[start, last]
1073 * new range [prange->start, start - 1]
1074 *
1075 * Return:
1076 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1077 */
1078static int
1079svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1080 struct svm_range **new)
1081{
1082 uint64_t old_start = prange->start;
1083 uint64_t old_last = prange->last;
1084 struct svm_range_list *svms;
1085 int r = 0;
1086
1087 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1088 old_start, old_last, start, last);
1089
1090 if (old_start != start && old_last != last)
1091 return -EINVAL;
1092 if (start < old_start || last > old_last)
1093 return -EINVAL;
1094
1095 svms = prange->svms;
1096 if (old_start == start)
1097 *new = svm_range_new(svms, last + 1, old_last, false);
1098 else
1099 *new = svm_range_new(svms, old_start, start - 1, false);
1100 if (!*new)
1101 return -ENOMEM;
1102
1103 r = svm_range_split_adjust(*new, prange, start, last);
1104 if (r) {
1105 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1106 r, old_start, old_last, start, last);
1107 svm_range_free(*new, false);
1108 *new = NULL;
1109 }
1110
1111 return r;
1112}
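/*
 * Worked example (illustrative only): for prange [0x1000 0x1fff], calling
 * svm_range_split(prange, 0x1000, 0x17ff, &new) keeps prange as
 * [0x1000 0x17ff] and returns the new range [0x1800 0x1fff] (case 1 above).
 */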
1113
1114static int
1115svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1116 struct list_head *insert_list, struct list_head *remap_list)
1117{
1118 struct svm_range *tail = NULL;
1119 int r = svm_range_split(prange, prange->start, new_last, &tail);
1120
1121 if (!r) {
1122 list_add(&tail->list, insert_list);
1123 if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
1124 list_add(&tail->update_list, remap_list);
1125 }
1126 return r;
1127}
1128
1129static int
1130svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1131 struct list_head *insert_list, struct list_head *remap_list)
1132{
1133 struct svm_range *head = NULL;
1134 int r = svm_range_split(prange, new_start, prange->last, &head);
1135
1136 if (!r) {
1137 list_add(&head->list, insert_list);
1138 if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
1139 list_add(&head->update_list, remap_list);
1140 }
1141 return r;
1142}
1143
1144static void
1145svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1146 struct svm_range *pchild, enum svm_work_list_ops op)
1147{
1148 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1149 pchild, pchild->start, pchild->last, prange, op);
1150
1151 pchild->work_item.mm = mm;
1152 pchild->work_item.op = op;
1153 list_add_tail(&pchild->child_list, &prange->child_list);
1154}
1155
1156static bool
1157svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1158{
1159 return (node_a->adev == node_b->adev ||
1160 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1161}
1162
1163static uint64_t
1164svm_range_get_pte_flags(struct kfd_node *node,
1165 struct svm_range *prange, int domain)
1166{
1167 struct kfd_node *bo_node;
1168 uint32_t flags = prange->flags;
1169 uint32_t mapping_flags = 0;
1170 uint64_t pte_flags;
1171 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1172 bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1173 bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1174 unsigned int mtype_local;
1175
1176 if (domain == SVM_RANGE_VRAM_DOMAIN)
1177 bo_node = prange->svm_bo->node;
1178
1179 switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
1180 case IP_VERSION(9, 4, 1):
1181 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1182 if (bo_node == node) {
1183 mapping_flags |= coherent ?
1184 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1185 } else {
1186 mapping_flags |= coherent ?
1187 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1188 if (svm_nodes_in_same_hive(node, bo_node))
1189 snoop = true;
1190 }
1191 } else {
1192 mapping_flags |= coherent ?
1193 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1194 }
1195 break;
1196 case IP_VERSION(9, 4, 2):
1197 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1198 if (bo_node == node) {
1199 mapping_flags |= coherent ?
1200 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1201 if (node->adev->gmc.xgmi.connected_to_cpu)
1202 snoop = true;
1203 } else {
1204 mapping_flags |= coherent ?
1205 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1206 if (svm_nodes_in_same_hive(node, bo_node))
1207 snoop = true;
1208 }
1209 } else {
1210 mapping_flags |= coherent ?
1211 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1212 }
1213 break;
1214 case IP_VERSION(9, 4, 3):
1215 case IP_VERSION(9, 4, 4):
1216 if (ext_coherent)
1217 mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
1218 else
1219 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1220 amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1221 snoop = true;
1222 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1223 /* local HBM region close to partition */
1224 if (bo_node->adev == node->adev &&
1225 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1226 mapping_flags |= mtype_local;
1227 /* local HBM region far from partition or remote XGMI GPU
1228 * with regular system scope coherence
1229 */
1230 else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1231 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1232 /* PCIe P2P or extended system scope coherence */
1233 else
1234 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1235 /* system memory accessed by the APU */
1236 } else if (node->adev->flags & AMD_IS_APU) {
1237 /* On NUMA systems, locality is determined per-page
1238 * in amdgpu_gmc_override_vm_pte_flags
1239 */
1240 if (num_possible_nodes() <= 1)
1241 mapping_flags |= mtype_local;
1242 else
1243 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1244 /* system memory accessed by the dGPU */
1245 } else {
1246 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1247 }
1248 break;
1249 case IP_VERSION(12, 0, 0):
1250 case IP_VERSION(12, 0, 1):
1251 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1252 if (bo_node != node)
1253 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1254 } else {
1255 mapping_flags |= coherent ?
1256 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1257 }
1258 break;
1259 default:
1260 mapping_flags |= coherent ?
1261 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1262 }
1263
1264 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1265
1266 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1267 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1268 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1269 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1270
1271 pte_flags = AMDGPU_PTE_VALID;
1272 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1273 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1274 if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
1275 pte_flags |= AMDGPU_PTE_IS_PTE;
1276
1277 pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
1278 return pte_flags;
1279}
1280
1281static int
1282svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1283 uint64_t start, uint64_t last,
1284 struct dma_fence **fence)
1285{
1286 uint64_t init_pte_value = 0;
1287
1288 pr_debug("[0x%llx 0x%llx]\n", start, last);
1289
1290 return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
1291 last, init_pte_value, 0, 0, NULL, NULL,
1292 fence);
1293}
1294
1295static int
1296svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1297 unsigned long last, uint32_t trigger)
1298{
1299 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1300 struct kfd_process_device *pdd;
1301 struct dma_fence *fence = NULL;
1302 struct kfd_process *p;
1303 uint32_t gpuidx;
1304 int r = 0;
1305
1306 if (!prange->mapped_to_gpu) {
1307 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1308 prange, prange->start, prange->last);
1309 return 0;
1310 }
1311
1312 if (prange->start == start && prange->last == last) {
1313 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1314 prange->mapped_to_gpu = false;
1315 }
1316
1317 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1318 MAX_GPU_INSTANCE);
1319 p = container_of(prange->svms, struct kfd_process, svms);
1320
1321 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1322 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1323 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1324 if (!pdd) {
1325 pr_debug("failed to find device idx %d\n", gpuidx);
1326 return -EINVAL;
1327 }
1328
1329 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1330 start, last, trigger);
1331
1332 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1333 drm_priv_to_vm(pdd->drm_priv),
1334 start, last, &fence);
1335 if (r)
1336 break;
1337
1338 if (fence) {
1339 r = dma_fence_wait(fence, false);
1340 dma_fence_put(fence);
1341 fence = NULL;
1342 if (r)
1343 break;
1344 }
1345 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1346 }
1347
1348 return r;
1349}
1350
1351static int
1352svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1353 unsigned long offset, unsigned long npages, bool readonly,
1354 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1355 struct dma_fence **fence, bool flush_tlb)
1356{
1357 struct amdgpu_device *adev = pdd->dev->adev;
1358 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1359 uint64_t pte_flags;
1360 unsigned long last_start;
1361 int last_domain;
1362 int r = 0;
1363 int64_t i, j;
1364
1365 last_start = prange->start + offset;
1366
1367 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1368 last_start, last_start + npages - 1, readonly);
1369
1370 for (i = offset; i < offset + npages; i++) {
1371 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1372 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1373
1374 /* Collect all pages in the same address range and memory domain
1375 * that can be mapped with a single call to update mapping.
1376 */
1377 if (i < offset + npages - 1 &&
1378 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1379 continue;
1380
1381 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1382 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1383
1384 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1385 if (readonly)
1386 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1387
1388 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1389 prange->svms, last_start, prange->start + i,
1390 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1391 pte_flags);
1392
1393 /* In dGPU mode the same vm_manager allocates VRAM for different
1394 * memory partitions based on fpfn/lpfn, so the same
1395 * vm_manager.vram_base_offset applies regardless of the memory partition.
1396 */
1397 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1398 NULL, last_start, prange->start + i,
1399 pte_flags,
1400 (last_start - prange->start) << PAGE_SHIFT,
1401 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1402 NULL, dma_addr, &vm->last_update);
1403
1404 for (j = last_start - prange->start; j <= i; j++)
1405 dma_addr[j] |= last_domain;
1406
1407 if (r) {
1408 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1409 goto out;
1410 }
1411 last_start = prange->start + i + 1;
1412 }
1413
1414 r = amdgpu_vm_update_pdes(adev, vm, false);
1415 if (r) {
1416 pr_debug("failed %d to update directories 0x%lx\n", r,
1417 prange->start);
1418 goto out;
1419 }
1420
1421 if (fence)
1422 *fence = dma_fence_get(vm->last_update);
1423
1424out:
1425 return r;
1426}
1427
1428static int
1429svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1430 unsigned long npages, bool readonly,
1431 unsigned long *bitmap, bool wait, bool flush_tlb)
1432{
1433 struct kfd_process_device *pdd;
1434 struct amdgpu_device *bo_adev = NULL;
1435 struct kfd_process *p;
1436 struct dma_fence *fence = NULL;
1437 uint32_t gpuidx;
1438 int r = 0;
1439
1440 if (prange->svm_bo && prange->ttm_res)
1441 bo_adev = prange->svm_bo->node->adev;
1442
1443 p = container_of(prange->svms, struct kfd_process, svms);
1444 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1445 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1446 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1447 if (!pdd) {
1448 pr_debug("failed to find device idx %d\n", gpuidx);
1449 return -EINVAL;
1450 }
1451
1452 pdd = kfd_bind_process_to_device(pdd->dev, p);
1453 if (IS_ERR(pdd))
1454 return -EINVAL;
1455
1456 if (bo_adev && pdd->dev->adev != bo_adev &&
1457 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1458 pr_debug("cannot map to device idx %d\n", gpuidx);
1459 continue;
1460 }
1461
1462 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1463 prange->dma_addr[gpuidx],
1464 bo_adev, wait ? &fence : NULL,
1465 flush_tlb);
1466 if (r)
1467 break;
1468
1469 if (fence) {
1470 r = dma_fence_wait(fence, false);
1471 dma_fence_put(fence);
1472 fence = NULL;
1473 if (r) {
1474 pr_debug("failed %d to dma fence wait\n", r);
1475 break;
1476 }
1477 }
1478
1479 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1480 }
1481
1482 return r;
1483}
1484
1485struct svm_validate_context {
1486 struct kfd_process *process;
1487 struct svm_range *prange;
1488 bool intr;
1489 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1490 struct drm_exec exec;
1491};
1492
1493static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1494{
1495 struct kfd_process_device *pdd;
1496 struct amdgpu_vm *vm;
1497 uint32_t gpuidx;
1498 int r;
1499
1500 drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
1501 drm_exec_until_all_locked(&ctx->exec) {
1502 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1503 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1504 if (!pdd) {
1505 pr_debug("failed to find device idx %d\n", gpuidx);
1506 r = -EINVAL;
1507 goto unreserve_out;
1508 }
1509 vm = drm_priv_to_vm(pdd->drm_priv);
1510
1511 r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1512 drm_exec_retry_on_contention(&ctx->exec);
1513 if (unlikely(r)) {
1514 pr_debug("failed %d to reserve bo\n", r);
1515 goto unreserve_out;
1516 }
1517 }
1518 }
1519
1520 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1521 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1522 if (!pdd) {
1523 pr_debug("failed to find device idx %d\n", gpuidx);
1524 r = -EINVAL;
1525 goto unreserve_out;
1526 }
1527
1528 r = amdgpu_vm_validate(pdd->dev->adev,
1529 drm_priv_to_vm(pdd->drm_priv), NULL,
1530 svm_range_bo_validate, NULL);
1531 if (r) {
1532 pr_debug("failed %d validate pt bos\n", r);
1533 goto unreserve_out;
1534 }
1535 }
1536
1537 return 0;
1538
1539unreserve_out:
1540 drm_exec_fini(&ctx->exec);
1541 return r;
1542}
1543
1544static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1545{
1546 drm_exec_fini(&ctx->exec);
1547}
1548
1549static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1550{
1551 struct kfd_process_device *pdd;
1552
1553 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1554 if (!pdd)
1555 return NULL;
1556
1557 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1558}
1559
1560/*
1561 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1562 *
1563 * To prevent concurrent destruction or change of range attributes, the
1564 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1565 * because that would block concurrent evictions and lead to deadlocks. To
1566 * serialize concurrent migrations or validations of the same range, the
1567 * prange->migrate_mutex must be held.
1568 *
1569 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1570 * eviction fence).
1571 *
1572 * The following sequence ensures race-free validation and GPU mapping:
1573 *
1574 * 1. Reserve page table (and SVM BO if range is in VRAM)
1575 * 2. hmm_range_fault to get page addresses (if system memory)
1576 * 3. DMA-map pages (if system memory)
1577 * 4-a. Take notifier lock
1578 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1579 * 4-c. Check that the range was not split or otherwise invalidated
1580 * 4-d. Update GPU page table
1581 * 4-e. Release notifier lock
1582 * 5. Release page table (and SVM BO) reservation
1583 */
1584static int svm_range_validate_and_map(struct mm_struct *mm,
1585 unsigned long map_start, unsigned long map_last,
1586 struct svm_range *prange, int32_t gpuidx,
1587 bool intr, bool wait, bool flush_tlb)
1588{
1589 struct svm_validate_context *ctx;
1590 unsigned long start, end, addr;
1591 struct kfd_process *p;
1592 void *owner;
1593 int32_t idx;
1594 int r = 0;
1595
1596 ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1597 if (!ctx)
1598 return -ENOMEM;
1599 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1600 ctx->prange = prange;
1601 ctx->intr = intr;
1602
1603 if (gpuidx < MAX_GPU_INSTANCE) {
1604 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1605 bitmap_set(ctx->bitmap, gpuidx, 1);
1606 } else if (ctx->process->xnack_enabled) {
1607 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1608
1609 /* If the range was prefetched to a GPU, or a GPU retry fault
1610 * migrated it there, and that GPU has the ACCESS attribute for
1611 * the range, create the mapping on that GPU.
1612 */
1613 if (prange->actual_loc) {
1614 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1615 prange->actual_loc);
1616 if (gpuidx < 0) {
1617 WARN_ONCE(1, "failed get device by id 0x%x\n",
1618 prange->actual_loc);
1619 r = -EINVAL;
1620 goto free_ctx;
1621 }
1622 if (test_bit(gpuidx, prange->bitmap_access))
1623 bitmap_set(ctx->bitmap, gpuidx, 1);
1624 }
1625
1626 /*
1627 * If prange is already mapped or has the always-mapped flag set,
1628 * update the mapping on GPUs with the ACCESS attribute
1629 */
1630 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1631 if (prange->mapped_to_gpu ||
1632 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1633 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1634 }
1635 } else {
1636 bitmap_or(ctx->bitmap, prange->bitmap_access,
1637 prange->bitmap_aip, MAX_GPU_INSTANCE);
1638 }
1639
1640 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1641 r = 0;
1642 goto free_ctx;
1643 }
1644
1645 if (prange->actual_loc && !prange->ttm_res) {
1646 /* This should never happen. actual_loc gets set by
1647 * svm_migrate_ram_to_vram after allocating a BO.
1648 */
1649 WARN_ONCE(1, "VRAM BO missing during validation\n");
1650 r = -EINVAL;
1651 goto free_ctx;
1652 }
1653
1654 r = svm_range_reserve_bos(ctx, intr);
1655 if (r)
1656 goto free_ctx;
1657
1658 p = container_of(prange->svms, struct kfd_process, svms);
1659 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1660 MAX_GPU_INSTANCE));
1661 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1662 if (kfd_svm_page_owner(p, idx) != owner) {
1663 owner = NULL;
1664 break;
1665 }
1666 }
1667
1668 start = map_start << PAGE_SHIFT;
1669 end = (map_last + 1) << PAGE_SHIFT;
1670 for (addr = start; !r && addr < end; ) {
1671 struct hmm_range *hmm_range = NULL;
1672 unsigned long map_start_vma;
1673 unsigned long map_last_vma;
1674 struct vm_area_struct *vma;
1675 unsigned long next = 0;
1676 unsigned long offset;
1677 unsigned long npages;
1678 bool readonly;
1679
1680 vma = vma_lookup(mm, addr);
1681 if (vma) {
1682 readonly = !(vma->vm_flags & VM_WRITE);
1683
1684 next = min(vma->vm_end, end);
1685 npages = (next - addr) >> PAGE_SHIFT;
1686 WRITE_ONCE(p->svms.faulting_task, current);
1687 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1688 readonly, owner, NULL,
1689 &hmm_range);
1690 WRITE_ONCE(p->svms.faulting_task, NULL);
1691 if (r)
1692 pr_debug("failed %d to get svm range pages\n", r);
1693 } else {
1694 r = -EFAULT;
1695 }
1696
1697 if (!r) {
1698 offset = (addr >> PAGE_SHIFT) - prange->start;
1699 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1700 hmm_range->hmm_pfns);
1701 if (r)
1702 pr_debug("failed %d to dma map range\n", r);
1703 }
1704
1705 svm_range_lock(prange);
1706
1707 /* Free backing memory of hmm_range if it was initialized.
1708 * Override the return value with TRY AGAIN only if prior returns
1709 * were successful.
1710 */
1711 if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
1712 pr_debug("hmm update the range, need validate again\n");
1713 r = -EAGAIN;
1714 }
1715
1716 if (!r && !list_empty(&prange->child_list)) {
1717 pr_debug("range split by unmap in parallel, validate again\n");
1718 r = -EAGAIN;
1719 }
1720
1721 if (!r) {
1722 map_start_vma = max(map_start, prange->start + offset);
1723 map_last_vma = min(map_last, prange->start + offset + npages - 1);
1724 if (map_start_vma <= map_last_vma) {
1725 offset = map_start_vma - prange->start;
1726 npages = map_last_vma - map_start_vma + 1;
1727 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1728 ctx->bitmap, wait, flush_tlb);
1729 }
1730 }
1731
1732 if (!r && next == end)
1733 prange->mapped_to_gpu = true;
1734
1735 svm_range_unlock(prange);
1736
1737 addr = next;
1738 }
1739
1740 svm_range_unreserve_bos(ctx);
1741 if (!r)
1742 prange->validate_timestamp = ktime_get_boottime();
1743
1744free_ctx:
1745 kfree(ctx);
1746
1747 return r;
1748}
1749
1750/**
1751 * svm_range_list_lock_and_flush_work - flush pending deferred work
1752 *
1753 * @svms: the svm range list
1754 * @mm: the mm structure
1755 *
1756 * Context: Returns with mmap write lock held, pending deferred work flushed
1757 *
1758 */
1759void
1760svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1761 struct mm_struct *mm)
1762{
1763retry_flush_work:
1764 flush_work(&svms->deferred_list_work);
1765 mmap_write_lock(mm);
1766
1767 if (list_empty(&svms->deferred_range_list))
1768 return;
1769 mmap_write_unlock(mm);
1770 pr_debug("retry flush\n");
1771 goto retry_flush_work;
1772}
1773
1774static void svm_range_restore_work(struct work_struct *work)
1775{
1776 struct delayed_work *dwork = to_delayed_work(work);
1777 struct amdkfd_process_info *process_info;
1778 struct svm_range_list *svms;
1779 struct svm_range *prange;
1780 struct kfd_process *p;
1781 struct mm_struct *mm;
1782 int evicted_ranges;
1783 int invalid;
1784 int r;
1785
1786 svms = container_of(dwork, struct svm_range_list, restore_work);
1787 evicted_ranges = atomic_read(&svms->evicted_ranges);
1788 if (!evicted_ranges)
1789 return;
1790
1791 pr_debug("restore svm ranges\n");
1792
1793 p = container_of(svms, struct kfd_process, svms);
1794 process_info = p->kgd_process_info;
1795
1796 /* Take an mm reference while svm_range_validate_and_map runs on the ranges */
1797 mm = get_task_mm(p->lead_thread);
1798 if (!mm) {
1799 pr_debug("svms 0x%p process mm gone\n", svms);
1800 return;
1801 }
1802
1803 mutex_lock(&process_info->lock);
1804 svm_range_list_lock_and_flush_work(svms, mm);
1805 mutex_lock(&svms->lock);
1806
1807 evicted_ranges = atomic_read(&svms->evicted_ranges);
1808
1809 list_for_each_entry(prange, &svms->list, list) {
1810 invalid = atomic_read(&prange->invalid);
1811 if (!invalid)
1812 continue;
1813
1814 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1815 prange->svms, prange, prange->start, prange->last,
1816 invalid);
1817
1818 /*
1819 * If the range is migrating, wait until the migration is done.
1820 */
1821 mutex_lock(&prange->migrate_mutex);
1822
1823 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1824 MAX_GPU_INSTANCE, false, true, false);
1825 if (r)
1826 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1827 prange->start);
1828
1829 mutex_unlock(&prange->migrate_mutex);
1830 if (r)
1831 goto out_reschedule;
1832
1833 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1834 goto out_reschedule;
1835 }
1836
1837 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1838 evicted_ranges)
1839 goto out_reschedule;
1840
1841 evicted_ranges = 0;
1842
1843 r = kgd2kfd_resume_mm(mm);
1844 if (r) {
1845 /* No recovery from this failure. Probably the CP is
1846 * hanging. No point trying again.
1847 */
1848 pr_debug("failed %d to resume KFD\n", r);
1849 }
1850
1851 pr_debug("restore svm ranges successfully\n");
1852
1853out_reschedule:
1854 mutex_unlock(&svms->lock);
1855 mmap_write_unlock(mm);
1856 mutex_unlock(&process_info->lock);
1857
1858 /* If validation failed, reschedule another attempt */
1859 if (evicted_ranges) {
1860 pr_debug("reschedule to restore svm range\n");
1861 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1862 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1863
1864 kfd_smi_event_queue_restore_rescheduled(mm);
1865 }
1866 mmput(mm);
1867}
1868
1869/**
1870 * svm_range_evict - evict svm range
1871 * @prange: svm range structure
1872 * @mm: current process mm_struct
1873 * @start: start address of the invalidated range, in pages
1874 * @last: last address of the invalidated range, in pages
1875 * @event: mmu notifier event when range is evicted or migrated
1876 *
1877 * Stop all queues of the process to ensure the GPU doesn't access the memory,
1878 * then return to let the CPU evict the buffer and update the CPU page table.
1879 *
1880 * No lock is needed to synchronize CPU page table invalidation with GPU
1881 * execution. If an invalidation happens while the restore work is running,
1882 * the restore work restarts to ensure it maps the latest CPU pages to the
1883 * GPU before starting the queues.
1884 */
1885static int
1886svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1887 unsigned long start, unsigned long last,
1888 enum mmu_notifier_event event)
1889{
1890 struct svm_range_list *svms = prange->svms;
1891 struct svm_range *pchild;
1892 struct kfd_process *p;
1893 int r = 0;
1894
1895 p = container_of(svms, struct kfd_process, svms);
1896
1897 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1898 svms, prange->start, prange->last, start, last);
1899
1900 if (!p->xnack_enabled ||
1901 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1902 int evicted_ranges;
1903 bool mapped = prange->mapped_to_gpu;
1904
1905 list_for_each_entry(pchild, &prange->child_list, child_list) {
1906 if (!pchild->mapped_to_gpu)
1907 continue;
1908 mapped = true;
1909 mutex_lock_nested(&pchild->lock, 1);
1910 if (pchild->start <= last && pchild->last >= start) {
1911 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1912 pchild->start, pchild->last);
1913 atomic_inc(&pchild->invalid);
1914 }
1915 mutex_unlock(&pchild->lock);
1916 }
1917
1918 if (!mapped)
1919 return r;
1920
1921 if (prange->start <= last && prange->last >= start)
1922 atomic_inc(&prange->invalid);
1923
1924 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1925 if (evicted_ranges != 1)
1926 return r;
1927
1928 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1929 prange->svms, prange->start, prange->last);
1930
1931 /* First eviction, stop the queues */
1932 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1933 if (r)
1934 pr_debug("failed to quiesce KFD\n");
1935
1936 pr_debug("schedule to restore svm %p ranges\n", svms);
1937 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1938 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1939 } else {
1940 unsigned long s, l;
1941 uint32_t trigger;
1942
1943 if (event == MMU_NOTIFY_MIGRATE)
1944 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1945 else
1946 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1947
1948 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1949 prange->svms, start, last);
1950 list_for_each_entry(pchild, &prange->child_list, child_list) {
1951 mutex_lock_nested(&pchild->lock, 1);
1952 s = max(start, pchild->start);
1953 l = min(last, pchild->last);
1954 if (l >= s)
1955 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1956 mutex_unlock(&pchild->lock);
1957 }
1958 s = max(start, prange->start);
1959 l = min(last, prange->last);
1960 if (l >= s)
1961 svm_range_unmap_from_gpus(prange, s, l, trigger);
1962 }
1963
1964 return r;
1965}
1966
1967static struct svm_range *svm_range_clone(struct svm_range *old)
1968{
1969 struct svm_range *new;
1970
1971 new = svm_range_new(old->svms, old->start, old->last, false);
1972 if (!new)
1973 return NULL;
1974 if (svm_range_copy_dma_addrs(new, old)) {
1975 svm_range_free(new, false);
1976 return NULL;
1977 }
1978 if (old->svm_bo) {
1979 new->ttm_res = old->ttm_res;
1980 new->offset = old->offset;
1981 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1982 spin_lock(&new->svm_bo->list_lock);
1983 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1984 spin_unlock(&new->svm_bo->list_lock);
1985 }
1986 new->flags = old->flags;
1987 new->preferred_loc = old->preferred_loc;
1988 new->prefetch_loc = old->prefetch_loc;
1989 new->actual_loc = old->actual_loc;
1990 new->granularity = old->granularity;
1991 new->mapped_to_gpu = old->mapped_to_gpu;
1992 new->vram_pages = old->vram_pages;
1993 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1994 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1995
1996 return new;
1997}
1998
1999void svm_range_set_max_pages(struct amdgpu_device *adev)
2000{
2001 uint64_t max_pages;
2002 uint64_t pages, _pages;
2003 uint64_t min_pages = 0;
2004 int i, id;
2005
2006 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2007 if (adev->kfd.dev->nodes[i]->xcp)
2008 id = adev->kfd.dev->nodes[i]->xcp->id;
2009 else
2010 id = -1;
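		/* KFD_XCP_MEMORY_SIZE() >> 17 is 1/32 of the partition's VRAM in 4KB
		 * pages; clamp it between 2MB (1 << 9 pages) and 1GB (1 << 18 pages)
		 * and round down to a power of two.
		 */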
2011 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2012 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2013 pages = rounddown_pow_of_two(pages);
2014 min_pages = min_not_zero(min_pages, pages);
2015 }
2016
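	/* Lock-free update of the global limit: keep the smallest non-zero value
	 * seen across all GPUs and partitions.
	 */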
2017 do {
2018 max_pages = READ_ONCE(max_svm_range_pages);
2019 _pages = min_not_zero(max_pages, min_pages);
2020 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2021}
2022
2023static int
2024svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2025 uint64_t max_pages, struct list_head *insert_list,
2026 struct list_head *update_list)
2027{
2028 struct svm_range *prange;
2029 uint64_t l;
2030
2031 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2032 max_pages, start, last);
2033
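	/* Carve [start, last] into ranges of at most max_pages pages, each ending
	 * at the next max_pages-aligned boundary.
	 */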
2034 while (last >= start) {
2035 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2036
2037 prange = svm_range_new(svms, start, l, true);
2038 if (!prange)
2039 return -ENOMEM;
2040 list_add(&prange->list, insert_list);
2041 list_add(&prange->update_list, update_list);
2042
2043 start = l + 1;
2044 }
2045 return 0;
2046}
2047
2048/**
2049 * svm_range_add - add svm range and handle overlap
 2050 * @p: the process to add the range to
 2051 * @start: range start address, in pages
 2052 * @size: range size, in pages
 2053 * @nattr: number of attributes
 2054 * @attrs: array of attributes
 2055 * @update_list: output, the ranges that need to be validated and mapped on GPUs
 2056 * @insert_list: output, the ranges that need to be inserted into svms
 2057 * @remove_list: output, the replaced ranges that need to be removed from svms
 2058 * @remap_list: output, the unaligned ranges that need to be remapped
2059 *
2060 * Check if the virtual address range has overlap with any existing ranges,
2061 * split partly overlapping ranges and add new ranges in the gaps. All changes
2062 * should be applied to the range_list and interval tree transactionally. If
2063 * any range split or allocation fails, the entire update fails. Therefore any
2064 * existing overlapping svm_ranges are cloned and the original svm_ranges left
2065 * unchanged.
2066 *
2067 * If the transaction succeeds, the caller can update and insert clones and
2068 * new ranges, then free the originals.
2069 *
2070 * Otherwise the caller can free the clones and new ranges, while the old
2071 * svm_ranges remain unchanged.
2072 *
2073 * Context: Process context, caller must hold svms->lock
2074 *
2075 * Return:
2076 * 0 - OK, otherwise error code
2077 */
2078static int
2079svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2080 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2081 struct list_head *update_list, struct list_head *insert_list,
2082 struct list_head *remove_list, struct list_head *remap_list)
2083{
2084 unsigned long last = start + size - 1UL;
2085 struct svm_range_list *svms = &p->svms;
2086 struct interval_tree_node *node;
2087 struct svm_range *prange;
2088 struct svm_range *tmp;
2089 struct list_head new_list;
2090 int r = 0;
2091
2092 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2093
2094 INIT_LIST_HEAD(update_list);
2095 INIT_LIST_HEAD(insert_list);
2096 INIT_LIST_HEAD(remove_list);
2097 INIT_LIST_HEAD(&new_list);
2098 INIT_LIST_HEAD(remap_list);
2099
2100 node = interval_tree_iter_first(&svms->objects, start, last);
2101 while (node) {
2102 struct interval_tree_node *next;
2103 unsigned long next_start;
2104
2105 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2106 node->last);
2107
2108 prange = container_of(node, struct svm_range, it_node);
2109 next = interval_tree_iter_next(node, start, last);
2110 next_start = min(node->last, last) + 1;
2111
2112 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2113 prange->mapped_to_gpu) {
2114 /* nothing to do */
2115 } else if (node->start < start || node->last > last) {
2116 /* node intersects the update range and its attributes
2117 * will change. Clone and split it, apply updates only
2118 * to the overlapping part
2119 */
2120 struct svm_range *old = prange;
2121
2122 prange = svm_range_clone(old);
2123 if (!prange) {
2124 r = -ENOMEM;
2125 goto out;
2126 }
2127
2128 list_add(&old->update_list, remove_list);
2129 list_add(&prange->list, insert_list);
2130 list_add(&prange->update_list, update_list);
2131
2132 if (node->start < start) {
2133 pr_debug("change old range start\n");
2134 r = svm_range_split_head(prange, start,
2135 insert_list, remap_list);
2136 if (r)
2137 goto out;
2138 }
2139 if (node->last > last) {
2140 pr_debug("change old range last\n");
2141 r = svm_range_split_tail(prange, last,
2142 insert_list, remap_list);
2143 if (r)
2144 goto out;
2145 }
2146 } else {
2147 /* The node is contained within start..last,
2148 * just update it
2149 */
2150 list_add(&prange->update_list, update_list);
2151 }
2152
2153 /* insert a new node if needed */
2154 if (node->start > start) {
2155 r = svm_range_split_new(svms, start, node->start - 1,
2156 READ_ONCE(max_svm_range_pages),
2157 &new_list, update_list);
2158 if (r)
2159 goto out;
2160 }
2161
2162 node = next;
2163 start = next_start;
2164 }
2165
2166 /* add a final range at the end if needed */
2167 if (start <= last)
2168 r = svm_range_split_new(svms, start, last,
2169 READ_ONCE(max_svm_range_pages),
2170 &new_list, update_list);
2171
2172out:
2173 if (r) {
2174 list_for_each_entry_safe(prange, tmp, insert_list, list)
2175 svm_range_free(prange, false);
2176 list_for_each_entry_safe(prange, tmp, &new_list, list)
2177 svm_range_free(prange, true);
2178 } else {
2179 list_splice(&new_list, insert_list);
2180 }
2181
2182 return r;
2183}
2184
2185static void
2186svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2187 struct svm_range *prange)
2188{
2189 unsigned long start;
2190 unsigned long last;
2191
2192 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2193 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2194
2195 if (prange->start == start && prange->last == last)
2196 return;
2197
2198 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2199 prange->svms, prange, start, last, prange->start,
2200 prange->last);
2201
2202 if (start != 0 && last != 0) {
2203 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2204 svm_range_remove_notifier(prange);
2205 }
2206 prange->it_node.start = prange->start;
2207 prange->it_node.last = prange->last;
2208
2209 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2210 svm_range_add_notifier_locked(mm, prange);
2211}
2212
2213static void
2214svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2215 struct mm_struct *mm)
2216{
2217 switch (prange->work_item.op) {
2218 case SVM_OP_NULL:
2219 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2220 svms, prange, prange->start, prange->last);
2221 break;
2222 case SVM_OP_UNMAP_RANGE:
2223 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2224 svms, prange, prange->start, prange->last);
2225 svm_range_unlink(prange);
2226 svm_range_remove_notifier(prange);
2227 svm_range_free(prange, true);
2228 break;
2229 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2230 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2231 svms, prange, prange->start, prange->last);
2232 svm_range_update_notifier_and_interval_tree(mm, prange);
2233 break;
2234 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2235 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2236 svms, prange, prange->start, prange->last);
2237 svm_range_update_notifier_and_interval_tree(mm, prange);
2238 /* TODO: implement deferred validation and mapping */
2239 break;
2240 case SVM_OP_ADD_RANGE:
2241 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2242 prange->start, prange->last);
2243 svm_range_add_to_svms(prange);
2244 svm_range_add_notifier_locked(mm, prange);
2245 break;
2246 case SVM_OP_ADD_RANGE_AND_MAP:
2247 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2248 prange, prange->start, prange->last);
2249 svm_range_add_to_svms(prange);
2250 svm_range_add_notifier_locked(mm, prange);
2251 /* TODO: implement deferred validation and mapping */
2252 break;
2253 default:
2254 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2255 prange->work_item.op);
2256 }
2257}
2258
2259static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2260{
2261 struct kfd_process_device *pdd;
2262 struct kfd_process *p;
2263 int drain;
2264 uint32_t i;
2265
2266 p = container_of(svms, struct kfd_process, svms);
2267
2268restart:
2269 drain = atomic_read(&svms->drain_pagefaults);
2270 if (!drain)
2271 return;
2272
2273 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2274 pdd = p->pdds[i];
2275 if (!pdd)
2276 continue;
2277
2278 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2279
2280 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2281 pdd->dev->adev->irq.retry_cam_enabled ?
2282 &pdd->dev->adev->irq.ih :
2283 &pdd->dev->adev->irq.ih1);
2284
2285 if (pdd->dev->adev->irq.retry_cam_enabled)
2286 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2287 &pdd->dev->adev->irq.ih_soft);
2288
2289
2290 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2291 }
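	/* If another drain request arrived while draining, start over so that the
	 * new request is not lost.
	 */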
2292 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2293 goto restart;
2294}
2295
2296static void svm_range_deferred_list_work(struct work_struct *work)
2297{
2298 struct svm_range_list *svms;
2299 struct svm_range *prange;
2300 struct mm_struct *mm;
2301
2302 svms = container_of(work, struct svm_range_list, deferred_list_work);
2303 pr_debug("enter svms 0x%p\n", svms);
2304
2305 spin_lock(&svms->deferred_list_lock);
2306 while (!list_empty(&svms->deferred_range_list)) {
2307 prange = list_first_entry(&svms->deferred_range_list,
2308 struct svm_range, deferred_list);
2309 spin_unlock(&svms->deferred_list_lock);
2310
2311 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2312 prange->start, prange->last, prange->work_item.op);
2313
2314 mm = prange->work_item.mm;
2315retry:
2316 mmap_write_lock(mm);
2317
2318 /* Checking for the need to drain retry faults must be inside
2319 * mmap write lock to serialize with munmap notifiers.
2320 */
2321 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2322 mmap_write_unlock(mm);
2323 svm_range_drain_retry_fault(svms);
2324 goto retry;
2325 }
2326
 2327		/* Removal from deferred_list must happen inside the mmap write lock,
 2328		 * to avoid two races:
 2329		 * 1. unmap_from_cpu may change work_item.op and add the range to
 2330		 *    deferred_list again, causing a use-after-free.
 2331		 * 2. svm_range_list_lock_and_flush_work may take the mmap write
 2332		 *    lock and continue because deferred_list is empty, while the
 2333		 *    deferred_list work is actually still waiting for the mmap lock.
2334 */
2335 spin_lock(&svms->deferred_list_lock);
2336 list_del_init(&prange->deferred_list);
2337 spin_unlock(&svms->deferred_list_lock);
2338
2339 mutex_lock(&svms->lock);
2340 mutex_lock(&prange->migrate_mutex);
2341 while (!list_empty(&prange->child_list)) {
2342 struct svm_range *pchild;
2343
2344 pchild = list_first_entry(&prange->child_list,
2345 struct svm_range, child_list);
2346 pr_debug("child prange 0x%p op %d\n", pchild,
2347 pchild->work_item.op);
2348 list_del_init(&pchild->child_list);
2349 svm_range_handle_list_op(svms, pchild, mm);
2350 }
2351 mutex_unlock(&prange->migrate_mutex);
2352
2353 svm_range_handle_list_op(svms, prange, mm);
2354 mutex_unlock(&svms->lock);
2355 mmap_write_unlock(mm);
2356
 2357		/* Pairs with mmget in svm_range_add_list_work. mmput_async is used so
 2358		 * that dropping the last mm refcount defers the release and avoids circular locking
2359 */
2360 mmput_async(mm);
2361
2362 spin_lock(&svms->deferred_list_lock);
2363 }
2364 spin_unlock(&svms->deferred_list_lock);
2365 pr_debug("exit svms 0x%p\n", svms);
2366}
2367
2368void
2369svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2370 struct mm_struct *mm, enum svm_work_list_ops op)
2371{
2372 spin_lock(&svms->deferred_list_lock);
2373 /* if prange is on the deferred list */
2374 if (!list_empty(&prange->deferred_list)) {
2375 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2376 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
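		/* A pending SVM_OP_UNMAP_RANGE is never overridden; otherwise a
		 * non-NULL new op replaces the pending one.
		 */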
2377 if (op != SVM_OP_NULL &&
2378 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2379 prange->work_item.op = op;
2380 } else {
2381 prange->work_item.op = op;
2382
2383 /* Pairs with mmput in deferred_list_work */
2384 mmget(mm);
2385 prange->work_item.mm = mm;
2386 list_add_tail(&prange->deferred_list,
2387 &prange->svms->deferred_range_list);
2388 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2389 prange, prange->start, prange->last, op);
2390 }
2391 spin_unlock(&svms->deferred_list_lock);
2392}
2393
2394void schedule_deferred_list_work(struct svm_range_list *svms)
2395{
2396 spin_lock(&svms->deferred_list_lock);
2397 if (!list_empty(&svms->deferred_range_list))
2398 schedule_work(&svms->deferred_list_work);
2399 spin_unlock(&svms->deferred_list_lock);
2400}
2401
2402static void
2403svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2404 struct svm_range *prange, unsigned long start,
2405 unsigned long last)
2406{
2407 struct svm_range *head;
2408 struct svm_range *tail;
2409
2410 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2411 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2412 prange->start, prange->last);
2413 return;
2414 }
2415 if (start > prange->last || last < prange->start)
2416 return;
2417
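	/* Carve the unmapped window out of prange: any part before start stays in
	 * prange, any part after last ends up in tail, and the overlapping part
	 * (head, or tail when there is no part after last) is queued to be
	 * unmapped and removed.
	 */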
2418 head = tail = prange;
2419 if (start > prange->start)
2420 svm_range_split(prange, prange->start, start - 1, &tail);
2421 if (last < tail->last)
2422 svm_range_split(tail, last + 1, tail->last, &head);
2423
2424 if (head != prange && tail != prange) {
2425 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2426 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2427 } else if (tail != prange) {
2428 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2429 } else if (head != prange) {
2430 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2431 } else if (parent != prange) {
2432 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2433 }
2434}
2435
2436static void
2437svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2438 unsigned long start, unsigned long last)
2439{
2440 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2441 struct svm_range_list *svms;
2442 struct svm_range *pchild;
2443 struct kfd_process *p;
2444 unsigned long s, l;
2445 bool unmap_parent;
2446
2447 p = kfd_lookup_process_by_mm(mm);
2448 if (!p)
2449 return;
2450 svms = &p->svms;
2451
2452 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2453 prange, prange->start, prange->last, start, last);
2454
2455 /* Make sure pending page faults are drained in the deferred worker
2456 * before the range is freed to avoid straggler interrupts on
2457 * unmapped memory causing "phantom faults".
2458 */
2459 atomic_inc(&svms->drain_pagefaults);
2460
2461 unmap_parent = start <= prange->start && last >= prange->last;
2462
2463 list_for_each_entry(pchild, &prange->child_list, child_list) {
2464 mutex_lock_nested(&pchild->lock, 1);
2465 s = max(start, pchild->start);
2466 l = min(last, pchild->last);
2467 if (l >= s)
2468 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2469 svm_range_unmap_split(mm, prange, pchild, start, last);
2470 mutex_unlock(&pchild->lock);
2471 }
2472 s = max(start, prange->start);
2473 l = min(last, prange->last);
2474 if (l >= s)
2475 svm_range_unmap_from_gpus(prange, s, l, trigger);
2476 svm_range_unmap_split(mm, prange, prange, start, last);
2477
2478 if (unmap_parent)
2479 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2480 else
2481 svm_range_add_list_work(svms, prange, mm,
2482 SVM_OP_UPDATE_RANGE_NOTIFIER);
2483 schedule_deferred_list_work(svms);
2484
2485 kfd_unref_process(p);
2486}
2487
2488/**
2489 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2490 * @mni: mmu_interval_notifier struct
2491 * @range: mmu_notifier_range struct
2492 * @cur_seq: value to pass to mmu_interval_set_seq()
2493 *
 2494 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
 2495 * otherwise it comes from migration or a CPU page invalidation callback.
 2496 *
 2497 * For an unmap event, unmap the range from the GPUs, remove the prange from
 2498 * svms in a deferred work thread, and split the prange if only part of it is unmapped.
 2499 *
 2500 * For an invalidation event, if GPU retry fault is not enabled, evict the queues,
 2501 * then schedule svm_range_restore_work to update the GPU mapping and resume the
 2502 * queues. If GPU retry fault is enabled, unmap the svm range from the GPU; the
 2503 * retry fault will update the GPU mapping to recover.
2504 *
2505 * Context: mmap lock, notifier_invalidate_start lock are held
2506 * for invalidate event, prange lock is held if this is from migration
2507 */
2508static bool
2509svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2510 const struct mmu_notifier_range *range,
2511 unsigned long cur_seq)
2512{
2513 struct svm_range *prange;
2514 unsigned long start;
2515 unsigned long last;
2516
2517 if (range->event == MMU_NOTIFY_RELEASE)
2518 return true;
2519 if (!mmget_not_zero(mni->mm))
2520 return true;
2521
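	/* Clamp the invalidated region to the part covered by this notifier and
	 * convert the bounds to page numbers.
	 */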
2522 start = mni->interval_tree.start;
2523 last = mni->interval_tree.last;
2524 start = max(start, range->start) >> PAGE_SHIFT;
2525 last = min(last, range->end - 1) >> PAGE_SHIFT;
2526 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2527 start, last, range->start >> PAGE_SHIFT,
2528 (range->end - 1) >> PAGE_SHIFT,
2529 mni->interval_tree.start >> PAGE_SHIFT,
2530 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2531
2532 prange = container_of(mni, struct svm_range, notifier);
2533
2534 svm_range_lock(prange);
2535 mmu_interval_set_seq(mni, cur_seq);
2536
2537 switch (range->event) {
2538 case MMU_NOTIFY_UNMAP:
2539 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2540 break;
2541 default:
2542 svm_range_evict(prange, mni->mm, start, last, range->event);
2543 break;
2544 }
2545
2546 svm_range_unlock(prange);
2547 mmput(mni->mm);
2548
2549 return true;
2550}
2551
2552/**
2553 * svm_range_from_addr - find svm range from fault address
2554 * @svms: svm range list header
2555 * @addr: address to search range interval tree, in pages
2556 * @parent: parent range if range is on child list
2557 *
2558 * Context: The caller must hold svms->lock
2559 *
2560 * Return: the svm_range found or NULL
2561 */
2562struct svm_range *
2563svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2564 struct svm_range **parent)
2565{
2566 struct interval_tree_node *node;
2567 struct svm_range *prange;
2568 struct svm_range *pchild;
2569
2570 node = interval_tree_iter_first(&svms->objects, addr, addr);
2571 if (!node)
2572 return NULL;
2573
2574 prange = container_of(node, struct svm_range, it_node);
2575 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2576 addr, prange->start, prange->last, node->start, node->last);
2577
2578 if (addr >= prange->start && addr <= prange->last) {
2579 if (parent)
2580 *parent = prange;
2581 return prange;
2582 }
2583 list_for_each_entry(pchild, &prange->child_list, child_list)
2584 if (addr >= pchild->start && addr <= pchild->last) {
2585 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2586 addr, pchild->start, pchild->last);
2587 if (parent)
2588 *parent = prange;
2589 return pchild;
2590 }
2591
2592 return NULL;
2593}
2594
 2595/* svm_range_best_restore_location - decide the best fault restore location
 2596 * @prange: svm range structure
 2597 * @node: the kfd node on which the vm fault happened
 2598 * @gpuidx: output, the gpu index of the faulting GPU
 2599 *
 2600 * This is only called when xnack is on, to decide the best location to restore
 2601 * the range mapping after a GPU vm fault. The caller migrates the range if
 2602 * needed and then updates the GPU page table mapping to the best location.
 2603 *
 2604 * If the preferred location is accessible by the faulting GPU, use it.
 2605 * If the faulting GPU is in the range's ACCESSIBLE bitmap, best_loc is that GPU.
 2606 * If the faulting GPU is in the range's ACCESSIBLE_IN_PLACE bitmap, then:
 2607 *   if the range's actual location is the CPU, best_loc is the CPU;
 2608 *   if the faulting GPU is in the same XGMI hive as the actual location GPU,
 2609 *   best_loc is the actual location.
 2610 * Otherwise the faulting GPU has no access and best_loc is -1.
 2611 *
 2612 * Return:
 2613 * -1 if the faulting GPU has no access to the range
 2614 *  0 for CPU, or the GPU id of the best restore location
2615 */
2616static int32_t
2617svm_range_best_restore_location(struct svm_range *prange,
2618 struct kfd_node *node,
2619 int32_t *gpuidx)
2620{
2621 struct kfd_node *bo_node, *preferred_node;
2622 struct kfd_process *p;
2623 uint32_t gpuid;
2624 int r;
2625
2626 p = container_of(prange->svms, struct kfd_process, svms);
2627
2628 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2629 if (r < 0) {
2630 pr_debug("failed to get gpuid from kgd\n");
2631 return -1;
2632 }
2633
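	/* On APUs the range is always restored in system memory (best_loc 0). */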
2634 if (node->adev->flags & AMD_IS_APU)
2635 return 0;
2636
2637 if (prange->preferred_loc == gpuid ||
2638 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2639 return prange->preferred_loc;
2640 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2641 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2642 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2643 return prange->preferred_loc;
2644 /* fall through */
2645 }
2646
2647 if (test_bit(*gpuidx, prange->bitmap_access))
2648 return gpuid;
2649
2650 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2651 if (!prange->actual_loc)
2652 return 0;
2653
2654 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2655 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2656 return prange->actual_loc;
2657 else
2658 return 0;
2659 }
2660
2661 return -1;
2662}
2663
2664static int
2665svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2666 unsigned long *start, unsigned long *last,
2667 bool *is_heap_stack)
2668{
2669 struct vm_area_struct *vma;
2670 struct interval_tree_node *node;
2671 struct rb_node *rb_node;
2672 unsigned long start_limit, end_limit;
2673
2674 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2675 if (!vma) {
2676 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2677 return -EFAULT;
2678 }
2679
2680 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2681
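	/* Start from a 512-page (2MB) aligned window around the fault address,
	 * clipped to the containing VMA; the limits are tightened below so the new
	 * range does not overlap neighbouring svm ranges.
	 */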
2682 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2683 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2684 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2685 (unsigned long)ALIGN(addr + 1, 2UL << 8));
2686 /* First range that starts after the fault address */
2687 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2688 if (node) {
2689 end_limit = min(end_limit, node->start);
2690 /* Last range that ends before the fault address */
2691 rb_node = rb_prev(&node->rb);
2692 } else {
2693 /* Last range must end before addr because
2694 * there was no range after addr
2695 */
2696 rb_node = rb_last(&p->svms.objects.rb_root);
2697 }
2698 if (rb_node) {
2699 node = container_of(rb_node, struct interval_tree_node, rb);
2700 if (node->last >= addr) {
2701 WARN(1, "Overlap with prev node and page fault addr\n");
2702 return -EFAULT;
2703 }
2704 start_limit = max(start_limit, node->last + 1);
2705 }
2706
2707 *start = start_limit;
2708 *last = end_limit - 1;
2709
2710 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2711 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2712 *start, *last, *is_heap_stack);
2713
2714 return 0;
2715}
2716
2717static int
2718svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2719 uint64_t *bo_s, uint64_t *bo_l)
2720{
2721 struct amdgpu_bo_va_mapping *mapping;
2722 struct interval_tree_node *node;
2723 struct amdgpu_bo *bo = NULL;
2724 unsigned long userptr;
2725 uint32_t i;
2726 int r;
2727
2728 for (i = 0; i < p->n_pdds; i++) {
2729 struct amdgpu_vm *vm;
2730
2731 if (!p->pdds[i]->drm_priv)
2732 continue;
2733
2734 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2735 r = amdgpu_bo_reserve(vm->root.bo, false);
2736 if (r)
2737 return r;
2738
2739 /* Check userptr by searching entire vm->va interval tree */
2740 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2741 while (node) {
2742 mapping = container_of((struct rb_node *)node,
2743 struct amdgpu_bo_va_mapping, rb);
2744 bo = mapping->bo_va->base.bo;
2745
2746 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2747 start << PAGE_SHIFT,
2748 last << PAGE_SHIFT,
2749 &userptr)) {
2750 node = interval_tree_iter_next(node, 0, ~0ULL);
2751 continue;
2752 }
2753
2754 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2755 start, last);
2756 if (bo_s && bo_l) {
2757 *bo_s = userptr >> PAGE_SHIFT;
2758 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2759 }
2760 amdgpu_bo_unreserve(vm->root.bo);
2761 return -EADDRINUSE;
2762 }
2763 amdgpu_bo_unreserve(vm->root.bo);
2764 }
2765 return 0;
2766}
2767
2768static struct
2769svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2770 struct kfd_process *p,
2771 struct mm_struct *mm,
2772 int64_t addr)
2773{
2774 struct svm_range *prange = NULL;
2775 unsigned long start, last;
2776 uint32_t gpuid, gpuidx;
2777 bool is_heap_stack;
2778 uint64_t bo_s = 0;
2779 uint64_t bo_l = 0;
2780 int r;
2781
2782 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2783 &is_heap_stack))
2784 return NULL;
2785
2786 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2787 if (r != -EADDRINUSE)
2788 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2789
2790 if (r == -EADDRINUSE) {
2791 if (addr >= bo_s && addr <= bo_l)
2792 return NULL;
2793
 2794		/* The candidate range overlaps an existing mapping; fall back to a one-page svm range at the fault address */
2795 start = addr;
2796 last = addr;
2797 }
2798
2799 prange = svm_range_new(&p->svms, start, last, true);
2800 if (!prange) {
2801 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2802 return NULL;
2803 }
2804 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2805 pr_debug("failed to get gpuid from kgd\n");
2806 svm_range_free(prange, true);
2807 return NULL;
2808 }
2809
2810 if (is_heap_stack)
2811 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2812
2813 svm_range_add_to_svms(prange);
2814 svm_range_add_notifier_locked(mm, prange);
2815
2816 return prange;
2817}
2818
2819/* svm_range_skip_recover - decide if prange can be recovered
2820 * @prange: svm range structure
2821 *
 2822 * The GPU vm retry fault handler skips recovering the range in these cases:
 2823 * 1. prange is on the deferred list to be removed after unmap; this is a stale
 2824 *    fault, and the deferred list work drains it before freeing the prange.
 2825 * 2. prange is on the deferred list to add an interval notifier after a split, or
 2826 * 3. prange is a child range split from a parent prange; it is recovered later,
 2827 *    after its interval notifier is added.
2828 *
2829 * Return: true to skip recover, false to recover
2830 */
2831static bool svm_range_skip_recover(struct svm_range *prange)
2832{
2833 struct svm_range_list *svms = prange->svms;
2834
2835 spin_lock(&svms->deferred_list_lock);
2836 if (list_empty(&prange->deferred_list) &&
2837 list_empty(&prange->child_list)) {
2838 spin_unlock(&svms->deferred_list_lock);
2839 return false;
2840 }
2841 spin_unlock(&svms->deferred_list_lock);
2842
2843 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2844 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2845 svms, prange, prange->start, prange->last);
2846 return true;
2847 }
2848 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2849 prange->work_item.op == SVM_OP_ADD_RANGE) {
2850 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2851 svms, prange, prange->start, prange->last);
2852 return true;
2853 }
2854 return false;
2855}
2856
2857static void
2858svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2859 int32_t gpuidx)
2860{
2861 struct kfd_process_device *pdd;
2862
2863 /* fault is on different page of same range
2864 * or fault is skipped to recover later
2865 * or fault is on invalid virtual address
2866 */
2867 if (gpuidx == MAX_GPU_INSTANCE) {
2868 uint32_t gpuid;
2869 int r;
2870
2871 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2872 if (r < 0)
2873 return;
2874 }
2875
 2876	/* The fault is recovered, or it cannot be recovered because the GPU
 2877	 * has no access to the range.
 2878	 */
2879 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2880 if (pdd)
2881 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2882}
2883
2884static bool
2885svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2886{
2887 unsigned long requested = VM_READ;
2888
2889 if (write_fault)
2890 requested |= VM_WRITE;
2891
2892 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2893 vma->vm_flags);
2894 return (vma->vm_flags & requested) == requested;
2895}
2896
2897int
2898svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2899 uint32_t vmid, uint32_t node_id,
2900 uint64_t addr, bool write_fault)
2901{
2902 unsigned long start, last, size;
2903 struct mm_struct *mm = NULL;
2904 struct svm_range_list *svms;
2905 struct svm_range *prange;
2906 struct kfd_process *p;
2907 ktime_t timestamp = ktime_get_boottime();
2908 struct kfd_node *node;
2909 int32_t best_loc;
2910 int32_t gpuidx = MAX_GPU_INSTANCE;
2911 bool write_locked = false;
2912 struct vm_area_struct *vma;
2913 bool migration = false;
2914 int r = 0;
2915
2916 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2917 pr_debug("device does not support SVM\n");
2918 return -EFAULT;
2919 }
2920
2921 p = kfd_lookup_process_by_pasid(pasid);
2922 if (!p) {
2923 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2924 return 0;
2925 }
2926 svms = &p->svms;
2927
2928 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2929
2930 if (atomic_read(&svms->drain_pagefaults)) {
2931 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2932 r = 0;
2933 goto out;
2934 }
2935
2936 if (!p->xnack_enabled) {
2937 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2938 r = -EFAULT;
2939 goto out;
2940 }
2941
2942 /* p->lead_thread is available as kfd_process_wq_release flush the work
2943 * before releasing task ref.
2944 */
2945 mm = get_task_mm(p->lead_thread);
2946 if (!mm) {
2947 pr_debug("svms 0x%p failed to get mm\n", svms);
2948 r = 0;
2949 goto out;
2950 }
2951
2952 node = kfd_node_by_irq_ids(adev, node_id, vmid);
2953 if (!node) {
2954 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2955 vmid);
2956 r = -EFAULT;
2957 goto out;
2958 }
2959 mmap_read_lock(mm);
2960retry_write_locked:
2961 mutex_lock(&svms->lock);
2962 prange = svm_range_from_addr(svms, addr, NULL);
2963 if (!prange) {
2964 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2965 svms, addr);
2966 if (!write_locked) {
2967 /* Need the write lock to create new range with MMU notifier.
2968 * Also flush pending deferred work to make sure the interval
2969 * tree is up to date before we add a new range
2970 */
2971 mutex_unlock(&svms->lock);
2972 mmap_read_unlock(mm);
2973 mmap_write_lock(mm);
2974 write_locked = true;
2975 goto retry_write_locked;
2976 }
2977 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2978 if (!prange) {
2979 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2980 svms, addr);
2981 mmap_write_downgrade(mm);
2982 r = -EFAULT;
2983 goto out_unlock_svms;
2984 }
2985 }
2986 if (write_locked)
2987 mmap_write_downgrade(mm);
2988
2989 mutex_lock(&prange->migrate_mutex);
2990
2991 if (svm_range_skip_recover(prange)) {
2992 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
2993 r = 0;
2994 goto out_unlock_range;
2995 }
2996
2997 /* skip duplicate vm fault on different pages of same range */
2998 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2999 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
 3000		pr_debug("svms 0x%p [0x%lx 0x%lx] already restored\n",
3001 svms, prange->start, prange->last);
3002 r = 0;
3003 goto out_unlock_range;
3004 }
3005
 3006	/* __do_munmap removed the VMA; return success as we are handling a stale
 3007	 * retry fault.
3008 */
3009 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3010 if (!vma) {
3011 pr_debug("address 0x%llx VMA is removed\n", addr);
3012 r = 0;
3013 goto out_unlock_range;
3014 }
3015
3016 if (!svm_fault_allowed(vma, write_fault)) {
3017 pr_debug("fault addr 0x%llx no %s permission\n", addr,
3018 write_fault ? "write" : "read");
3019 r = -EPERM;
3020 goto out_unlock_range;
3021 }
3022
3023 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3024 if (best_loc == -1) {
3025 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3026 svms, prange->start, prange->last);
3027 r = -EACCES;
3028 goto out_unlock_range;
3029 }
3030
3031 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3032 svms, prange->start, prange->last, best_loc,
3033 prange->actual_loc);
3034
3035 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3036 write_fault, timestamp);
3037
3038 /* Align migration range start and size to granularity size */
3039 size = 1UL << prange->granularity;
3040 start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3041 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
3042 if (prange->actual_loc != 0 || best_loc != 0) {
3043 migration = true;
3044
3045 if (best_loc) {
3046 r = svm_migrate_to_vram(prange, best_loc, start, last,
3047 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3048 if (r) {
3049 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3050 r, addr);
3051 /* Fallback to system memory if migration to
3052 * VRAM failed
3053 */
3054 if (prange->actual_loc && prange->actual_loc != best_loc)
3055 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3056 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3057 else
3058 r = 0;
3059 }
3060 } else {
3061 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3062 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3063 }
3064 if (r) {
3065 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3066 r, svms, start, last);
3067 goto out_unlock_range;
3068 }
3069 }
3070
3071 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3072 false, false);
3073 if (r)
3074 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3075 r, svms, start, last);
3076
3077 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3078 migration);
3079
3080out_unlock_range:
3081 mutex_unlock(&prange->migrate_mutex);
3082out_unlock_svms:
3083 mutex_unlock(&svms->lock);
3084 mmap_read_unlock(mm);
3085
3086 svm_range_count_fault(node, p, gpuidx);
3087
3088 mmput(mm);
3089out:
3090 kfd_unref_process(p);
3091
3092 if (r == -EAGAIN) {
3093 pr_debug("recover vm fault later\n");
3094 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3095 r = 0;
3096 }
3097 return r;
3098}
3099
3100int
3101svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3102{
3103 struct svm_range *prange, *pchild;
3104 uint64_t reserved_size = 0;
3105 uint64_t size;
3106 int r = 0;
3107
3108 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3109
3110 mutex_lock(&p->svms.lock);
3111
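	/* Walk every range and child range: when switching xnack off, account its
	 * system memory against the KFD memory limit; when switching xnack on,
	 * release that accounting. On failure, roll back what was reserved so far.
	 */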
3112 list_for_each_entry(prange, &p->svms.list, list) {
3113 svm_range_lock(prange);
3114 list_for_each_entry(pchild, &prange->child_list, child_list) {
3115 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3116 if (xnack_enabled) {
3117 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3118 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3119 } else {
3120 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3121 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3122 if (r)
3123 goto out_unlock;
3124 reserved_size += size;
3125 }
3126 }
3127
3128 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3129 if (xnack_enabled) {
3130 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3131 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3132 } else {
3133 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3134 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3135 if (r)
3136 goto out_unlock;
3137 reserved_size += size;
3138 }
3139out_unlock:
3140 svm_range_unlock(prange);
3141 if (r)
3142 break;
3143 }
3144
3145 if (r)
3146 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3147 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3148 else
 3149		/* Changing the xnack mode must be done inside the svms lock, to avoid
 3150		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3151 */
3152 p->xnack_enabled = xnack_enabled;
3153
3154 mutex_unlock(&p->svms.lock);
3155 return r;
3156}
3157
3158void svm_range_list_fini(struct kfd_process *p)
3159{
3160 struct svm_range *prange;
3161 struct svm_range *next;
3162
3163 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3164
3165 cancel_delayed_work_sync(&p->svms.restore_work);
3166
3167 /* Ensure list work is finished before process is destroyed */
3168 flush_work(&p->svms.deferred_list_work);
3169
3170 /*
 3171	 * Ensure no retry fault comes in afterwards, as the page fault handler will
 3172	 * not be able to find the kfd process and take the mm lock to recover it.
3173 */
3174 atomic_inc(&p->svms.drain_pagefaults);
3175 svm_range_drain_retry_fault(&p->svms);
3176
3177 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3178 svm_range_unlink(prange);
3179 svm_range_remove_notifier(prange);
3180 svm_range_free(prange, true);
3181 }
3182
3183 mutex_destroy(&p->svms.lock);
3184
3185 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3186}
3187
3188int svm_range_list_init(struct kfd_process *p)
3189{
3190 struct svm_range_list *svms = &p->svms;
3191 int i;
3192
3193 svms->objects = RB_ROOT_CACHED;
3194 mutex_init(&svms->lock);
3195 INIT_LIST_HEAD(&svms->list);
3196 atomic_set(&svms->evicted_ranges, 0);
3197 atomic_set(&svms->drain_pagefaults, 0);
3198 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3199 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3200 INIT_LIST_HEAD(&svms->deferred_range_list);
3201 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3202 spin_lock_init(&svms->deferred_list_lock);
3203
3204 for (i = 0; i < p->n_pdds; i++)
3205 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3206 bitmap_set(svms->bitmap_supported, i, 1);
3207
3208 return 0;
3209}
3210
3211/**
3212 * svm_range_check_vm - check if virtual address range mapped already
3213 * @p: current kfd_process
3214 * @start: range start address, in pages
3215 * @last: range last address, in pages
3216 * @bo_s: mapping start address in pages if address range already mapped
3217 * @bo_l: mapping last address in pages if address range already mapped
3218 *
3219 * The purpose is to avoid virtual address ranges already allocated by
3220 * kfd_ioctl_alloc_memory_of_gpu ioctl.
 3221 * It checks each pdd in the kfd_process.
3222 *
3223 * Context: Process context
3224 *
3225 * Return 0 - OK, if the range is not mapped.
3226 * Otherwise error code:
3227 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3228 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3229 * a signal. Release all buffer reservations and return to user-space.
3230 */
3231static int
3232svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3233 uint64_t *bo_s, uint64_t *bo_l)
3234{
3235 struct amdgpu_bo_va_mapping *mapping;
3236 struct interval_tree_node *node;
3237 uint32_t i;
3238 int r;
3239
3240 for (i = 0; i < p->n_pdds; i++) {
3241 struct amdgpu_vm *vm;
3242
3243 if (!p->pdds[i]->drm_priv)
3244 continue;
3245
3246 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3247 r = amdgpu_bo_reserve(vm->root.bo, false);
3248 if (r)
3249 return r;
3250
3251 node = interval_tree_iter_first(&vm->va, start, last);
3252 if (node) {
3253 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3254 start, last);
3255 mapping = container_of((struct rb_node *)node,
3256 struct amdgpu_bo_va_mapping, rb);
3257 if (bo_s && bo_l) {
3258 *bo_s = mapping->start;
3259 *bo_l = mapping->last;
3260 }
3261 amdgpu_bo_unreserve(vm->root.bo);
3262 return -EADDRINUSE;
3263 }
3264 amdgpu_bo_unreserve(vm->root.bo);
3265 }
3266
3267 return 0;
3268}
3269
3270/**
3271 * svm_range_is_valid - check if virtual address range is valid
3272 * @p: current kfd_process
3273 * @start: range start address, in pages
3274 * @size: range size, in pages
3275 *
3276 * Valid virtual address range means it belongs to one or more VMAs
3277 *
3278 * Context: Process context
3279 *
3280 * Return:
3281 * 0 - OK, otherwise error code
3282 */
3283static int
3284svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3285{
3286 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3287 struct vm_area_struct *vma;
3288 unsigned long end;
3289 unsigned long start_unchg = start;
3290
3291 start <<= PAGE_SHIFT;
3292 end = start + (size << PAGE_SHIFT);
3293 do {
3294 vma = vma_lookup(p->mm, start);
3295 if (!vma || (vma->vm_flags & device_vma))
3296 return -EFAULT;
3297 start = min(end, vma->vm_end);
3298 } while (start < end);
3299
3300 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3301 NULL);
3302}
3303
3304/**
3305 * svm_range_best_prefetch_location - decide the best prefetch location
3306 * @prange: svm range structure
3307 *
3308 * For xnack off:
 3309 * If the range maps to a single GPU, the best prefetch location is
 3310 * prefetch_loc, which can be the CPU or a GPU.
 3311 *
 3312 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best
 3313 * prefetch location is the prefetch_loc GPU only if those GPUs are in the same
 3314 * XGMI hive; otherwise it is always the CPU, because a GPU cannot coherently
 3315 * map another GPU's VRAM even with a large-BAR PCIe connection.
 3316 *
 3317 * For xnack on:
 3318 * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
 3319 * location is prefetch_loc; access from another GPU generates a vm fault and
 3320 * triggers migration.
 3321 *
 3322 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
 3323 * is the prefetch_loc GPU only if they are all in the same XGMI hive; otherwise it is always the CPU.
3324 *
3325 * Context: Process context
3326 *
3327 * Return:
3328 * 0 for CPU or GPU id
3329 */
3330static uint32_t
3331svm_range_best_prefetch_location(struct svm_range *prange)
3332{
3333 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3334 uint32_t best_loc = prange->prefetch_loc;
3335 struct kfd_process_device *pdd;
3336 struct kfd_node *bo_node;
3337 struct kfd_process *p;
3338 uint32_t gpuidx;
3339
3340 p = container_of(prange->svms, struct kfd_process, svms);
3341
3342 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3343 goto out;
3344
3345 bo_node = svm_range_get_node_by_id(prange, best_loc);
3346 if (!bo_node) {
 3347		WARN_ONCE(1, "failed to get valid kfd node at id 0x%x\n", best_loc);
3348 best_loc = 0;
3349 goto out;
3350 }
3351
3352 if (bo_node->adev->flags & AMD_IS_APU) {
3353 best_loc = 0;
3354 goto out;
3355 }
3356
3357 if (p->xnack_enabled)
3358 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3359 else
3360 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3361 MAX_GPU_INSTANCE);
3362
3363 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3364 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3365 if (!pdd) {
3366 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3367 continue;
3368 }
3369
3370 if (pdd->dev->adev == bo_node->adev)
3371 continue;
3372
3373 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3374 best_loc = 0;
3375 break;
3376 }
3377 }
3378
3379out:
3380 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3381 p->xnack_enabled, &p->svms, prange->start, prange->last,
3382 best_loc);
3383
3384 return best_loc;
3385}
3386
3387/* svm_range_trigger_migration - start page migration if prefetch loc changed
3388 * @mm: current process mm_struct
3389 * @prange: svm range structure
3390 * @migrated: output, true if migration is triggered
3391 *
 3392 * If the range's prefetch_loc is a GPU and its actual loc is cpu (0), migrate
 3393 * the range from ram to vram.
 3394 * If the range's prefetch_loc is cpu (0) and its actual loc is a GPU, migrate
 3395 * the range from vram to ram.
 3396 *
 3397 * If GPU vm fault retry is not enabled, migration interacts with the MMU
 3398 * notifier and the restore work:
 3399 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
 3400 *    svm_range_evict stops all queues and schedules the restore work
 3401 * 2. svm_range_restore_work waits for the migration to finish via
 3402 *    a. svm_range_validate_vram taking prange->migrate_mutex
 3403 *    b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler to return
 3404 * 3. the restore work updates the GPU mappings and resumes all queues.
3405 *
3406 * Context: Process context
3407 *
3408 * Return:
3409 * 0 - OK, otherwise - error code of migration
3410 */
3411static int
3412svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3413 bool *migrated)
3414{
3415 uint32_t best_loc;
3416 int r = 0;
3417
3418 *migrated = false;
3419 best_loc = svm_range_best_prefetch_location(prange);
3420
 3421	/* When best_loc is a gpu node and the same as prange->actual_loc,
 3422	 * we still need to do the migration, as prange->actual_loc != 0 does
 3423	 * not mean all pages in prange are vram. hmm migrate will pick
 3424	 * up the right pages during migration.
3425 */
3426 if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3427 (best_loc == 0 && prange->actual_loc == 0))
3428 return 0;
3429
3430 if (!best_loc) {
3431 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3432 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3433 *migrated = !r;
3434 return r;
3435 }
3436
3437 r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3438 mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3439 *migrated = !r;
3440
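	/* A failed prefetch migration to VRAM is not propagated as an error here;
	 * it only leaves *migrated false.
	 */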
3441 return 0;
3442}
3443
3444int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3445{
3446 /* Dereferencing fence->svm_bo is safe here because the fence hasn't
3447 * signaled yet and we're under the protection of the fence->lock.
3448 * After the fence is signaled in svm_range_bo_release, we cannot get
3449 * here any more.
3450 *
3451 * Reference is dropped in svm_range_evict_svm_bo_worker.
3452 */
3453 if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3454 WRITE_ONCE(fence->svm_bo->evicting, 1);
3455 schedule_work(&fence->svm_bo->eviction_work);
3456 }
3457
3458 return 0;
3459}
3460
3461static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3462{
3463 struct svm_range_bo *svm_bo;
3464 struct mm_struct *mm;
3465 int r = 0;
3466
3467 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3468
3469 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3470 mm = svm_bo->eviction_fence->mm;
3471 } else {
3472 svm_range_bo_unref(svm_bo);
3473 return;
3474 }
3475
3476 mmap_read_lock(mm);
3477 spin_lock(&svm_bo->list_lock);
3478 while (!list_empty(&svm_bo->range_list) && !r) {
3479 struct svm_range *prange =
3480 list_first_entry(&svm_bo->range_list,
3481 struct svm_range, svm_bo_list);
3482 int retries = 3;
3483
3484 list_del_init(&prange->svm_bo_list);
3485 spin_unlock(&svm_bo->list_lock);
3486
3487 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3488 prange->start, prange->last);
3489
3490 mutex_lock(&prange->migrate_mutex);
3491 do {
3492 /* migrate all vram pages in this prange to sys ram
3493 * after that prange->actual_loc should be zero
3494 */
3495 r = svm_migrate_vram_to_ram(prange, mm,
3496 prange->start, prange->last,
3497 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3498 } while (!r && prange->actual_loc && --retries);
3499
3500 if (!r && prange->actual_loc)
3501 pr_info_once("Migration failed during eviction");
3502
3503 if (!prange->actual_loc) {
3504 mutex_lock(&prange->lock);
3505 prange->svm_bo = NULL;
3506 mutex_unlock(&prange->lock);
3507 }
3508 mutex_unlock(&prange->migrate_mutex);
3509
3510 spin_lock(&svm_bo->list_lock);
3511 }
3512 spin_unlock(&svm_bo->list_lock);
3513 mmap_read_unlock(mm);
3514 mmput(mm);
3515
3516 dma_fence_signal(&svm_bo->eviction_fence->base);
3517
3518 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3519 * has been called in svm_migrate_vram_to_ram
3520 */
3521 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3522 svm_range_bo_unref(svm_bo);
3523}
3524
3525static int
3526svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3527 uint64_t start, uint64_t size, uint32_t nattr,
3528 struct kfd_ioctl_svm_attribute *attrs)
3529{
3530 struct amdkfd_process_info *process_info = p->kgd_process_info;
3531 struct list_head update_list;
3532 struct list_head insert_list;
3533 struct list_head remove_list;
3534 struct list_head remap_list;
3535 struct svm_range_list *svms;
3536 struct svm_range *prange;
3537 struct svm_range *next;
3538 bool update_mapping = false;
3539 bool flush_tlb;
3540 int r, ret = 0;
3541
3542 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3543 p->pasid, &p->svms, start, start + size - 1, size);
3544
3545 r = svm_range_check_attr(p, nattr, attrs);
3546 if (r)
3547 return r;
3548
3549 svms = &p->svms;
3550
3551 mutex_lock(&process_info->lock);
3552
3553 svm_range_list_lock_and_flush_work(svms, mm);
3554
3555 r = svm_range_is_valid(p, start, size);
3556 if (r) {
3557 pr_debug("invalid range r=%d\n", r);
3558 mmap_write_unlock(mm);
3559 goto out;
3560 }
3561
3562 mutex_lock(&svms->lock);
3563
3564 /* Add new range and split existing ranges as needed */
3565 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3566 &insert_list, &remove_list, &remap_list);
3567 if (r) {
3568 mutex_unlock(&svms->lock);
3569 mmap_write_unlock(mm);
3570 goto out;
3571 }
3572 /* Apply changes as a transaction */
3573 list_for_each_entry_safe(prange, next, &insert_list, list) {
3574 svm_range_add_to_svms(prange);
3575 svm_range_add_notifier_locked(mm, prange);
3576 }
3577 list_for_each_entry(prange, &update_list, update_list) {
3578 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3579 /* TODO: unmap ranges from GPU that lost access */
3580 }
3581 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3582 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3583 prange->svms, prange, prange->start,
3584 prange->last);
3585 svm_range_unlink(prange);
3586 svm_range_remove_notifier(prange);
3587 svm_range_free(prange, false);
3588 }
3589
3590 mmap_write_downgrade(mm);
3591 /* Trigger migrations and revalidate and map to GPUs as needed. If
3592 * this fails we may be left with partially completed actions. There
3593 * is no clean way of rolling back to the previous state in such a
3594 * case because the rollback wouldn't be guaranteed to work either.
3595 */
3596 list_for_each_entry(prange, &update_list, update_list) {
3597 bool migrated;
3598
3599 mutex_lock(&prange->migrate_mutex);
3600
3601 r = svm_range_trigger_migration(mm, prange, &migrated);
3602 if (r)
3603 goto out_unlock_range;
3604
3605 if (migrated && (!p->xnack_enabled ||
3606 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3607 prange->mapped_to_gpu) {
3608 pr_debug("restore_work will update mappings of GPUs\n");
3609 mutex_unlock(&prange->migrate_mutex);
3610 continue;
3611 }
3612
3613 if (!migrated && !update_mapping) {
3614 mutex_unlock(&prange->migrate_mutex);
3615 continue;
3616 }
3617
3618 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3619
3620 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3621 MAX_GPU_INSTANCE, true, true, flush_tlb);
3622 if (r)
3623 pr_debug("failed %d to map svm range\n", r);
3624
3625out_unlock_range:
3626 mutex_unlock(&prange->migrate_mutex);
3627 if (r)
3628 ret = r;
3629 }
3630
3631 list_for_each_entry(prange, &remap_list, update_list) {
3632 pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3633 prange, prange->start, prange->last);
3634 mutex_lock(&prange->migrate_mutex);
3635 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3636 MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3637 if (r)
3638 pr_debug("failed %d on remap svm range\n", r);
3639 mutex_unlock(&prange->migrate_mutex);
3640 if (r)
3641 ret = r;
3642 }
3643
3644 dynamic_svm_range_dump(svms);
3645
3646 mutex_unlock(&svms->lock);
3647 mmap_read_unlock(mm);
3648out:
3649 mutex_unlock(&process_info->lock);
3650
3651 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3652 &p->svms, start, start + size - 1, r);
3653
3654 return ret ? ret : r;
3655}
3656
3657static int
3658svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3659 uint64_t start, uint64_t size, uint32_t nattr,
3660 struct kfd_ioctl_svm_attribute *attrs)
3661{
3662 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3663 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3664 bool get_preferred_loc = false;
3665 bool get_prefetch_loc = false;
3666 bool get_granularity = false;
3667 bool get_accessible = false;
3668 bool get_flags = false;
3669 uint64_t last = start + size - 1UL;
3670 uint8_t granularity = 0xff;
3671 struct interval_tree_node *node;
3672 struct svm_range_list *svms;
3673 struct svm_range *prange;
3674 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3675 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3676 uint32_t flags_and = 0xffffffff;
3677 uint32_t flags_or = 0;
3678 int gpuidx;
3679 uint32_t i;
3680 int r = 0;
3681
3682 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3683 start + size - 1, nattr);
3684
3685 /* Flush pending deferred work to avoid racing with deferred actions from
3686 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3687 * can still race with get_attr because we don't hold the mmap lock. But that
3688 * would be a race condition in the application anyway, and undefined
3689 * behaviour is acceptable in that case.
3690 */
3691 flush_work(&p->svms.deferred_list_work);
3692
3693 mmap_read_lock(mm);
3694 r = svm_range_is_valid(p, start, size);
3695 mmap_read_unlock(mm);
3696 if (r) {
3697 pr_debug("invalid range r=%d\n", r);
3698 return r;
3699 }
3700
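/* First pass over the attribute list: note which attribute types were
 * requested and reject types that cannot be queried.
 */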
3701 for (i = 0; i < nattr; i++) {
3702 switch (attrs[i].type) {
3703 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3704 get_preferred_loc = true;
3705 break;
3706 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3707 get_prefetch_loc = true;
3708 break;
3709 case KFD_IOCTL_SVM_ATTR_ACCESS:
3710 get_accessible = true;
3711 break;
3712 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3713 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3714 get_flags = true;
3715 break;
3716 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3717 get_granularity = true;
3718 break;
3719 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3720 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3721 fallthrough;
3722 default:
3723 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3724 return -EINVAL;
3725 }
3726 }
3727
3728 svms = &p->svms;
3729
3730 mutex_lock(&svms->lock);
3731
3732 node = interval_tree_iter_first(&svms->objects, start, last);
3733 if (!node) {
3734 pr_debug("range attrs not found return default values\n");
3735 svm_range_set_default_attributes(&location, &prefetch_loc,
3736 &granularity, &flags_and);
3737 flags_or = flags_and;
3738 if (p->xnack_enabled)
3739 bitmap_copy(bitmap_access, svms->bitmap_supported,
3740 MAX_GPU_INSTANCE);
3741 else
3742 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3743 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3744 goto fill_values;
3745 }
3746 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3747 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3748
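/* Walk every range overlapping [start, last] and aggregate: locations are
 * kept only while all ranges agree, access bitmaps are intersected, flags
 * are combined with AND and OR, and the smallest granularity wins.
 */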
3749 while (node) {
3750 struct interval_tree_node *next;
3751
3752 prange = container_of(node, struct svm_range, it_node);
3753 next = interval_tree_iter_next(node, start, last);
3754
3755 if (get_preferred_loc) {
3756 if (prange->preferred_loc ==
3757 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3758 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3759 location != prange->preferred_loc)) {
3760 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3761 get_preferred_loc = false;
3762 } else {
3763 location = prange->preferred_loc;
3764 }
3765 }
3766 if (get_prefetch_loc) {
3767 if (prange->prefetch_loc ==
3768 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3769 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3770 prefetch_loc != prange->prefetch_loc)) {
3771 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3772 get_prefetch_loc = false;
3773 } else {
3774 prefetch_loc = prange->prefetch_loc;
3775 }
3776 }
3777 if (get_accessible) {
3778 bitmap_and(bitmap_access, bitmap_access,
3779 prange->bitmap_access, MAX_GPU_INSTANCE);
3780 bitmap_and(bitmap_aip, bitmap_aip,
3781 prange->bitmap_aip, MAX_GPU_INSTANCE);
3782 }
3783 if (get_flags) {
3784 flags_and &= prange->flags;
3785 flags_or |= prange->flags;
3786 }
3787
3788 if (get_granularity && prange->granularity < granularity)
3789 granularity = prange->granularity;
3790
3791 node = next;
3792 }
3793fill_values:
3794 mutex_unlock(&svms->lock);
3795
3796 for (i = 0; i < nattr; i++) {
3797 switch (attrs[i].type) {
3798 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3799 attrs[i].value = location;
3800 break;
3801 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3802 attrs[i].value = prefetch_loc;
3803 break;
3804 case KFD_IOCTL_SVM_ATTR_ACCESS:
3805 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3806 attrs[i].value);
3807 if (gpuidx < 0) {
3808 pr_debug("invalid gpuid %x\n", attrs[i].value);
3809 return -EINVAL;
3810 }
3811 if (test_bit(gpuidx, bitmap_access))
3812 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3813 else if (test_bit(gpuidx, bitmap_aip))
3814 attrs[i].type =
3815 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3816 else
3817 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3818 break;
3819 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3820 attrs[i].value = flags_and;
3821 break;
3822 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3823 attrs[i].value = ~flags_or;
3824 break;
3825 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3826 attrs[i].value = (uint32_t)granularity;
3827 break;
3828 }
3829 }
3830
3831 return 0;
3832}
3833
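/**
 * kfd_criu_resume_svm - re-apply checkpointed SVM range attributes
 * @p: the process being resumed after a CRIU restore
 *
 * Walk the metadata queued by kfd_criu_restore_svm and apply the saved
 * attributes of each range with svm_range_set_attr. The metadata entries
 * are freed before returning, whether or not the restore succeeded.
 *
 * Return: 0 on success, negative errno on failure.
 */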
3834int kfd_criu_resume_svm(struct kfd_process *p)
3835{
3836 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3837 int nattr_common = 4, nattr_accessibility = 1;
3838 struct criu_svm_metadata *criu_svm_md = NULL;
3839 struct svm_range_list *svms = &p->svms;
3840 struct criu_svm_metadata *next = NULL;
3841 uint32_t set_flags = 0xffffffff;
3842 int i, j, num_attrs, ret = 0;
3843 uint64_t set_attr_size;
3844 struct mm_struct *mm;
3845
3846 if (list_empty(&svms->criu_svm_metadata_list)) {
3847 pr_debug("No SVM data from CRIU restore stage 2\n");
3848 return ret;
3849 }
3850
3851 mm = get_task_mm(p->lead_thread);
3852 if (!mm) {
3853 pr_err("failed to get mm for the target process\n");
3854 return -ESRCH;
3855 }
3856
3857 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3858
3859 i = j = 0;
3860 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3861 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3862 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3863
3864 for (j = 0; j < num_attrs; j++) {
3865 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3866 i, j, criu_svm_md->data.attrs[j].type,
3867 i, j, criu_svm_md->data.attrs[j].value);
3868 switch (criu_svm_md->data.attrs[j].type) {
3869 /* During the checkpoint operation, the query for the
3870 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3871 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
3872 * not used by the range that was checkpointed. Care
3873 * must be taken not to restore with such an invalid value,
3874 * otherwise the gpuidx value will be invalid and
3875 * set_attr would eventually fail, so replace those
3876 * entries with another dummy attribute such as
3877 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3878 */
3879 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3880 if (criu_svm_md->data.attrs[j].value ==
3881 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3882 criu_svm_md->data.attrs[j].type =
3883 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3884 criu_svm_md->data.attrs[j].value = 0;
3885 }
3886 break;
3887 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3888 set_flags = criu_svm_md->data.attrs[j].value;
3889 break;
3890 default:
3891 break;
3892 }
3893 }
3894
3895 /* CLR_FLAGS is not available via get_attr during checkpoint, but
3896 * it needs to be inserted before restoring the ranges, so allocate
3897 * extra space for it before calling set_attr.
3898 */
3899 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3900 (num_attrs + 1);
3901 set_attr_new = krealloc(set_attr, set_attr_size,
3902 GFP_KERNEL);
3903 if (!set_attr_new) {
3904 ret = -ENOMEM;
3905 goto exit;
3906 }
3907 set_attr = set_attr_new;
3908
3909 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3910 sizeof(struct kfd_ioctl_svm_attribute));
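/* Clearing the complement of the saved flags ensures that any flag that was
 * not set at checkpoint time is explicitly cleared on the restored range.
 */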
3911 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3912 set_attr[num_attrs].value = ~set_flags;
3913
3914 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3915 criu_svm_md->data.size, num_attrs + 1,
3916 set_attr);
3917 if (ret) {
3918 pr_err("CRIU: failed to set range attributes\n");
3919 goto exit;
3920 }
3921
3922 i++;
3923 }
3924exit:
3925 kfree(set_attr);
3926 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3927 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3928 criu_svm_md->data.start_addr);
3929 kfree(criu_svm_md);
3930 }
3931
3932 mmput(mm);
3933 return ret;
3934
3935}
3936
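/**
 * kfd_criu_restore_svm - read one checkpointed SVM range object
 * @p: the process being restored
 * @user_priv_ptr: user pointer to the CRIU private data blob
 * @priv_data_offset: read offset into the blob, advanced on success
 * @max_priv_data_size: total blob size, used for bounds checking
 *
 * Copy a single kfd_criu_svm_range_priv_data object from user space and
 * queue it on svms->criu_svm_metadata_list. The attributes are not applied
 * here; that is deferred to kfd_criu_resume_svm.
 *
 * Return: 0 on success, negative errno on failure.
 */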
3937int kfd_criu_restore_svm(struct kfd_process *p,
3938 uint8_t __user *user_priv_ptr,
3939 uint64_t *priv_data_offset,
3940 uint64_t max_priv_data_size)
3941{
3942 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3943 int nattr_common = 4, nattr_accessibility = 1;
3944 struct criu_svm_metadata *criu_svm_md = NULL;
3945 struct svm_range_list *svms = &p->svms;
3946 uint32_t num_devices;
3947 int ret = 0;
3948
3949 num_devices = p->n_pdds;
3950 /* Handle one SVM range object at a time. The number of GPUs is
3951 * assumed to be the same on the restore node; this must have been
3952 * checked earlier while evaluating the topology.
3953 */
3954
3955 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3956 (nattr_common + nattr_accessibility * num_devices);
3957 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3958
3959 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3960 svm_attrs_size;
3961
3962 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3963 if (!criu_svm_md) {
3964 pr_err("failed to allocate memory to store svm metadata\n");
3965 return -ENOMEM;
3966 }
3967 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3968 ret = -EINVAL;
3969 goto exit;
3970 }
3971
3972 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3973 svm_priv_data_size);
3974 if (ret) {
3975 ret = -EFAULT;
3976 goto exit;
3977 }
3978 *priv_data_offset += svm_priv_data_size;
3979
3980 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3981
3982 return 0;
3983
3984
3985exit:
3986 kfree(criu_svm_md);
3987 return ret;
3988}
3989
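/**
 * svm_range_get_info - report the checkpoint footprint of the SVM state
 * @p: the process being checkpointed
 * @num_svm_ranges: returns the number of SVM ranges in the process
 * @svm_priv_data_size: returns the total private data size needed to
 *                      checkpoint all ranges
 *
 * Return: 0 on success, negative errno on failure.
 */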
3990int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3991 uint64_t *svm_priv_data_size)
3992{
3993 uint64_t total_size, accessibility_size, common_attr_size;
3994 int nattr_common = 4, nattr_accessibility = 1;
3995 int num_devices = p->n_pdds;
3996 struct svm_range_list *svms;
3997 struct svm_range *prange;
3998 uint32_t count = 0;
3999
4000 *svm_priv_data_size = 0;
4001
4002 svms = &p->svms;
4003 if (!svms)
4004 return -EINVAL;
4005
4006 mutex_lock(&svms->lock);
4007 list_for_each_entry(prange, &svms->list, list) {
4008 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4009 prange, prange->start, prange->npages,
4010 prange->start + prange->npages - 1);
4011 count++;
4012 }
4013 mutex_unlock(&svms->lock);
4014
4015 *num_svm_ranges = count;
4016 /* Only the accessibility attributes need to be queried per GPU
4017 * individually; the remaining attributes apply to the entire process
4018 * regardless of the GPU node. Of those remaining attributes,
4019 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4020 *
4021 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4022 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4023 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4024 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4025 *
4026 * ** ACCESSIBILITY ATTRIBUTES **
4027 * (Considered as one, type is altered during query, value is gpuid)
4028 * KFD_IOCTL_SVM_ATTR_ACCESS
4029 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4030 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4031 */
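/* For example, on a node with 8 GPUs each range needs
 * sizeof(struct kfd_criu_svm_range_priv_data) plus
 * (4 common + 1 * 8 accessibility) = 12 struct kfd_ioctl_svm_attribute
 * entries.
 */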
4032 if (*num_svm_ranges > 0) {
4033 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4034 nattr_common;
4035 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4036 nattr_accessibility * num_devices;
4037
4038 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4039 common_attr_size + accessibility_size;
4040
4041 *svm_priv_data_size = *num_svm_ranges * total_size;
4042 }
4043
4044 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4045 *svm_priv_data_size);
4046 return 0;
4047}
4048
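/**
 * kfd_criu_checkpoint_svm - serialize all SVM ranges into CRIU private data
 * @p: the process being checkpointed
 * @user_priv_data: user buffer receiving the serialized objects
 * @priv_data_offset: write offset into the buffer, advanced per range
 *
 * For each SVM range, query the common attributes and one accessibility
 * attribute per GPU with svm_range_get_attr, then copy the resulting
 * kfd_criu_svm_range_priv_data object to user space.
 *
 * Return: 0 on success, negative errno on failure.
 */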
4049int kfd_criu_checkpoint_svm(struct kfd_process *p,
4050 uint8_t __user *user_priv_data,
4051 uint64_t *priv_data_offset)
4052{
4053 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4054 struct kfd_ioctl_svm_attribute *query_attr = NULL;
4055 uint64_t svm_priv_data_size, query_attr_size = 0;
4056 int index, nattr_common = 4, ret = 0;
4057 struct svm_range_list *svms;
4058 int num_devices = p->n_pdds;
4059 struct svm_range *prange;
4060 struct mm_struct *mm;
4061
4062 svms = &p->svms;
4063 if (!svms)
4064 return -EINVAL;
4065
4066 mm = get_task_mm(p->lead_thread);
4067 if (!mm) {
4068 pr_err("failed to get mm for the target process\n");
4069 return -ESRCH;
4070 }
4071
4072 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4073 (nattr_common + num_devices);
4074
4075 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4076 if (!query_attr) {
4077 ret = -ENOMEM;
4078 goto exit;
4079 }
4080
4081 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4082 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4083 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4084 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4085
4086 for (index = 0; index < num_devices; index++) {
4087 struct kfd_process_device *pdd = p->pdds[index];
4088
4089 query_attr[index + nattr_common].type =
4090 KFD_IOCTL_SVM_ATTR_ACCESS;
4091 query_attr[index + nattr_common].value = pdd->user_gpu_id;
4092 }
4093
4094 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4095
4096 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4097 if (!svm_priv) {
4098 ret = -ENOMEM;
4099 goto exit_query;
4100 }
4101
4102 index = 0;
4103 list_for_each_entry(prange, &svms->list, list) {
4104
4105 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4106 svm_priv->start_addr = prange->start;
4107 svm_priv->size = prange->npages;
4108 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4109 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4110 prange, prange->start, prange->npages,
4111 prange->start + prange->npages - 1,
4112 prange->npages * PAGE_SIZE);
4113
4114 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4115 svm_priv->size,
4116 (nattr_common + num_devices),
4117 svm_priv->attrs);
4118 if (ret) {
4119 pr_err("CRIU: failed to obtain range attributes\n");
4120 goto exit_priv;
4121 }
4122
4123 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4124 svm_priv_data_size)) {
4125 pr_err("Failed to copy svm priv to user\n");
4126 ret = -EFAULT;
4127 goto exit_priv;
4128 }
4129
4130 *priv_data_offset += svm_priv_data_size;
4131
4132 }
4133
4134
4135exit_priv:
4136 kfree(svm_priv);
4137exit_query:
4138 kfree(query_attr);
4139exit:
4140 mmput(mm);
4141 return ret;
4142}
4143
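/**
 * svm_ioctl - dispatch an AMDKFD SVM set/get attribute request
 * @p: the calling process
 * @op: KFD_IOCTL_SVM_OP_SET_ATTR or KFD_IOCTL_SVM_OP_GET_ATTR
 * @start: start address of the range in bytes
 * @size: size of the range in bytes
 * @nattrs: number of attributes in @attrs
 * @attrs: attribute array
 *
 * Convert @start and @size from bytes to pages and forward the request to
 * svm_range_set_attr or svm_range_get_attr.
 *
 * Return: 0 on success, negative errno on failure.
 */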
4144int
4145svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4146 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4147{
4148 struct mm_struct *mm = current->mm;
4149 int r;
4150
4151 start >>= PAGE_SHIFT;
4152 size >>= PAGE_SHIFT;
4153
4154 switch (op) {
4155 case KFD_IOCTL_SVM_OP_SET_ATTR:
4156 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4157 break;
4158 case KFD_IOCTL_SVM_OP_GET_ATTR:
4159 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4160 break;
4161 default:
4162 r = -EINVAL;
4163 break;
4164 }
4165
4166 return r;
4167}