Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

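/*
 * svm_migrate_direct_mapping_addr - GPU address for a VRAM physical offset
 *
 * Adds the start of the VRAM domain in the GPU address space, so the copy
 * engine can access the page through the direct VRAM mapping.
 */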
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

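/*
 * svm_migrate_gart_map - map system page DMA addresses into GART window 0
 *
 * Writes the GART PTEs for @npages entries into the tail of one SDMA IB and
 * emits a copy command that moves them into the GART table, making the pages
 * GPU-accessible starting at *@gart_addr (the start of window 0). Pages are
 * mapped writable unless KFD_IOCTL_SVM_FLAG_GPU_RO is set in @flags.
 */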
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * svm_migrate_copy_memory_gart - use sdma to copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA pointer to be copied
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram side is addressed through contiguous GART table entries mapping the
 * ram pages, the vram side through the direct mapping of the vram pages,
 * which must be npages contiguous pages. GART updates and the copy share the
 * same buffer-funcs ring; the copy is split into transfers of at most
 * GTT_MAX_PAGES pages, all sdma operations are serialized, and the fence of
 * the last sdma operation is returned so the caller can wait for the whole
 * copy to finish.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to complete
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

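/*
 * svm_migrate_addr_to_pfn - convert a VRAM offset to the pfn of the device
 * page registered in the driver's dev_pagemap range.
 */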
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

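/*
 * svm_migrate_get_vram_page/svm_migrate_put_vram_page - hold and release a
 * device page used as migration destination or source. The get side takes a
 * reference on the range's svm_bo, stashes it in page->zone_device_data and
 * locks the page; the put side unlocks the page and drops the page reference.
 */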
static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

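/*
 * svm_migrate_addr - inverse of svm_migrate_addr_to_pfn: recover the VRAM
 * offset of a device page relative to the dev_pagemap range start.
 */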
static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

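/*
 * svm_migrate_get_sys_page/svm_migrate_put_sys_page - allocate and lock a
 * system page as migration destination, and unlock/release it again on the
 * error paths.
 */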
static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

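/*
 * Count how many of the collected source pages were marked as migrating
 * (successful) or left behind (unsuccessful) by the migrate_vma core, for
 * statistics and to decide whether a range still has pages in the previous
 * location.
 */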
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

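/*
 * svm_migrate_copy_to_vram - copy the collected system pages into the range's
 * VRAM allocation.
 *
 * Allocates the VRAM node for @prange, walks the migrate source array,
 * dma-maps each system page into @scratch and assigns it a destination VRAM
 * page, then issues one svm_migrate_copy_memory_gart() call per contiguous
 * run of pages. A run ends at a hole in the source array or at the end of the
 * current VRAM cursor segment. On error the already prepared VRAM pages are
 * released again.
 */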
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

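/*
 * svm_migrate_vma_to_vram - migrate the pages of one vma range to VRAM.
 *
 * Collects the system pages with migrate_vma_setup(), copies them to VRAM
 * with svm_migrate_copy_to_vram(), finalizes the migration and reports SMI
 * migration events. Returns the number of collected pages on success or a
 * negative error code.
 */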
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, adev->kfd.dev->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, adev->kfd.dev->id, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

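/*
 * svm_migrate_page_free - dev_pagemap page_free callback: drop the svm_bo
 * reference that svm_migrate_get_vram_page() stored in zone_device_data once
 * the device page is released.
 */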
static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

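/*
 * svm_migrate_copy_to_ram - copy the range's VRAM pages back to newly
 * allocated system pages.
 *
 * Walks the migrate source array, allocates and dma-maps a destination system
 * page for every device page, and batches contiguous VRAM source pages into
 * svm_migrate_copy_memory_gart() calls. A batch is flushed whenever a source
 * page is missing or the VRAM addresses stop being contiguous. On error the
 * already allocated system pages are released again.
 */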
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address, in bytes
 * @end: range end virtual address, in bytes
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 *   0 - success with all pages migrated
 *   negative values - indicate error
 *   positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      adev->kfd.dev->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    adev->kfd.dev->id, 0, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    uint32_t trigger)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: when both devices have a large PCIe BAR or are on the same
	 * xgmi hive, skip using system memory as the migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, trigger);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}

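/*
 * svm_migrate_to_vram - migrate a range to the GPU @best_loc: directly from
 * system memory when the range has no VRAM location yet, otherwise via
 * svm_migrate_vram_to_vram(), which bounces through system memory.
 */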
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of the fault with SIGBUS
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		kfd_unref_process(p);
		return 0;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

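/*
 * svm_migrate_init - register the GPU's VRAM as device memory with HMM.
 *
 * Sets up the dev_pagemap covering all of VRAM (MEMORY_DEVICE_COHERENT when
 * the GPU is xgmi-connected to the CPU, MEMORY_DEVICE_PRIVATE otherwise) and
 * reserves the system memory needed for the corresponding struct pages.
 */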
int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return -ENOMEM;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		/* Disable SVM support capability */
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						res->end - res->start + 1);
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	svm_range_set_max_pages(adev);

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}