/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

/**
 * DOC: MMU Notifier
 *
 * For coherent userptr handling the driver registers an MMU notifier to
 * inform it about updates to the page tables of a process.
 *
 * When somebody tries to invalidate the page tables we block the update
 * until all operations on the pages in question are completed; then those
 * pages are marked as accessed, and also as dirty if the access was not
 * read only.
 *
 * New command submissions using the userptrs in question are delayed until
 * all page table invalidations have completed and we once more see a
 * coherent process address space.
 */
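
/*
 * Rough sketch of the invalidation path implemented below (illustrative
 * pseudocode only, not the exact call chain):
 *
 *	amdgpu_mn_sync_pagetables_gfx(mirror, update)
 *		amdgpu_mn_read_lock(amn, update->blockable)
 *		for each interval tree node overlapping [start, end]:
 *			amdgpu_mn_invalidate_node(node, start, end)
 *				wait for pending GPU work on each BO's
 *				reservation object to finish
 *		amdgpu_mn_read_unlock(amn)
 */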

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

/**
 * struct amdgpu_mn_node
 *
 * @it: interval node defining start-last of the affected address range
 * @bos: list of all BOs in the affected address range
 *
 * Manages all BOs which are affected by a certain range of address space.
 */
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
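
/*
 * Nodes live in the amn->objects interval tree; amdgpu_mn_register() below
 * merges overlapping ranges into a single node, so at any time the tree
 * holds only non-overlapping address ranges.
 */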

/**
 * amdgpu_mn_destroy - destroy the HMM mirror
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = amn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&amn->lock);
	hash_del(&amn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node,
					     &amn->objects.rb_root, it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&amn->lock);
	mutex_unlock(&adev->mn_lock);

	hmm_mirror_unregister(&amn->mirror);
	kfree(amn);
}

/**
 * amdgpu_hmm_mirror_release - callback to notify about mm destruction
 *
 * @mirror: the HMM mirror (mm) this callback is about
 *
 * Schedule a work item to lazily destroy the HMM mirror.
 */
static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);

	INIT_WORK(&amn->work, amdgpu_mn_destroy);
	schedule_work(&amn->work);
}
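
/*
 * Teardown is deferred to a work item here, presumably because this
 * callback runs during mm teardown, where unregistering the mirror
 * directly would be unsafe; the work item can take adev->mn_lock and
 * call hmm_mirror_unregister() from a clean context.
 */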

/**
 * amdgpu_mn_lock - take the write side lock for this notifier
 *
 * @mn: our notifier
 */
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
	if (mn)
		down_write(&mn->lock);
}

/**
 * amdgpu_mn_unlock - drop the write side lock for this notifier
 *
 * @mn: our notifier
 */
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
	if (mn)
		up_write(&mn->lock);
}

/**
 * amdgpu_mn_read_lock - take the read side lock for this notifier
 *
 * @amn: our notifier
 * @blockable: whether we are allowed to block while taking the lock
 */
static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
	if (blockable)
		down_read(&amn->lock);
	else if (!down_read_trylock(&amn->lock))
		return -EAGAIN;

	return 0;
}

/**
 * amdgpu_mn_read_unlock - drop the read side lock for this notifier
 *
 * @amn: our notifier
 */
static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
	up_read(&amn->lock);
}

/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of address range affected
 * @end: end of address range affected
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {

		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
			true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	}
}

/**
 * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
 *
 * @mirror: the hmm_mirror (mm) that is about to be updated
 * @update: the update start and end address
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
	unsigned long start = update->start;
	unsigned long end = update->end;
	bool blockable = update->blockable;
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	/* TODO we should be able to split locking for interval tree and
	 * amdgpu_mn_invalidate_node
	 */
	if (amdgpu_mn_read_lock(amn, blockable))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		if (!blockable) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	amdgpu_mn_read_unlock(amn);

	return 0;
}

/**
 * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
 *
 * @mirror: the hmm_mirror (mm) that is about to be updated
 * @update: the update start and end address
 *
 * We temporarily evict all BOs between start and end. This
 * necessitates evicting all user-mode queues of the process. The BOs
 * are restored in amdgpu_mn_invalidate_range_end_hsa.
 */
static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
	unsigned long start = update->start;
	unsigned long end = update->end;
	bool blockable = update->blockable;
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	if (amdgpu_mn_read_lock(amn, blockable))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;

		if (!blockable) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {
			struct kgd_mem *mem = bo->kfd_bo;

			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
							 start, end))
				amdgpu_amdkfd_evict_userptr(mem, amn->mm);
		}
	}

	amdgpu_mn_read_unlock(amn);

	return 0;
}

/* Low bits of any reasonable mm pointer will be unused due to struct
 * alignment. Use these bits to make a unique key from the mm pointer
 * and notifier type.
 */
#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
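
/*
 * For example, with a hypothetical mm pointer of 0xffff888012345000 and
 * AMDGPU_MN_TYPE_HSA taken to be 1 here, the key is 0xffff888012345001;
 * since struct mm_struct is aligned well past the number of notifier
 * types, keys for different (mm, type) pairs cannot collide.
 */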

static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
	[AMDGPU_MN_TYPE_GFX] = {
		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
		.release = amdgpu_hmm_mirror_release
	},
	[AMDGPU_MN_TYPE_HSA] = {
		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
		.release = amdgpu_hmm_mirror_release
	},
};

/**
 * amdgpu_mn_get - create HMM mirror context
 *
 * @adev: amdgpu device pointer
 * @type: type of MMU notifier context
 *
 * Creates an HMM mirror context for current->mm.
 */
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
				enum amdgpu_mn_type type)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *amn;
	unsigned long key = AMDGPU_MN_KEY(mm, type);
	int r;

	mutex_lock(&adev->mn_lock);
	if (down_write_killable(&mm->mmap_sem)) {
		mutex_unlock(&adev->mn_lock);
		return ERR_PTR(-EINTR);
	}

	hash_for_each_possible(adev->mn_hash, amn, node, key)
		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
			goto release_locks;

	amn = kzalloc(sizeof(*amn), GFP_KERNEL);
	if (!amn) {
		amn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	amn->adev = adev;
	amn->mm = mm;
	init_rwsem(&amn->lock);
	amn->type = type;
	amn->objects = RB_ROOT_CACHED;

	amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
	r = hmm_mirror_register(&amn->mirror, mm);
	if (r)
		goto free_amn;

	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return amn;

free_amn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(amn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an HMM mirror for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	enum amdgpu_mn_type type =
		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
	struct amdgpu_mn *amn;
	struct amdgpu_mn_node *node = NULL, *new_node;
	struct list_head bos;
	struct interval_tree_node *it;

	amn = amdgpu_mn_get(adev, type);
	if (IS_ERR(amn))
		return PTR_ERR(amn);

	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&bos);

	down_write(&amn->lock);

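	/*
	 * Merge every node already overlapping [addr, end] into one range:
	 * remove each overlapping node from the tree, widen [addr, end] to
	 * cover it and collect its BOs. The last removed node is reused;
	 * if none overlapped, the freshly allocated new_node is used.
	 */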
	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &amn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node)
		node = new_node;
	else
		kfree(new_node);

	bo->mn = amn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &amn->objects);

	up_write(&amn->lock);

	return 0;
}

/**
 * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of HMM mirror updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *amn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	amn = bo->mn;
	if (amn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&amn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del_init(&bo->mn_list);

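	/*
	 * If the saved next entry is now an empty list head, the BO we
	 * just removed was the last one in the node, so the node itself
	 * can be dropped from the interval tree and freed.
	 */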
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;

		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &amn->objects);
		kfree(node);
	}

	up_write(&amn->lock);
	mutex_unlock(&adev->mn_lock);
}

/* Flags used internally by HMM; not related to CPU/GPU PTE flags */
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
	(1 << 0), /* HMM_PFN_VALID */
	(1 << 1), /* HMM_PFN_WRITE */
	0 /* HMM_PFN_DEVICE_PRIVATE */
};

static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
	0, /* HMM_PFN_NONE */
	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
};

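/**
 * amdgpu_hmm_init_range - initialize an HMM range with the driver defaults
 *
 * @range: the hmm_range to initialize
 *
 * Points the range at the flag/value tables above and sets the pfn shift,
 * so HMM encodes page frame numbers the way amdgpu expects.
 */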
void amdgpu_hmm_init_range(struct hmm_range *range)
{
	if (range) {
		range->flags = hmm_range_flags;
		range->values = hmm_range_values;
		range->pfn_shift = PAGE_SHIFT;
		INIT_LIST_HEAD(&range->list);
	}
}