// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_svm.h"
#include "xe_userptr.h"

#include <linux/mm.h>

#include "xe_trace_bo.h"

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since the last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee that the
 * vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if the userptr vma is valid, -EAGAIN otherwise; a repin is recommended.
 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
	return mmu_interval_check_retry(&uvma->userptr.notifier,
					uvma->userptr.pages.notifier_seq) ?
		-EAGAIN : 0;
}

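/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * the lockless advisory check above is typically used to decide whether a
 * repin is worth attempting, followed by a re-check under the
 * notifier_lock before relying on the result:
 *
 *	if (xe_vma_userptr_check_repin(uvma))
 *		repin_requested = true;		// hypothetical caller state
 *
 *	down_read(&vm->svm.gpusvm.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);	// authoritative check
 *	up_read(&vm->svm.gpusvm.notifier_lock);
 */
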
/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

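/**
 * xe_vma_userptr_pin_pages() - Grab the current pages backing a userptr vma
 * @uvma: The userptr vma
 *
 * Looks up the CPU pages currently backing the userptr range through
 * drm_gpusvm_get_pages(), honouring the vma's read-only flag. The caller must
 * hold the vm->lock. If the vma has already been destroyed this is a no-op.
 *
 * Return: 0 on success or if the vma is destroyed, negative error code on
 * failure.
 */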
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	struct drm_gpusvm_ctx ctx = {
		.read_only = xe_vma_read_only(vma),
		.device_private_page_owner = xe_svm_devm_owner(xe),
		.allow_mixed = true,
	};

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));

	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
				    uvma->userptr.notifier.mm,
				    &uvma->userptr.notifier,
				    xe_vma_userptr(vma),
				    xe_vma_userptr(vma) + xe_vma_size(vma),
				    &ctx);
}

static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gpusvm_ctx ctx = {
		.in_notifier = true,
		.read_only = xe_vma_read_only(vma),
	};
	long err;

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
			       xe_vma_size(vma) >> PAGE_SHIFT, &ctx);
}

static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
	       xe_vma_start(vma), xe_vma_size(vma));

	down_write(&vm->svm.gpusvm.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	__vma_userptr_invalidate(vm, uvma);
	up_write(&vm->svm.gpusvm.notifier_lock);
	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
/**
 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
 * @uvma: The userptr vma to invalidate
 *
 * Perform a forced userptr invalidation for testing purposes.
 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	/* Protect against concurrent userptr pinning */
	lockdep_assert_held(&vm->lock);
	/* Protect against concurrent notifiers */
	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
	/*
	 * Protect against concurrent instances of this function and
	 * the critical exec sections
	 */
	xe_vm_assert_held(vm);

	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     uvma->userptr.pages.notifier_seq))
		uvma->userptr.pages.notifier_seq -= 2;
	__vma_userptr_invalidate(vm, uvma);
}
#endif

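/**
 * xe_vm_userptr_pin() - Repin all invalidated userptrs in a VM
 * @vm: The VM.
 *
 * Moves all userptr vmas that have been invalidated since their last pin onto
 * the repin list, re-pins their backing pages and queues them on the VM's
 * rebind list. A vma whose pin fails with -EFAULT is instead unmapped from
 * the GPU. The caller must hold @vm->lock in write mode; not used in fault
 * mode.
 *
 * Return: 0 on success, negative error code on failure.
 */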
int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;
	int err = 0;

	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&uvma->userptr.invalidate_link);
		list_add_tail(&uvma->userptr.repin_link,
			      &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to bind list */
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err == -EFAULT) {
			list_del_init(&uvma->userptr.repin_link);
			/*
			 * We might have already done the pin once, but then
			 * had to retry before the re-bind happened due to
			 * some other condition in the caller. In the meantime
			 * the userptr got invalidated by the notifier, so we
			 * need to revalidate here, but this time we hit the
			 * EFAULT. In such a case make sure we remove
			 * ourselves from the rebind list to avoid going down
			 * in flames.
			 */
			if (!list_empty(&uvma->vma.combined_links.rebind))
				list_del_init(&uvma->vma.combined_links.rebind);

			/* Wait for pending binds */
			xe_vm_lock(vm, false);
			dma_resv_wait_timeout(xe_vm_resv(vm),
					      DMA_RESV_USAGE_BOOKKEEP,
					      false, MAX_SCHEDULE_TIMEOUT);

			down_read(&vm->svm.gpusvm.notifier_lock);
			err = xe_vm_invalidate_vma(&uvma->vma);
			up_read(&vm->svm.gpusvm.notifier_lock);
			xe_vm_unlock(vm);
			if (err)
				break;
		} else {
			if (err)
				break;

			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->vma.combined_links.rebind,
				       &vm->rebind_list);
		}
	}

	if (err) {
		down_write(&vm->svm.gpusvm.notifier_lock);
		spin_lock(&vm->userptr.invalidated_lock);
		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
					 userptr.repin_link) {
			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->userptr.invalidate_link,
				       &vm->userptr.invalidated);
		}
		spin_unlock(&vm->userptr.invalidated_lock);
		up_write(&vm->svm.gpusvm.notifier_lock);
	}
	return err;
}

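/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * a rebind-style worker would typically repin and then re-check under the
 * notifier_lock, since a notifier can invalidate userptrs again between
 * the pin and the final check:
 *
 *	down_write(&vm->lock);
 *	err = xe_vm_userptr_pin(vm);
 *	if (!err) {
 *		down_read(&vm->svm.gpusvm.notifier_lock);
 *		err = __xe_vm_userptr_needs_repin(vm);
 *		up_read(&vm->svm.gpusvm.notifier_lock);
 *	}
 *	up_write(&vm->lock);
 *	if (err == -EAGAIN)
 *		goto retry;	// hypothetical retry label
 */
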
/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

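/**
 * xe_userptr_setup() - Initialize the userptr state of a userptr vma
 * @uvma: The userptr vma to initialize
 * @start: Start of the userptr range in CPU virtual address space
 * @range: Size of the userptr range in bytes
 *
 * Initializes the repin/invalidation list links and registers an MMU interval
 * notifier for the range on the current process mm. The notifier sequence
 * number is initialized to an invalid value so the vma reads as needing a
 * repin until the first successful pin.
 *
 * Return: 0 on success, negative error code on failure.
 */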
int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
		     unsigned long range)
{
	struct xe_userptr *userptr = &uvma->userptr;
	int err;

	INIT_LIST_HEAD(&userptr->invalidate_link);
	INIT_LIST_HEAD(&userptr->repin_link);

	err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
					   start, range,
					   &vma_userptr_notifier_ops);
	if (err)
		return err;

	userptr->pages.notifier_seq = LONG_MAX;

	return 0;
}

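/**
 * xe_userptr_remove() - Tear down the userptr state of a userptr vma
 * @uvma: The userptr vma
 *
 * Releases the pages tracked for the userptr range and removes the MMU
 * interval notifier. Must only be called once the GPU can no longer access
 * the range.
 */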
void xe_userptr_remove(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
	struct xe_userptr *userptr = &uvma->userptr;

	drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
			      xe_vma_size(&uvma->vma) >> PAGE_SHIFT);

	/*
	 * Since userptr pages are not pinned, we can't remove
	 * the notifier until we're sure the GPU is not accessing
	 * them anymore.
	 */
	mmu_interval_notifier_remove(&userptr->notifier);
}

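/**
 * xe_userptr_destroy() - Remove a userptr vma from the VM's invalidation lists
 * @uvma: The userptr vma
 *
 * Drops the vma from the VM's invalidated list under the invalidated_lock.
 * The vma must no longer be on the repin list when this is called.
 */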
void xe_userptr_destroy(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
	list_del(&uvma->userptr.invalidate_link);
	spin_unlock(&vm->userptr.invalidated_lock);
}