/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
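
/*
 * Illustrative usage sketch (not part of the API): xe_vm_lookup() returns
 * a vm with a reference held, so a typical caller balances it with
 * xe_vm_put() when done:
 *
 *	struct xe_vm *vm = xe_vm_lookup(xef, id);
 *
 *	if (!vm)
 *		return -ENOENT;
 *	...use vm...
 *	xe_vm_put(vm);
 */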

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
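
/*
 * Illustrative usage sketch: take the vm's reservation lock around an
 * operation. With @intr set, xe_vm_lock() may fail with -EINTR, which
 * is assumed here to be propagated to the caller:
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;
 *	...operate with the vm resv held...
 *	xe_vm_unlock(vm);
 */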

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}
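
/*
 * Illustrative usage sketch (assumes vm->lock is the vm's rw_semaphore
 * from xe_vm_types.h): bail out early if the vm can no longer be used:
 *
 *	down_read(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_read(&vm->lock);
 *		return -ENOENT;
 *	}
 *	...operate on the vm...
 *	up_read(&vm->lock);
 */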

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}
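
/*
 * Illustrative sketch: the helpers above are plain container_of()
 * conversions, so resolving a drm_gpuva (e.g. one obtained from a gpuvm
 * iteration) back to the driver objects is just pointer arithmetic:
 *
 *	struct xe_vma *vma = gpuva_to_vma(gpuva);
 *	struct xe_vm *vm = gpuva_to_vm(gpuva);
 */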

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
	       !xe_vma_is_cpu_addr_mirror(vma);
}
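
/*
 * Illustrative sketch: the predicates above partition VMAs into four
 * mutually exclusive backing types:
 *
 *	if (xe_vma_bo(vma))
 *		...BO-backed mapping...
 *	else if (xe_vma_is_null(vma))
 *		...sparse/NULL binding, no backing store...
 *	else if (xe_vma_is_cpu_addr_mirror(vma))
 *		...CPU address mirror (system allocator)...
 *	else
 *		...userptr; xe_vma_is_userptr(vma) is true...
 */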

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);

int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);

int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);
int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
				   u64 end, u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
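
/*
 * Illustrative usage sketch (hypothetical submission path); the call is
 * a no-op unless the vm runs in preempt-fence mode and rebinding was
 * deactivated:
 *
 *	...submit batch to an exec queue on a compute vm...
 *	xe_vm_reactivate_rebind(vm);
 */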

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

void xe_vm_resume_rebind_worker(struct xe_vm *vm);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
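
/*
 * Illustrative usage sketch (hypothetical caller, resv already locked):
 * reserve a fence slot on the vm's reservation object and add a fence.
 *
 *	xe_vm_assert_held(vm);
 *	err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
 *	if (!err)
 *		dma_resv_add_fence(xe_vm_resv(vm), fence,
 *				   DMA_RESV_USAGE_BOOKKEEP);
 */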

int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validation.validating, current);
	}
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. See
 * xe_vm_set_validating().
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validation.validating, NULL);
	}
}
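
/*
 * Illustrative usage sketch (hypothetical validation path): the two
 * helpers above bracket a validation section, called with matching
 * @allow_res_evict values.
 *
 *	xe_vm_set_validating(vm, allow_res_evict);
 *	ret = ttm_bo_validate(&bo->ttm, placement, &ctx);
 *	xe_vm_clear_validating(vm, allow_res_evict);
 */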

/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current process is the same task that called xe_vm_set_validating().
 * The function asserts that that's indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
	if (READ_ONCE(vm->validation.validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}
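
/*
 * Illustrative usage sketch (hypothetical eviction-valuable style check):
 * a shared bo is not evicted by the very task that is currently
 * validating bos for the vm.
 *
 *	if (xe_vm_is_validating(vm))
 *		return false;
 */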

/**
 * xe_vm_set_validation_exec() - Accessor to set the drm_exec object
 * @vm: The vm we want to register a drm_exec object with.
 * @exec: The exec object we want to register.
 *
 * Set the drm_exec object used to lock the vm's resv.
 */
static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
{
	xe_vm_assert_held(vm);
	xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
	vm->validation._exec = exec;
}

/**
 * xe_vm_validation_exec() - Accessor to read the drm_exec object
 * @vm: The vm we want to read the drm_exec object from.
 *
 * Return: The drm_exec object used to lock the vm's resv. The value
 * is a valid pointer, %NULL, or one of the special values defined in
 * xe_validation.h.
 */
static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
{
	xe_vm_assert_held(vm);
	return vm->validation._exec;
}

/**
 * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
 * a valid GPU mapping
 * @tile: The tile which the GPU mapping belongs to
 * @tile_present: Tile present mask
 * @tile_invalidated: Tile invalidated mask
 *
 * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
 * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
 * without the notifier lock in userptr or SVM cases, and not reliable without
 * the BO dma-resv lock in the BO case. As such, they should only be used in
 * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
 * invalidation) where it is harmless.
 *
 * Return: True if there are valid GPU pages, False otherwise
 */
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
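
/*
 * Illustrative usage sketch (assumes tile_present / tile_invalidated
 * masks as found in struct xe_vma): opportunistically skip work for a
 * mapping that is already valid on this tile.
 *
 *	if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
 *					vma->tile_invalidated))
 *		return 0;
 */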

#endif