/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

struct xe_device;

/**
 * xe_svm_devm_owner() - Return the owner of device private memory
 * @xe: The xe device.
 *
 * Return: The owner of this device's device private memory to use in
 * hmm_range_fault().
 */
static inline void *xe_svm_devm_owner(struct xe_device *xe)
{
	return xe;
}

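/*
 * Illustrative only: this owner cookie is what the core mm compares against
 * a device-private page's pgmap owner, e.g. when filling out a struct
 * hmm_range (field names from include/linux/hmm.h):
 *
 *	struct hmm_range range = {
 *		.notifier = &notifier,
 *		.dev_private_owner = xe_svm_devm_owner(xe),
 *	};
 */
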
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which a binding was
	 * invalidated for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
};
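
/*
 * Illustrative only (assumes the usual xe convention of one mask bit per
 * tile, indexed by tile->id): with the notifier lock held, a caller could
 * test whether a given tile still has a valid binding for this range with
 *
 *	bool bound = range->tile_present & ~range->tile_invalidated & BIT(tile->id);
 */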

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);

u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.pages.flags.has_dma_mapping;
}
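
/*
 * Illustrative caller pattern only: the lockdep assertion above means the
 * GPU SVM notifier lock must be held across the check, e.g. using the
 * helpers defined at the bottom of this header:
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range))
 *		...;
 *	xe_svm_notifier_unlock(vm);
 */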

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
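
/*
 * Illustrative only: typically used where core GPU SVM hands back the
 * embedded base range, e.g. in a notifier callback:
 *
 *	struct xe_svm_range *range = to_xe_range(r);
 */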

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>
#include "xe_vm.h"

struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		struct {
			const struct drm_pagemap_addr *dma_addr;
		} pages;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

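/*
 * Note: even with CONFIG_DRM_XE_GPUSVM disabled, the VM still initializes
 * core GPU SVM state when CONFIG_DRM_GPUSVM is available, since userptr
 * support depends on it (see the "Need to support userptr without
 * XE_GPUSVM" comment near the lock helpers below).
 */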
static inline
int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
			       NULL, NULL, 0, 0, 0, NULL, NULL, 0);
#else
	return 0;
#endif
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	xe_assert(vm->xe, xe_vm_is_closed(vm));
	drm_gpusvm_fini(&vm->svm.gpusvm);
#endif
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		  const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}

static inline
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
	return 0;
}

static inline
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
	return NULL;
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

#define xe_svm_range_has_dma_mapping(...) false
#endif /* CONFIG_DRM_XE_GPUSVM */

#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_assert_held_read(vm__) \
	lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_lock_interruptible(vm__) \
	down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)

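/*
 * Illustrative only: xe_svm_notifier_lock_interruptible() returns 0 on
 * success or a negative error such as -EINTR, so a caller sketch is:
 *
 *	int err = xe_svm_notifier_lock_interruptible(vm);
 *
 *	if (err)
 *		return err;
 *	...
 *	xe_svm_notifier_unlock(vm);
 */
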
#else
#define xe_svm_assert_in_notifier(...) do {} while (0)

static inline void xe_svm_assert_held_read(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
	return 0;
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
#endif /* CONFIG_DRM_GPUSVM */

#endif /* _XE_SVM_H_ */