/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_BO_H_
#define _XE_BO_H_

#include <drm/ttm/ttm_tt.h>

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_vm_types.h"
#include "xe_vm.h"

#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */

#define XE_BO_FLAG_USER BIT(0)
/* The bits below need to be contiguous, or things break */
#define XE_BO_FLAG_SYSTEM BIT(1)
#define XE_BO_FLAG_VRAM0 BIT(2)
#define XE_BO_FLAG_VRAM1 BIT(3)
#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
#define XE_BO_FLAG_STOLEN BIT(4)
#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
                                       XE_BO_FLAG_VRAM0 << (tile)->id : \
                                       XE_BO_FLAG_SYSTEM)
#define XE_BO_FLAG_GGTT BIT(5)
#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
#define XE_BO_FLAG_PINNED BIT(7)
#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
#define XE_BO_FLAG_DEFER_BACKING BIT(9)
#define XE_BO_FLAG_SCANOUT BIT(10)
#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
#define XE_BO_FLAG_PAGETABLE BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
#define XE_BO_FLAG_NEEDS_UC BIT(14)
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
#define XE_BO_FLAG_GGTT0 BIT(20)
#define XE_BO_FLAG_GGTT1 BIT(21)
#define XE_BO_FLAG_GGTT2 BIT(22)
#define XE_BO_FLAG_GGTT3 BIT(23)
#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)

/* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)

#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
                             XE_BO_FLAG_GGTT1 | \
                             XE_BO_FLAG_GGTT2 | \
                             XE_BO_FLAG_GGTT3)

#define XE_BO_FLAG_GGTTx(tile) \
        (XE_BO_FLAG_GGTT0 << (tile)->id)

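/*
 * Illustrative flag-combination sketch (editorial example, not taken from the
 * original header; the exact flag set depends on the use case): a per-tile,
 * kernel-internal BO that should live in VRAM on discrete parts (falling back
 * to system memory on integrated parts) and be bound in the GGTT might be
 * created with something like:
 *
 *	flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
 *
 * XE_BO_FLAG_VRAM_IF_DGFX() and XE_BO_FLAG_GGTTx() derive the per-tile bit by
 * shifting by (tile)->id, which is why the XE_BO_FLAG_VRAM* and
 * XE_BO_FLAG_GGTT* bits must stay contiguous.
 */
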
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
#define XE_PDE_SHIFT (XE_PTE_SHIFT - 3)
#define XE_PDES (1 << XE_PDE_SHIFT)
#define XE_PDE_MASK (XE_PDES - 1)

#define XE_64K_PTE_SHIFT 16
#define XE_64K_PAGE_SIZE (1 << XE_64K_PTE_SHIFT)
#define XE_64K_PTE_MASK (XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK (XE_PDE_MASK >> 4)

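/*
 * Worked example (editorial note, follows directly from the definitions
 * above): with XE_PTE_SHIFT = 12, XE_PAGE_SIZE is 4 KiB and XE_PTE_MASK is
 * 0xfff. XE_PDE_SHIFT is 12 - 3 = 9 because each page-table entry is 8 bytes,
 * so one 4 KiB page-table page holds XE_PDES = 512 entries (XE_PDE_MASK =
 * 0x1ff). For the 64K variants, XE_64K_PAGE_SIZE is 64 KiB, XE_64K_PTE_MASK
 * is 0xffff, and XE_64K_PDE_MASK is 0x1f: 32 entries of 64 KiB span the same
 * 2 MiB that 512 entries of 4 KiB do.
 */
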
#define XE_PL_SYSTEM TTM_PL_SYSTEM
#define XE_PL_TT TTM_PL_TT
#define XE_PL_VRAM0 TTM_PL_VRAM
#define XE_PL_VRAM1 (XE_PL_VRAM0 + 1)
#define XE_PL_STOLEN (TTM_NUM_MEM_TYPES - 1)

#define XE_BO_PROPS_INVALID (-1)

#define XE_PCI_BARRIER_MMAP_OFFSET (0x50 << XE_PTE_SHIFT)

struct sg_table;

struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);

struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
                                     struct xe_tile *tile, struct dma_resv *resv,
                                     struct ttm_lru_bulk_move *bulk, size_t size,
                                     u16 cpu_caching, enum ttm_bo_type type,
                                     u32 flags);
struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
                          struct xe_tile *tile, struct xe_vm *vm,
                          size_t size, u64 start, u64 end,
                          enum ttm_bo_type type, u32 flags, u64 alignment);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
                                  struct xe_vm *vm, size_t size,
                                  enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
                           struct xe_vm *vm, size_t size,
                           enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
                                struct xe_vm *vm, size_t size,
                                u16 cpu_caching,
                                u32 flags);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
                                   struct xe_vm *vm, size_t size,
                                   enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
                                      struct xe_vm *vm, size_t size, u64 offset,
                                      enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
                                              struct xe_tile *tile,
                                              struct xe_vm *vm,
                                              size_t size, u64 offset,
                                              enum ttm_bo_type type, u32 flags,
                                              u64 alignment);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
                                           size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
                                             const void *data, size_t size, u32 flags);
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);

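/*
 * Illustrative creation sketch (editorial example; assumes a valid xe device
 * and tile pointer, with size and flags chosen arbitrarily and error handling
 * trimmed): a kernel-internal buffer that is created, pinned and kernel-mapped
 * in one step could be obtained roughly as follows.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
 *					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *					  XE_BO_FLAG_GGTT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * The managed variants are intended to tie the BO's lifetime to the device,
 * so the caller does not put the object explicitly on the success path.
 */
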
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
                              u32 bo_flags);

static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
        return container_of(bo, struct xe_bo, ttm);
}

static inline struct xe_bo *gem_to_xe_bo(const struct drm_gem_object *obj)
{
        return container_of(obj, struct xe_bo, ttm.base);
}

#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)

static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
{
        if (bo)
                drm_gem_object_get(&bo->ttm.base);

        return bo;
}

void xe_bo_put(struct xe_bo *bo);

/**
 * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
 * xe bo
 * @bo: The bo for which we want to obtain a refcount.
 *
 * There is a short window between the point where the bo's GEM object
 * refcount reaches zero and the point where the final ttm_bo reference is
 * put. Code in the eviction and shrinking paths should therefore attempt to
 * grab a GEM object reference before using members outside of the base-class
 * ttm object. This function is intended for that purpose. On success, the
 * call must be paired with a subsequent xe_bo_put().
 *
 * Return: @bo on success, NULL on failure.
 */
static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
{
        if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
                return NULL;

        return bo;
}

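/*
 * Illustrative usage sketch (editorial example; example_inspect() is a
 * hypothetical helper, not part of this header): an eviction or shrinker
 * path takes a GEM reference before touching xe_bo state outside the
 * embedded ttm_buffer_object, and drops it with xe_bo_put() afterwards.
 *
 *	static void example_inspect(struct ttm_buffer_object *ttm_bo)
 *	{
 *		struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
 *
 *		bo = xe_bo_get_unless_zero(bo);
 *		if (!bo)
 *			return;	// final GEM reference already gone
 *
 *		// ... safe to use xe_bo members beyond the ttm_bo here ...
 *
 *		xe_bo_put(bo);
 *	}
 */
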
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
        if (bo)
                ttm_bo_set_bulk_move(&bo->ttm, NULL);
}

static inline void xe_bo_assert_held(struct xe_bo *bo)
{
        if (bo)
                dma_resv_assert_held((bo)->ttm.base.resv);
}

int xe_bo_lock(struct xe_bo *bo, bool intr);

void xe_bo_unlock(struct xe_bo *bo);

static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
        if (bo) {
                XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
                if (bo->vm)
                        xe_vm_assert_held(bo->vm);
                else
                        dma_resv_unlock(bo->ttm.base.resv);
        }
}

int xe_bo_pin_external(struct xe_bo *bo);
int xe_bo_pin(struct xe_bo *bo);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);

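/*
 * Illustrative validation sketch (editorial example; example_make_resident()
 * is a hypothetical caller and the NULL/false arguments are one possible
 * choice, not a requirement): the bo's reservation lock must be held across
 * validation.
 *
 *	static int example_make_resident(struct xe_bo *bo)
 *	{
 *		int err;
 *
 *		err = xe_bo_lock(bo, true);	// interruptible wait
 *		if (err)
 *			return err;
 *
 *		err = xe_bo_validate(bo, NULL, false);
 *		xe_bo_unlock(bo);
 *
 *		return err;
 *	}
 */
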
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
        return bo->ttm.pin_count;
}

static inline bool xe_bo_is_protected(const struct xe_bo *bo)
{
        return bo->pxp_key_instance;
}

static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
        if (likely(bo)) {
                xe_bo_lock(bo, false);
                xe_bo_unpin(bo);
                xe_bo_unlock(bo);

                xe_bo_put(bo);
        }
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);

static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
        return xe_bo_addr(bo, 0, page_size);
}

/**
 * xe_bo_size() - Xe BO size
 * @bo: The bo object.
 *
 * Simple helper to return Xe BO's size.
 *
 * Return: Xe BO's size
 */
static inline size_t xe_bo_size(struct xe_bo *bo)
{
        return bo->ttm.base.size;
}

static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
        struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];

        if (XE_WARN_ON(!ggtt_node))
                return 0;

        XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo));
        XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
        return ggtt_node->base.start;
}

static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
        xe_assert(xe_bo_device(bo), bo->tile);

        return __xe_bo_ggtt_addr(bo, bo->tile->id);
}

int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);

bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
bool xe_bo_is_vm_bound(struct xe_bo *bo);
bool xe_bo_has_single_placement(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);

bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_evict(struct xe_bo *bo);

int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);

int xe_bo_dma_unmap_pinned(struct xe_bo *bo);

extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file);
void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo);

int xe_bo_dumb_create(struct drm_file *file_priv,
                      struct drm_device *dev,
                      struct drm_mode_create_dumb *args);

bool xe_bo_needs_ccs_pages(struct xe_bo *bo);

static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
        return PAGE_ALIGN(xe_bo_size(bo));
}

static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
        if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
            xe_bo_is_vram(bo))
                return true;

        return false;
}

void __xe_bo_release_dummy(struct kref *kref);

/**
 * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
 * @bo: The bo to put.
 * @deferred: List to which to add the buffer object if we cannot put, or
 * NULL if the function is to put unconditionally.
 *
 * Since the final freeing of an object includes both sleeping and (!)
 * memory allocation in the dma_resv individualization, it's not OK
 * to put an object from atomic context, nor while holding a lock
 * tainted by reclaim. In such situations we want to defer the final
 * freeing until we've exited the restricting context, or in the worst
 * case to a workqueue.
 * This function puts the object if that is possible without the refcount
 * reaching zero; otherwise it adds the object to the @deferred list.
 * The caller needs to follow up with a call to xe_bo_put_commit() to actually
 * put the bo iff this function returns true. It's safe to always
 * follow up with a call to xe_bo_put_commit().
 * TODO: It's TTM that is the villain here. Perhaps TTM should add an
 * interface like this.
 *
 * Return: true if @bo was the first object put on the @deferred list,
 * false otherwise.
 */
static inline bool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{
        if (!deferred) {
                xe_bo_put(bo);
                return false;
        }

        if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
                return false;

        return llist_add(&bo->freed, deferred);
}

void xe_bo_put_commit(struct llist_head *deferred);

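/*
 * Illustrative sketch (editorial example; bo_a/bo_b and the surrounding
 * context are hypothetical): puts are batched from a context that must not
 * trigger the final free directly, then committed from a safe context. Per
 * the note above, it is always safe to follow up with xe_bo_put_commit(),
 * regardless of the return values.
 *
 *	struct llist_head deferred;
 *
 *	init_llist_head(&deferred);
 *
 *	// e.g. under a reclaim-tainted lock or in atomic context
 *	xe_bo_put_deferred(bo_a, &deferred);
 *	xe_bo_put_deferred(bo_b, &deferred);
 *
 *	// later, from a context where sleeping and allocating are allowed
 *	xe_bo_put_commit(&deferred);
 */
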
/**
 * xe_bo_put_async() - Put BO async
 * @bo: The bo to put.
 *
 * Put a BO asynchronously; the final put is deferred to a worker so that it
 * runs outside IRQ context.
 */
static inline void
xe_bo_put_async(struct xe_bo *bo)
{
        struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;

        if (xe_bo_put_deferred(bo, &bo_device->async_list))
                schedule_work(&bo_device->async_free);
}

void xe_bo_dev_init(struct xe_bo_dev *bo_device);

void xe_bo_dev_fini(struct xe_bo_dev *bo_device);

struct sg_table *xe_bo_sg(struct xe_bo *bo);

/*
 * xe_sg_segment_size() - Provides upper limit for sg segment size.
 * @dev: device pointer
 *
 * Returns the maximum segment size for the 'struct scatterlist'
 * elements.
 */
static inline unsigned int xe_sg_segment_size(struct device *dev)
{
        struct scatterlist __maybe_unused sg;
        size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;

        max = min_t(size_t, max, dma_max_mapping_size(dev));

        /*
         * The iommu_dma_map_sg() function ensures iova allocation doesn't
         * cross a dma segment boundary. It does so by padding some sg
         * elements. This can cause overflow, ending up with sg->length
         * being set to 0. Avoid this by ensuring the maximum segment size
         * is half of 'max', rounded down to PAGE_SIZE.
         */
        return round_down(max / 2, PAGE_SIZE);
}

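/*
 * Illustrative sketch (editorial example; 'sgt', 'pages', 'num_pages' and
 * 'size' are assumed to be set up by the caller): the returned limit is meant
 * to be passed as the maximum segment length when building an sg table for
 * DMA, for instance:
 *
 *	ret = sg_alloc_table_from_pages_segment(&sgt, pages, num_pages, 0,
 *						size, xe_sg_segment_size(dev),
 *						GFP_KERNEL);
 */
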
/**
 * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
 * @purge: Only purging allowed. Don't shrink if bo not purgeable.
 * @writeback: Attempt to immediately move content to swap.
 */
struct xe_bo_shrink_flags {
        u32 purge : 1;
        u32 writeback : 1;
};

long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
                  const struct xe_bo_shrink_flags flags,
                  unsigned long *scanned);

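/*
 * Illustrative sketch (editorial example; 'ctx', 'ttm_bo' and 'scanned' are
 * assumed to be provided by the surrounding shrinker code): a purge-only pass
 * sets just the purge bit, so non-purgeable bos are left alone and no
 * writeback to swap is attempted.
 *
 *	struct xe_bo_shrink_flags shrink_flags = {
 *		.purge = 1,
 *	};
 *	long freed;
 *
 *	freed = xe_bo_shrink(ctx, ttm_bo, shrink_flags, &scanned);
 */
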
/**
 * xe_bo_is_mem_type - Whether the bo currently resides in the given
 * TTM memory type
 * @bo: The bo to check.
 * @mem_type: The TTM memory type.
 *
 * Return: true iff the bo resides in @mem_type, false otherwise.
 */
static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
        xe_bo_assert_held(bo);
        return bo->ttm.resource->mem_type == mem_type;
}
#endif