/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Mask of tiles on which a binding is present for this
	 * range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Mask of tiles on which the binding for this range
	 * has been invalidated. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
	 * locking.
	 */
	u8 skip_migrate :1;
};
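/*
 * Illustrative sketch (not part of the upstream header): since
 * &struct drm_gpusvm_range is embedded as @base, driver code can recover the
 * containing xe_svm_range from a base pointer with container_of(). The helper
 * name below is hypothetical.
 *
 *	static inline struct xe_svm_range *
 *	to_xe_range(struct drm_gpusvm_range *r)
 *	{
 *		return container_of(r, struct xe_svm_range, base);
 *	}
 */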

#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
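/*
 * Illustrative sketch (hypothetical caller, not part of the upstream header):
 * servicing a GPU page fault on an SVM VMA by forwarding it to
 * xe_svm_handle_pagefault(). The wrapper name and its error handling are
 * assumptions for illustration only.
 *
 *	static int handle_svm_fault(struct xe_vm *vm, struct xe_vma *vma,
 *				    struct xe_tile *tile, u64 fault_addr,
 *				    bool atomic)
 *	{
 *		return xe_svm_handle_pagefault(vm, vma, tile, fault_addr,
 *					       atomic);
 *	}
 */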
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
#endif
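/*
 * Illustrative sketch (not part of the upstream header) of how the interfaces
 * above compose over a VM's lifetime. The surrounding create/teardown code and
 * the ordering of xe_svm_close() before xe_svm_fini() are assumptions for
 * illustration; error handling is abbreviated.
 *
 *	err = xe_svm_init(vm);
 *	if (err)
 *		goto err_out;
 *	...
 *	xe_svm_close(vm);	// assumed: quiesce SVM before final teardown
 *	xe_svm_fini(vm);	// release the VM's SVM state
 */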

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
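/*
 * Illustrative sketch (not part of the upstream header): taking the notifier
 * lock via the helpers above before inspecting range state. How @vm and
 * @range are obtained, and that @range belongs to @vm, are assumptions for
 * illustration.
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range) &&
 *	    xe_svm_range_pages_valid(range)) {
 *		// range pages and DMA mapping are stable while the
 *		// notifier lock is held
 *	}
 *	xe_svm_notifier_unlock(vm);
 */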

#endif