Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2025 Intel Corporation
4 */
5
6#include <linux/debugfs.h>
7
8#include "xe_bo.h"
9#include "xe_device.h"
10#include "xe_configfs.h"
11#include "xe_psmi.h"
12
13/*
14 * PSMI capture support
15 *
16 * Requirement for PSMI capture is to have a physically contiguous buffer. The
17 * PSMI tool owns doing all necessary configuration (MMIO register writes are
18 * done from user-space). However, KMD needs to provide the PSMI tool with the
19 * required physical address of the base of PSMI buffer in case of VRAM.
20 *
21 * VRAM backed PSMI buffer:
22 * Buffer is allocated as GEM object and with XE_BO_CREATE_PINNED_BIT flag which
23 * creates a contiguous allocation. The physical address is returned from
24 * psmi_debugfs_capture_addr_show(). PSMI tool can mmap the buffer via the
25 * PCIBAR through sysfs.
26 *
27 * SYSTEM memory backed PSMI buffer:
28 * Interface here does not support allocating from SYSTEM memory region. The
29 * PSMI tool needs to allocate memory themselves using hugetlbfs. In order to
30 * get the physical address, user-space can query /proc/[pid]/pagemap. As an
31 * alternative, CMA debugfs could also be used to allocate reserved CMA memory.
32 */
33
34static bool psmi_enabled(struct xe_device *xe)
35{
36 return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
37}
38
/*
 * Unpin and drop the reference on a PSMI capture buffer object. The BO was
 * pinned at allocation time, so it must be unpinned (under the BO lock)
 * before the final reference is put.
 */
static void psmi_free_object(struct xe_bo *bo)
{
	xe_bo_lock(bo, NULL);
	xe_bo_unpin(bo);
	xe_bo_unlock(bo);
	xe_bo_put(bo);
}
46
47/*
48 * Free PSMI capture buffer objects.
49 */
50static void psmi_cleanup(struct xe_device *xe)
51{
52 unsigned long id, region_mask = xe->psmi.region_mask;
53 struct xe_bo *bo;
54
55 for_each_set_bit(id, ®ion_mask,
56 ARRAY_SIZE(xe->psmi.capture_obj)) {
57 /* smem should never be set */
58 xe_assert(xe, id);
59
60 bo = xe->psmi.capture_obj[id];
61 if (bo) {
62 psmi_free_object(bo);
63 xe->psmi.capture_obj[id] = NULL;
64 }
65 }
66}
67
68static struct xe_bo *psmi_alloc_object(struct xe_device *xe,
69 unsigned int id, size_t bo_size)
70{
71 struct xe_tile *tile;
72
73 if (!id || !bo_size)
74 return NULL;
75
76 tile = &xe->tiles[id - 1];
77
78 /* VRAM: Allocate GEM object for the capture buffer */
79 return xe_bo_create_pin_range_novm(xe, tile, bo_size, 0, ~0ull,
80 ttm_bo_type_kernel,
81 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
82 XE_BO_FLAG_PINNED |
83 XE_BO_FLAG_PINNED_LATE_RESTORE |
84 XE_BO_FLAG_NEEDS_CPU_ACCESS);
85}
86
87/*
88 * Allocate PSMI capture buffer objects (via debugfs set function), based on
89 * which regions the user has selected in region_mask. @size: size in bytes
90 * (should be power of 2)
91 *
92 * Always release/free the current buffer objects before attempting to allocate
93 * new ones. Size == 0 will free all current buffers.
94 *
95 * Note, we don't write any registers as the capture tool is already configuring
96 * all PSMI registers itself via mmio space.
97 */
98static int psmi_resize_object(struct xe_device *xe, size_t size)
99{
100 unsigned long id, region_mask = xe->psmi.region_mask;
101 struct xe_bo *bo = NULL;
102 int err = 0;
103
104 /* if resizing, free currently allocated buffers first */
105 psmi_cleanup(xe);
106
107 /* can set size to 0, in which case, now done */
108 if (!size)
109 return 0;
110
111 for_each_set_bit(id, ®ion_mask,
112 ARRAY_SIZE(xe->psmi.capture_obj)) {
113 /* smem should never be set */
114 xe_assert(xe, id);
115
116 bo = psmi_alloc_object(xe, id, size);
117 if (IS_ERR(bo)) {
118 err = PTR_ERR(bo);
119 break;
120 }
121 xe->psmi.capture_obj[id] = bo;
122
123 drm_info(&xe->drm,
124 "PSMI capture size requested: %zu bytes, allocated: %lu:%zu\n",
125 size, id, bo ? xe_bo_size(bo) : 0);
126 }
127
128 /* on error, reverse what was allocated */
129 if (err)
130 psmi_cleanup(xe);
131
132 return err;
133}
134
135/*
136 * Returns an address for the capture tool to use to find start of capture
137 * buffer. Capture tool requires the capability to have a buffer allocated per
138 * each tile (VRAM region), thus we return an address for each region.
139 */
140static int psmi_debugfs_capture_addr_show(struct seq_file *m, void *data)
141{
142 struct xe_device *xe = m->private;
143 unsigned long id, region_mask;
144 struct xe_bo *bo;
145 u64 val;
146
147 region_mask = xe->psmi.region_mask;
148 for_each_set_bit(id, ®ion_mask,
149 ARRAY_SIZE(xe->psmi.capture_obj)) {
150 /* smem should never be set */
151 xe_assert(xe, id);
152
153 /* VRAM region */
154 bo = xe->psmi.capture_obj[id];
155 if (!bo)
156 continue;
157
158 /* pinned, so don't need bo_lock */
159 val = __xe_bo_addr(bo, 0, PAGE_SIZE);
160 seq_printf(m, "%ld: 0x%llx\n", id, val);
161 }
162
163 return 0;
164}
165
166/*
167 * Return capture buffer size, using the size from first allocated object that
168 * is found. This works because all objects must be of the same size.
169 */
170static int psmi_debugfs_capture_size_get(void *data, u64 *val)
171{
172 unsigned long id, region_mask;
173 struct xe_device *xe = data;
174 struct xe_bo *bo;
175
176 region_mask = xe->psmi.region_mask;
177 for_each_set_bit(id, ®ion_mask,
178 ARRAY_SIZE(xe->psmi.capture_obj)) {
179 /* smem should never be set */
180 xe_assert(xe, id);
181
182 bo = xe->psmi.capture_obj[id];
183 if (bo) {
184 *val = xe_bo_size(bo);
185 return 0;
186 }
187 }
188
189 /* no capture objects are allocated */
190 *val = 0;
191
192 return 0;
193}
194
195/*
196 * Set size of PSMI capture buffer. This triggers the allocation of capture
197 * buffer in each memory region as specified with prior write to
198 * psmi_capture_region_mask.
199 */
200static int psmi_debugfs_capture_size_set(void *data, u64 val)
201{
202 struct xe_device *xe = data;
203
204 /* user must have specified at least one region */
205 if (!xe->psmi.region_mask)
206 return -EINVAL;
207
208 return psmi_resize_object(xe, val);
209}
210
211static int psmi_debugfs_capture_region_mask_get(void *data, u64 *val)
212{
213 struct xe_device *xe = data;
214
215 *val = xe->psmi.region_mask;
216
217 return 0;
218}
219
220/*
221 * Select VRAM regions for multi-tile devices, only allowed when buffer is not
222 * currently allocated.
223 */
224static int psmi_debugfs_capture_region_mask_set(void *data, u64 region_mask)
225{
226 struct xe_device *xe = data;
227 u64 size = 0;
228
229 /* SMEM is not supported (see comments at top of file) */
230 if (region_mask & 0x1)
231 return -EOPNOTSUPP;
232
233 /* input bitmask should contain only valid TTM regions */
234 if (!region_mask || region_mask & ~xe->info.mem_region_mask)
235 return -EINVAL;
236
237 /* only allow setting mask if buffer is not yet allocated */
238 psmi_debugfs_capture_size_get(xe, &size);
239 if (size)
240 return -EBUSY;
241
242 xe->psmi.region_mask = region_mask;
243
244 return 0;
245}
246
/* Read-only seq_file attribute: per-region capture buffer base addresses. */
DEFINE_SHOW_ATTRIBUTE(psmi_debugfs_capture_addr);

/* Read/write attribute: bitmask of VRAM regions to capture (hex). */
DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_region_mask_fops,
			 psmi_debugfs_capture_region_mask_get,
			 psmi_debugfs_capture_region_mask_set,
			 "0x%llx\n");

/* Read/write attribute: capture buffer size in bytes (decimal). */
DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_size_fops,
			 psmi_debugfs_capture_size_get,
			 psmi_debugfs_capture_size_set,
			 "%lld\n");
258
259void xe_psmi_debugfs_register(struct xe_device *xe)
260{
261 struct drm_minor *minor;
262
263 if (!psmi_enabled(xe))
264 return;
265
266 minor = xe->drm.primary;
267 if (!minor->debugfs_root)
268 return;
269
270 debugfs_create_file("psmi_capture_addr",
271 0400, minor->debugfs_root, xe,
272 &psmi_debugfs_capture_addr_fops);
273
274 debugfs_create_file("psmi_capture_region_mask",
275 0600, minor->debugfs_root, xe,
276 &psmi_debugfs_capture_region_mask_fops);
277
278 debugfs_create_file("psmi_capture_size",
279 0600, minor->debugfs_root, xe,
280 &psmi_debugfs_capture_size_fops);
281}
282
/* devm cleanup action: free any remaining capture buffers on device teardown. */
static void psmi_fini(void *arg)
{
	psmi_cleanup(arg);
}
287
288int xe_psmi_init(struct xe_device *xe)
289{
290 if (!psmi_enabled(xe))
291 return 0;
292
293 return devm_add_action(xe->drm.dev, psmi_fini, xe);
294}